From 4c64aa643ae5e19ac8ec3febe896c16f954224a2 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 13 Feb 2023 22:37:41 +0000
Subject: [PATCH] Bump github.com/influxdata/telegraf from 1.16.3 to 1.25.2

Bumps [github.com/influxdata/telegraf](https://github.com/influxdata/telegraf) from 1.16.3 to 1.25.2.
- [Release notes](https://github.com/influxdata/telegraf/releases)
- [Changelog](https://github.com/influxdata/telegraf/blob/master/CHANGELOG.md)
- [Commits](https://github.com/influxdata/telegraf/compare/v1.16.3...v1.25.2)

---
updated-dependencies:
- dependency-name: github.com/influxdata/telegraf
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 201 +- go.sum | 2917 +- .../.release-please-manifest-individual.json | 13 + .../.release-please-manifest-submodules.json | 115 + .../go/.release-please-manifest.json | 3 + vendor/cloud.google.com/go/CHANGES.md | 66 + vendor/cloud.google.com/go/CONTRIBUTING.md | 2 +- vendor/cloud.google.com/go/README.md | 9 +- .../go/compute/internal/version.go | 18 + .../go/compute/metadata/CHANGES.md | 12 + .../go/compute/metadata}/LICENSE | 1 + .../go/compute/metadata/README.md | 27 + .../go/compute/metadata/metadata.go | 4 +- .../metadata/tidyfix.go} | 9 +- vendor/cloud.google.com/go/doc.go | 195 +- vendor/cloud.google.com/go/iam/CHANGES.md | 56 + .../go/iam/apiv1/iampb}/iam_policy.pb.go | 101 +- .../go/iam/apiv1/iampb}/options.pb.go | 58 +- .../go/iam/apiv1/iampb/policy.pb.go | 1169 + .../go/internal/.repo-metadata-full.json | 802 +- .../cloud.google.com/go/internal/annotate.go | 3 +- .../go/internal/pubsub/message.go | 221 + .../go/internal/pubsub/publish.go | 57 + .../go/internal/testutil/context.go | 26 +- .../go/longrunning/CHANGES.md | 12 + .../go/longrunning}/LICENSE | 17 +- .../cloud.google.com/go/longrunning/README.md | 26 + .../go/longrunning/autogen/doc.go | 119 +- .../longrunning/autogen/gapic_metadata.json | 30 + .../autogen/longrunningpb}/operations.pb.go | 19 +- .../longrunning/autogen/operations_client.go | 461 +- .../tidyfix.go} | 5 +- vendor/cloud.google.com/go/migration.md | 50 + vendor/cloud.google.com/go/pubsub/CHANGES.md | 354 + vendor/cloud.google.com/go/pubsub/README.md | 8 +- .../cloud.google.com/go/pubsub/apiv1/doc.go | 82 +- .../go/pubsub/apiv1/gapic_metadata.json | 236 + .../cloud.google.com/go/pubsub/apiv1/info.go | 33 + .../go/pubsub/apiv1/path_funcs.go | 24 +- .../go/pubsub/apiv1/publisher_client.go | 453 +- .../go/pubsub/apiv1/pubsubpb}/pubsub.pb.go | 1035 +- .../go/pubsub/apiv1/pubsubpb}/schema.pb.go | 5 +- .../go/pubsub/apiv1/schema_client.go | 491 + .../go/pubsub/apiv1/subscriber_client.go | 674 +- .../go/pubsub/apiv1/version.go | 23 + vendor/cloud.google.com/go/pubsub/debug.go | 1 + vendor/cloud.google.com/go/pubsub/doc.go | 117 +- .../go/pubsub/flow_controller.go | 190 +- .../internal/scheduler/publish_scheduler.go | 207 + .../internal/scheduler/receive_scheduler.go | 141 + .../go/pubsub/internal/version.go | 18 + vendor/cloud.google.com/go/pubsub/iterator.go | 533 +- vendor/cloud.google.com/go/pubsub/message.go | 216 +- vendor/cloud.google.com/go/pubsub/nodebug.go | 1 + .../cloud.google.com/go/pubsub/pstest/fake.go | 637 +- vendor/cloud.google.com/go/pubsub/pubsub.go | 115 +- .../cloud.google.com/go/pubsub/pullstream.go | 24 +- vendor/cloud.google.com/go/pubsub/schema.go | 263 + vendor/cloud.google.com/go/pubsub/service.go | 62 +- vendor/cloud.google.com/go/pubsub/snapshot.go | 46 +-
.../go/pubsub/subscription.go | 753 +- vendor/cloud.google.com/go/pubsub/topic.go | 372 +- vendor/cloud.google.com/go/pubsub/trace.go | 59 +- .../go/release-please-config-individual.json | 45 + ...elease-please-config-yoshi-submodules.json | 347 + .../go/release-please-config.json | 11 + vendor/cloud.google.com/go/storage/CHANGES.md | 247 + vendor/cloud.google.com/go/storage/README.md | 10 +- vendor/cloud.google.com/go/storage/acl.go | 193 +- vendor/cloud.google.com/go/storage/bucket.go | 1325 +- vendor/cloud.google.com/go/storage/client.go | 333 + vendor/cloud.google.com/go/storage/copy.go | 151 +- vendor/cloud.google.com/go/storage/doc.go | 368 +- .../go/storage/emulator_test.sh | 92 + vendor/cloud.google.com/go/storage/go110.go | 48 - .../go/storage/grpc_client.go | 1751 + vendor/cloud.google.com/go/storage/hmac.go | 169 +- .../go/storage/http_client.go | 1351 + vendor/cloud.google.com/go/storage/iam.go | 51 +- .../go/storage/internal/apiv2/doc.go | 174 + .../internal/apiv2/gapic_metadata.json | 168 + .../go/storage/internal/apiv2/metadata.go | 26 + .../storage/internal/apiv2/storage_client.go | 1605 + .../internal/apiv2/stubs/storage.pb.go | 10652 ++ .../apiv2/version.go} | 13 +- .../go/storage/internal/version.go | 18 + vendor/cloud.google.com/go/storage/invoke.go | 129 +- .../cloud.google.com/go/storage/not_go110.go | 42 - .../go/storage/notifications.go | 72 +- .../go/storage/post_policy_v4.go | 119 +- vendor/cloud.google.com/go/storage/reader.go | 263 +- vendor/cloud.google.com/go/storage/storage.go | 1039 +- vendor/cloud.google.com/go/storage/writer.go | 217 +- .../Azure/go-autorest/autorest/adal/README.md | 8 +- .../Azure/go-autorest/autorest/adal/token.go | 90 +- .../go-autorest/autorest/azure/auth/auth.go | 4 + .../autorest/azure/auth/go_mod_tidy_hack.go | 1 + .../autorest/azure/cli/go_mod_tidy_hack.go | 1 + .../go-autorest/autorest/azure/cli/token.go | 104 +- .../autorest/azure/environments.go | 38 +- .../Azure/go-autorest/autorest/azure/rp.go | 2 +- .../github.com/Microsoft/go-winio/README.md | 29 +- vendor/github.com/Microsoft/go-winio/file.go | 6 + .../github.com/Microsoft/go-winio/hvsock.go | 17 +- .../Microsoft/go-winio/pkg/guid/guid.go | 9 - .../go-winio/pkg/guid/guid_nonwindows.go | 15 + .../go-winio/pkg/guid/guid_windows.go | 10 + .../Microsoft/go-winio/privilege.go | 5 +- .../github.com/PuerkitoBio/purell/.gitignore | 5 - .../github.com/PuerkitoBio/purell/.travis.yml | 12 - vendor/github.com/PuerkitoBio/purell/LICENSE | 12 - .../github.com/PuerkitoBio/purell/README.md | 188 - .../github.com/PuerkitoBio/purell/purell.go | 379 - .../github.com/PuerkitoBio/urlesc/.travis.yml | 15 - vendor/github.com/PuerkitoBio/urlesc/LICENSE | 27 - .../github.com/PuerkitoBio/urlesc/README.md | 16 - .../github.com/PuerkitoBio/urlesc/urlesc.go | 180 - vendor/github.com/Shopify/sarama/.gitignore | 8 +- .../github.com/Shopify/sarama/.golangci.yml | 24 +- vendor/github.com/Shopify/sarama/CHANGELOG.md | 280 +- .../Shopify/sarama/Dockerfile.kafka | 27 + vendor/github.com/Shopify/sarama/Makefile | 20 +- vendor/github.com/Shopify/sarama/README.md | 6 +- vendor/github.com/Shopify/sarama/admin.go | 209 +- .../Shopify/sarama/async_producer.go | 382 +- .../Shopify/sarama/balance_strategy.go | 87 +- vendor/github.com/Shopify/sarama/broker.go | 632 +- vendor/github.com/Shopify/sarama/client.go | 251 +- vendor/github.com/Shopify/sarama/compress.go | 4 +- vendor/github.com/Shopify/sarama/config.go | 103 +- vendor/github.com/Shopify/sarama/consumer.go | 278 +- .../Shopify/sarama/consumer_group.go | 309 
+- .../Shopify/sarama/consumer_group_members.go | 49 +- .../sarama/create_partitions_response.go | 4 + .../Shopify/sarama/create_topics_response.go | 4 + .../github.com/Shopify/sarama/decompress.go | 4 +- .../Shopify/sarama/delete_offsets_response.go | 4 +- .../Shopify/sarama/describe_groups_request.go | 29 +- .../sarama/describe_groups_response.go | 189 +- vendor/github.com/Shopify/sarama/dev.yml | 2 +- .../Shopify/sarama/docker-compose.yml | 218 +- .../Shopify/sarama/encoder_decoder.go | 18 +- .../Shopify/sarama/end_txn_response.go | 2 +- .../github.com/Shopify/sarama/entrypoint.sh | 26 + vendor/github.com/Shopify/sarama/errors.go | 294 +- .../Shopify/sarama/fetch_request.go | 60 +- .../Shopify/sarama/fetch_response.go | 72 +- .../Shopify/sarama/heartbeat_request.go | 26 +- .../Shopify/sarama/heartbeat_response.go | 20 +- .../sarama/init_producer_id_request.go | 67 +- .../sarama/init_producer_id_response.go | 30 +- .../Shopify/sarama/join_group_request.go | 16 +- .../Shopify/sarama/join_group_response.go | 40 +- .../Shopify/sarama/leave_group_request.go | 58 +- .../Shopify/sarama/leave_group_response.go | 60 +- vendor/github.com/Shopify/sarama/message.go | 22 + .../github.com/Shopify/sarama/message_set.go | 11 +- vendor/github.com/Shopify/sarama/metrics.go | 77 + .../github.com/Shopify/sarama/mockbroker.go | 6 +- .../Shopify/sarama/mockresponses.go | 168 +- .../Shopify/sarama/offset_commit_request.go | 53 +- .../Shopify/sarama/offset_commit_response.go | 2 + .../Shopify/sarama/offset_manager.go | 87 +- .../Shopify/sarama/offset_response.go | 1 - .../Shopify/sarama/packet_decoder.go | 5 + .../github.com/Shopify/sarama/produce_set.go | 13 +- .../github.com/Shopify/sarama/real_decoder.go | 43 +- .../github.com/Shopify/sarama/record_batch.go | 7 +- vendor/github.com/Shopify/sarama/records.go | 17 +- vendor/github.com/Shopify/sarama/request.go | 4 +- .../Shopify/sarama/response_header.go | 5 - vendor/github.com/Shopify/sarama/sarama.go | 11 +- .../sarama/sasl_authenticate_request.go | 12 +- .../sarama/sasl_authenticate_response.go | 35 +- .../Shopify/sarama/sync_group_request.go | 132 +- .../Shopify/sarama/sync_group_response.go | 27 +- .../Shopify/sarama/sync_producer.go | 49 + .../Shopify/sarama/transaction_manager.go | 887 + vendor/github.com/Shopify/sarama/utils.go | 49 +- vendor/github.com/Shopify/sarama/version.go | 19 +- vendor/github.com/Shopify/sarama/zstd.go | 48 +- .../armon/go-metrics/prometheus/prometheus.go | 21 +- vendor/github.com/cespare/xxhash/v2/README.md | 31 +- .../github.com/cespare/xxhash/v2/testall.sh | 10 + vendor/github.com/cespare/xxhash/v2/xxhash.go | 47 +- .../cespare/xxhash/v2/xxhash_amd64.s | 336 +- .../cespare/xxhash/v2/xxhash_arm64.s | 183 + .../v2/{xxhash_amd64.go => xxhash_asm.go} | 2 + .../cespare/xxhash/v2/xxhash_other.go | 22 +- .../cespare/xxhash/v2/xxhash_safe.go | 1 + .../cespare/xxhash/v2/xxhash_unsafe.go | 3 +- .../github.com/containerd/containerd/NOTICE | 16 - .../containerd/containerd/errdefs/errors.go | 93 - .../containerd/containerd/errdefs/grpc.go | 147 - .../containerd/containerd/log/context.go | 68 - .../containerd/platforms/compare.go | 193 - .../containerd/platforms/cpuinfo.go | 131 - .../containerd/platforms/database.go | 114 - .../containerd/platforms/defaults.go | 43 - .../containerd/platforms/defaults_unix.go | 24 - .../containerd/platforms/defaults_windows.go | 81 - .../containerd/platforms/platforms.go | 278 - .../coreos/go-semver/semver/semver.go | 2 +- .../distribution/reference/normalize.go | 29 + 
.../distribution/reference/reference.go | 2 +- .../registry/api/errcode/errors.go | 267 - .../registry/api/errcode/handler.go | 40 - .../registry/api/errcode/register.go | 138 - .../docker/docker/api/common_unix.go | 1 + .../github.com/docker/docker/api/swagger.yaml | 13 +- .../api/types/container/hostconfig_unix.go | 1 + .../docker/docker/client/client_unix.go | 1 + .../docker/docker/client/container_create.go | 19 +- .../daemon/logger/loggerutils/file_unix.go | 1 + .../daemon/logger/loggerutils/follow.go | 211 + .../daemon/logger/loggerutils/logfile.go | 174 - .../docker/daemon/logger/plugin_unix.go | 1 + .../daemon/logger/plugin_unsupported.go | 1 + .../docker/docker/errdefs/http_helpers.go | 144 +- .../docker/docker/pkg/ioutils/temp_unix.go | 1 + .../docker/pkg/plugins/discovery_unix.go | 1 + .../docker/docker/pkg/plugins/plugins_unix.go | 1 + vendor/github.com/docker/go-units/size.go | 70 +- vendor/github.com/go-kit/log/README.md | 5 + vendor/github.com/go-kit/log/json_logger.go | 4 +- vendor/github.com/go-kit/log/level/doc.go | 11 + vendor/github.com/go-kit/log/level/level.go | 53 +- vendor/github.com/go-kit/log/staticcheck.conf | 1 + vendor/github.com/go-logr/logr/README.md | 4 + vendor/github.com/go-logr/logr/funcr/funcr.go | 36 +- vendor/github.com/go-logr/logr/logr.go | 9 + .../jsonreference/internal/normalize_url.go | 63 + .../go-openapi/jsonreference/reference.go | 6 +- .../github.com/go-openapi/swag/.gitattributes | 2 + .../github.com/go-openapi/swag/.golangci.yml | 13 + vendor/github.com/go-openapi/swag/.travis.yml | 37 - vendor/github.com/go-openapi/swag/doc.go | 15 +- vendor/github.com/go-openapi/swag/file.go | 33 + vendor/github.com/go-openapi/swag/loading.go | 11 +- .../github.com/go-openapi/swag/post_go18.go | 1 + .../github.com/go-openapi/swag/post_go19.go | 1 + vendor/github.com/go-openapi/swag/pre_go18.go | 1 + vendor/github.com/go-openapi/swag/pre_go19.go | 1 + vendor/github.com/go-openapi/swag/util.go | 17 +- vendor/github.com/go-openapi/swag/yaml.go | 248 +- .../go-redis/redis/v8/.golangci.yml | 23 - .../github.com/go-redis/redis/v8/CHANGELOG.md | 28 + vendor/github.com/go-redis/redis/v8/README.md | 39 +- .../github.com/go-redis/redis/v8/command.go | 14 +- .../github.com/go-redis/redis/v8/commands.go | 82 +- vendor/github.com/go-redis/redis/v8/error.go | 2 +- .../go-redis/redis/v8/internal/proto/scan.go | 8 + .../redis/v8/internal/proto/writer.go | 2 + .../github.com/go-redis/redis/v8/package.json | 2 +- .../github.com/go-redis/redis/v8/pipeline.go | 10 + vendor/github.com/go-redis/redis/v8/ring.go | 2 +- .../github.com/go-redis/redis/v8/universal.go | 2 + .../github.com/go-redis/redis/v8/version.go | 2 +- vendor/github.com/go-stack/stack/.travis.yml | 15 - vendor/github.com/go-stack/stack/LICENSE.md | 21 - vendor/github.com/go-stack/stack/README.md | 38 - vendor/github.com/go-stack/stack/stack.go | 400 - vendor/github.com/golang-jwt/jwt/v4/README.md | 30 +- .../github.com/golang-jwt/jwt/v4/SECURITY.md | 19 + vendor/github.com/golang-jwt/jwt/v4/claims.go | 12 +- vendor/github.com/golang-jwt/jwt/v4/errors.go | 48 + .../golang-jwt/jwt/v4/map_claims.go | 3 + .../golang-jwt/jwt/v4/parser_option.go | 4 +- .../github.com/golang-jwt/jwt/v4/rsa_pss.go | 1 + vendor/github.com/golang-jwt/jwt/v4/token.go | 26 +- vendor/github.com/golang-jwt/jwt/v4/types.go | 24 +- .../golang/protobuf/jsonpb/decode.go | 524 + .../golang/protobuf/jsonpb/encode.go | 559 + .../github.com/golang/protobuf/jsonpb/json.go | 69 + .../github.com/google/go-cmp/cmp/compare.go | 66 +- 
.../google/go-cmp/cmp/internal/diff/diff.go | 44 +- .../google/go-cmp/cmp/internal/value/zero.go | 48 - .../github.com/google/go-cmp/cmp/options.go | 10 +- vendor/github.com/google/go-cmp/cmp/path.go | 20 +- .../google/go-cmp/cmp/report_compare.go | 13 +- .../google/go-cmp/cmp/report_reflect.go | 21 +- .../google/go-cmp/cmp/report_slices.go | 25 +- .../google/go-cmp/cmp/report_text.go | 1 + vendor/github.com/google/gofuzz/.travis.yml | 11 +- .../github.com/google/gofuzz/CONTRIBUTING.md | 2 +- vendor/github.com/google/gofuzz/README.md | 18 + .../google/gofuzz/bytesource/bytesource.go | 81 + vendor/github.com/google/gofuzz/fuzz.go | 137 +- vendor/github.com/google/uuid/null.go | 118 + vendor/github.com/google/uuid/uuid.go | 45 +- vendor/github.com/google/uuid/version4.go | 27 +- .../enterprise-certificate-proxy/LICENSE | 202 + .../client/client.go | 182 + .../client/util/util.go | 71 + .../gax-go/v2/.release-please-manifest.json | 3 + .../googleapis/gax-go/v2/CHANGES.md | 47 + .../googleapis/gax-go/v2/apierror/apierror.go | 76 +- .../internal/proto/custom_error.pb.go | 256 + .../internal/proto/custom_error.proto | 50 + .../v2/apierror/internal/proto/error.pb.go | 110 +- .../googleapis/gax-go/v2/call_option.go | 55 + .../googleapis/gax-go/v2/content_type.go | 112 + vendor/github.com/googleapis/gax-go/v2/gax.go | 4 +- .../googleapis/gax-go/v2/internal/version.go | 33 + .../googleapis/gax-go/v2/proto_json_stream.go | 126 + .../gax-go/v2/release-please-config.json | 10 + .../gophercloud/gophercloud/.zuul.yaml | 533 - .../gophercloud/gophercloud/CHANGELOG.md | 59 + .../gophercloud/gophercloud/LICENSE | 1 + .../gophercloud/gophercloud/README.md | 4 +- .../gophercloud/gophercloud/auth_options.go | 18 +- .../github.com/gophercloud/gophercloud/doc.go | 7 +- .../gophercloud/gophercloud/errors.go | 28 + .../openstack/compute/v2/servers/requests.go | 20 +- .../openstack/compute/v2/servers/results.go | 3 +- .../openstack/identity/v2/tokens/requests.go | 2 +- .../identity/v3/extensions/ec2tokens/doc.go | 1 - .../identity/v3/extensions/oauth1/doc.go | 1 - .../openstack/identity/v3/tokens/doc.go | 1 - .../openstack/identity/v3/tokens/requests.go | 2 +- .../gophercloud/gophercloud/params.go | 46 +- .../gophercloud/provider_client.go | 25 +- vendor/github.com/gorilla/websocket/README.md | 39 +- vendor/github.com/gorilla/websocket/client.go | 77 +- .../gorilla/websocket/client_clone.go | 16 - .../gorilla/websocket/client_clone_legacy.go | 38 - vendor/github.com/gorilla/websocket/conn.go | 63 +- .../gorilla/websocket/conn_write.go | 15 - .../gorilla/websocket/conn_write_legacy.go | 18 - vendor/github.com/gorilla/websocket/mask.go | 1 + .../github.com/gorilla/websocket/mask_safe.go | 1 + vendor/github.com/gorilla/websocket/proxy.go | 2 +- vendor/github.com/gorilla/websocket/server.go | 8 +- .../gorilla/websocket/tls_handshake.go | 21 + .../gorilla/websocket/tls_handshake_116.go | 21 + vendor/github.com/gorilla/websocket/trace.go | 19 - .../github.com/gorilla/websocket/trace_17.go | 12 - .../github.com/hashicorp/consul/api/LICENSE | 2 + vendor/github.com/hashicorp/consul/api/acl.go | 40 + .../github.com/hashicorp/consul/api/agent.go | 8 +- vendor/github.com/hashicorp/consul/api/api.go | 62 +- .../hashicorp/consul/api/catalog.go | 1 + .../hashicorp/consul/api/config_entry.go | 59 +- .../consul/api/config_entry_discoverychain.go | 28 +- .../consul/api/config_entry_exports.go | 12 +- .../consul/api/config_entry_gateways.go | 20 + .../consul/api/config_entry_intentions.go | 1 + 
.../hashicorp/consul/api/config_entry_mesh.go | 29 +- .../hashicorp/consul/api/connect_intention.go | 5 + .../github.com/hashicorp/consul/api/debug.go | 9 +- .../hashicorp/consul/api/discovery_chain.go | 55 +- .../github.com/hashicorp/consul/api/health.go | 9 +- .../hashicorp/consul/api/namespace.go | 48 +- .../hashicorp/consul/api/peering.go | 282 + .../hashicorp/consul/api/prepared_query.go | 21 +- vendor/github.com/hashicorp/consul/api/txn.go | 1 + .../github.com/hashicorp/errwrap/errwrap.go | 9 + .../github.com/hashicorp/go-hclog/README.md | 12 +- .../hashicorp/go-hclog/colorize_unix.go | 4 +- .../hashicorp/go-hclog/colorize_windows.go | 9 +- .../github.com/hashicorp/go-hclog/global.go | 6 +- .../hashicorp/go-hclog/interceptlogger.go | 7 +- .../hashicorp/go-hclog/intlogger.go | 72 +- .../github.com/hashicorp/go-hclog/logger.go | 36 +- .../github.com/hashicorp/go-hclog/stdlog.go | 21 +- .../hashicorp/go-multierror/.travis.yml | 12 - .../hashicorp/go-multierror/README.md | 29 +- .../hashicorp/go-multierror/append.go | 2 + .../hashicorp/go-multierror/multierror.go | 15 +- vendor/github.com/hashicorp/go-uuid/LICENSE | 2 + .../hashicorp/memberlist/.golangci.yml | 80 + .../hashicorp/memberlist/awareness.go | 12 +- .../github.com/hashicorp/memberlist/config.go | 11 + .../hashicorp/memberlist/memberlist.go | 29 +- vendor/github.com/hashicorp/memberlist/net.go | 36 +- .../hashicorp/memberlist/net_transport.go | 17 +- .../github.com/hashicorp/memberlist/state.go | 59 +- .../github.com/hashicorp/memberlist/util.go | 21 +- .../hashicorp/serf/coordinate/client.go | 2 +- .../hashicorp/serf/coordinate/config.go | 7 + vendor/github.com/huandu/xstrings/common.go | 8 +- vendor/github.com/huandu/xstrings/convert.go | 15 +- vendor/github.com/huandu/xstrings/format.go | 13 +- .../github.com/huandu/xstrings/manipulate.go | 5 +- .../huandu/xstrings/stringbuilder.go | 7 + .../huandu/xstrings/stringbuilder_go110.go | 9 + .../github.com/huandu/xstrings/translate.go | 9 +- .../influxdata/telegraf/.gitattributes | 1 + .../github.com/influxdata/telegraf/.gitignore | 10 + .../influxdata/telegraf/.golangci.yml | 219 + .../influxdata/telegraf/.markdownlint.yml | 6 + .../influxdata/telegraf/.markdownlintignore | 1 + .../influxdata/telegraf/CHANGELOG.md | 2236 +- .../influxdata/telegraf/CODE_OF_CONDUCT.md | 77 + .../influxdata/telegraf/CONTRIBUTING.md | 103 +- .../influxdata/telegraf/EXTERNAL_PLUGINS.md | 40 +- .../github.com/influxdata/telegraf/Makefile | 489 +- .../github.com/influxdata/telegraf/README.md | 496 +- .../influxdata/telegraf/SECURITY.md | 6 + .../influxdata/telegraf/accumulator.go | 2 +- .../influxdata/telegraf/appveyor.yml | 35 - .../influxdata/telegraf/build_version.txt | 2 +- .../influxdata/telegraf/docker-compose.yml | 105 - .../github.com/influxdata/telegraf/info.plist | 16 + .../github.com/influxdata/telegraf/metric.go | 16 +- .../github.com/influxdata/telegraf/parser.go | 39 + .../github.com/influxdata/telegraf/plugin.go | 41 +- .../telegraf/plugins/inputs/EXAMPLE_README.md | 77 - .../telegraf/plugins/inputs/deprecations.go | 56 + .../telegraf/plugins/inputs/mock_Plugin.go | 31 - .../influxdata/telegraf/processor.go | 2 +- .../influxdata/telegraf/secretstore.go | 25 + .../jcmturner/gokrb5/v8/client/TGSExchange.go | 7 +- .../jcmturner/gokrb5/v8/client/network.go | 3 +- .../jcmturner/gokrb5/v8/client/session.go | 4 +- .../jcmturner/gokrb5/v8/config/krb5conf.go | 8 +- .../jcmturner/gokrb5/v8/credentials/ccache.go | 4 +- .../jcmturner/gokrb5/v8/keytab/keytab.go | 6 +- 
.../gokrb5/v8/types/Authenticator.go | 4 +- .../github.com/klauspost/compress/.gitignore | 7 + .../klauspost/compress/.goreleaser.yml | 2 +- .../github.com/klauspost/compress/README.md | 164 +- .../klauspost/compress/flate/deflate.go | 222 +- .../klauspost/compress/flate/dict_decoder.go | 24 +- .../klauspost/compress/flate/fast_encoder.go | 61 +- .../compress/flate/huffman_bit_writer.go | 258 +- .../klauspost/compress/flate/huffman_code.go | 121 +- .../klauspost/compress/flate/inflate.go | 222 +- .../klauspost/compress/flate/inflate_gen.go | 729 +- .../klauspost/compress/flate/level1.go | 96 +- .../klauspost/compress/flate/level2.go | 45 +- .../klauspost/compress/flate/level3.go | 80 +- .../klauspost/compress/flate/level4.go | 21 +- .../klauspost/compress/flate/level5.go | 38 +- .../klauspost/compress/flate/level6.go | 40 +- .../klauspost/compress/flate/stateless.go | 33 +- .../klauspost/compress/flate/token.go | 49 +- .../klauspost/compress/fse/compress.go | 31 +- .../klauspost/compress/gzip/gunzip.go | 60 +- .../klauspost/compress/huff0/bitreader.go | 130 +- .../klauspost/compress/huff0/bitwriter.go | 115 - .../klauspost/compress/huff0/bytereader.go | 10 - .../klauspost/compress/huff0/compress.go | 124 +- .../klauspost/compress/huff0/decompress.go | 620 +- .../compress/huff0/decompress_amd64.go | 226 + .../compress/huff0/decompress_amd64.s | 830 + .../compress/huff0/decompress_generic.go | 299 + .../klauspost/compress/huff0/huff0.go | 2 + .../compress/internal/cpuinfo/cpuinfo.go | 34 + .../internal/cpuinfo/cpuinfo_amd64.go | 11 + .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 + .../compress/internal/snapref/encode_other.go | 6 +- .../klauspost/compress/zstd/README.md | 172 +- .../klauspost/compress/zstd/bitreader.go | 11 +- .../klauspost/compress/zstd/bitwriter.go | 76 - .../klauspost/compress/zstd/blockdec.go | 520 +- .../klauspost/compress/zstd/bytebuf.go | 23 +- .../klauspost/compress/zstd/bytereader.go | 6 - .../klauspost/compress/zstd/decodeheader.go | 93 +- .../klauspost/compress/zstd/decoder.go | 690 +- .../compress/zstd/decoder_options.go | 103 +- .../klauspost/compress/zstd/dict.go | 8 +- .../klauspost/compress/zstd/enc_base.go | 26 +- .../klauspost/compress/zstd/enc_best.go | 64 +- .../klauspost/compress/zstd/enc_better.go | 43 +- .../klauspost/compress/zstd/enc_dfast.go | 33 +- .../klauspost/compress/zstd/enc_fast.go | 24 +- .../klauspost/compress/zstd/encoder.go | 107 +- .../compress/zstd/encoder_options.go | 49 +- .../klauspost/compress/zstd/framedec.go | 339 +- .../klauspost/compress/zstd/fse_decoder.go | 128 +- .../compress/zstd/fse_decoder_amd64.go | 65 + .../compress/zstd/fse_decoder_amd64.s | 126 + .../compress/zstd/fse_decoder_generic.go | 72 + .../klauspost/compress/zstd/fse_encoder.go | 23 - .../klauspost/compress/zstd/hash.go | 6 - .../klauspost/compress/zstd/history.go | 67 +- .../compress/zstd/internal/xxhash/README.md | 49 +- .../compress/zstd/internal/xxhash/xxhash.go | 47 +- .../zstd/internal/xxhash/xxhash_amd64.s | 337 +- .../zstd/internal/xxhash/xxhash_arm64.s | 165 +- .../zstd/internal/xxhash/xxhash_asm.go | 5 +- .../zstd/internal/xxhash/xxhash_other.go | 23 +- .../klauspost/compress/zstd/seqdec.go | 325 +- .../klauspost/compress/zstd/seqdec_amd64.go | 379 + .../klauspost/compress/zstd/seqdec_amd64.s | 4079 + .../klauspost/compress/zstd/seqdec_generic.go | 237 + .../github.com/klauspost/compress/zstd/zip.go | 69 +- .../klauspost/compress/zstd/zstd.go | 51 +- .../mailru/easyjson/jlexer/lexer.go | 14 + .../github.com/mattn/go-colorable/.travis.yml | 15 - 
.../github.com/mattn/go-colorable/README.md | 2 +- .../mattn/go-colorable/colorable_appengine.go | 1 + .../mattn/go-colorable/colorable_others.go | 4 +- .../mattn/go-colorable/colorable_windows.go | 14 +- .../mattn/go-colorable/noncolorable.go | 15 +- vendor/github.com/miekg/dns/client.go | 6 +- vendor/github.com/miekg/dns/defaults.go | 5 + vendor/github.com/miekg/dns/dnssec.go | 48 +- vendor/github.com/miekg/dns/edns.go | 16 +- vendor/github.com/miekg/dns/hash.go | 31 + vendor/github.com/miekg/dns/msg.go | 5 + vendor/github.com/miekg/dns/msg_helpers.go | 20 +- vendor/github.com/miekg/dns/server.go | 4 +- vendor/github.com/miekg/dns/sig0.go | 53 +- vendor/github.com/miekg/dns/svcb.go | 330 +- vendor/github.com/miekg/dns/tsig.go | 26 +- vendor/github.com/miekg/dns/version.go | 2 +- vendor/github.com/miekg/dns/xfr.go | 4 +- .../mitchellh/copystructure/.travis.yml | 12 - .../mitchellh/copystructure/README.md | 42 +- .../mitchellh/copystructure/copystructure.go | 93 +- .../mitchellh/mapstructure/CHANGELOG.md | 13 + .../mitchellh/mapstructure/decode_hooks.go | 22 + .../mitchellh/mapstructure/mapstructure.go | 83 +- .../mitchellh/reflectwalk/reflectwalk.go | 18 + .../image-spec/specs-go/v1/annotations.go | 6 + .../image-spec/specs-go/v1/config.go | 11 + .../image-spec/specs-go/v1/mediatype.go | 9 + .../image-spec/specs-go/version.go | 2 +- .../opentracing/opentracing-go/.golangci.yml | 3 + .../opentracing/opentracing-go/.travis.yml | 20 - vendor/github.com/pierrec/lz4/.gitignore | 34 - vendor/github.com/pierrec/lz4/.travis.yml | 24 - vendor/github.com/pierrec/lz4/LICENSE | 28 - vendor/github.com/pierrec/lz4/README.md | 90 - vendor/github.com/pierrec/lz4/block.go | 413 - vendor/github.com/pierrec/lz4/debug.go | 23 - vendor/github.com/pierrec/lz4/debug_stub.go | 7 - vendor/github.com/pierrec/lz4/decode_amd64.go | 8 - vendor/github.com/pierrec/lz4/decode_amd64.s | 375 - vendor/github.com/pierrec/lz4/decode_other.go | 98 - vendor/github.com/pierrec/lz4/errors.go | 30 - .../pierrec/lz4/internal/xxh32/xxh32zero.go | 223 - vendor/github.com/pierrec/lz4/lz4.go | 116 - vendor/github.com/pierrec/lz4/lz4_go1.10.go | 29 - .../github.com/pierrec/lz4/lz4_notgo1.10.go | 29 - vendor/github.com/pierrec/lz4/reader.go | 335 - .../github.com/pierrec/lz4/reader_legacy.go | 207 - .../pierrec/lz4/v4/internal/lz4block/block.go | 33 +- .../lz4/v4/internal/lz4block/blocks.go | 5 +- .../lz4/v4/internal/lz4block/decode_amd64.s | 110 +- .../lz4/v4/internal/lz4block/decode_arm.s | 8 +- .../lz4/v4/internal/lz4block/decode_arm64.s | 110 +- .../lz4/v4/internal/lz4block/decode_asm.go | 1 + .../lz4/v4/internal/lz4block/decode_other.go | 75 +- .../lz4/v4/internal/lz4stream/block.go | 4 +- .../lz4/v4/internal/xxh32/xxh32zero.go | 2 +- vendor/github.com/pierrec/lz4/v4/reader.go | 4 +- vendor/github.com/pierrec/lz4/v4/writer.go | 27 +- vendor/github.com/pierrec/lz4/writer.go | 422 - .../github.com/pierrec/lz4/writer_legacy.go | 182 - .../prometheus/client_golang/api/client.go | 29 +- .../client_golang/api/prometheus/v1/api.go | 115 +- .../client_golang/prometheus/collector.go | 6 +- .../prometheus/collectors/collectors.go | 24 + .../collectors/dbstats_collector.go | 4 +- ...{go_collector.go => go_collector_go116.go} | 26 +- .../collectors/go_collector_latest.go | 160 + .../client_golang/prometheus/counter.go | 13 +- .../client_golang/prometheus/desc.go | 5 +- .../client_golang/prometheus/doc.go | 107 +- .../client_golang/prometheus/gauge.go | 6 +- ...tats_collector_pre_go115.go => get_pid.go} | 23 +- 
.../prometheus/get_pid_gopherjs.go | 23 + .../client_golang/prometheus/go_collector.go | 20 +- .../prometheus/go_collector_go116.go | 17 +- ...lector_go117.go => go_collector_latest.go} | 314 +- .../client_golang/prometheus/histogram.go | 980 +- .../prometheus/internal/almost_equal.go | 60 + .../prometheus/internal/difflib.go | 654 + .../go_collector_options.go} | 27 +- .../prometheus/internal/go_runtime_metrics.go | 18 +- .../prometheus/internal/metric.go | 28 +- .../client_golang/prometheus/labels.go | 9 +- .../client_golang/prometheus/metric.go | 112 +- .../client_golang/prometheus/num_threads.go | 25 + .../prometheus/num_threads_gopherjs.go | 22 + .../client_golang/prometheus/observer.go | 2 +- .../prometheus/process_collector.go | 10 +- .../prometheus/process_collector_js.go | 26 + .../prometheus/process_collector_other.go | 4 +- .../client_golang/prometheus/promauto/auto.go | 170 +- .../prometheus/promhttp/delegator.go | 18 +- .../client_golang/prometheus/promhttp/http.go | 20 +- .../prometheus/promhttp/instrument_client.go | 40 +- .../prometheus/promhttp/instrument_server.go | 123 +- .../prometheus/promhttp/option.go | 39 +- .../client_golang/prometheus/push/push.go | 64 +- .../client_golang/prometheus/registry.go | 152 +- .../client_golang/prometheus/summary.go | 9 +- .../client_golang/prometheus/testutil/lint.go | 4 +- .../prometheus/testutil/promlint/promlint.go | 5 +- .../prometheus/testutil/testutil.go | 158 +- .../client_golang/prometheus/timer.go | 11 +- .../client_golang/prometheus/value.go | 47 +- .../client_golang/prometheus/vec.go | 88 +- .../client_golang/prometheus/wrap.go | 4 +- .../prometheus/client_model/go/metrics.pb.go | 333 +- .../prometheus/common/config/config.go | 26 +- .../prometheus/common/config/http_config.go | 335 +- .../prometheus/common/expfmt/fuzz.go | 5 +- .../common/expfmt/openmetrics_create.go | 26 +- .../prometheus/common/expfmt/text_create.go | 3 +- .../bitbucket.org/ww/goautoneg/autoneg.go | 22 +- .../prometheus/common/model/time.go | 2 +- .../prometheus/common/version/info.go | 18 +- .../prometheus/common/version/info_default.go | 21 + .../prometheus/common/version/info_go118.go | 58 + .../github.com/prometheus/procfs/.gitignore | 3 +- .../prometheus/procfs/.golangci.yml | 10 +- .../prometheus/procfs/CODE_OF_CONDUCT.md | 4 +- .../prometheus/procfs/CONTRIBUTING.md | 4 +- vendor/github.com/prometheus/procfs/Makefile | 10 +- .../prometheus/procfs/Makefile.common | 89 +- .../github.com/prometheus/procfs/SECURITY.md | 2 +- vendor/github.com/prometheus/procfs/arp.go | 45 +- .../github.com/prometheus/procfs/cpuinfo.go | 5 +- .../prometheus/procfs/cpuinfo_armx.go | 1 + .../prometheus/procfs/cpuinfo_mipsx.go | 1 + .../prometheus/procfs/cpuinfo_others.go | 4 +- .../prometheus/procfs/cpuinfo_ppcx.go | 1 + .../prometheus/procfs/cpuinfo_riscvx.go | 1 + .../prometheus/procfs/cpuinfo_s390x.go | 1 + .../prometheus/procfs/cpuinfo_x86.go | 1 + .../prometheus/procfs/fixtures.ttar | 7673 - .../prometheus/procfs/internal/fs/fs.go | 2 +- .../prometheus/procfs/internal/util/parse.go | 6 +- .../procfs/internal/util/readfile.go | 11 +- .../procfs/internal/util/sysreadfile.go | 8 +- .../internal/util/sysreadfile_compat.go | 3 +- vendor/github.com/prometheus/procfs/ipvs.go | 3 +- .../prometheus/procfs/kernel_random.go | 1 + .../github.com/prometheus/procfs/loadavg.go | 2 +- vendor/github.com/prometheus/procfs/mdstat.go | 10 +- .../prometheus/procfs/net_conntrackstat.go | 12 +- .../github.com/prometheus/procfs/net_dev.go | 8 +- .../prometheus/procfs/net_ip_socket.go | 2 +- 
.../prometheus/procfs/net_protocols.go | 4 +- .../prometheus/procfs/net_softnet.go | 8 +- .../procfs/{xfrm.go => net_xfrm.go} | 9 +- .../github.com/prometheus/procfs/netstat.go | 8 +- vendor/github.com/prometheus/procfs/proc.go | 10 +- .../prometheus/procfs/proc_cgroup.go | 6 +- .../prometheus/procfs/proc_cgroups.go | 98 + .../prometheus/procfs/proc_environ.go | 2 +- .../prometheus/procfs/proc_fdinfo.go | 3 +- .../prometheus/procfs/proc_limits.go | 2 +- .../github.com/prometheus/procfs/proc_maps.go | 12 +- .../prometheus/procfs/proc_netstat.go | 440 + .../github.com/prometheus/procfs/proc_psi.go | 14 +- .../prometheus/procfs/proc_smaps.go | 23 +- .../github.com/prometheus/procfs/proc_snmp.go | 353 + .../prometheus/procfs/proc_snmp6.go | 381 + .../github.com/prometheus/procfs/proc_stat.go | 11 +- .../prometheus/procfs/proc_status.go | 32 +- .../github.com/prometheus/procfs/proc_sys.go | 51 + .../github.com/prometheus/procfs/schedstat.go | 6 +- vendor/github.com/prometheus/procfs/slab.go | 2 +- .../github.com/prometheus/procfs/softirqs.go | 160 + vendor/github.com/prometheus/procfs/stat.go | 10 +- vendor/github.com/prometheus/procfs/vm.go | 6 +- .../github.com/prometheus/procfs/zoneinfo.go | 5 +- vendor/github.com/sirupsen/logrus/README.md | 4 +- .../github.com/sirupsen/logrus/buffer_pool.go | 9 - vendor/github.com/sirupsen/logrus/entry.go | 21 +- vendor/github.com/sirupsen/logrus/logger.go | 13 + vendor/github.com/stretchr/objx/.travis.yml | 30 - vendor/github.com/stretchr/objx/Taskfile.yml | 2 +- vendor/github.com/stretchr/objx/accessors.go | 106 +- vendor/github.com/stretchr/objx/map.go | 57 +- .../stretchr/objx/type_specific_codegen.go | 10 + .../testify/assert/assertion_compare.go | 76 +- .../assert/assertion_compare_can_convert.go | 16 + .../assert/assertion_compare_legacy.go | 16 + .../testify/assert/assertion_format.go | 22 + .../testify/assert/assertion_forward.go | 44 + .../testify/assert/assertion_order.go | 8 +- .../stretchr/testify/assert/assertions.go | 190 +- .../github.com/stretchr/testify/mock/mock.go | 162 +- .../stretchr/testify/require/require.go | 56 + .../testify/require/require_forward.go | 44 + vendor/github.com/xdg-go/scram/CHANGELOG.md | 8 + vendor/github.com/xdg-go/scram/doc.go | 6 +- vendor/github.com/xdg-go/scram/scram.go | 5 + .../github.com/xdg-go/stringprep/CHANGELOG.md | 7 + .../etcd/api/v3/v3rpc/rpctypes/error.go | 5 + .../go.etcd.io/etcd/api/v3/version/version.go | 2 +- .../go.mongodb.org/mongo-driver/bson/bson.go | 8 +- .../mongo-driver/bson/bson_1_8.go | 81 - .../mongo-driver/bson/bsoncodec/bsoncodec.go | 24 +- .../bson/bsoncodec/default_value_decoders.go | 28 +- .../bson/bsoncodec/default_value_encoders.go | 11 +- .../mongo-driver/bson/bsoncodec/doc.go | 18 +- .../bson/bsoncodec/empty_interface_codec.go | 17 +- .../mongo-driver/bson/bsoncodec/map_codec.go | 40 +- .../mongo-driver/bson/bsoncodec/registry.go | 15 +- .../bson/bsoncodec/struct_codec.go | 11 +- .../bson/bsoncodec/struct_tag_parser.go | 42 +- .../mongo-driver/bson/bsoncodec/types.go | 25 - .../mongo-driver/bson/bsonoptions/doc.go | 8 + .../bson/bsonrw/extjson_parser.go | 2 +- .../bson/bsonrw/extjson_wrappers.go | 6 +- .../bson/bsonrw/extjson_writer.go | 9 +- .../mongo-driver/bson/bsonrw/value_reader.go | 29 +- .../mongo-driver/bson/bsonrw/value_writer.go | 2 +- .../mongo-driver/bson/bsonrw/writer.go | 24 - .../mongo-driver/bson/bsontype/bsontype.go | 2 + .../mongo-driver/bson/decoder.go | 23 + .../go.mongodb.org/mongo-driver/bson/doc.go | 111 +- .../mongo-driver/bson/marshal.go | 25 + 
.../mongo-driver/bson/primitive/decimal.go | 18 +- .../mongo-driver/bson/primitive/objectid.go | 30 +- .../mongo-driver/bson/primitive/primitive.go | 8 +- .../mongo-driver/bson/primitive_codecs.go | 25 +- .../go.mongodb.org/mongo-driver/bson/raw.go | 7 - .../mongo-driver/bson/registry.go | 2 +- .../go.mongodb.org/mongo-driver/bson/types.go | 49 - .../mongo-driver/bson/unmarshal.go | 2 +- .../x/bsonx/bsoncore/bson_documentbuilder.go | 2 +- .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 22 +- .../mongo-driver/x/bsonx/bsoncore/document.go | 30 +- .../x/bsonx/bsoncore/document_sequence.go | 10 +- .../mongo-driver/x/bsonx/bsoncore/value.go | 4 +- vendor/go.opencensus.io/Makefile | 8 +- vendor/go.opencensus.io/opencensus.go | 2 +- .../plugin/ocgrpc/client_metrics.go | 9 + .../plugin/ocgrpc/server_metrics.go | 9 + .../plugin/ocgrpc/stats_common.go | 23 +- .../go.opencensus.io/plugin/ochttp/server.go | 8 +- vendor/go.opencensus.io/stats/doc.go | 7 +- .../go.opencensus.io/stats/internal/record.go | 6 + vendor/go.opencensus.io/stats/record.go | 21 +- .../stats/view/aggregation.go | 6 +- .../go.opencensus.io/stats/view/collector.go | 9 +- vendor/go.opencensus.io/stats/view/doc.go | 2 +- vendor/go.opencensus.io/stats/view/worker.go | 27 +- vendor/go.opencensus.io/tag/profile_19.go | 1 + vendor/go.opencensus.io/tag/profile_not19.go | 1 + vendor/go.opencensus.io/trace/doc.go | 13 +- vendor/go.opencensus.io/trace/lrumap.go | 2 +- vendor/go.opencensus.io/trace/trace_go11.go | 1 + .../go.opencensus.io/trace/trace_nongo11.go | 1 + vendor/go.opentelemetry.io/otel/.gitignore | 2 +- vendor/go.opentelemetry.io/otel/.golangci.yml | 217 +- vendor/go.opentelemetry.io/otel/.lycheeignore | 4 + .../otel/.markdown-link.json | 16 - vendor/go.opentelemetry.io/otel/CHANGELOG.md | 550 +- vendor/go.opentelemetry.io/otel/CODEOWNERS | 2 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 7 +- vendor/go.opentelemetry.io/otel/Makefile | 58 +- vendor/go.opentelemetry.io/otel/README.md | 45 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 43 +- .../otel/attribute/encoder.go | 86 +- .../otel/attribute/iterator.go | 78 +- .../go.opentelemetry.io/otel/attribute/set.go | 151 +- .../otel/attribute/value.go | 97 +- .../otel/baggage/baggage.go | 30 +- .../go.opentelemetry.io/otel/codes/codes.go | 10 + vendor/go.opentelemetry.io/otel/handler.go | 28 +- .../otel/internal/attribute/attribute.go | 45 + .../otel/internal/baggage/context.go | 9 +- .../otel/internal/global/internal_logging.go | 34 +- .../otel/internal/global/state.go | 53 +- .../otel/internal/metric/global/meter.go | 287 - .../otel/internal/metric/registry/doc.go | 24 - .../otel/internal/metric/registry/registry.go | 139 - .../otel/internal/rawhelpers.go | 2 +- .../go.opentelemetry.io/otel/metric/config.go | 65 +- vendor/go.opentelemetry.io/otel/metric/doc.go | 44 - .../otel/metric/global/global.go | 42 + .../otel/metric/global/metric.go | 49 - .../otel/metric/instrument/asyncfloat64.go | 131 + .../otel/metric/instrument/asyncint64.go | 131 + .../otel/metric/instrument/instrument.go | 90 + .../otel/metric/instrument/syncfloat64.go | 86 + .../otel/metric/instrument/syncint64.go | 86 + .../metric/internal/global/instruments.go | 355 + .../otel/metric/internal/global/meter.go | 354 + .../internal/global/state.go} | 52 +- .../go.opentelemetry.io/otel/metric/meter.go | 138 + .../go.opentelemetry.io/otel/metric/metric.go | 538 - .../otel/metric/metric_instrument.go | 464 - .../go.opentelemetry.io/otel/metric/noop.go | 182 +- .../otel/metric/number/doc.go | 23 - 
.../otel/metric/number/kind_string.go | 24 - .../otel/metric/number/number.go | 538 - .../otel/metric/sdkapi/descriptor.go | 70 - .../otel/metric/sdkapi/instrumentkind.go | 80 - .../metric/sdkapi/instrumentkind_string.go | 28 - .../otel/metric/sdkapi/noop.go | 66 - .../otel/metric/sdkapi/sdkapi.go | 159 - .../otel/metric/unit/unit.go | 1 + .../otel/semconv/internal/http.go | 336 + .../otel/semconv/v1.7.0/http.go | 274 +- .../otel/semconv/v1.7.0/schema.go | 2 +- vendor/go.opentelemetry.io/otel/trace.go | 7 +- .../go.opentelemetry.io/otel/trace/config.go | 2 +- vendor/go.opentelemetry.io/otel/trace/doc.go | 2 +- vendor/go.opentelemetry.io/otel/trace/noop.go | 2 +- .../go.opentelemetry.io/otel/trace/trace.go | 84 +- .../otel/trace/tracestate.go | 29 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 18 +- vendor/go.uber.org/atomic/CHANGELOG.md | 17 + vendor/go.uber.org/atomic/bool.go | 11 +- vendor/go.uber.org/atomic/duration.go | 11 +- vendor/go.uber.org/atomic/error.go | 13 +- vendor/go.uber.org/atomic/error_ext.go | 4 +- vendor/go.uber.org/atomic/float32.go | 77 + vendor/go.uber.org/atomic/float32_ext.go | 76 + vendor/go.uber.org/atomic/float64.go | 2 +- vendor/go.uber.org/atomic/float64_ext.go | 35 +- vendor/go.uber.org/atomic/int32.go | 9 +- vendor/go.uber.org/atomic/int64.go | 9 +- vendor/go.uber.org/atomic/nocmp.go | 12 +- vendor/go.uber.org/atomic/pointer_go118.go | 60 + vendor/go.uber.org/atomic/pointer_go119.go | 61 + vendor/go.uber.org/atomic/string.go | 13 +- vendor/go.uber.org/atomic/string_ext.go | 6 +- vendor/go.uber.org/atomic/time.go | 2 +- vendor/go.uber.org/atomic/uint32.go | 9 +- vendor/go.uber.org/atomic/uint64.go | 9 +- vendor/go.uber.org/atomic/uintptr.go | 9 +- vendor/go.uber.org/atomic/unsafe_pointer.go | 9 +- vendor/go.uber.org/atomic/value.go | 4 +- vendor/go.uber.org/multierr/CHANGELOG.md | 14 + vendor/go.uber.org/multierr/error.go | 358 +- vendor/go.uber.org/zap/.readme.tmpl | 12 +- vendor/go.uber.org/zap/CHANGELOG.md | 103 +- vendor/go.uber.org/zap/CONTRIBUTING.md | 21 +- vendor/go.uber.org/zap/README.md | 59 +- vendor/go.uber.org/zap/array_go118.go | 156 + vendor/go.uber.org/zap/config.go | 4 +- vendor/go.uber.org/zap/doc.go | 60 +- vendor/go.uber.org/zap/encoder.go | 2 +- vendor/go.uber.org/zap/global.go | 1 + vendor/go.uber.org/zap/http_handler.go | 25 +- vendor/go.uber.org/zap/internal/exit/exit.go | 22 +- .../level_enabler.go} | 19 +- vendor/go.uber.org/zap/level.go | 20 + vendor/go.uber.org/zap/logger.go | 124 +- vendor/go.uber.org/zap/options.go | 21 +- vendor/go.uber.org/zap/sink.go | 100 +- vendor/go.uber.org/zap/stacktrace.go | 175 +- vendor/go.uber.org/zap/sugar.go | 147 +- vendor/go.uber.org/zap/writer.go | 11 +- .../zap/zapcore/buffered_write_syncer.go | 31 + vendor/go.uber.org/zap/zapcore/clock.go | 4 +- .../zap/zapcore/console_encoder.go | 6 +- vendor/go.uber.org/zap/zapcore/core.go | 9 + vendor/go.uber.org/zap/zapcore/encoder.go | 30 +- vendor/go.uber.org/zap/zapcore/entry.go | 72 +- vendor/go.uber.org/zap/zapcore/error.go | 14 +- vendor/go.uber.org/zap/zapcore/hook.go | 9 + .../go.uber.org/zap/zapcore/increase_level.go | 9 + .../go.uber.org/zap/zapcore/json_encoder.go | 96 +- vendor/go.uber.org/zap/zapcore/level.go | 54 + .../reflected_encoder.go} | 25 +- vendor/go.uber.org/zap/zapcore/sampler.go | 38 +- vendor/go.uber.org/zap/zapcore/tee.go | 17 +- vendor/go.uber.org/zap/zapgrpc/zapgrpc.go | 10 +- vendor/golang.org/x/crypto/AUTHORS | 3 - vendor/golang.org/x/crypto/CONTRIBUTORS | 3 - 
vendor/golang.org/x/crypto/argon2/argon2.go | 10 +- vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 2 +- vendor/golang.org/x/crypto/md4/md4block.go | 14 +- vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 2 +- vendor/golang.org/x/crypto/pkcs12/crypto.go | 2 +- .../x/crypto/pkcs12/internal/rc2/rc2.go | 53 +- vendor/golang.org/x/crypto/scrypt/scrypt.go | 2 +- vendor/golang.org/x/crypto/sha3/doc.go | 12 +- vendor/golang.org/x/crypto/sha3/keccakf.go | 194 +- vendor/golang.org/x/crypto/sha3/sha3.go | 2 +- vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 2 + vendor/golang.org/x/mod/semver/semver.go | 10 - vendor/golang.org/x/net/AUTHORS | 3 - vendor/golang.org/x/net/CONTRIBUTORS | 3 - vendor/golang.org/x/net/bpf/doc.go | 6 +- .../golang.org/x/net/bpf/vm_instructions.go | 4 +- vendor/golang.org/x/net/context/context.go | 6 +- vendor/golang.org/x/net/context/go17.go | 14 +- vendor/golang.org/x/net/context/pre_go17.go | 10 +- .../golang.org/x/net/http/httpguts/httplex.go | 54 +- .../golang.org/x/net/http/httpproxy/proxy.go | 6 +- .../x/net/http2/client_conn_pool.go | 3 +- vendor/golang.org/x/net/http2/errors.go | 2 +- vendor/golang.org/x/net/http2/flow.go | 88 +- vendor/golang.org/x/net/http2/frame.go | 25 +- vendor/golang.org/x/net/http2/go118.go | 17 + vendor/golang.org/x/net/http2/headermap.go | 18 + vendor/golang.org/x/net/http2/hpack/encode.go | 7 +- vendor/golang.org/x/net/http2/hpack/hpack.go | 16 +- .../golang.org/x/net/http2/hpack/huffman.go | 87 +- .../x/net/http2/hpack/static_table.go | 188 + vendor/golang.org/x/net/http2/hpack/tables.go | 78 +- vendor/golang.org/x/net/http2/http2.go | 22 +- vendor/golang.org/x/net/http2/not_go118.go | 17 + vendor/golang.org/x/net/http2/server.go | 444 +- vendor/golang.org/x/net/http2/transport.go | 370 +- .../x/net/http2/writesched_priority.go | 9 +- vendor/golang.org/x/net/idna/trieval.go | 34 +- .../x/net/internal/socket/mmsghdr_unix.go | 18 +- .../x/net/internal/socket/msghdr_linux.go | 3 - .../x/net/internal/socket/rawconn_msg.go | 48 +- .../x/net/internal/socket/sys_stub.go | 6 +- .../x/net/internal/socket/sys_unix.go | 101 +- .../x/net/internal/socket/sys_windows.go | 7 +- .../x/net/internal/socket/sys_zos_s390x.go | 38 +- .../x/net/internal/socket/zsys_darwin_arm.go | 30 - ..._darwin_386.go => zsys_freebsd_riscv64.go} | 8 +- .../x/net/internal/socket/zsys_linux_ppc.go | 32 +- .../net/internal/socket/zsys_openbsd_ppc64.go | 30 + .../internal/socket/zsys_openbsd_riscv64.go | 30 + vendor/golang.org/x/net/ipv4/doc.go | 12 +- .../x/net/ipv4/zsys_freebsd_riscv64.go | 52 + vendor/golang.org/x/net/ipv6/doc.go | 12 +- .../x/net/ipv6/zsys_freebsd_riscv64.go | 64 + .../x/net/publicsuffix/data/children | Bin 0 -> 2876 bytes .../golang.org/x/net/publicsuffix/data/nodes | Bin 0 -> 48280 bytes .../golang.org/x/net/publicsuffix/data/text | 1 + vendor/golang.org/x/net/publicsuffix/list.go | 34 +- vendor/golang.org/x/net/publicsuffix/table.go | 10567 +- vendor/golang.org/x/net/trace/trace.go | 2 +- vendor/golang.org/x/oauth2/AUTHORS | 3 - vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 - .../x/oauth2/authhandler/authhandler.go | 44 +- vendor/golang.org/x/oauth2/google/default.go | 38 +- vendor/golang.org/x/oauth2/google/doc.go | 25 +- vendor/golang.org/x/oauth2/google/error.go | 64 + vendor/golang.org/x/oauth2/google/google.go | 19 +- .../google/internal/externalaccount/aws.go | 165 +- .../externalaccount/basecredentials.go | 43 +- .../externalaccount/executablecredsource.go | 309 + .../internal/externalaccount/impersonate.go | 9 +- vendor/golang.org/x/oauth2/google/jwt.go 
| 3 +- vendor/golang.org/x/oauth2/jws/jws.go | 2 +- vendor/golang.org/x/sync/AUTHORS | 3 - vendor/golang.org/x/sync/CONTRIBUTORS | 3 - vendor/golang.org/x/sync/errgroup/errgroup.go | 74 +- vendor/golang.org/x/sys/AUTHORS | 3 - vendor/golang.org/x/sys/CONTRIBUTORS | 3 - vendor/golang.org/x/sys/cpu/byteorder.go | 1 + vendor/golang.org/x/sys/cpu/cpu.go | 4 +- vendor/golang.org/x/sys/cpu/cpu_arm64.go | 12 +- vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 29 +- .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 44 +- vendor/golang.org/x/sys/cpu/cpu_loong64.go | 13 + .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 65 + .../golang.org/x/sys/cpu/cpu_openbsd_arm64.s | 11 + .../golang.org/x/sys/cpu/cpu_other_arm64.go | 4 +- .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 15 + .../golang.org/x/sys/cpu/cpu_other_riscv64.go | 12 + vendor/golang.org/x/sys/cpu/parse.go | 43 + .../x/sys/cpu/proc_cpuinfo_linux.go | 54 + .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 2 +- vendor/golang.org/x/sys/execabs/execabs.go | 2 +- .../golang.org/x/sys/execabs/execabs_go118.go | 12 + .../golang.org/x/sys/execabs/execabs_go119.go | 17 + vendor/golang.org/x/sys/plan9/mkerrors.sh | 4 +- vendor/golang.org/x/sys/plan9/syscall.go | 11 +- .../golang.org/x/sys/plan9/syscall_plan9.go | 10 + vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s | 31 + .../golang.org/x/sys/unix/asm_bsd_riscv64.s | 29 + .../golang.org/x/sys/unix/asm_linux_loong64.s | 54 + vendor/golang.org/x/sys/unix/dirent.go | 4 +- vendor/golang.org/x/sys/unix/endian_little.go | 4 +- .../x/sys/unix/errors_freebsd_386.go | 233 - .../x/sys/unix/errors_freebsd_amd64.go | 233 - .../x/sys/unix/errors_freebsd_arm.go | 226 - .../x/sys/unix/errors_freebsd_arm64.go | 17 - vendor/golang.org/x/sys/unix/gccgo.go | 4 +- vendor/golang.org/x/sys/unix/gccgo_c.c | 4 +- vendor/golang.org/x/sys/unix/ifreq_linux.go | 9 +- vendor/golang.org/x/sys/unix/ioctl.go | 4 +- vendor/golang.org/x/sys/unix/ioctl_linux.go | 43 +- vendor/golang.org/x/sys/unix/mkall.sh | 62 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 20 +- vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 14 + vendor/golang.org/x/sys/unix/str.go | 27 - vendor/golang.org/x/sys/unix/syscall.go | 10 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 75 +- vendor/golang.org/x/sys/unix/syscall_bsd.go | 77 +- .../x/sys/unix/syscall_darwin.1_12.go | 32 - .../x/sys/unix/syscall_darwin.1_13.go | 108 - .../golang.org/x/sys/unix/syscall_darwin.go | 144 +- .../x/sys/unix/syscall_dragonfly.go | 12 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 337 +- .../x/sys/unix/syscall_freebsd_386.go | 6 +- .../x/sys/unix/syscall_freebsd_amd64.go | 6 +- .../x/sys/unix/syscall_freebsd_arm.go | 4 +- .../x/sys/unix/syscall_freebsd_arm64.go | 4 +- .../x/sys/unix/syscall_freebsd_riscv64.go | 63 + vendor/golang.org/x/sys/unix/syscall_hurd.go | 22 + .../golang.org/x/sys/unix/syscall_hurd_386.go | 29 + .../golang.org/x/sys/unix/syscall_illumos.go | 111 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 283 +- .../x/sys/unix/syscall_linux_386.go | 8 +- .../x/sys/unix/syscall_linux_amd64.go | 9 +- .../x/sys/unix/syscall_linux_arm.go | 8 +- .../x/sys/unix/syscall_linux_arm64.go | 9 +- .../x/sys/unix/syscall_linux_loong64.go | 222 + .../x/sys/unix/syscall_linux_mips64x.go | 8 +- .../x/sys/unix/syscall_linux_mipsx.go | 8 +- .../x/sys/unix/syscall_linux_ppc.go | 8 +- .../x/sys/unix/syscall_linux_ppc64x.go | 8 +- .../x/sys/unix/syscall_linux_riscv64.go | 9 +- .../x/sys/unix/syscall_linux_s390x.go | 8 +- .../x/sys/unix/syscall_linux_sparc64.go | 8 +- 
.../golang.org/x/sys/unix/syscall_netbsd.go | 24 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 12 +- .../x/sys/unix/syscall_openbsd_libc.go | 27 + .../x/sys/unix/syscall_openbsd_mips64.go | 4 + .../x/sys/unix/syscall_openbsd_ppc64.go | 42 + .../x/sys/unix/syscall_openbsd_riscv64.go | 42 + .../golang.org/x/sys/unix/syscall_solaris.go | 361 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 176 +- .../golang.org/x/sys/unix/syscall_unix_gc.go | 6 +- .../x/sys/unix/syscall_zos_s390x.go | 173 +- vendor/golang.org/x/sys/unix/sysvshm_unix.go | 13 +- vendor/golang.org/x/sys/unix/xattr_bsd.go | 95 +- .../x/sys/unix/zerrors_freebsd_386.go | 109 +- .../x/sys/unix/zerrors_freebsd_amd64.go | 107 +- .../x/sys/unix/zerrors_freebsd_arm.go | 220 +- .../x/sys/unix/zerrors_freebsd_arm64.go | 100 +- .../x/sys/unix/zerrors_freebsd_riscv64.go | 2148 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 434 +- .../x/sys/unix/zerrors_linux_386.go | 6 +- .../x/sys/unix/zerrors_linux_amd64.go | 6 +- .../x/sys/unix/zerrors_linux_arm.go | 6 +- .../x/sys/unix/zerrors_linux_arm64.go | 7 +- .../x/sys/unix/zerrors_linux_loong64.go | 818 + .../x/sys/unix/zerrors_linux_mips.go | 6 +- .../x/sys/unix/zerrors_linux_mips64.go | 6 +- .../x/sys/unix/zerrors_linux_mips64le.go | 6 +- .../x/sys/unix/zerrors_linux_mipsle.go | 6 +- .../x/sys/unix/zerrors_linux_ppc.go | 6 +- .../x/sys/unix/zerrors_linux_ppc64.go | 6 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 6 +- .../x/sys/unix/zerrors_linux_riscv64.go | 6 +- .../x/sys/unix/zerrors_linux_s390x.go | 6 +- .../x/sys/unix/zerrors_linux_sparc64.go | 6 +- .../x/sys/unix/zerrors_openbsd_386.go | 356 +- .../x/sys/unix/zerrors_openbsd_amd64.go | 189 +- .../x/sys/unix/zerrors_openbsd_arm.go | 348 +- .../x/sys/unix/zerrors_openbsd_arm64.go | 160 +- .../x/sys/unix/zerrors_openbsd_mips64.go | 95 +- .../x/sys/unix/zerrors_openbsd_ppc64.go | 1905 + .../x/sys/unix/zerrors_openbsd_riscv64.go | 1904 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 4 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 4 +- .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 40 - .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 25 - .../x/sys/unix/zsyscall_darwin_amd64.go | 73 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 37 +- .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 40 - .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 25 - .../x/sys/unix/zsyscall_darwin_arm64.go | 73 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 37 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 10 + .../x/sys/unix/zsyscall_freebsd_386.go | 155 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 153 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 187 +- .../x/sys/unix/zsyscall_freebsd_arm64.go | 153 +- .../x/sys/unix/zsyscall_freebsd_riscv64.go | 1899 + .../x/sys/unix/zsyscall_illumos_amd64.go | 28 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 109 + .../x/sys/unix/zsyscall_linux_386.go | 44 +- .../x/sys/unix/zsyscall_linux_amd64.go | 55 +- .../x/sys/unix/zsyscall_linux_arm.go | 44 +- .../x/sys/unix/zsyscall_linux_arm64.go | 55 +- .../x/sys/unix/zsyscall_linux_loong64.go | 487 + .../x/sys/unix/zsyscall_linux_mips.go | 44 +- .../x/sys/unix/zsyscall_linux_mips64.go | 44 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 44 +- .../x/sys/unix/zsyscall_linux_mipsle.go | 44 +- .../x/sys/unix/zsyscall_linux_ppc.go | 44 +- .../x/sys/unix/zsyscall_linux_ppc64.go | 44 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 44 +- .../x/sys/unix/zsyscall_linux_riscv64.go | 55 +- .../x/sys/unix/zsyscall_linux_s390x.go | 44 +- .../x/sys/unix/zsyscall_linux_sparc64.go | 44 +- 
.../x/sys/unix/zsyscall_netbsd_386.go | 14 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 14 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 14 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 14 +- .../x/sys/unix/zsyscall_openbsd_386.go | 816 +- .../x/sys/unix/zsyscall_openbsd_386.s | 669 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 816 +- .../x/sys/unix/zsyscall_openbsd_amd64.s | 669 + .../x/sys/unix/zsyscall_openbsd_arm.go | 816 +- .../x/sys/unix/zsyscall_openbsd_arm.s | 669 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 816 +- .../x/sys/unix/zsyscall_openbsd_arm64.s | 669 + .../x/sys/unix/zsyscall_openbsd_mips64.go | 816 +- .../x/sys/unix/zsyscall_openbsd_mips64.s | 669 + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 2235 + .../x/sys/unix/zsyscall_openbsd_ppc64.s | 802 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 2235 + .../x/sys/unix/zsyscall_openbsd_riscv64.s | 669 + .../x/sys/unix/zsyscall_solaris_amd64.go | 71 +- .../x/sys/unix/zsysctl_openbsd_386.go | 51 +- .../x/sys/unix/zsysctl_openbsd_amd64.go | 17 +- .../x/sys/unix/zsysctl_openbsd_arm.go | 51 +- .../x/sys/unix/zsysctl_openbsd_arm64.go | 11 +- .../x/sys/unix/zsysctl_openbsd_mips64.go | 3 +- .../x/sys/unix/zsysctl_openbsd_ppc64.go | 281 + .../x/sys/unix/zsysctl_openbsd_riscv64.go | 282 + .../x/sys/unix/zsysnum_freebsd_386.go | 107 +- .../x/sys/unix/zsysnum_freebsd_amd64.go | 107 +- .../x/sys/unix/zsysnum_freebsd_arm.go | 107 +- .../x/sys/unix/zsysnum_freebsd_arm64.go | 107 +- .../x/sys/unix/zsysnum_freebsd_riscv64.go | 394 + .../x/sys/unix/zsysnum_linux_386.go | 3 +- .../x/sys/unix/zsysnum_linux_amd64.go | 3 +- .../x/sys/unix/zsysnum_linux_arm.go | 3 +- .../x/sys/unix/zsysnum_linux_arm64.go | 3 +- .../x/sys/unix/zsysnum_linux_loong64.go | 311 + .../x/sys/unix/zsysnum_linux_mips.go | 3 +- .../x/sys/unix/zsysnum_linux_mips64.go | 3 +- .../x/sys/unix/zsysnum_linux_mips64le.go | 3 +- .../x/sys/unix/zsysnum_linux_mipsle.go | 3 +- .../x/sys/unix/zsysnum_linux_ppc.go | 3 +- .../x/sys/unix/zsysnum_linux_ppc64.go | 3 +- .../x/sys/unix/zsysnum_linux_ppc64le.go | 3 +- .../x/sys/unix/zsysnum_linux_riscv64.go | 4 +- .../x/sys/unix/zsysnum_linux_s390x.go | 3 +- .../x/sys/unix/zsysnum_linux_sparc64.go | 3 +- .../x/sys/unix/zsysnum_openbsd_386.go | 1 + .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 + .../x/sys/unix/zsysnum_openbsd_arm.go | 1 + .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 + .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 + .../x/sys/unix/zsysnum_openbsd_ppc64.go | 218 + .../x/sys/unix/zsysnum_openbsd_riscv64.go | 219 + .../x/sys/unix/ztypes_darwin_amd64.go | 73 +- .../x/sys/unix/ztypes_darwin_arm64.go | 73 +- .../x/sys/unix/ztypes_freebsd_386.go | 114 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 112 +- .../x/sys/unix/ztypes_freebsd_arm.go | 163 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 110 +- .../x/sys/unix/ztypes_freebsd_riscv64.go | 638 + .../x/sys/unix/ztypes_illumos_amd64.go | 42 - vendor/golang.org/x/sys/unix/ztypes_linux.go | 1545 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 28 +- .../x/sys/unix/ztypes_linux_amd64.go | 28 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 28 +- .../x/sys/unix/ztypes_linux_arm64.go | 28 +- .../x/sys/unix/ztypes_linux_loong64.go | 691 + .../x/sys/unix/ztypes_linux_mips.go | 28 +- .../x/sys/unix/ztypes_linux_mips64.go | 28 +- .../x/sys/unix/ztypes_linux_mips64le.go | 28 +- .../x/sys/unix/ztypes_linux_mipsle.go | 28 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 28 +- .../x/sys/unix/ztypes_linux_ppc64.go | 28 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 28 +- .../x/sys/unix/ztypes_linux_riscv64.go | 28 
+- .../x/sys/unix/ztypes_linux_s390x.go | 32 +- .../x/sys/unix/ztypes_linux_sparc64.go | 28 +- .../x/sys/unix/ztypes_netbsd_386.go | 84 + .../x/sys/unix/ztypes_netbsd_amd64.go | 84 + .../x/sys/unix/ztypes_netbsd_arm.go | 84 + .../x/sys/unix/ztypes_netbsd_arm64.go | 84 + .../x/sys/unix/ztypes_openbsd_386.go | 105 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 41 +- .../x/sys/unix/ztypes_openbsd_arm.go | 17 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 17 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 17 +- .../x/sys/unix/ztypes_openbsd_ppc64.go | 571 + .../x/sys/unix/ztypes_openbsd_riscv64.go | 571 + .../x/sys/unix/ztypes_solaris_amd64.go | 37 +- .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 11 +- .../golang.org/x/sys/windows/exec_windows.go | 10 +- .../golang.org/x/sys/windows/registry/key.go | 1 - .../x/sys/windows/setupapi_windows.go | 2 +- .../x/sys/windows/svc/eventlog/log.go | 1 - vendor/golang.org/x/sys/windows/syscall.go | 10 +- .../x/sys/windows/syscall_windows.go | 183 +- .../golang.org/x/sys/windows/types_windows.go | 126 +- .../x/sys/windows/zsyscall_windows.go | 117 +- vendor/golang.org/x/term/AUTHORS | 3 - vendor/golang.org/x/term/CONTRIBUTORS | 3 - vendor/golang.org/x/term/term.go | 10 +- vendor/golang.org/x/term/terminal.go | 3 +- vendor/golang.org/x/text/AUTHORS | 3 - vendor/golang.org/x/text/CONTRIBUTORS | 3 - vendor/golang.org/x/text/unicode/bidi/core.go | 26 +- .../golang.org/x/text/unicode/bidi/trieval.go | 12 - .../x/text/unicode/norm/forminfo.go | 9 +- .../x/text/unicode/norm/normalize.go | 11 +- .../x/text/unicode/norm/tables13.0.0.go | 4 +- vendor/golang.org/x/text/width/kind_string.go | 28 - .../golang.org/x/text/width/tables10.0.0.go | 1319 - .../golang.org/x/text/width/tables11.0.0.go | 1331 - .../golang.org/x/text/width/tables12.0.0.go | 1351 - .../golang.org/x/text/width/tables13.0.0.go | 1352 - vendor/golang.org/x/text/width/tables9.0.0.go | 1287 - vendor/golang.org/x/text/width/transform.go | 239 - vendor/golang.org/x/text/width/trieval.go | 30 - vendor/golang.org/x/text/width/width.go | 206 - vendor/golang.org/x/time/AUTHORS | 3 - vendor/golang.org/x/time/CONTRIBUTORS | 3 - vendor/golang.org/x/time/rate/rate.go | 117 +- vendor/golang.org/x/tools/AUTHORS | 3 - vendor/golang.org/x/tools/CONTRIBUTORS | 3 - .../x/tools/go/gcexportdata/gcexportdata.go | 49 +- .../x/tools/go/gcexportdata/importer.go | 2 + .../x/tools/go/internal/gcimporter/bexport.go | 20 +- .../go/internal/gcimporter/gcimporter.go | 161 +- .../x/tools/go/internal/gcimporter/iexport.go | 5 +- .../x/tools/go/internal/gcimporter/iimport.go | 93 +- .../go/internal/gcimporter/unified_no.go | 10 + .../go/internal/gcimporter/unified_yes.go | 10 + .../go/internal/gcimporter/ureader_no.go | 19 + .../go/internal/gcimporter/ureader_yes.go | 612 + .../x/tools/go/internal/pkgbits/codes.go | 77 + .../x/tools/go/internal/pkgbits/decoder.go | 433 + .../x/tools/go/internal/pkgbits/doc.go | 32 + .../x/tools/go/internal/pkgbits/encoder.go | 379 + .../x/tools/go/internal/pkgbits/flags.go | 9 + .../x/tools/go/internal/pkgbits/frames_go1.go | 21 + .../tools/go/internal/pkgbits/frames_go17.go | 28 + .../x/tools/go/internal/pkgbits/reloc.go | 42 + .../x/tools/go/internal/pkgbits/support.go | 17 + .../x/tools/go/internal/pkgbits/sync.go | 113 + .../go/internal/pkgbits/syncmarker_string.go | 89 + vendor/golang.org/x/tools/go/packages/doc.go | 1 - .../golang.org/x/tools/go/packages/golist.go | 92 +- .../x/tools/go/packages/loadmode_string.go | 4 +- .../x/tools/go/packages/packages.go | 73 +- 
.../x/tools/internal/gocommand/invoke.go | 22 +- .../x/tools/internal/gocommand/vendor.go | 22 +- .../internal/packagesinternal/packages.go | 2 + .../x/tools/internal/typeparams/common.go | 108 +- .../x/tools/internal/typeparams/coretype.go | 122 + .../x/tools/internal/typeparams/normalize.go | 12 +- .../x/tools/internal/typeparams/termlist.go | 9 - .../internal/typeparams/typeparams_go117.go | 5 + .../internal/typeparams/typeparams_go118.go | 5 + .../tools/internal/typesinternal/errorcode.go | 158 + .../typesinternal/errorcode_string.go | 18 +- .../x/tools/internal/typesinternal/types.go | 2 + .../tools/internal/typesinternal/types_118.go | 19 + vendor/golang.org/x/xerrors/doc.go | 3 +- vendor/golang.org/x/xerrors/fmt.go | 3 + vendor/golang.org/x/xerrors/wrap.go | 6 + .../v1/cloudresourcemanager-api.json | 26 +- .../v1/cloudresourcemanager-gen.go | 381 +- .../api/compute/v1/compute-api.json | 32297 ++-- .../api/compute/v1/compute-gen.go | 113374 +++++++++------ .../api/googleapi/googleapi.go | 16 +- .../api/googleapi/transport/apikey.go | 2 +- .../iamcredentials/v1/iamcredentials-api.json | 372 + .../iamcredentials/v1/iamcredentials-gen.go | 1094 + .../google.golang.org/api/impersonate/doc.go | 32 + .../api/impersonate/idtoken.go | 129 + .../api/impersonate/impersonate.go | 184 + .../google.golang.org/api/impersonate/user.go | 169 + .../google.golang.org/api/internal/creds.go | 11 +- .../api/internal/gensupport/error.go | 24 + .../api/internal/gensupport/json.go | 7 +- .../api/internal/gensupport/media.go | 124 +- .../api/internal/gensupport/params.go | 2 +- .../api/internal/gensupport/resumable.go | 18 +- .../api/internal/gensupport/retry.go | 11 + .../api/internal/gensupport/send.go | 47 +- .../google.golang.org/api/internal/version.go | 2 +- .../api/option/credentials_go19.go | 24 - .../api/option/credentials_notgo19.go | 23 - vendor/google.golang.org/api/option/option.go | 33 +- .../api/storage/v1/storage-api.json | 252 +- .../api/storage/v1/storage-gen.go | 1512 +- .../api/transport/cert/default_cert.go | 123 +- .../api/transport/cert/enterprise_cert.go | 56 + .../api/transport/cert/secureconnect_cert.go | 123 + .../google.golang.org/api/transport/dial.go | 48 + vendor/google.golang.org/api/transport/doc.go | 11 + .../api/transport/grpc/dial.go | 51 +- .../api/transport/grpc/dial_socketopt.go | 6 +- .../transport/http/default_transport_go113.go | 21 - .../http/default_transport_not_go113.go | 16 - .../api/transport/http/dial.go | 11 + .../api/transport/internal/dca/dca.go | 18 +- .../googleapis/api/annotations/client.pb.go | 1558 +- .../googleapis/api/annotations/http.pb.go | 173 +- .../googleapis/api/annotations/resource.pb.go | 54 +- .../googleapis/api/annotations/routing.pb.go | 408 +- .../googleapis/api/launch_stage.pb.go | 203 + .../admin/v2/bigtable_instance_admin.pb.go | 1432 +- .../admin/v2/bigtable_table_admin.pb.go | 1886 +- .../googleapis/bigtable/admin/v2/common.pb.go | 66 +- .../bigtable/admin/v2/instance.pb.go | 664 +- .../googleapis/bigtable/admin/v2/table.pb.go | 373 +- .../googleapis/bigtable/v2/bigtable.pb.go | 899 +- .../bigtable/v2/request_stats.pb.go | 497 + .../bigtable/v2/response_params.pb.go | 191 + .../genproto/googleapis/iam/v1/alias.go | 208 + .../genproto/googleapis/iam/v1/policy.pb.go | 813 - .../genproto/googleapis/longrunning/alias.go | 115 + .../genproto/googleapis/pubsub/v1/alias.go | 503 + .../genproto/googleapis/rpc/code/code.pb.go | 1 - .../rpc/errdetails/error_details.pb.go | 26 +- .../genproto/googleapis/type/date/date.pb.go | 200 + 
.../genproto/googleapis/type/expr/expr.pb.go | 24 +- vendor/google.golang.org/grpc/CONTRIBUTING.md | 7 +- .../grpc/attributes/attributes.go | 2 +- vendor/google.golang.org/grpc/backoff.go | 2 +- .../grpc/balancer/balancer.go | 73 +- .../grpc/balancer/base/balancer.go | 8 +- .../grpc/balancer/conn_state_evaluator.go | 74 + .../grpclb/grpc_lb_v1/load_balancer.pb.go | 9 +- .../grpc_lb_v1/load_balancer_grpc.pb.go | 19 + .../grpc/balancer/grpclb/grpclb.go | 28 +- .../grpc/balancer/grpclb/grpclb_config.go | 2 +- .../balancer/grpclb/grpclb_remote_balancer.go | 8 +- .../grpc/balancer/roundrobin/roundrobin.go | 16 +- .../grpc/balancer_conn_wrappers.go | 389 +- .../grpc_binarylog_v1/binarylog.pb.go | 20 +- .../grpc/channelz/channelz.go | 36 + vendor/google.golang.org/grpc/clientconn.go | 495 +- .../alts/internal/handshaker/handshaker.go | 4 +- .../internal/handshaker/service/service.go | 3 +- .../internal/proto/grpc_gcp/altscontext.pb.go | 7 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 9 +- .../proto/grpc_gcp/handshaker_grpc.pb.go | 17 + .../grpc_gcp/transport_security_common.pb.go | 7 +- .../grpc/credentials/credentials.go | 20 +- .../grpc/credentials/google/google.go | 5 +- .../grpc/credentials/google/xds.go | 55 +- .../grpc/credentials/insecure/insecure.go | 26 + .../google.golang.org/grpc/credentials/tls.go | 2 +- vendor/google.golang.org/grpc/dialoptions.go | 111 +- .../grpc/encoding/encoding.go | 7 +- .../grpc/encoding/gzip/gzip.go | 2 +- .../grpc/grpclog/loggerv2.go | 2 +- .../grpc/health/grpc_health_v1/health.pb.go | 7 +- .../health/grpc_health_v1/health_grpc.pb.go | 17 + vendor/google.golang.org/grpc/interceptor.go | 9 +- .../balancer/gracefulswitch/gracefulswitch.go | 384 + .../grpc/internal/binarylog/binarylog.go | 107 +- .../grpc/internal/binarylog/env_config.go | 26 +- .../grpc/internal/binarylog/method_logger.go | 31 +- .../grpc/internal/channelz/funcs.go | 228 +- .../grpc/internal/channelz/id.go | 75 + .../grpc/internal/channelz/logging.go | 91 +- .../grpc/internal/channelz/types.go | 39 +- .../grpc/internal/envconfig/envconfig.go | 8 +- .../grpc/internal/envconfig/observability.go | 36 + .../grpc/internal/envconfig/xds.go | 14 +- .../grpc/internal/googlecloud/googlecloud.go | 76 +- .../grpc/internal/googlecloud/manufacturer.go | 26 + .../googlecloud/manufacturer_linux.go | 27 + .../googlecloud/manufacturer_windows.go | 50 + .../grpc/internal/grpclog/grpclog.go | 2 +- .../grpc/internal/grpcrand/grpcrand.go | 7 + .../grpc/internal/grpcsync/oncefunc.go | 32 + .../grpc/internal/grpcutil/compressor.go | 47 + .../grpc/internal/grpcutil/method.go | 6 +- .../grpc/internal/internal.go | 77 +- .../grpc/internal/metadata/metadata.go | 46 + .../grpc/internal/pretty/pretty.go | 82 + .../internal/resolver/dns/dns_resolver.go | 4 +- .../resolver/passthrough/passthrough.go | 9 +- .../grpc/internal/resolver/unix/unix.go | 9 +- .../internal/serviceconfig/serviceconfig.go | 8 +- .../grpc/internal/status/status.go | 10 + .../grpc/internal/transport/controlbuf.go | 56 +- .../grpc/internal/transport/handler_server.go | 75 +- .../grpc/internal/transport/http2_client.go | 340 +- .../grpc/internal/transport/http2_server.go | 247 +- .../grpc/internal/transport/http_util.go | 34 +- .../grpc/internal/transport/transport.go | 33 +- .../grpc/metadata/metadata.go | 69 +- .../google.golang.org/grpc/picker_wrapper.go | 15 +- vendor/google.golang.org/grpc/pickfirst.go | 126 +- vendor/google.golang.org/grpc/preloader.go | 2 +- vendor/google.golang.org/grpc/regenerate.sh | 26 +- 
vendor/google.golang.org/grpc/resolver/map.go | 55 +- .../grpc/resolver/resolver.go | 22 +- .../grpc/resolver_conn_wrapper.go | 23 +- vendor/google.golang.org/grpc/rpc_util.go | 39 +- vendor/google.golang.org/grpc/server.go | 402 +- .../google.golang.org/grpc/service_config.go | 12 +- .../grpc/serviceconfig/serviceconfig.go | 2 +- .../google.golang.org/grpc/status/status.go | 12 +- vendor/google.golang.org/grpc/stream.go | 444 +- vendor/google.golang.org/grpc/tap/tap.go | 2 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 12 +- vendor/google.golang.org/protobuf/AUTHORS | 3 - .../google.golang.org/protobuf/CONTRIBUTORS | 3 - .../protobuf/encoding/protojson/decode.go | 174 +- .../protobuf/encoding/protojson/encode.go | 51 +- .../encoding/protojson/well_known_types.go | 76 +- .../protobuf/encoding/prototext/decode.go | 116 +- .../protobuf/encoding/prototext/encode.go | 39 +- .../protobuf/encoding/protowire/wire.go | 23 +- .../protobuf/internal/descfmt/stringer.go | 66 +- .../internal/encoding/defval/default.go | 78 +- .../encoding/messageset/messageset.go | 7 +- .../protobuf/internal/encoding/tag/tag.go | 96 +- .../protobuf/internal/encoding/text/decode.go | 32 +- .../internal/encoding/text/decode_number.go | 6 +- .../protobuf/internal/encoding/text/doc.go | 4 +- .../protobuf/internal/errors/is_go112.go | 1 + .../protobuf/internal/errors/is_go113.go | 1 + .../protobuf/internal/filedesc/build.go | 19 +- .../protobuf/internal/filedesc/desc.go | 380 +- .../protobuf/internal/filedesc/desc_init.go | 36 +- .../protobuf/internal/filedesc/desc_lazy.go | 80 +- .../protobuf/internal/filedesc/desc_list.go | 167 +- .../protobuf/internal/filedesc/placeholder.go | 136 +- .../protobuf/internal/filetype/build.go | 87 +- .../internal/flags/proto_legacy_disable.go | 1 + .../internal/flags/proto_legacy_enable.go | 1 + .../protobuf/internal/impl/api_export.go | 42 +- .../protobuf/internal/impl/checkinit.go | 12 +- .../protobuf/internal/impl/codec_extension.go | 36 +- .../protobuf/internal/impl/codec_field.go | 90 +- .../protobuf/internal/impl/codec_map.go | 20 +- .../protobuf/internal/impl/codec_map_go111.go | 1 + .../protobuf/internal/impl/codec_map_go112.go | 1 + .../protobuf/internal/impl/codec_message.go | 30 +- .../protobuf/internal/impl/codec_reflect.go | 1 + .../protobuf/internal/impl/codec_tables.go | 290 +- .../protobuf/internal/impl/codec_unsafe.go | 1 + .../protobuf/internal/impl/convert.go | 228 +- .../protobuf/internal/impl/convert_list.go | 42 +- .../protobuf/internal/impl/convert_map.go | 32 +- .../protobuf/internal/impl/decode.go | 29 +- .../protobuf/internal/impl/enum.go | 10 +- .../protobuf/internal/impl/extension.go | 26 +- .../protobuf/internal/impl/legacy_enum.go | 57 +- .../protobuf/internal/impl/legacy_export.go | 18 +- .../internal/impl/legacy_extension.go | 100 +- .../protobuf/internal/impl/legacy_message.go | 122 +- .../protobuf/internal/impl/merge.go | 32 +- .../protobuf/internal/impl/message.go | 41 +- .../protobuf/internal/impl/message_reflect.go | 74 +- .../internal/impl/message_reflect_field.go | 118 +- .../protobuf/internal/impl/pointer_reflect.go | 1 + .../protobuf/internal/impl/pointer_unsafe.go | 1 + .../protobuf/internal/impl/validate.go | 50 +- .../protobuf/internal/impl/weak.go | 16 +- .../protobuf/internal/order/order.go | 16 +- .../protobuf/internal/order/range.go | 22 +- .../protobuf/internal/strs/strings_pure.go | 1 + .../protobuf/internal/strs/strings_unsafe.go | 7 +- .../protobuf/internal/version/version.go | 54 +- 
.../protobuf/proto/decode.go | 20 +- .../google.golang.org/protobuf/proto/doc.go | 21 +- .../protobuf/proto/encode.go | 5 +- .../google.golang.org/protobuf/proto/equal.go | 50 +- .../protobuf/proto/proto_methods.go | 1 + .../protobuf/proto/proto_reflect.go | 1 + .../reflect/protodesc/desc_resolve.go | 6 +- .../protobuf/reflect/protoreflect/methods.go | 1 + .../protobuf/reflect/protoreflect/proto.go | 32 +- .../protobuf/reflect/protoreflect/source.go | 1 + .../protobuf/reflect/protoreflect/type.go | 1 + .../reflect/protoreflect/value_pure.go | 1 + .../reflect/protoreflect/value_union.go | 27 + .../reflect/protoreflect/value_unsafe.go | 1 + .../reflect/protoregistry/registry.go | 2 + .../protobuf/runtime/protoiface/methods.go | 1 + .../protobuf/runtime/protoimpl/version.go | 8 +- .../types/known/fieldmaskpb/field_mask.pb.go | 2 +- vendor/gopkg.in/ini.v1/.editorconfig | 12 + vendor/gopkg.in/ini.v1/.gitignore | 1 + vendor/gopkg.in/ini.v1/.golangci.yml | 27 + vendor/gopkg.in/ini.v1/README.md | 4 +- vendor/gopkg.in/ini.v1/codecov.yml | 9 +- vendor/gopkg.in/ini.v1/deprecated.go | 5 +- vendor/gopkg.in/ini.v1/error.go | 15 + vendor/gopkg.in/ini.v1/file.go | 62 +- vendor/gopkg.in/ini.v1/ini.go | 22 +- vendor/gopkg.in/ini.v1/key.go | 36 +- vendor/gopkg.in/ini.v1/parser.go | 51 +- vendor/gopkg.in/ini.v1/section.go | 12 +- vendor/gopkg.in/ini.v1/struct.go | 62 +- vendor/gopkg.in/yaml.v3/decode.go | 78 +- vendor/gopkg.in/yaml.v3/parserc.go | 11 +- vendor/k8s.io/klog/v2/OWNERS | 16 +- vendor/k8s.io/klog/v2/README.md | 17 +- vendor/k8s.io/klog/v2/contextual.go | 185 + vendor/k8s.io/klog/v2/exit.go | 69 + vendor/k8s.io/klog/v2/imports.go | 38 + .../k8s.io/klog/v2/internal/buffer/buffer.go | 159 + .../k8s.io/klog/v2/internal/clock/README.md | 7 + vendor/k8s.io/klog/v2/internal/clock/clock.go | 178 + vendor/k8s.io/klog/v2/internal/dbg/dbg.go | 42 + .../klog/v2/internal/serialize/keyvalues.go | 253 + .../klog/v2/internal/severity/severity.go | 58 + vendor/k8s.io/klog/v2/k8s_references.go | 158 + vendor/k8s.io/klog/v2/klog.go | 1029 +- vendor/k8s.io/klog/v2/klogr.go | 87 + vendor/k8s.io/utils/trace/trace.go | 2 +- vendor/modules.txt | 348 +- .../v4/schema/elements.go | 131 +- .../structured-merge-diff/v4/schema/equals.go | 3 + .../v4/schema/schemaschema.go | 3 + .../structured-merge-diff/v4/typed/helpers.go | 6 +- .../structured-merge-diff/v4/typed/merge.go | 187 +- .../v4/typed/reconcile_schema.go | 73 +- .../structured-merge-diff/v4/typed/remove.go | 37 +- .../structured-merge-diff/v4/typed/typed.go | 13 +- 1506 files changed, 212415 insertions(+), 122131 deletions(-) create mode 100644 vendor/cloud.google.com/go/.release-please-manifest-individual.json create mode 100644 vendor/cloud.google.com/go/.release-please-manifest-submodules.json create mode 100644 vendor/cloud.google.com/go/.release-please-manifest.json create mode 100644 vendor/cloud.google.com/go/compute/internal/version.go create mode 100644 vendor/cloud.google.com/go/compute/metadata/CHANGES.md rename vendor/{go.opentelemetry.io/otel/internal/metric => cloud.google.com/go/compute/metadata}/LICENSE (99%) create mode 100644 vendor/cloud.google.com/go/compute/metadata/README.md rename vendor/cloud.google.com/go/{pubsub/go_mod_tidy_hack.go => compute/metadata/tidyfix.go} (80%) rename vendor/{google.golang.org/genproto/googleapis/iam/v1 => cloud.google.com/go/iam/apiv1/iampb}/iam_policy.pb.go (85%) rename vendor/{google.golang.org/genproto/googleapis/iam/v1 => cloud.google.com/go/iam/apiv1/iampb}/options.pb.go (70%) create mode 100644 
vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go create mode 100644 vendor/cloud.google.com/go/internal/pubsub/message.go create mode 100644 vendor/cloud.google.com/go/internal/pubsub/publish.go create mode 100644 vendor/cloud.google.com/go/longrunning/CHANGES.md rename vendor/{github.com/containerd/containerd => cloud.google.com/go/longrunning}/LICENSE (93%) create mode 100644 vendor/cloud.google.com/go/longrunning/README.md rename vendor/{google.golang.org/genproto/googleapis/longrunning => cloud.google.com/go/longrunning/autogen/longrunningpb}/operations.pb.go (99%) rename vendor/cloud.google.com/go/{iam/go_mod_tidy_hack.go => longrunning/tidyfix.go} (88%) create mode 100644 vendor/cloud.google.com/go/migration.md create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/gapic_metadata.json create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/info.go rename vendor/{google.golang.org/genproto/googleapis/pubsub/v1 => cloud.google.com/go/pubsub/apiv1/pubsubpb}/pubsub.pb.go (89%) rename vendor/{google.golang.org/genproto/googleapis/pubsub/v1 => cloud.google.com/go/pubsub/apiv1/pubsubpb}/schema.pb.go (99%) create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/version.go create mode 100644 vendor/cloud.google.com/go/pubsub/internal/scheduler/publish_scheduler.go create mode 100644 vendor/cloud.google.com/go/pubsub/internal/scheduler/receive_scheduler.go create mode 100644 vendor/cloud.google.com/go/pubsub/internal/version.go create mode 100644 vendor/cloud.google.com/go/pubsub/schema.go create mode 100644 vendor/cloud.google.com/go/release-please-config-individual.json create mode 100644 vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json create mode 100644 vendor/cloud.google.com/go/release-please-config.json create mode 100644 vendor/cloud.google.com/go/storage/client.go create mode 100644 vendor/cloud.google.com/go/storage/emulator_test.sh delete mode 100644 vendor/cloud.google.com/go/storage/go110.go create mode 100644 vendor/cloud.google.com/go/storage/grpc_client.go create mode 100644 vendor/cloud.google.com/go/storage/http_client.go create mode 100644 vendor/cloud.google.com/go/storage/internal/apiv2/doc.go create mode 100644 vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json create mode 100644 vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go create mode 100644 vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go create mode 100644 vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go rename vendor/cloud.google.com/go/storage/{go_mod_tidy_hack.go => internal/apiv2/version.go} (62%) create mode 100644 vendor/cloud.google.com/go/storage/internal/version.go delete mode 100644 vendor/cloud.google.com/go/storage/not_go110.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go delete mode 100644 vendor/github.com/PuerkitoBio/purell/.gitignore delete mode 100644 vendor/github.com/PuerkitoBio/purell/.travis.yml delete mode 100644 vendor/github.com/PuerkitoBio/purell/LICENSE delete mode 100644 vendor/github.com/PuerkitoBio/purell/README.md delete mode 100644 vendor/github.com/PuerkitoBio/purell/purell.go delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/.travis.yml delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/LICENSE delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/README.md delete mode 
100644 vendor/github.com/PuerkitoBio/urlesc/urlesc.go create mode 100644 vendor/github.com/Shopify/sarama/Dockerfile.kafka create mode 100644 vendor/github.com/Shopify/sarama/entrypoint.sh create mode 100644 vendor/github.com/Shopify/sarama/transaction_manager.go create mode 100644 vendor/github.com/cespare/xxhash/v2/testall.sh create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s rename vendor/github.com/cespare/xxhash/v2/{xxhash_amd64.go => xxhash_asm.go} (73%) delete mode 100644 vendor/github.com/containerd/containerd/NOTICE delete mode 100644 vendor/github.com/containerd/containerd/errdefs/errors.go delete mode 100644 vendor/github.com/containerd/containerd/errdefs/grpc.go delete mode 100644 vendor/github.com/containerd/containerd/log/context.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/compare.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/cpuinfo.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/database.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/defaults.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_unix.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_windows.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/platforms.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/errors.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/handler.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/register.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go create mode 100644 vendor/github.com/go-kit/log/staticcheck.conf create mode 100644 vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go create mode 100644 vendor/github.com/go-openapi/swag/.gitattributes delete mode 100644 vendor/github.com/go-openapi/swag/.travis.yml create mode 100644 vendor/github.com/go-openapi/swag/file.go delete mode 100644 vendor/github.com/go-stack/stack/.travis.yml delete mode 100644 vendor/github.com/go-stack/stack/LICENSE.md delete mode 100644 vendor/github.com/go-stack/stack/README.md delete mode 100644 vendor/github.com/go-stack/stack/stack.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/SECURITY.md create mode 100644 vendor/github.com/golang/protobuf/jsonpb/decode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/encode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/json.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go create mode 100644 vendor/github.com/google/gofuzz/bytesource/bytesource.go create mode 100644 vendor/github.com/google/uuid/null.go create mode 100644 vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE create mode 100644 vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go create mode 100644 vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json create mode 100644 vendor/github.com/googleapis/gax-go/v2/CHANGES.md create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto create mode 100644 vendor/github.com/googleapis/gax-go/v2/content_type.go create mode 100644 
vendor/github.com/googleapis/gax-go/v2/internal/version.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/release-please-config.json delete mode 100644 vendor/github.com/gophercloud/gophercloud/.zuul.yaml delete mode 100644 vendor/github.com/gorilla/websocket/client_clone.go delete mode 100644 vendor/github.com/gorilla/websocket/client_clone_legacy.go delete mode 100644 vendor/github.com/gorilla/websocket/conn_write.go delete mode 100644 vendor/github.com/gorilla/websocket/conn_write_legacy.go create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake.go create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake_116.go delete mode 100644 vendor/github.com/gorilla/websocket/trace.go delete mode 100644 vendor/github.com/gorilla/websocket/trace_17.go create mode 100644 vendor/github.com/hashicorp/consul/api/peering.go delete mode 100644 vendor/github.com/hashicorp/go-multierror/.travis.yml create mode 100644 vendor/github.com/hashicorp/memberlist/.golangci.yml create mode 100644 vendor/github.com/huandu/xstrings/stringbuilder.go create mode 100644 vendor/github.com/huandu/xstrings/stringbuilder_go110.go create mode 100644 vendor/github.com/influxdata/telegraf/.golangci.yml create mode 100644 vendor/github.com/influxdata/telegraf/.markdownlint.yml create mode 100644 vendor/github.com/influxdata/telegraf/.markdownlintignore create mode 100644 vendor/github.com/influxdata/telegraf/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/influxdata/telegraf/SECURITY.md delete mode 100644 vendor/github.com/influxdata/telegraf/appveyor.yml delete mode 100644 vendor/github.com/influxdata/telegraf/docker-compose.yml create mode 100644 vendor/github.com/influxdata/telegraf/info.plist create mode 100644 vendor/github.com/influxdata/telegraf/parser.go delete mode 100644 vendor/github.com/influxdata/telegraf/plugins/inputs/EXAMPLE_README.md create mode 100644 vendor/github.com/influxdata/telegraf/plugins/inputs/deprecations.go delete mode 100644 vendor/github.com/influxdata/telegraf/plugins/inputs/mock_Plugin.go create mode 100644 vendor/github.com/influxdata/telegraf/secretstore.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go delete mode 100644 vendor/github.com/mattn/go-colorable/.travis.yml create mode 100644 vendor/github.com/miekg/dns/hash.go delete mode 100644 vendor/github.com/mitchellh/copystructure/.travis.yml create mode 100644 vendor/github.com/opentracing/opentracing-go/.golangci.yml delete mode 100644 vendor/github.com/opentracing/opentracing-go/.travis.yml 
delete mode 100644 vendor/github.com/pierrec/lz4/.gitignore delete mode 100644 vendor/github.com/pierrec/lz4/.travis.yml delete mode 100644 vendor/github.com/pierrec/lz4/LICENSE delete mode 100644 vendor/github.com/pierrec/lz4/README.md delete mode 100644 vendor/github.com/pierrec/lz4/block.go delete mode 100644 vendor/github.com/pierrec/lz4/debug.go delete mode 100644 vendor/github.com/pierrec/lz4/debug_stub.go delete mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.go delete mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.s delete mode 100644 vendor/github.com/pierrec/lz4/decode_other.go delete mode 100644 vendor/github.com/pierrec/lz4/errors.go delete mode 100644 vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go delete mode 100644 vendor/github.com/pierrec/lz4/lz4.go delete mode 100644 vendor/github.com/pierrec/lz4/lz4_go1.10.go delete mode 100644 vendor/github.com/pierrec/lz4/lz4_notgo1.10.go delete mode 100644 vendor/github.com/pierrec/lz4/reader.go delete mode 100644 vendor/github.com/pierrec/lz4/reader_legacy.go delete mode 100644 vendor/github.com/pierrec/lz4/writer.go delete mode 100644 vendor/github.com/pierrec/lz4/writer_legacy.go rename vendor/github.com/prometheus/client_golang/prometheus/collectors/{go_collector.go => go_collector_go116.go} (64%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go rename vendor/github.com/prometheus/client_golang/prometheus/{collectors/dbstats_collector_pre_go115.go => get_pid.go} (61%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go rename vendor/github.com/prometheus/client_golang/prometheus/{go_collector_go117.go => go_collector_latest.go} (53%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go rename vendor/github.com/prometheus/client_golang/prometheus/{collectors/dbstats_collector_go115.go => internal/go_collector_options.go} (51%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/num_threads.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go create mode 100644 vendor/github.com/prometheus/common/version/info_default.go create mode 100644 vendor/github.com/prometheus/common/version/info_go118.go delete mode 100644 vendor/github.com/prometheus/procfs/fixtures.ttar rename vendor/github.com/prometheus/procfs/{xfrm.go => net_xfrm.go} (96%) create mode 100644 vendor/github.com/prometheus/procfs/proc_cgroups.go create mode 100644 vendor/github.com/prometheus/procfs/proc_netstat.go create mode 100644 vendor/github.com/prometheus/procfs/proc_snmp.go create mode 100644 vendor/github.com/prometheus/procfs/proc_snmp6.go create mode 100644 vendor/github.com/prometheus/procfs/proc_sys.go create mode 100644 vendor/github.com/prometheus/procfs/softirqs.go delete mode 100644 vendor/github.com/stretchr/objx/.travis.yml create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/.lycheeignore delete mode 100644 
vendor/go.opentelemetry.io/otel/.markdown-link.json create mode 100644 vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/global/global.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/global/metric.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go rename vendor/go.opentelemetry.io/otel/{internal/metric/global/metric.go => metric/internal/global/state.go} (54%) create mode 100644 vendor/go.opentelemetry.io/otel/metric/meter.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/metric.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/metric_instrument.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/number/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/number/kind_string.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/number/number.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/http.go create mode 100644 vendor/go.uber.org/atomic/float32.go create mode 100644 vendor/go.uber.org/atomic/float32_ext.go create mode 100644 vendor/go.uber.org/atomic/pointer_go118.go create mode 100644 vendor/go.uber.org/atomic/pointer_go119.go create mode 100644 vendor/go.uber.org/zap/array_go118.go rename vendor/go.uber.org/zap/{global_go112.go => internal/level_enabler.go} (70%) rename vendor/go.uber.org/zap/{global_prego112.go => zapcore/reflected_encoder.go} (64%) delete mode 100644 vendor/golang.org/x/crypto/AUTHORS delete mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/net/AUTHORS delete mode 100644 vendor/golang.org/x/net/CONTRIBUTORS create mode 100644 vendor/golang.org/x/net/http2/go118.go create mode 100644 vendor/golang.org/x/net/http2/hpack/static_table.go create mode 100644 vendor/golang.org/x/net/http2/not_go118.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go rename vendor/golang.org/x/net/internal/socket/{zsys_darwin_386.go => zsys_freebsd_riscv64.go} (79%) create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_riscv64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_riscv64.go create mode 100644 
vendor/golang.org/x/net/publicsuffix/data/children create mode 100644 vendor/golang.org/x/net/publicsuffix/data/nodes create mode 100644 vendor/golang.org/x/net/publicsuffix/data/text delete mode 100644 vendor/golang.org/x/oauth2/AUTHORS delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS create mode 100644 vendor/golang.org/x/oauth2/google/error.go create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go delete mode 100644 vendor/golang.org/x/sync/AUTHORS delete mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/sys/AUTHORS delete mode 100644 vendor/golang.org/x/sys/CONTRIBUTORS create mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/parse.go create mode 100644 vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs_go118.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs_go119.go create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_loong64.s delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/sys/unix/str.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s create mode 100644 
vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go delete mode 100644 vendor/golang.org/x/term/AUTHORS delete mode 100644 vendor/golang.org/x/term/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/text/AUTHORS delete mode 100644 vendor/golang.org/x/text/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/text/width/kind_string.go delete mode 100644 vendor/golang.org/x/text/width/tables10.0.0.go delete mode 100644 vendor/golang.org/x/text/width/tables11.0.0.go delete mode 100644 vendor/golang.org/x/text/width/tables12.0.0.go delete mode 100644 vendor/golang.org/x/text/width/tables13.0.0.go delete mode 100644 vendor/golang.org/x/text/width/tables9.0.0.go delete mode 100644 vendor/golang.org/x/text/width/transform.go delete mode 100644 vendor/golang.org/x/text/width/trieval.go delete mode 100644 vendor/golang.org/x/text/width/width.go delete mode 100644 vendor/golang.org/x/time/AUTHORS delete mode 100644 vendor/golang.org/x/time/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/tools/AUTHORS delete mode 100644 vendor/golang.org/x/tools/CONTRIBUTORS create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/codes.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/doc.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/flags.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/frames_go1.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/frames_go17.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/support.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/sync.go create mode 100644 vendor/golang.org/x/tools/go/internal/pkgbits/syncmarker_string.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/coretype.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/types_118.go create mode 100644 
vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json create mode 100644 vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go create mode 100644 vendor/google.golang.org/api/impersonate/doc.go create mode 100644 vendor/google.golang.org/api/impersonate/idtoken.go create mode 100644 vendor/google.golang.org/api/impersonate/impersonate.go create mode 100644 vendor/google.golang.org/api/impersonate/user.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/error.go delete mode 100644 vendor/google.golang.org/api/option/credentials_go19.go delete mode 100644 vendor/google.golang.org/api/option/credentials_notgo19.go create mode 100644 vendor/google.golang.org/api/transport/cert/enterprise_cert.go create mode 100644 vendor/google.golang.org/api/transport/cert/secureconnect_cert.go create mode 100644 vendor/google.golang.org/api/transport/dial.go create mode 100644 vendor/google.golang.org/api/transport/doc.go delete mode 100644 vendor/google.golang.org/api/transport/http/default_transport_go113.go delete mode 100644 vendor/google.golang.org/api/transport/http/default_transport_not_go113.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/v2/request_stats.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/v2/response_params.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/longrunning/alias.go create mode 100644 vendor/google.golang.org/genproto/googleapis/pubsub/v1/alias.go create mode 100644 vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go create mode 100644 vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go create mode 100644 vendor/google.golang.org/grpc/channelz/channelz.go create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/id.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/observability.go create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/compressor.go create mode 100644 vendor/google.golang.org/grpc/internal/pretty/pretty.go delete mode 100644 vendor/google.golang.org/protobuf/AUTHORS delete mode 100644 vendor/google.golang.org/protobuf/CONTRIBUTORS create mode 100644 vendor/gopkg.in/ini.v1/.editorconfig create mode 100644 vendor/gopkg.in/ini.v1/.golangci.yml create mode 100644 vendor/k8s.io/klog/v2/contextual.go create mode 100644 vendor/k8s.io/klog/v2/exit.go create mode 100644 vendor/k8s.io/klog/v2/imports.go create mode 100644 vendor/k8s.io/klog/v2/internal/buffer/buffer.go create mode 100644 vendor/k8s.io/klog/v2/internal/clock/README.md create mode 100644 vendor/k8s.io/klog/v2/internal/clock/clock.go create mode 100644 vendor/k8s.io/klog/v2/internal/dbg/dbg.go create mode 100644 vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go create mode 100644 
vendor/k8s.io/klog/v2/internal/severity/severity.go create mode 100644 vendor/k8s.io/klog/v2/k8s_references.go create mode 100644 vendor/k8s.io/klog/v2/klogr.go diff --git a/go.mod b/go.mod index ec04c64b10ca..6cb1600fa27b 100644 --- a/go.mod +++ b/go.mod @@ -4,14 +4,14 @@ go 1.17 require ( cloud.google.com/go/bigtable v1.3.0 - cloud.google.com/go/pubsub v1.3.1 - cloud.google.com/go/storage v1.10.0 + cloud.google.com/go/pubsub v1.27.1 + cloud.google.com/go/storage v1.28.1 github.com/Azure/azure-pipeline-go v0.2.3 - github.com/Azure/azure-storage-blob-go v0.13.0 - github.com/Azure/go-autorest/autorest/adal v0.9.18 + github.com/Azure/azure-storage-blob-go v0.15.0 + github.com/Azure/go-autorest/autorest/adal v0.9.21 github.com/Masterminds/sprig/v3 v3.2.2 github.com/NYTimes/gziphandler v1.1.1 - github.com/Shopify/sarama v1.30.0 + github.com/Shopify/sarama v1.37.2 github.com/Workiva/go-datastructures v1.0.53 github.com/alicebob/miniredis/v2 v2.14.3 github.com/aws/aws-sdk-go v1.43.10 @@ -20,12 +20,12 @@ require ( github.com/buger/jsonparser v1.1.1 github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee github.com/cespare/xxhash v1.1.0 - github.com/cespare/xxhash/v2 v2.1.2 + github.com/cespare/xxhash/v2 v2.2.0 github.com/cloudflare/cloudflare-go v0.27.0 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf github.com/cristalhq/hedgedhttp v0.7.0 github.com/davecgh/go-spew v1.1.1 - github.com/docker/docker v20.10.12+incompatible + github.com/docker/docker v20.10.17+incompatible github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8 github.com/drone/envsubst v1.0.2 github.com/dustin/go-humanize v1.0.0 @@ -34,37 +34,37 @@ require ( github.com/felixge/fgprof v0.9.1 github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c github.com/fsouza/fake-gcs-server v1.7.0 - github.com/go-kit/log v0.2.0 + github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.5.1 - github.com/go-redis/redis/v8 v8.11.4 + github.com/go-redis/redis/v8 v8.11.5 github.com/gocql/gocql v0.0.0-20200526081602-cd04bd7f22a7 github.com/gogo/protobuf v1.3.2 // remember to update loki-build-image/Dockerfile too github.com/gogo/status v1.1.0 github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 - github.com/google/go-cmp v0.5.7 + github.com/google/go-cmp v0.5.9 github.com/google/renameio/v2 v2.0.0 - github.com/google/uuid v1.2.0 + github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 - github.com/gorilla/websocket v1.4.2 + github.com/gorilla/websocket v1.5.0 github.com/grafana/dskit v0.0.0-20220331160727-49faf69f72ca github.com/grafana/regexp v0.0.0-20220304100321-149c8afcd6cb github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 - github.com/hashicorp/consul/api v1.12.0 + github.com/hashicorp/consul/api v1.18.0 github.com/hashicorp/golang-lru v0.5.4 github.com/hpcloud/tail v1.0.0 github.com/imdario/mergo v0.3.12 github.com/influxdata/go-syslog/v3 v3.0.1-0.20201128200927-a1889d947b48 - github.com/influxdata/telegraf v1.16.3 + github.com/influxdata/telegraf v1.25.2 github.com/jmespath/go-jmespath v0.4.0 github.com/joncrlsn/dque v2.2.1-0.20200515025108-956d14155fa2+incompatible github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.14.1 + github.com/klauspost/compress v1.15.15 github.com/klauspost/pgzip v1.2.5 github.com/mattn/go-ieproxy v0.0.1 github.com/minio/minio-go/v7 v7.0.10 - github.com/mitchellh/mapstructure v1.4.3 + github.com/mitchellh/mapstructure v1.5.0 
github.com/modern-go/reflect2 v1.0.2 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/ncw/swift v1.0.52 @@ -72,55 +72,56 @@ require ( github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e github.com/opentracing-contrib/go-stdlib v1.0.0 - github.com/opentracing/opentracing-go v1.2.0 + github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // github.com/pierrec/lz4 v2.0.5+incompatible - github.com/pierrec/lz4/v4 v4.1.12 + github.com/pierrec/lz4/v4 v4.1.17 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.12.1 - github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.32.1 + github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_model v0.3.0 + github.com/prometheus/common v0.39.0 github.com/prometheus/prometheus v1.8.2-0.20220303173753-edfe657b5405 github.com/segmentio/fasthash v1.0.2 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 github.com/sony/gobreaker v0.4.1 github.com/spf13/afero v1.6.0 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.8.1 github.com/thanos-io/thanos v0.22.0 github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20211015155308-ebe5bdc2c89e - github.com/xdg-go/scram v1.0.2 + github.com/xdg-go/scram v1.1.1 go.etcd.io/bbolt v1.3.6 - go.uber.org/atomic v1.9.0 + go.uber.org/atomic v1.10.0 go.uber.org/goleak v1.1.12 - golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220222172238-00053529121e - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 - google.golang.org/api v0.70.0 - google.golang.org/grpc v1.44.0 + golang.org/x/crypto v0.3.0 + golang.org/x/net v0.5.0 + golang.org/x/sync v0.1.0 + golang.org/x/sys v0.4.0 + golang.org/x/time v0.1.0 + google.golang.org/api v0.104.0 + google.golang.org/grpc v1.52.3 gopkg.in/Graylog2/go-gelf.v2 v2.0.0-20191017102106-1550ee647df0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/yaml.v2 v2.4.0 - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b + gopkg.in/yaml.v3 v3.0.1 inet.af/netaddr v0.0.0-20210707202901-70468d781e6c k8s.io/klog v1.0.0 ) require ( - cloud.google.com/go v0.100.2 // indirect - cloud.google.com/go/compute v1.3.0 // indirect - cloud.google.com/go/iam v0.1.0 // indirect - cloud.google.com/go/kms v1.0.0 // indirect - github.com/Azure/azure-sdk-for-go v62.0.0+incompatible // indirect + cloud.google.com/go v0.107.0 // indirect + cloud.google.com/go/compute v1.14.0 // indirect + cloud.google.com/go/compute/metadata v0.2.2 // indirect + cloud.google.com/go/iam v0.8.0 // indirect + cloud.google.com/go/longrunning v0.3.0 // indirect + github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.24 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect + github.com/Azure/go-autorest/autorest v0.11.28 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 
// indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect @@ -128,156 +129,152 @@ require ( github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.1.1 // indirect - github.com/Microsoft/go-winio v0.4.17 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/Microsoft/go-winio v0.5.2 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect - github.com/armon/go-metrics v0.3.9 // indirect + github.com/armon/go-metrics v0.3.10 // indirect github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/containerd/containerd v1.5.9 // indirect github.com/containerd/fifo v1.0.0 // indirect github.com/coreos/etcd v3.3.25+incompatible // indirect - github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/digitalocean/godo v1.75.0 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-metrics v0.0.1 // indirect - github.com/docker/go-units v0.4.0 // indirect - github.com/eapache/go-resiliency v1.2.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/eapache/go-resiliency v1.3.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-kit/kit v0.12.0 // indirect - github.com/go-logr/logr v1.2.2 // indirect + github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.20.0 // indirect github.com/go-openapi/errors v0.20.0 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/loads v0.20.2 // indirect github.com/go-openapi/runtime v0.19.29 // indirect github.com/go-openapi/spec v0.20.3 // indirect github.com/go-openapi/strfmt v0.21.2 // indirect - github.com/go-openapi/swag v0.19.15 // indirect + github.com/go-openapi/swag v0.22.1 // indirect github.com/go-openapi/validate v0.20.2 // indirect - github.com/go-stack/stack v1.8.0 // indirect github.com/go-zookeeper/zk v1.0.2 // indirect github.com/gofrs/flock v0.7.1 // indirect github.com/gogo/googleapis v1.4.0 // indirect - github.com/golang-jwt/jwt/v4 v4.2.0 // indirect + github.com/golang-jwt/jwt/v4 v4.4.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree 
v1.0.1 // indirect github.com/google/go-querystring v1.0.0 // indirect - github.com/google/gofuzz v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20220218203455-0368bd9e19a7 // indirect - github.com/googleapis/gax-go/v2 v2.1.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect + github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/googleapis/gnostic v0.4.1 // indirect - github.com/gophercloud/gophercloud v0.24.0 // indirect + github.com/gophercloud/gophercloud v1.0.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v0.16.2 // indirect + github.com/hashicorp/go-hclog v1.2.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect - github.com/hashicorp/go-multierror v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect - github.com/hashicorp/go-uuid v1.0.2 // indirect - github.com/hashicorp/memberlist v0.3.0 // indirect - github.com/hashicorp/serf v0.9.6 // indirect - github.com/huandu/xstrings v1.3.1 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/memberlist v0.5.0 // indirect + github.com/hashicorp/serf v0.10.1 // indirect + github.com/huandu/xstrings v1.3.2 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect - github.com/jcmturner/gofork v1.0.0 // indirect - github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/klauspost/cpuid v1.3.1 // indirect github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect - github.com/mailru/easyjson v0.7.6 // indirect - github.com/mattn/go-colorable v0.1.9 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/miekg/dns v1.1.46 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/miekg/dns v1.1.50 // indirect github.com/minio/md5-simd v1.1.0 // indirect github.com/minio/sha256-simd v0.1.1 // indirect - github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/reflectwalk v1.0.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - 
github.com/opencontainers/image-spec v1.0.2 // indirect - github.com/pierrec/lz4 v2.6.1+incompatible // indirect + github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/alertmanager v0.23.1-0.20210914172521-e35efbddb66a // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 // indirect - github.com/prometheus/procfs v0.7.3 // indirect + github.com/prometheus/procfs v0.8.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/rs/xid v1.2.1 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver v2.4.0+incompatible // indirect github.com/shopspring/decimal v1.2.0 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/cast v1.3.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/ugorji/go/codec v1.1.7 // indirect github.com/weaveworks/promrus v1.2.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect - github.com/xdg-go/stringprep v1.0.2 // indirect + github.com/xdg-go/stringprep v1.0.3 // indirect github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da // indirect go.etcd.io/etcd v3.3.25+incompatible // indirect - go.etcd.io/etcd/api/v3 v3.5.0 // indirect + go.etcd.io/etcd/api/v3 v3.5.4 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect go.etcd.io/etcd/client/v3 v3.5.0 // indirect - go.mongodb.org/mongo-driver v1.7.5 // indirect - go.opencensus.io v0.23.0 // indirect + go.mongodb.org/mongo-driver v1.11.1 // indirect + go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 // indirect - go.opentelemetry.io/otel v1.4.1 // indirect - go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect - go.opentelemetry.io/otel/metric v0.27.0 // indirect - go.opentelemetry.io/otel/trace v1.4.1 // indirect - go.uber.org/multierr v1.7.0 // indirect - go.uber.org/zap v1.19.1 // indirect + go.opentelemetry.io/otel v1.12.0 // indirect + go.opentelemetry.io/otel/metric v0.35.0 // indirect + go.opentelemetry.io/otel/trace v1.12.0 // indirect + go.uber.org/multierr v1.9.0 // indirect + go.uber.org/zap v1.24.0 // indirect go4.org/intern v0.0.0-20210108033219-3eb7198706b2 // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222180813-1025295fd063 // indirect - golang.org/x/mod v0.5.1 // indirect - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.9 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect + golang.org/x/oauth2 v0.3.0 // indirect + golang.org/x/term v0.4.0 // indirect + golang.org/x/text v0.6.0 // indirect + golang.org/x/tools v0.1.12 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220222154240-daf995802d7b // indirect - google.golang.org/protobuf v1.27.1 // indirect + 
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.57.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - k8s.io/api v0.22.7 // indirect - k8s.io/apimachinery v0.22.7 // indirect + k8s.io/api v0.25.3 // indirect + k8s.io/apimachinery v0.25.3 // indirect k8s.io/client-go v12.0.0+incompatible // indirect - k8s.io/klog/v2 v2.40.1 // indirect - k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect + k8s.io/klog/v2 v2.70.1 // indirect + k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect rsc.io/binaryregexp v0.2.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.1.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.2.0 // indirect ) diff --git a/go.sum b/go.sum index 0224659d21b1..68902aa109aa 100644 --- a/go.sum +++ b/go.sum @@ -1,11 +1,18 @@ +4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= +bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= +bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -19,6 +26,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -28,61 +36,436 @@ cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWc cloud.google.com/go v0.90.0/go.mod 
h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go v0.92.2/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.0/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod 
h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.45.0/go.mod h1:frTreZmdFlTornn7K+IsIBrvCqQP0XccOvUjEker3AM= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/bigtable v1.3.0 h1:PAplkJLXheOLlK5PPyy4/HXtPzHn+1/LaYDWIeGxnio= cloud.google.com/go/bigtable v1.3.0/go.mod h1:z5EyKrPE8OQmeg4h5MNdKvuSnI9CCT49Ki3f23aBzio= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization 
v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0 h1:mPL/MzDDYHsh5tHRS9mhmhWlcgClCrCa6ApQCU6wnHI= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k= +cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= 
+cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.1.0 h1:W2vbGCrE3Z7J/x3WXLxxGl9LMSB2uhsAA7Ss/6u/qRY= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai 
v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= -cloud.google.com/go/kms v1.0.0 h1:YkIeqPXqTAlwXk3Z2/WG0d6h1tqJQjU354WftjEoP9E= -cloud.google.com/go/kms v1.0.0/go.mod 
h1:nhUehi+w7zht2XrUfvTRNpxrfayBHqP4lu2NSywui/0= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0 h1:OWRZzrPmOZUzurjI2FBGtgY2mB1WaJkqhw6oIwSj0Yg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod 
h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod 
h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1 h1:q+J/Nfr6Qx4RQeu3rJcnN48SNC0qzlYzSeqkPq93VHs= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod 
h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.17.0/go.mod h1:+17t2ixFwRG4lWRwE+5kipDR9Ef07Jkmc8z0IbMDKUs= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.16.1/go.mod h1:LaNorbty3ehnU3rEjXSNV/NRgQA0O8Y+uh6bPe5UOk4= 
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= cloud.google.com/go/trace v0.1.0/go.mod h1:wxEwsoeRVPbeSkt7ZC9nWCgmoKQRAoySN7XHW2AmI7g= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= 
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc= +code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= +code.gitea.io/sdk/gitea v0.11.3/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +collectd.org v0.5.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= +contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= +contrib.go.opencensus.io/exporter/prometheus v0.4.1/go.mod h1:t9wvfitlUjGXG2IXAZsuFq26mDGid/JwCEXp+gTG/9U= +contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ= +contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw= +contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= +contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= +contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= -github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4clbrULrQ3q7+icmqHyyLc= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= +github.com/Azure/azure-amqp-common-go/v3 v3.2.3/go.mod h1:7rPmbSfszeovxGfc5fSAXE4ehlXQZHpMja2OtxC2Tas= +github.com/Azure/azure-event-hubs-go/v3 v3.3.20/go.mod h1:5GkwDWncbqGCPjf76khiylOAD2NjkrUrLFb/S99BiA8= +github.com/Azure/azure-kusto-go v0.8.0/go.mod 
h1:i7WCtgt4XeHge3+Oi5sq84HYhneNi7VY7hr35wsUdrg= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= -github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v36.2.0+incompatible h1:09cv2WoH0g6jl6m2iT+R9qcIPZKhXEL0sbmLhxP895s= github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= -github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.0/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.13.0/go.mod h1:TmXReXZ9yPp5D5TBRMTAtyz+UyOl15Py4hL5E5p6igQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.13.2/go.mod h1:+nVKciyKD2J9TyVcEQ82Bo9b+3F92PiQfHrIE/zqLqM= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.1/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.4.1/go.mod h1:s5NeyuuFrHc/83W6N/vmD4Zmc42WDPty3Db2+37cF/w= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v0.3.1/go.mod h1:PoA99xQAlN13MyUjzgIPWMZEr0etHpYSuT25LmhR3zQ= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= +github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0= +github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= +github.com/Azure/go-amqp v0.17.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v10.7.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v10.15.3+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v12.0.0+incompatible/go.mod 
h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= @@ -91,9 +474,11 @@ github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUd github.com/Azure/go-autorest/autorest v0.11.9/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs= -github.com/Azure/go-autorest/autorest v0.11.24 h1:1fIGgHKqVm54KIPT+q8Zmd1QlVsmHqeUGso5qm2BqqE= github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= +github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= @@ -104,15 +489,18 @@ github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJ github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= +github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/auth v0.5.3/go.mod h1:4bJZhUhcq8LB20TruwHbAQsmUs2Xh+QR7utuJpLXX3A= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg= github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 
h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= @@ -120,8 +508,9 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= +github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= @@ -135,43 +524,70 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/Azure/go-ntlmssp v0.0.0-20220621081337-cb9428e4ac1e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= +github.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= +github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod 
h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60= +github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= +github.com/GeertJohan/go.rice v1.0.2/go.mod h1:af5vUNlDNkCjOZeSGFgIJxDje9qdjsO6hshx0gTmZt4= +github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo= +github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= +github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM= github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/Jeffail/gabs v1.1.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e698eff68e h1:HisBR+gQKIwJqDe1iNVqUDk+GTRE2IZAbl+fLoDKNBs= github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e698eff68e/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig v2.16.0+incompatible h1:QZbMUPxRQ50EKAq3LFMnxddMu88/EUUG3qmxwtDmPsY= +github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= 
github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5/go.mod h1:xnKTFzjGUiZtiOagBsfnvomW+nJg2usB1ZpordQWqNM= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= -github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg= -github.com/Microsoft/go-winio v0.4.3/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -179,44 +595,67 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim/test 
v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= +github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= -github.com/SAP/go-hdb v0.12.0/go.mod h1:etBT+FAi1t5k3K3tf5vQTnosgYmhDkRi8jEnQqCnxF0= +github.com/Rican7/retry v0.3.0/go.mod h1:CxSDrhAyXmTMeEuRAnArMu1FHu48vtfjLREWqVl7Vw0= +github.com/Rican7/retry v0.3.1/go.mod h1:CxSDrhAyXmTMeEuRAnArMu1FHu48vtfjLREWqVl7Vw0= github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= -github.com/SermoDigital/jose v0.0.0-20180104203859-803625baeddc/go.mod h1:ARgCUhI1MHQH+ONky/PAtmVHQrP5JlGY0F3poXOp/fA= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.27.1/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= -github.com/Shopify/sarama v1.30.0 h1:TOZL6r37xJBDEMLx4yjB77jxbZYXPaDow08TSK6vIL0= -github.com/Shopify/sarama v1.30.0/go.mod h1:zujlQQx1kzHsh4jfV1USnptCQrHAEZ2Hk8fTKCulPVs= +github.com/Shopify/sarama v1.22.2-0.20190604114437-cd910a683f9f/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= +github.com/Shopify/sarama v1.30.1/go.mod h1:hGgx05L/DiW8XYBXeJdKIN6V2QUy2H6JqME5VT1NLRw= +github.com/Shopify/sarama v1.32.0/go.mod h1:+EmJJKZWVT/faR9RcOxJerP+LId4iWdQPBGLy1Y1Njs= +github.com/Shopify/sarama v1.37.2 h1:LoBbU0yJPte0cE5TZCGdlzZRmMgMtZU/XgnUKZg9Cv4= +github.com/Shopify/sarama v1.37.2/go.mod h1:Nxye/E+YPru//Bpaorfhc3JsSGYwCaDDj+R4bK52U5o= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= 
-github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae h1:ePgznFqEG1v3AjMklnK8H7BSc++FDSo7xfK9K7Af+0Y= github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae/go.mod h1:/cvHQkZ1fst0EmZnA5dFtiQdWCNCFYzb+uE2vqVgvx0= +github.com/Shopify/toxiproxy/v2 v2.3.0/go.mod h1:KvQTtB6RjCJY4zqNJn7C7JDFgsG5uoHYDirfUfpIm0c= +github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= +github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig= github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= -github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= -github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= +github.com/a8m/tree v0.0.0-20210115125333-10a5fd5b637d/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg= +github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da/go.mod h1:eHEWzANqSiWQsof+nXEI9bUVUyV6F53Fp89EuCh2EAA= +github.com/aerospike/aerospike-client-go/v5 v5.10.0/go.mod h1:e/zYeIoBg9We63fLKa+h+198+fT1GdoLfKa+Pu4QSpg= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/alecthomas/go-thrift v0.0.0-20170109061633-7914173639b2/go.mod h1:CxCgO+NdpMdi9SsTlGbc0W+/UNxO3I0AabOEJZ3w61w= +github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE= +github.com/alecthomas/kong v0.2.1/go.mod h1:+inYUSluD+p4L8KdviBSgzcqEjUQOfC5fQDRFuc36lI= +github.com/alecthomas/participle v0.4.1/go.mod h1:T8u4bQOSMwrkTWOSyt8/jSFPEnRtd0FKFMjVfYBlqPs= +github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ= +github.com/alecthomas/repr v0.0.0-20210301060118-828286944d6a/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 
h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -228,22 +667,52 @@ github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8V github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= +github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk= github.com/alicebob/miniredis/v2 v2.14.3 h1:QWoo2wchYmLgOB6ctlTt2dewQ1Vu6phl+iQbwT8SYGo= github.com/alicebob/miniredis/v2 v2.14.3/go.mod h1:gquAfGbzn92jvtrSC69+6zZnwSODVXVpYDRaGhWaL6I= +github.com/aliyun/alibaba-cloud-sdk-go v1.62.77/go.mod h1:Api2AkmMgGaSUAhmk76oaFObkoeCPc/bKAqcyplPODs= github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible h1:EaK5256H3ELiyaq5O/Zwd6fnghD6DqmZDQmmzzJklUU= github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antchfx/jsonquery v1.3.1/go.mod h1:R4LXEqMGhHoCkDfuKt7K5hBxdTlINB9nubLE848juxw= +github.com/antchfx/xmlquery v1.3.15/go.mod h1:zMDv5tIGjOxY/JCNNinnle7V/EwthZ5IT8eeCGJKRWA= +github.com/antchfx/xpath v1.1.7/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= +github.com/antchfx/xpath v1.1.11/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antchfx/xpath v1.2.2/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antchfx/xpath v1.2.3/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antonmedv/expr v1.9.0/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod 
h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= +github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= +github.com/apache/beam v2.28.0+incompatible/go.mod h1:/8NX3Qi8vGstDLLaeaU7+lzVEu/ACaQhYjeefzQ0y1o= +github.com/apache/iotdb-client-go v0.12.2-0.20220722111104-cd17da295b46/go.mod h1:1z89VPGCUGHGqxkPW8p2Haq6WJwrRBKZN+WOjDBiQQM= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.15.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/apex/log v1.1.4/go.mod h1:AlpoD9aScyQfJDVHmLMEcx4oU6LqzkWp4Mg9GdAcEvQ= +github.com/apex/log v1.6.0/go.mod h1:x7s+P9VtvFBXge9Vbn+8TrqKmuzmD35TTkeBHul8UtY= +github.com/apex/logs v0.0.4/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= +github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= +github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= +github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -251,44 +720,152 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20180319081651-7d2e70ef918f/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= 
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/ashanbrown/forbidigo v1.1.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= +github.com/ashanbrown/makezero v0.0.0-20201205152432-7b7cdbb3025a/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= +github.com/awnumar/memcall v0.1.2/go.mod h1:S911igBPR9CThzd/hYQQmTc9SWNu3ZHIlCGaWsWsoJo= +github.com/awnumar/memguard v0.22.3/go.mod h1:mmGunnffnLHlxE5rRgQc3j+uwPZ27eYb61ccr8Clz2Y= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.15.24/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.19.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.29.11/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= +github.com/aws/aws-sdk-go v1.30.8/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= 
+github.com/aws/aws-sdk-go v1.40.34/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go v1.42.8/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go v1.43.10 h1:lFX6gzTBltYBnlJBjd2DWRCmqn2CbTcs6PW99/Dme7k= github.com/aws/aws-sdk-go v1.43.10/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4= +github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2 v1.11.0/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= +github.com/aws/aws-sdk-go-v2 v1.11.2/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= +github.com/aws/aws-sdk-go-v2 v1.16.10/go.mod h1:WTACcleLz6VZTp7fak4EO5b9Q4foxbn+8PIz3PmyKlo= +github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= +github.com/aws/aws-sdk-go-v2 v1.17.1/go.mod h1:JLnGeGONAyi2lWXI1p0PCIOIy333JMVK1U7Hf0aRFLw= +github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0/go.mod h1:Xn6sxgRuIDflLRJFj5Ev7UxABIkNbccFPV/p8itDReM= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8/go.mod h1:JTnlBSot91steJeti4ryyu/tLd4Sk84O5W22L7O2EQU= +github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= +github.com/aws/aws-sdk-go-v2/config v1.10.1/go.mod h1:auIv5pIIn3jIBHNRcVQcsczn6Pfa6Dyv80Fai0ueoJU= +github.com/aws/aws-sdk-go-v2/config v1.17.8/go.mod h1:UkCI3kb0sCdvtjiXYiU4Zx5h07BOpgBTtkPu/49r+kA= +github.com/aws/aws-sdk-go-v2/credentials v1.3.3/go.mod h1:oVieKMT3m9BSfqhOfuQ+E0j/yN84ZAJ7Qv8Sfume/ak= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= +github.com/aws/aws-sdk-go-v2/credentials v1.6.1/go.mod h1:QyvQk1IYTqBWSi1T6UgT/W8DMxBVa5pVuLFSRLLhGf8= +github.com/aws/aws-sdk-go-v2/credentials v1.12.21/go.mod h1:O+4XyAt4e+oBAoIwNUYkRg3CVMscaIJdmZBOcPgJ8D8= +github.com/aws/aws-sdk-go-v2/credentials v1.13.2/go.mod h1:eAT5aj/WJ2UDIA0IVNFc2byQLeD89SDEi4cjzH/MKoQ= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1/go.mod h1:+GTydg3uHmVlQdkRoetz6VHKbOMEYof70m19IpMLifc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0/go.mod h1:5E1J3/TTYy6z909QNR0QnXGBpfESYGDqd3O0zqONghU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19/go.mod h1:VihW95zQpeKQWVPGkwT+2+WJNQV8UXFfMTWdU6VErL8= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.7.1/go.mod h1:wN/mvkow08GauDwJ70jnzJ1e+hE+Q3Q7TwpYLXOe9oI= +github.com/aws/aws-sdk-go-v2/internal/configsources 
v1.0.4/go.mod h1:W5gGbtNXFpF9/ssYZTaItzG/B+j0bjTnwStiCP2AtWU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0/go.mod h1:NO3Q5ZTTQtO2xIg2+xTXYDiT7knSejfeDm7WGDaOo0U= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17/go.mod h1:6qtGip7sJEyvgsLjphRZWF9qPe3xJf1mL/MM01E35Wc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25/go.mod h1:Zb29PYkf42vVYQY6pvSyJCJcFHlPIiY+YKdPtwnvMkY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0/go.mod h1:anlUzBoEWglcUxUQwZA7HQOEVEnQALVZsizAapB2hq8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11/go.mod h1:cYAfnB+9ZkmZWpQWmPDsuIGm4EA+6k2ZVtxKjw/XJBY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19/go.mod h1:6Q0546uHDp421okhmmGfbxzq2hBqbXFNpi4k+Q1JnQA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0/go.mod h1:6oXGy4GLpypD3uCh8wcqztigGgmhLToMfjavgh+VySg= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.21.6/go.mod h1:CCrqOzLQ6d1+zauyTah8o50m9dQu0NS/kaC0heWCu0c= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.15.13/go.mod h1:2iXBqg3ifNLAaZxVd+Y4BBsNvAitiuG4ZGAzQmPm9Kk= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0/go.mod h1:XY5YhCS9SLul3JSQ08XG/nfxXxrkh6RR21XPq/J//NY= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.17.3/go.mod h1:BiglbKCG56L8tmMnUEyEQo422BO9xnNR8vVHnOsByf8= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0/go.mod h1:bYsEP8w5YnbYyrx/Zi5hy4hTwRRQISSJS3RWrsGRijg= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.80.1/go.mod h1:mV0E7631M1eXdB+tlGFIw6JxfsC7Pz7+7Aw15oLVhZw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0/go.mod h1:80NaCIH9YU3rzTTs/J/ECATjXuRqzo/wB6ukO6MZ0XY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.10/go.mod h1:9cBNUHI2aW4ho0A5T87O294iPDuuUOSIEDjnd1Lq/z0= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0/go.mod h1:enkU5tq2HoXY+ZMiQprgF3Q83T3PbO77E83yXXzRZWE= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.19/go.mod h1:2WpVWFC5n4DYhjNXzObtge8xfgId9UP6GWca46KJFLo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3/go.mod h1:7gcsONBmFoCcKrAqrm95trrMd2+C/ReYKP7Vfu8yHHA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= 
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0/go.mod h1:Mq6AEc+oEjCUlBuLiK5YwW4shSOAKCQ3tXN0sQeYoBA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19/go.mod h1:02CP6iuYP+IVnBX5HULVdSAku/85eHB2Y9EsFhrkEwU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21/go.mod h1:lRToEJsn+DRA9lW4O9L9+/3hjTkUzlzyzHqn8MTds5k= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.0/go.mod h1:xKCZ4YFSF2s4Hnb/J0TLeOsKuGzICzcElaOKNGrVnx4= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0/go.mod h1:9O7UG2pELnP0hq35+Gd7XDjOLBkg7tmgRQ0y14ZjoJI= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.15.19/go.mod h1:9rLNg+J9SEe7rhge/YzKU3QTovlLqOmqH8akb0IB1ko= +github.com/aws/aws-sdk-go-v2/service/s3 v1.19.0/go.mod h1:Gwz3aVctJe6mUY9T//bcALArPUaFmNAy2rTB9qN4No8= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= +github.com/aws/aws-sdk-go-v2/service/sso v1.6.0/go.mod h1:Q/l0ON1annSU+mc0JybDy1Gy6dnJxIcWjphO6qJPzvM= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.25/go.mod h1:IARHuzTXmj1C0KS35vboR0FeJ89OkEy1M9mWbK2ifCI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8/go.mod h1:er2JHN+kBY6FcMfcBBKNGCT3CarImmdFzishsqBmSRI= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.2/go.mod h1:RBhoMJB8yFToaCnbe0jNq5Dcdy0jp6LhHqg55rjClkM= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= +github.com/aws/aws-sdk-go-v2/service/sts v1.10.0/go.mod h1:jLKCFqS+1T4i7HDqCP9GM4Uk75YW1cS0o82LdxpMyOE= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM= +github.com/aws/aws-sdk-go-v2/service/sts v1.17.4/go.mod h1:bXcN3koeVYiJcdDU89n3kCYILob7Y34AeLopUbZgLT4= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.14.5/go.mod h1:MBzzN6y6AQg34OROcjgh/1KWuIR1disH01on6CVgAqQ= +github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.9.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.12.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.13.4/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= github.com/baidubce/bce-sdk-go v0.9.81 h1:n8KfThLG9fvGv3A+RtTt/jKhg/FPPRpo+iNnS2r+iPI= github.com/baidubce/bce-sdk-go v0.9.81/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= 
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -296,25 +873,39 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= +github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/blues/jsonata-go v1.5.4/go.mod h1:uns2jymDrnI7y+UFYCqsRTEiAH22GyHnNXrkupAVFWI= github.com/bmatcuk/doublestar v1.2.2 h1:oC24CykoSAB8zd7XgruHo33E0cHJf/WhQA/7BeXj+x0= github.com/bmatcuk/doublestar v1.2.2/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/bmatcuk/doublestar/v3 v3.0.0/go.mod h1:6PcTVMw80pCY1RVuoqu3V++99uQB3vsSYKPTd8AWA0k= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= 
+github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= +github.com/bombsimon/wsl v1.2.8/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= +github.com/bombsimon/wsl/v3 v3.2.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= @@ -324,25 +915,45 @@ github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3k github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee h1:BnPxIde0gjtTnc9Er7cxvBk8DHLWhEux0SxayC8dP6I= github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= +github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw= github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= -github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= +github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= +github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo= +github.com/canonical/candid v1.12.1/go.mod h1:qi/NwPxGC2vA7zvRIybPRp9qnlqcNcK3xTucNSf3nMY= +github.com/canonical/go-dqlite v1.11.4/go.mod h1:Dwp/03G1r4dtc+QSB9tV84RAAS7Mc6gy7tEvzCgcuQ8= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/casbin/casbin/v2 v2.31.6/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= +github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A= github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.0.0/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.1/go.mod 
h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chromedp/cdproto v0.0.0-20200116234248-4da64dd111ac/go.mod h1:PfAWWKJqjlGFYJEidUM6aVIWPr0EpobeyVWEEmplX7g= github.com/chromedp/cdproto v0.0.0-20200424080200-0de008e41fa0/go.mod h1:PfAWWKJqjlGFYJEidUM6aVIWPr0EpobeyVWEEmplX7g= github.com/chromedp/chromedp v0.5.3/go.mod h1:YLdPtndaHQ4rCpSpBG+IPpy9JvX0VD+7aaLxYgYj28w= @@ -353,18 +964,27 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/circonus-labs/circonus-gometrics v0.0.0-20161109192337-d17a8420c36e/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist 
v0.0.0-20161110002650-365d370cc145/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc= +github.com/cisco-ie/nx-telemetry-proto v0.0.0-20220628142927-f4160bcb943c/go.mod h1:rJDd05J5hqWVU9MjJ+5jw1CuLn/jRhvU0xtFEzzqjwM= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY= +github.com/cloudflare/cfssl v0.0.0-20190726000631-633726f6bcb7/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= +github.com/cloudflare/cfssl v1.4.1/go.mod h1:KManx/OJPb5QY+y0+o/898AMcM128sF0bURvoVUSjTo= +github.com/cloudflare/cfssl v1.6.1/go.mod h1:ENhCj4Z17+bY2XikpxVmTHDg/C2IsG2Q0ZBeXpAqhCk= +github.com/cloudflare/go-metrics v0.0.0-20151117154305-6a9aea36fb41/go.mod h1:eaZPlJWD+G9wseg1BuRXlHnjntPMrywMsyxf+LTOdP4= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo= +github.com/cloudflare/redoctober v0.0.0-20201013214028-99c99a8e7544/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210322005330-6414d713912e/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -372,9 +992,14 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 h1:KwaoQzs/WeUxxJqiJsZ4euOly1Az/IgZXXSxlD/UBNk= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod 
h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= @@ -390,17 +1015,21 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -411,16 +1040,20 @@ github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09Zvgq github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4= +github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= github.com/containerd/containerd 
v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= -github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= +github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200709052629-daa8e1ccc0bc/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= @@ -430,6 +1063,9 @@ github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= @@ -439,9 +1075,12 @@ github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= +github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo= github.com/containerd/nri 
v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -460,50 +1099,71 @@ github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNR github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= +github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= +github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/coredns/coredns v1.1.2/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/wo1+AM0= +github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g= +github.com/coocood/freecache v1.2.2/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.25+incompatible h1:0GQEw6h3YnuOVdtwygkIfJ+Omx0tZ8/QkVyXI4LkbeY= github.com/coreos/etcd v3.3.25+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/coreos/go-oidc v0.0.0-20170119174436-2cc7913f9f6f/go.mod 
h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534 h1:rtAn27wIbmOGUs7RIbVgPEjb31ehTVniDwPGXyMxm5U= +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cortexproject/cortex v1.10.1-0.20211124141505-4e9fc3a2b5ab h1:THN4VQQqsZn5gNwcmQJO1GarnfZkSWfp5824ifoD9fQ= github.com/cortexproject/cortex v1.10.1-0.20211124141505-4e9fc3a2b5ab/go.mod h1:njSBkQ1wUNx9X4knV/j65Pi4ItlJXX4QwXRKoMflJd8= -github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= -github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= -github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= +github.com/couchbase/go-couchbase v0.1.1/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A= +github.com/couchbase/gomemcached v0.1.3/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo= +github.com/couchbase/goutils v0.1.0/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cristalhq/hedgedhttp v0.7.0 h1:C2XPDC+AQH4QJt6vZI4jB5WNyF86QbSJD4C4fW3H3ro= github.com/cristalhq/hedgedhttp v0.7.0/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI= +github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ= github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/cyriltovena/cloudflare-go v0.27.1-0.20211118103540-ff77400bcb93 h1:PEBeRA25eDfHWkXNJs0HOnMhjIuKMcxKg/Z3VeuoRbU= github.com/cyriltovena/cloudflare-go v0.27.1-0.20211118103540-ff77400bcb93/go.mod h1:sPWL/lIC6biLEdyGZwBQ1rGQKF1FhM7N60fuNiFdYTI= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= @@ -520,53 +1180,76 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= +github.com/daaku/go.zipexe v1.0.1/go.mod h1:5xWogtqlYnfBXkSB1o9xysukNP9GTvaNkqzUZbt3Bw8= +github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= -github.com/denisenkom/go-mssqldb v0.0.0-20180620032804-94c9c97e8c9f/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc= +github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= -github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denisenkom/go-mssqldb v0.12.3/go.mod h1:k0mtMFOnU+AihqFxPMiF05rtiDrorD1Vrm1KEz5hxDo= github.com/dennwc/varint v1.0.0 
h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M= +github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-rendezvous v0.0.0-20200624174652-8d2f3be8b2d9/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= -github.com/digitalocean/godo v1.1.1/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= -github.com/digitalocean/godo v1.10.0/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= +github.com/digitalocean/go-libvirt v0.0.0-20220407213524-fde04463c367/go.mod h1:o129ljs6alsIQTc8d6eweihqpmmrbxZ2g1jhgjhPykI= +github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e/go.mod h1:o129ljs6alsIQTc8d6eweihqpmmrbxZ2g1jhgjhPykI= +github.com/digitalocean/go-libvirt v0.0.0-20220811165305-15feff002086/go.mod h1:yhKBkgJm/PWVHCFHLlFwqhIzS7FcutIYmS/fmzex5LQ= +github.com/digitalocean/go-qemu v0.0.0-20220826173844-d5f5e3ceed89/go.mod h1:GUMRsCBc7W7BzUF0zizFJ7Pna929325F+v9hvnWD+pI= +github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= +github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/digitalocean/godo v1.71.0/go.mod h1:GBmu8MkjZmNARE7IXRPmkbbnocNN8+uBm0xbEVw2LCs= github.com/digitalocean/godo v1.75.0 h1:UijUv60I095CqJqGKdjY2RTPnnIa4iFddmq+1wfyS4Y= github.com/digitalocean/godo v1.75.0/go.mod 
h1:GBmu8MkjZmNARE7IXRPmkbbnocNN8+uBm0xbEVw2LCs= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/djherbis/times v1.5.0/go.mod h1:5q7FDLvbNg1L/KaBmPcWlVR9NmoKo3+ucqUA3ijQhA0= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= -github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20200319182547-c7ad2b866182/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.10+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= +github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod 
h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= @@ -577,26 +1260,43 @@ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHz github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8 h1:9Hsno4vmXpQ0yVAp07bLxS5dHH24w80xzmUCLil47ME= github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/doclambda/protobufquery v0.0.0-20220727165953-0da287796ee9/go.mod h1:8Ia4zp86glrUhC29AAdK9hwTYh8RB6v0WRCtpplYqEg= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02/go.mod h1:7NQ3kWOx2cZOSjtcveTa5nqupVr2s6/83sG+rTlI7uA= github.com/drone/envsubst v1.0.2 h1:dpYLMAspQHW0a8dZpLRKe9jCNvIGZPhCPrycZzIHdqo= github.com/drone/envsubst v1.0.2/go.mod h1:bkZbnc/2vh1M12Ecn7EYScpI4YGYU0etwLJICOWi8Z0= -github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo= +github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ= +github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= +github.com/duo-labs/webauthn v0.0.0-20210727191636-9f1b88ef44cc/go.mod h1:/X2OJiJxjQ7alqWZqX9EtBTmZc+4qQ0LvZ1k5wP67RM= +github.com/duo-labs/webauthn v0.0.0-20220815211337-00c9fb5711f5/go.mod h1:Jcj7rFNlTknb18v9jpSA58BveX2LDhXqaoy+6YV1N9g= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0/go.mod h1:V+Qd57rJe8gd4eiGzZyg4h54VLHmYVVw54iMnlAMrF8= +github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= +github.com/dylanmei/iso8601 v0.1.0/go.mod h1:w9KhXSgIyROl1DefbMYIE7UVSIvELTbMrCfx+QkYnoQ= +github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08/go.mod h1:VBVDFSBXCIW8JaHQpI8lldSKfYaLMzP9oyq6IJ4fhzY= +github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0/go.mod h1:PseHFo8Leko7J4A/TfZ6kkHdkzKBLUta6hRZR/OEbbc= +github.com/eapache/channels v1.1.0/go.mod h1:jMm2qB5Ubtg9zLd+inMZd2/NUvXgzmWXsDaLyQIGfH0= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= 
github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= +github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/echlebek/crock v1.0.1/go.mod h1:/kvwHRX3ZXHj/kHWJkjXDmzzRow54EJuHtQ/PapL/HI= +github.com/echlebek/timeproxy v1.0.0/go.mod h1:0dg2Lnb8no/jFwoMQKMTU6iAivgoMptGqSTprhnrRtk= +github.com/eclipse/paho.golang v0.10.0/go.mod h1:rhrV37IEwauUyx8FHrvmXOKo+QRKng5ncoN1vJiJMcs= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= +github.com/eclipse/paho.mqtt.golang v1.4.2/go.mod h1:JGt0RsEwEX+Xa/agj90YJ9d9DH2b7upDZMK9HRbFvCA= github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= @@ -610,12 +1310,12 @@ github.com/efficientgo/tools/extkingpin v0.0.0-20210609125236-d73259166f20 h1:kM github.com/efficientgo/tools/extkingpin v0.0.0-20210609125236-d73259166f20/go.mod h1:ZV0utlglOczUWv3ih2AbqPSoLoFzdplUYxwV62eZi6Q= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= -github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.0.0-20180919002855-2137d9196328/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -625,63 +1325,119 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod 
h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.1 h1:cgDRLG7bs59Zd+apAWuzLQL95obVYAymNJek76W3mgw= github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.3.0-java/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.1/go.mod h1:txg5va2Qkip90uYoSKH+nkAAmXrb2j3iq4FLwdrCbXQ= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/envoyproxy/protoc-gen-validate v0.6.6 h1:BApABShi05CepE340unZKC07YxY/I8KgnWPICc3U5yM= github.com/envoyproxy/protoc-gen-validate v0.6.6/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4= +github.com/esimonov/ifshort v1.0.1/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= +github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE= github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4/go.mod h1:SBHk9aNQtiw4R4bEuzHjVmZikkUKCnO1v3lPQ21HZGk= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/structs v0.0.0-20180123065059-ebf56d35bba7/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/fgprof v0.9.1 h1:E6FUJ2Mlv043ipLOCFqo8+cHo9MhQ203E2cdEK/isEs= github.com/felixge/fgprof v0.9.1/go.mod h1:7/HK6JFtFaARhIljgP2IV8rJLIoHDoOYoUphsnGvqxE= 
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3/go.mod h1:bJWSKrZyQvfTnb2OudyUjurSG4/edverV7n82+K3JiM= github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c h1:QwbffUs/+ptC4kTFPEN9Ej2latTq3bZJ5HO/OwPXYMs= github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c/go.mod h1:WQX+afhrekY9rGK+WT4xvKSlzmia9gDoLYu4GGYGASQ= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp4nseejPd+UKxtCVQ2hUxNTZ7qQZJa7CLriIeo= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/frankban/quicktest v1.0.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBavT6B6CuGq2k= +github.com/frankban/quicktest v1.1.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBavT6B6CuGq2k= +github.com/frankban/quicktest v1.1.1/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBavT6B6CuGq2k= +github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20= +github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= +github.com/frankban/quicktest v1.5.0/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= +github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= +github.com/frankban/quicktest v1.7.3/go.mod h1:V1d2J5pfxYH6EjBAgSK7YNXcXlTWxUHdE1sVDXkjnig= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.13.0/go.mod 
h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsouza/fake-gcs-server v1.7.0 h1:Un0BXUXrRWYSmYyC1Rqm2e2WJfTPyDy/HGMz31emTi8= github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/fullstorydev/grpcurl v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZUCkZIqFxsQf1o= +github.com/fullstorydev/grpcurl v1.8.1/go.mod h1:3BWhvHZwNO7iLXaQlojdg5NA6SxUDePli4ecpK1N7gw= +github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/fxamacker/cbor/v2 v2.2.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= +github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= -github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod 
h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= +github.com/go-critic/go-critic v0.3.5-0.20190904082202-d79a9f0c64db/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= +github.com/go-critic/go-critic v0.5.4/go.mod h1:cjB4YGw+n/+X8gREApej7150Uyy1Tg8If6F2XOAUXNE= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -689,12 +1445,20 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.11.0/go.mod h1:73/6Ixaufkvb5Osvkls8C79vuQ49Ba1rUEUYNSf+FUw= github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-ldap/ldap/v3 v3.4.4/go.mod h1:fe1MsuN5eJJ1FeLT/LEBVdWfNWKh459R7aXgXtJC+aI= +github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod 
h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -705,11 +1469,17 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.0.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-macaroon-bakery/macaroonpb v1.0.0/go.mod h1:UzrGOcbiwTXISFP2XDLDPjfhMINZa+fX/7A2lMd31zc= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -720,6 +1490,8 @@ github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgT github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= github.com/go-openapi/analysis v0.20.0 h1:UN09o0kNhleunxW7LR+KnltD0YrJ8FF03pSqvAN3Vro= github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= +github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU= +github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= @@ -731,18 +1503,24 @@ github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpX github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.0 h1:Sxpo9PjEHDzhs3FbnGNonvDgWcMW2U7wGTcDDSFSceM= github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8= +github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod 
h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -755,6 +1533,8 @@ github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hs github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= github.com/go-openapi/loads v0.20.2 h1:z5p5Xf5wujMxS1y8aP+vxwW5qYT2zdJBbXKmQUG3lcc= github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= +github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0= +github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= @@ -763,6 +1543,9 @@ github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiS github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= github.com/go-openapi/runtime v0.19.29 h1:5IIvCaIDbxetN674vX9eOxvoZ9mYGQ16fV1Q0VSG+NA= github.com/go-openapi/runtime v0.19.29/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= +github.com/go-openapi/runtime v0.23.3 h1:/dxjx4KCOQI5ImBMz036F6v/DzZ2NUjSRvbLJs1rgoE= +github.com/go-openapi/runtime v0.23.3/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= @@ -773,8 +1556,13 @@ 
github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHK github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= +github.com/go-openapi/spec v0.20.2/go.mod h1:RW6Xcbs6LOyWLU/mXGdzn2Qc+3aj+ASfI7rvSZh1Vls= github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ= github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= +github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.5/go.mod h1:QbfOSIVt3/sac+a1wzmKbbcLXm5NdZnyBZYtCijp43o= +github.com/go-openapi/spec v0.20.6 h1:ich1RQ3WDbfoeTqTAb+5EIxNmpKVJZWBNah9RAT0jIQ= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= @@ -786,8 +1574,12 @@ github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLs github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= +github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.2 h1:5NDNgadiX1Vhemth/TH4gCGopWSTdDjxl60H3B7f+os= github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= @@ -797,8 +1589,10 @@ github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfT github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.1 h1:S6xFhsBKAtvfphnJwRzeCh3OEGsTL/crXdEetSxLs0Q= +github.com/go-openapi/swag v0.22.1/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.3/go.mod 
h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= @@ -809,22 +1603,50 @@ github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9G github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts= github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= +github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y= +github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg= +github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= +github.com/go-redis/redis/v8 v8.0.0-beta.6/go.mod h1:g79Vpae8JMzg5qjk8BiwU9tK+HmU3iDVyS4UAJLFycI= github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= -github.com/go-sql-driver/mysql v0.0.0-20180618115901-749ddf1598b4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/go-stomp/stomp v2.1.4+incompatible/go.mod h1:VqCtqNZv1226A1/79yh+rMiFUcfY3R109np+7ke4n0c= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.8/go.mod 
h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= @@ -852,7 +1674,7 @@ github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGt github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBTlzMcZBg= -github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA= +github.com/goburrow/serial v0.1.1-0.20211022031912-bfb69110f8dd/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= @@ -861,18 +1683,28 @@ github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblf github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.7.1 h1:DP+LD/t0njgoPBvT5MJLeliUIVQR03hiKR6vezdwHlc= github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= @@ -885,16 +1717,22 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= +github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-migrate/migrate/v4 v4.7.0/go.mod h1:Qvut3N4xKWjoH3sokBccML6WyHSnggXm/DvMMnTsQIc= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp 
v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v0.0.0-20210429001901-424d2337a529/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -902,7 +1740,7 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -912,6 +1750,7 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -932,17 +1771,55 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= 
+github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM= +github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= +github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.17.2-0.20190910081718-bad04bb7378f/go.mod h1:kaqo8l0OZKYPtjNmG4z4HrWLgcYNIJ9B9q3LWri9uLg= +github.com/golangci/golangci-lint v1.21.0/go.mod h1:phxpHK52q7SE+5KpPnti4oZTdFCEsn/tKN+nFvCKXfk= +github.com/golangci/golangci-lint v1.38.0/go.mod h1:Knp/sd5ATrVp7EOzWzwIIFH+c8hUfpW+oOQb8NvdZDo= +github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU= +github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= +github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/revgrep v0.0.0-20180812185044-276a5c0a1039/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/gomodule/oauth1 v0.2.0/go.mod h1:4r/a8/3RkhMBxJQWL5qzbOEcaQmNPIkNoI7P8sXeI08= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/addlicense v0.0.0-20200906110928-a0294312aa76/go.mod h1:EMjYTRimagHs1FwlIqKyX3wAM0u3rA+McvlIIWmSamA= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/certificate-transparency-go v1.0.21/go.mod 
h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/certificate-transparency-go v1.1.0/go.mod h1:i+Q7XY+ArBveOUT36jiHGfuSK1fHICIg6sUkRxPAbCs= +github.com/google/certificate-transparency-go v1.1.2-0.20210422104406-9f33727a7a18/go.mod h1:6CKh9dscIRoqc2kC6YUFICHZMT9NrClyPrRVFrdw1QQ= +github.com/google/certificate-transparency-go v1.1.2-0.20210511102531-373a877eec92/go.mod h1:kXWPsHVPSKVuxPPG69BRtumCbAW537FydV/GH89oBhM= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnxi v0.0.0-20221016143401-2aeceb5a2901/go.mod h1:dq13Cnket+zAlR8lHw89hk4b3CcdnJiA4NXeMwhKMRk= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -954,23 +1831,36 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= -github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d/go.mod h1:+TYOmkVoJOpwnS0wfdsJCV9CoD5nJYsHoFk/0CrTK4M= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE= +github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 
h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/licensecheck v0.3.1/go.mod h1:ORkR35t/JjW+emNKtfJDII0zlciG9JgbT7SmsohlHmY= +github.com/google/licenseclassifier v0.0.0-20210325184830-bb04aff29e72/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE= +github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/monologue v0.0.0-20190606152607-4b11a32b5934/go.mod h1:6NTfaQoUpg5QmPsCUWLR3ig33FHrKXhTtWzF0DVdmuk= +github.com/google/monologue v0.0.0-20191105172128-0324c8b45f6f/go.mod h1:6NTfaQoUpg5QmPsCUWLR3ig33FHrKXhTtWzF0DVdmuk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -982,58 +1872,108 @@ github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210323184331-8eee2492667d/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/pprof v0.0.0-20220218203455-0368bd9e19a7 
h1:IR7vKogOtQRR3JhSl1UftKpFLnuJ2o+woqCFXqJ7wXw= github.com/google/pprof v0.0.0-20220218203455-0368bd9e19a7/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= +github.com/google/protobuf v3.11.4+incompatible/go.mod h1:lUQ9D1ePzbH2PrIS7ob/bjm9HXyH5WHB0Akwh7URreM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/renameio v1.0.1 h1:Lh/jXZmvZxb0BBeSY5VKEfidcbcbenKjZFzM/q0fSeU= +github.com/google/renameio v1.0.1/go.mod h1:t/HQoYBZSsWSNK35C6CO/TpPLDVWvxOHboWUAweKUpk= github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= +github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/trillian v1.2.2-0.20190612132142-05461f4df60a/go.mod h1:YPmUVn5NGwgnDUgqlVyFGMTgaWlnSvH7W5p+NdOG8UA= +github.com/google/trillian v1.3.14-0.20210409160123-c5ea3abd4a41/go.mod h1:1dPv0CUjNQVFEDuAUFhZql16pw/VlPgaX8qj+g5pVzQ= +github.com/google/trillian v1.3.14-0.20210428093031-b4ddea2e86b1/go.mod h1:FdIJX+NoDk/dIN2ZxTyz5nAJWgf+NSSSriPAMThChTY= +github.com/google/trillian-examples v0.0.0-20190603134952-4e75ba15216c/go.mod h1:WgL3XZ3pA8/9cm7yxqWrZE6iZkESB2ItGxy5Fo6k2lk= +github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= 
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gopcua/opcua v0.1.12/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= -github.com/gophercloud/gophercloud v0.0.0-20180828235145-f29afc2cceca/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gookit/color v1.3.6/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= +github.com/gopcua/opcua v0.3.7/go.mod h1:n/qSWDVB/KSPIG4vYhBSbs5zdYAW3yOcDCRrWd1BZo0= github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= +github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gophercloud/gophercloud v0.23.0/go.mod h1:MRw6uyLj8uCGbIvBlqL7QW67t0QtNZnzydUzewo1Ioc= -github.com/gophercloud/gophercloud v0.24.0 h1:jDsIMGJ1KZpAjYfQgGI2coNQj5Q83oPzuiGJRFWgMzw= github.com/gophercloud/gophercloud v0.24.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c= -github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gophercloud/gophercloud v1.0.0 h1:9nTGx0jizmHxDobe4mck89FyQHVyA3CaXLIUSGJjP9k= +github.com/gophercloud/gophercloud v1.0.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20220104163920-15ed2e8cf2bd/go.mod h1:cz9oNYuRUWGdHmLF2IodMLkAhcPtXeULvcBNagUrxTI= +github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= 
+github.com/goreleaser/goreleaser v0.134.0/go.mod h1:ZT6Y2rSYa6NxQzIsdfWWNWAlYGXGbreo66NmE+3X3WQ= +github.com/goreleaser/nfpm v1.2.1/go.mod h1:TtWrABZozuLOttX2uDlYyECfQX7x5XYkVxhjYcR6G9w= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v0.0.0-20170224193955-13d73096a474/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gosexy/gettext v0.0.0-20160830220431-74466a0a0c4a/go.mod h1:IEJaV4/6J0VpoQ33kFCUUP6umRjrcBVEbOva6XCub/Q= +github.com/gosnmp/gosnmp v1.35.0/go.mod h1:2AvKZ3n9aEl5TJEo/fFmf/FGO4Nj4cVeEc5yuk88CYc= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= +github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= +github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/grafana/dskit v0.0.0-20211021180445-3bd016e9d7f1/go.mod h1:uPG2nyK4CtgNDmWv7qyzYcdI+S90kHHRWvHnBtEMBXM= github.com/grafana/dskit v0.0.0-20220331160727-49faf69f72ca h1:0qHzm6VS0bCsSWKHuyfpt+pdpyScdZbzY/IFIyKSYOk= github.com/grafana/dskit v0.0.0-20220331160727-49faf69f72ca/go.mod 
h1:q51XdMLLHNZJSG6KOGujC20ed2OoLFdx0hBmOEVfRs0= @@ -1047,9 +1987,13 @@ github.com/grafana/regexp v0.0.0-20220304100321-149c8afcd6cb/go.mod h1:M5qHK+eWf github.com/grafana/tail v0.0.0-20201004203643-7aa4e4a91f03 h1:fGgFrAraMB0BaPfYumu+iulfDXwHm+GFyHA4xEtBqI8= github.com/grafana/tail v0.0.0-20201004203643-7aa4e4a91f03/go.mod h1:GIMXMPB/lRAllP5rVDvcGif87ryO2hgD7tCtHMdHrho= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9/go.mod h1:qVX2WhsI5xyAoM6I/MV1bXSKBPdLAjp7pCvieO/S0AY= +github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk= +github.com/grid-x/serial v0.0.0-20211107191517-583c7356b3aa/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-middleware/providers/kit/v2 v2.0.0-20201002093600-73cf2ae9d891/go.mod h1:516cTXxZzi4NBUBbKcwmO4Eqbb6GHAEd3o4N+GYyCBY= @@ -1057,176 +2001,295 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-20200501113911-9a95f0fdbf github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 h1:guQyUpELu4I0wKgdsRBZDA5blfGiUleuppRSVy9Qbi0= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7/go.mod h1:GhphxcdlaRyAuBSvo6rV71BvQcvB/vuX8ugCyybuS2k= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.14.4/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= +github.com/grpc-ecosystem/grpc-gateway v1.14.5/go.mod h1:UJ0EZAp832vCd54Wev9N1BMKEyvcZ5+IM0AwDrnlkEc= +github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod 
h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/gwos/tcg/sdk v0.0.0-20220621192633-df0eac0a1a4c/go.mod h1:4yzxLBACr76Is0AMAkE0F/fqWBk28p2tzeO06yDGR/Y= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ= -github.com/hashicorp/consul v1.5.1 h1:p7tRmQ4m3ZMYkGQkuyjLXKbdU1weeumgZFqZOvw7o4c= -github.com/hashicorp/consul v1.5.1/go.mod h1:QsmgXh2YA9Njv6y3/FHXqHYhsMye++3oBoAZ6SR8R8I= +github.com/harlow/kinesis-consumer v0.3.6-0.20211204214318-c2b9f79d7ab6/go.mod h1:hNEr2hL0WPpm4BSILcClbOE/+Tew0JJnqCbTlc6jCUc= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= +github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= -github.com/hashicorp/consul/api v1.12.0 h1:k3y1FYv6nuKyNTqj6w9gXOx5r5CfLj/k/euUeBXj1OY= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= +github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= +github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g= +github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= -github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= +github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU= +github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-bexpr v0.1.0/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU= -github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de/go.mod h1:xIwEieBHERyEvaeKF/TcHh1Hu+lxPM+n2vT1+g9I4m4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod 
h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-discover v0.0.0-20190403160810-22221edb15cd/go.mod h1:ueUgD9BeIocT7QNuvxSyJyPAM9dfifBcaWmeybb67OY= -github.com/hashicorp/go-hclog v0.0.0-20180402200405-69ff559dc25f/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-getter/gcs/v2 v2.1.0/go.mod h1:dVyTnX1BynHAjbumB4Pk14GoJ+v3VbDUJtbI7G0oOlU= +github.com/hashicorp/go-getter/s3/v2 v2.1.0/go.mod h1:rwzJPQaBuc5riYOucPx84DOE74xIhKENOWgBjK3XVEs= +github.com/hashicorp/go-getter/v2 v2.1.0/go.mod h1:w65fE5glbccYjndAuj1kA5lnVBGZYEaH0e5qA1kpIks= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.2.2 h1:ihRI7YFwcZdiSD7SIenIhHfQH3OuDvWerAUBZbeQS3M= +github.com/hashicorp/go-hclog v1.2.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71/go.mod h1:kbfItVoBJwCfKXDXN4YoAXjxcFVZ7MRrJzyTX6H4giE= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror 
v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-plugin v0.0.0-20180331002553-e8d22c780116/go.mod h1:JSqWYsict+jzcj0+xElxyrBQRPNoiWQuddnxArJ7XHQ= -github.com/hashicorp/go-retryablehttp v0.0.0-20180531211321-3b087ef2d313/go.mod h1:fXcdFsQoipQa7mwORhKad5jmDCeSy/RCGzWA08PO0lM= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= +github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v0.0.0-20170202080759-03c5bf6be031/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod 
h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= +github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v0.0.0-20180906183839-65a6292f0157/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hil v0.0.0-20160711231837-1e86c6b523c5/go.mod h1:KHvg/R2/dPtaePb16oW4qIyzkMxXOL38xjRN64adsts= +github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= +github.com/hashicorp/hcl/v2 v2.14.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.2.3/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.2.4/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/memberlist v0.3.0 h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj34fMA= github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q= -github.com/hashicorp/raft v1.0.1-0.20190409200437-d9fe23f7d472/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= -github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= -github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= +github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= +github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/packer-plugin-sdk v0.3.1/go.mod h1:+GzydiXdn0CkueigqXBsX4Poz5gfmFXZ/DkxKt4fmt4= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= github.com/hashicorp/serf 
v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= -github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/vault v0.10.3/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0= -github.com/hashicorp/vault-plugin-secrets-kv v0.0.0-20190318174639-195e0e9d07f1/go.mod h1:VJHHT2SC1tAPrfENQeBhLlb5FbZoKZM+oC/ROmEftz0= -github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= +github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/api v1.0.5-0.20200519221902-385fac77e20f/go.mod h1:euTFbi2YJgwcju3imEt919lhJKF68nN1cQPq3aA+kBE= +github.com/hashicorp/vault/api v1.1.1/go.mod h1:29UXcn/1cLOPHQNMWA7bCz2By4PSd0VKPAydKXS5yN0= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/vault/sdk v0.1.14-0.20200519221530-14615acda45f/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= +github.com/hashicorp/vault/sdk v0.2.1/go.mod h1:WfUiO1vYzfBkz1TmoE4ZGU7HD0T0Cl/rZwaxjBkgN4U= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20210826001029-26ff87cf9493/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA= github.com/hetznercloud/hcloud-go v1.33.1 h1:W1HdO2bRLTKU4WsyqAasDSpt54fYO4WNckWYfH5AuCQ= github.com/hetznercloud/hcloud-go v1.33.1/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME= +github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E= github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= -github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= +github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= +github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/flux v0.131.0/go.mod h1:CKvnYe6FHpTj/E0YGI7TcOZdGiYHoToOPSnoa12RtKI= -github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo= +github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q= github.com/influxdata/go-syslog/v3 v3.0.1-0.20201128200927-a1889d947b48 h1:0WbZ+ZVg74wbyQoRx1TD4D1Xoz8MsXJSTwdP9F7RMeQ= github.com/influxdata/go-syslog/v3 v3.0.1-0.20201128200927-a1889d947b48/go.mod h1:aXdIdfn2OcGnMhOTojXmwZqXKgC3MU5riiNvzwwG9OY= github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= +github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= github.com/influxdata/influxdb v1.9.5/go.mod h1:4uPVvcry9KWQVWLxyT9641qpkRXUBN+xa0MJFFNNLKo= github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= +github.com/influxdata/influxdb-observability/common v0.3.0/go.mod h1:YQWL/mn2SmAvGJfL5uKjDxcwS5M7GgCvDmP+Yk1n3+Q= +github.com/influxdata/influxdb-observability/common v0.3.1/go.mod h1:YQWL/mn2SmAvGJfL5uKjDxcwS5M7GgCvDmP+Yk1n3+Q= +github.com/influxdata/influxdb-observability/influx2otel v0.3.1/go.mod h1:s2xfQkuaKVw3h9nKWfuXfz4mQKZQ8OvaD6GOUvnOMFo= +github.com/influxdata/influxdb-observability/otel2influx v0.3.1/go.mod h1:ihgMwZ0t9X4wSHqvicTIedZ5i0gaYceEYXMJKbs1pto= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= +github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol 
v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/line-protocol-corpus v0.0.0-20210519164801-ca6fa5da0184/go.mod h1:03nmhxzZ7Xk2pdG+lmMd7mHDfeVOYFyhOgwO61qWU98= +github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937/go.mod h1:BKR9c0uHSmRgM/se9JhFHtTT7JTO67X23MtKMHtZcpo= +github.com/influxdata/line-protocol/v2 v2.0.0-20210312151457-c52fdecb625a/go.mod h1:6+9Xt5Sq1rWx+glMgxhcg2c0DUaehK+5TDcPZ76GypY= +github.com/influxdata/line-protocol/v2 v2.1.0/go.mod h1:QKw43hdUBg3GTk2iC3iyCxksNj7PX9aUSeYOYE/ceHY= +github.com/influxdata/line-protocol/v2 v2.2.1/go.mod h1:DmB3Cnh+3oxmG6LOBIxce4oaL4CPj3OmMPgvauXh+tM= github.com/influxdata/pkg-config v0.2.8/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= -github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM= +github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= -github.com/influxdata/telegraf v1.16.3 h1:x0qeuSGGMg5y+YqP/5ZHwXZu3bcBrO8AAQOTNlYEb1c= -github.com/influxdata/telegraf v1.16.3/go.mod h1:fX/6k7qpIqzVPWyeIamb0wN5hbwc0ANUaTS80lPYFB8= +github.com/influxdata/telegraf v1.25.2 h1:uwO8mR7oDBIoFj9W5s5T7hedoyxkmP/M1rUTnaUILQk= +github.com/influxdata/telegraf v1.25.2/go.mod h1:V27Z7ogTXi2KnPt4qvs8Ar+KWc+zeEf4kAeJG/wA0dM= github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= +github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= +github.com/intel/iaevents v1.0.0/go.mod h1:nFsAQmrbF6MoZUomrSl4jgmHhe0SrLxTGtyqvqU2X9Y= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= 
+github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.17.1/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle 
v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jaegertracing/jaeger v1.22.0/go.mod h1:WnwW68MjJEViSLRQhe0nkIsBDaF3CzfFd8wJcpJv24k= +github.com/jaegertracing/jaeger v1.34.1/go.mod h1:md+YcRcDgMCAgB9qyXl0PdstYiq8fjA8KG5cNuyV2kA= +github.com/jaegertracing/jaeger v1.35.2/go.mod h1:e7FBVZ14ptsRjwiHEnLyxvOa4bSnZA0BDFE1OcvNiHs= +github.com/jaegertracing/jaeger v1.36.0/go.mod h1:67uyR2zQgEk7EfguOR3eZOGvGDRzY5Yos6n2aaXxq1Y= +github.com/jaegertracing/jaeger v1.38.0/go.mod h1:4MBTMxfCp3d4buDLxRlHnESQvTFCkN16OUIeE9BEdl4= +github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQDsAXYfUuF/Z0rtK5eT8x9D6Pi7S3PjXAg= +github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jaypipes/pcidb v1.0.0/go.mod h1:TnYUvqhPBzCKnH34KrIX22kAeEbDCSRJ9cqLRCuNDfk= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= -github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= -github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/gokrb5/v8 v8.4.3 h1:iTonLeSJOn7MVUtyMT+arAn5AKAPrkilzhGw8wE/Tq8= +github.com/jcmturner/gokrb5/v8 v8.4.3/go.mod h1:dqRwJGXznQrzw6cWmyo6kH+E7jksEQG/CyVWsJEsJO0= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/jefferai/jsonx v0.0.0-20160721235117-9cc31c3135ee/go.mod h1:N0t2vlmpe8nyZB5ouIbJQPDSR+mH6oe7xHB9VZHSUzM= +github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869/go.mod h1:cJ6Cj7dQo+O6GJNiMx+Pa94qKj+TG8ONdKHgMNIyyag= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jgautheron/goconst v1.4.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= +github.com/jhump/protoreflect v1.8.2/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= +github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= +github.com/jingyugao/rowserrcheck v0.0.0-20210130005344-c6a0c12dd98d/go.mod h1:/EZlaYCnEX24i7qdVhT9du5JrtFWYRQr67bVgR7JJC8= 
+github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jkeiser/iter v0.0.0-20200628201005-c8aa0ae784d1/go.mod h1:fP/NdyhRVOv09PLRbVXrSqHhrfQypdZwgE2L4h2U5C8= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -1235,20 +2298,35 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo= +github.com/jmoiron/sqlx v0.0.0-20180124204410-05cef0741ade/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= +github.com/jochenvg/go-udev v0.0.0-20171110120927-d6b62d56d37b/go.mod h1:IBDUGq30U56w969YNPomhMbRje1GrhUsCh7tHdwgLXA= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/joncrlsn/dque v2.2.1-0.20200515025108-956d14155fa2+incompatible h1:f4ZGkY12AQ+YvzWDDWMLMGejA4ceg7nIPlqJ9fQ9T4c= github.com/joncrlsn/dque v2.2.1-0.20200515025108-956d14155fa2+incompatible/go.mod h1:hDZb8oMj3Kp8MxtbNLg9vrtAUDHjgI1yZvqivT4O8Iw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= +github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod 
h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= +github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR77jAZG3Y3bsb8hF6fHJbFoyFukLFOkQ98S0pQz3xw= +github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs= +github.com/jsimonetti/rtnetlink v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA= +github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U= +github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo= +github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786/go.mod h1:v4hqbTdfQngbVSZJVWUhGE/lbTFf9jb+ygmNUDQMuOs= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -1262,42 +2340,163 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0= -github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/aclstore/v2 v2.1.0/go.mod h1:c2AibPW3K+JKHmkqlvPET9hfbe2vPa3e1GuNw76NlKM= +github.com/juju/ansiterm v0.0.0-20160907234532-b99631de12cf/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= +github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= +github.com/juju/ansiterm v0.0.0-20210706145210-9283cdf370b5/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= +github.com/juju/clock v0.0.0-20180808021310-bab88fc67299/go.mod h1:nD0vlnrUjcjJhqN5WuCWZyzfd5AHZAC9/ajvbSx69xA= +github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c/go.mod h1:nD0vlnrUjcjJhqN5WuCWZyzfd5AHZAC9/ajvbSx69xA= +github.com/juju/clock v0.0.0-20220202072423-1b0f830854c4/go.mod h1:zDZCPSgCJQINeZtQwHx2/cFk4seaBC8Yiqe8V82xiP0= +github.com/juju/clock v0.0.0-20220203021603-d9deb868a28a/go.mod h1:GZ/FY8Cqw3KHG6DwRVPUKbSPTAwyrU28xFi5cqZnLsc= +github.com/juju/clock v1.0.0/go.mod h1:GZ/FY8Cqw3KHG6DwRVPUKbSPTAwyrU28xFi5cqZnLsc= +github.com/juju/cmd v0.0.0-20171107070456-e74f39857ca0/go.mod h1:yWJQHl73rdSX4DHVKGqkAip+huBslxRwS8m9CrOLq18= +github.com/juju/cmd/v3 v3.0.0-20210809234809-65029dab4cd0/go.mod h1:wFYE8X/PTiXeQoxBB4xN8rdx7e5/pr7uOlAdas0g7x8= +github.com/juju/cmd/v3 v3.0.0-20220202061353-b1cc80b193b0/go.mod h1:EoGJiEG+vbMwO9l+Es0SDTlaQPjH6nLcnnc4NfZB3cY= +github.com/juju/collections v0.0.0-20200605021417-0d0ec82b7271/go.mod h1:5XgO71dV1JClcOJE+4dzdn4HrI5LiyKd7PlVG6eZYhY= +github.com/juju/collections v0.0.0-20220203020748-febd7cad8a7a/go.mod h1:JWeZdyttIEbkR51z2S13+J+aCuHVe0F6meRy+P0YGDo= +github.com/juju/collections v1.0.0/go.mod h1:JWeZdyttIEbkR51z2S13+J+aCuHVe0F6meRy+P0YGDo= +github.com/juju/errors v0.0.0-20150916125642-1b5e39b83d18/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/errors 
v0.0.0-20180726005433-812b06ada177/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/errors v0.0.0-20190207033735-e65537c515d7/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/errors v0.0.0-20200330140219-3fe23663418f/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/errors v0.0.0-20210818161939-5560c4c073ff/go.mod h1:i1eL7XREII6aHpQ2gApI/v6FkVUDEBremNkcBCKYAcY= +github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9/go.mod h1:TRm7EVGA3mQOqSVcBySRY7a9Y1/gyVhh/WTCnc5sD4U= +github.com/juju/errors v1.0.0/go.mod h1:B5x9thDqx0wIMH3+aLIMP9HjItInYWObRovoCFM5Qe8= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= +github.com/juju/go4 v0.0.0-20160222163258-40d72ab9641a/go.mod h1:RVHtZuvrpETIepiNUrNlih2OynoFf1eM6DGC6dloXzk= +github.com/juju/gomaasapi v0.0.0-20200602032615-aa561369c767/go.mod h1:ppx9XlnQMX/+h/kH+cU9kfDUT6GimqGtNRWdobUZVRE= +github.com/juju/httpprof v0.0.0-20141217160036-14bf14c30767/go.mod h1:+MaLYz4PumRkkyHYeXJ2G5g5cIW0sli2bOfpmbaMV/g= +github.com/juju/loggo v0.0.0-20170605014607-8232ab8918d9/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/loggo v0.0.0-20190212223446-d976af380377/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/loggo v0.0.0-20200526014432-9ce3a2e09b5e/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/loggo v0.0.0-20210728185423-eebad3a902c4/go.mod h1:NIXFioti1SmKAlKNuUwbMenNdef59IF52+ZzuOmHYkg= +github.com/juju/loggo v1.0.0/go.mod h1:NIXFioti1SmKAlKNuUwbMenNdef59IF52+ZzuOmHYkg= +github.com/juju/mgo/v2 v2.0.0-20210302023703-70d5d206e208/go.mod h1:0OChplkvPTZ174D2FYZXg4IB9hbEwyHkD+zT+/eK+Fg= +github.com/juju/mgo/v2 v2.0.0-20220111072304-f200228f1090/go.mod h1:N614SE0a4e+ih2rg96Vi2PeC3cTpUOWgCTv3Cgk974c= +github.com/juju/mgo/v2 v2.0.2/go.mod h1:Z2QbXIrR9JuJcSyankQOw31tINNA5p3qevW73oDoHsM= +github.com/juju/mgotest v1.0.2/go.mod h1:04v1Xi2RiTO3h77YWtaXB2LAaGRSSi+Vl4hOV1coD0k= +github.com/juju/mgotest v1.0.3/go.mod h1:Dnzi6seljG9GoZpqFdTqRV3ybB3UcIj+H8iQqy1so1A= +github.com/juju/mutex v0.0.0-20171110020013-1fe2a4bf0a3a/go.mod h1:Y3oOzHH8CQ0Ppt0oCKJ2JFO81/EsWenH5AEqigLH+yY= +github.com/juju/mutex/v2 v2.0.0-20220128011612-57176ebdcfa3/go.mod h1:TTCG9BJD9rCC4DZFz3jA0QvCqFDHw8Eqz0jstwY7RTQ= +github.com/juju/mutex/v2 v2.0.0-20220203023141-11eeddb42c6c/go.mod h1:jwCfBs/smYDaeZLqeaCi8CB8M+tOes4yf827HoOEoqk= +github.com/juju/names/v4 v4.0.0-20200929085019-be23e191fee0/go.mod h1:gdlBx0aNufAMkEx3GjT8Yz4MChs3oVjCp/nEz/PrTX4= +github.com/juju/persistent-cookiejar v0.0.0-20170428161559-d67418f14c93/go.mod h1:zrbmo4nBKaiP/Ez3F67ewkMbzGYfXyMvRtbOfuAwG0w= +github.com/juju/persistent-cookiejar v1.0.0/go.mod h1:zrbmo4nBKaiP/Ez3F67ewkMbzGYfXyMvRtbOfuAwG0w= +github.com/juju/postgrestest v1.1.0/go.mod h1:/n17Y2T6iFozzXwSCO0JYJ5gSiz2caEtSwAjh/uLXDM= +github.com/juju/postgrestest v1.1.1/go.mod h1:/n17Y2T6iFozzXwSCO0JYJ5gSiz2caEtSwAjh/uLXDM= +github.com/juju/qthttptest v0.0.1/go.mod h1://LCf/Ls22/rPw2u1yWukUJvYtfPY4nYpWUl2uZhryo= +github.com/juju/qthttptest v0.1.1/go.mod h1:aTlAv8TYaflIiTDIQYzxnl1QdPjAg8Q8qJMErpKy6A4= +github.com/juju/qthttptest v0.1.3/go.mod 
h1:2gayREyVSs/IovPmwYAtU+HZzuhDjytJQRRLzPTtDYE= +github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= +github.com/juju/retry v0.0.0-20151029024821-62c620325291/go.mod h1:OohPQGsr4pnxwD5YljhQ+TZnuVRYpa5irjugL1Yuif4= +github.com/juju/retry v0.0.0-20160928201858-1998d01ba1c3/go.mod h1:OohPQGsr4pnxwD5YljhQ+TZnuVRYpa5irjugL1Yuif4= +github.com/juju/retry v0.0.0-20180821225755-9058e192b216/go.mod h1:OohPQGsr4pnxwD5YljhQ+TZnuVRYpa5irjugL1Yuif4= +github.com/juju/retry v1.0.0/go.mod h1:SssN1eYeK3A2qjnFGTiVMbdzGJ2BfluaJblJXvuvgqA= +github.com/juju/schema v1.0.0/go.mod h1:Y+ThzXpUJ0E7NYYocAbuvJ7vTivXfrof/IfRPq/0abI= +github.com/juju/schema v1.0.1-0.20190814234152-1f8aaeef0989/go.mod h1:Y+ThzXpUJ0E7NYYocAbuvJ7vTivXfrof/IfRPq/0abI= +github.com/juju/schema v1.0.1/go.mod h1:Y+ThzXpUJ0E7NYYocAbuvJ7vTivXfrof/IfRPq/0abI= +github.com/juju/simplekv v1.1.0/go.mod h1:OZjCrSxeKfEpNNp3JtM4B8NOVR4EJTffgvRY1qpPZ+w= +github.com/juju/testing v0.0.0-20180402130637-44801989f0f7/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= +github.com/juju/testing v0.0.0-20180517134105-72703b1e95eb/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= +github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= +github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= +github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= +github.com/juju/testing v0.0.0-20210302031854-2c7ee8570c07/go.mod h1:7lxZW0B50+xdGFkvhAb8bwAGt6IU87JB1H9w4t8MNVM= +github.com/juju/testing v0.0.0-20210324180055-18c50b0c2098/go.mod h1:7lxZW0B50+xdGFkvhAb8bwAGt6IU87JB1H9w4t8MNVM= +github.com/juju/testing v0.0.0-20220202055744-1ad0816210a6/go.mod h1:QgWc2UdIPJ8t3rnvv95tFNOsQDfpXYEZDbP281o3b2c= +github.com/juju/testing v0.0.0-20220203020004-a0ff61f03494/go.mod h1:rUquetT0ALL48LHZhyRGvjjBH8xZaZ8dFClulKK5wK4= +github.com/juju/testing v1.0.0/go.mod h1:rUquetT0ALL48LHZhyRGvjjBH8xZaZ8dFClulKK5wK4= +github.com/juju/usso v1.0.1/go.mod h1:3cvBcGVmWXyHhrBHBQtpNBzca/JRg4S5XH88Hj/NsYA= +github.com/juju/utils v0.0.0-20180424094159-2000ea4ff043/go.mod h1:6/KLg8Wz/y2KVGWEpkK9vMNGkOnu4k/cqs8Z1fKjTOk= +github.com/juju/utils v0.0.0-20180619112806-c746c6e86f4f/go.mod h1:6/KLg8Wz/y2KVGWEpkK9vMNGkOnu4k/cqs8Z1fKjTOk= +github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d/go.mod h1:6/KLg8Wz/y2KVGWEpkK9vMNGkOnu4k/cqs8Z1fKjTOk= +github.com/juju/utils v0.0.0-20200116185830-d40c2fe10647/go.mod h1:6/KLg8Wz/y2KVGWEpkK9vMNGkOnu4k/cqs8Z1fKjTOk= +github.com/juju/utils/v2 v2.0.0-20200923005554-4646bfea2ef1/go.mod h1:fdlDtQlzundleLLz/ggoYinEt/LmnrpNKcNTABQATNI= +github.com/juju/utils/v2 v2.0.0-20210305225158-eedbe7b6b3e2/go.mod h1:p35YIk2Pj1lxjhWuYsYbKvMpJ/iX9F8DBgJkNbGF0nQ= +github.com/juju/utils/v3 v3.0.0-20220130232349-cd7ecef0e94a/go.mod h1:LzwbbEN7buYjySp4nqnti6c6olSqRXUk6RkbSUUP1n8= +github.com/juju/utils/v3 v3.0.0-20220202114721-338bb0530e89/go.mod h1:wf5w+8jyTh2IYnSX0sHnMJo4ZPwwuiBWn+xN3DkQg4k= +github.com/juju/utils/v3 v3.0.0-20220203023959-c3fbc78a33b0/go.mod h1:8csUcj1VRkfjNIRzBFWzLFCMLwLqsRWvkmhfVAUwbC4= +github.com/juju/utils/v3 v3.0.0/go.mod h1:8csUcj1VRkfjNIRzBFWzLFCMLwLqsRWvkmhfVAUwbC4= +github.com/juju/version v0.0.0-20161031051906-1f41e27e54f2/go.mod h1:kE8gK5X0CImdr7qpSKl3xB2PmpySSmfj7zVbkZFs81U= +github.com/juju/version v0.0.0-20180108022336-b64dbd566305/go.mod h1:kE8gK5X0CImdr7qpSKl3xB2PmpySSmfj7zVbkZFs81U= +github.com/juju/version 
v0.0.0-20191219164919-81c1be00b9a6/go.mod h1:kE8gK5X0CImdr7qpSKl3xB2PmpySSmfj7zVbkZFs81U= +github.com/juju/version v0.0.0-20210303051006-2015802527a8/go.mod h1:YUOK6VK9+V4hpmsBYx8ymvOzEXRzoIGjRDKpIezTi60= +github.com/juju/version/v2 v2.0.0-20211007103408-2e8da085dc23/go.mod h1:Ljlbryh9sYaUSGXucslAEDf0A2XUSGvDbHJgW8ps6nc= +github.com/juju/version/v2 v2.0.0/go.mod h1:ZeFjNy+UFEWJDDPdzW7Cm9NeU6dsViGaFYhXzycLQrw= +github.com/juju/webbrowser v0.0.0-20160309143629-54b8c57083b4/go.mod h1:G6PCelgkM6cuvyD10iYJsjLBsSadVXtJ+nBxFAxE2BU= +github.com/juju/webbrowser v1.0.0/go.mod h1:RwVlbBcF91Q4vS+iwlkJ6bZTE3EwlrjbYlM3WMVD6Bc= +github.com/julienschmidt/httprouter v0.0.0-20151013225520-77a895ad01eb/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.1.1-0.20151013225520-77a895ad01eb/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.0.0-20210226073942-60b4fa260dd0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= +github.com/k-sone/critbitgo v1.4.0/go.mod h1:7E6pyoyADnFxlUBEKcnfS49b7SUAQGMK+OAp/UQvo0s= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= -github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo= +github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/keybase/go-crypto v0.0.0-20180614160407-5114a9a81e1b/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= +github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= 
+github.com/kisom/goutils v1.1.0/go.mod h1:+UBTfd78habUYWFbNWTJNG+jNG/i/lGURakr4A/yNRw= +github.com/kisom/goutils v1.4.3/go.mod h1:Lp5qrquG7yhYnWzZCI/68Pa/GpFynw//od6EkGnWpac= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.14.1 h1:hLQYb23E8/fO+1u53d02A97a8UnsddcvYzq4ERRU4ds= github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= +github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip 
v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.2/go.mod h1:4NCo0q4pmU398vF9vq2jStF9MWQZ8JEDcDMHlDCr4h0= +github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= github.com/knq/sysutil v0.0.0-20191005231841-15668db23d08/go.mod h1:dFWs1zEqDjFtnBXsd1vPOZaLsESovai349994nHx3e0= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= @@ -1308,17 +2507,24 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= -github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee/go.mod h1:Pe/YBTPc3vqoMkbuIWPH8CF9ehINdvNyS0dP3J6HC0s= +github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= +github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= +github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28/go.mod h1:T/T7jsxVqf9k/zYOqbgNAsANsjxTd1Yq3htjDhQ1H0c= +github.com/kylelemons/go-gypsy v1.0.0/go.mod h1:chkXM0zjdpXOiqkCW1XcCHDfjfk14PH2KKkQWxfJUcU= github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= @@ -1328,108 +2534,222 @@ github.com/leesper/go_rng 
v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= -github.com/lib/pq v0.0.0-20180523175426-90697d60dd84/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/letsencrypt/pkcs11key v2.0.1-0.20170608213348-396559074696+incompatible/go.mod h1:iGYXKqDXt0cpBthCHdr9ZdsQwyGlYFh/+8xa4WzIQ34= +github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= +github.com/lib/pq v0.0.0-20180201184707-88edab080323/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.1/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= +github.com/libvirt/libvirt-go-xml v7.4.0+incompatible/go.mod h1:oBlgD3xOA01ihiK5stbhFzvieyW+jVS6kbbsMVF623A= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linode/linodego v1.2.1/go.mod h1:x/7+BoaKd4unViBmS2umdjYyVAmpFtBtEXZ0wou7FYQ= github.com/linode/linodego v1.3.0 h1:77BPapuzhfIhXodiDUt/M76H46UiFYOytEupVN2auDI= github.com/linode/linodego v1.3.0/go.mod h1:PVsRxSlOiJyvG4/scTszpmZDTdgS+to3X6eS8pRrWI8= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/logzio/azure-monitor-metrics-receiver v1.0.0/go.mod h1:UIaQ7UgxZ8jO3L0JB2hctsHFBbZqL6mbxYscQAeFpl4= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= +github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= +github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lunixbochs/vtclean v0.0.0-20160125035106-4fbf7632a2c6/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= 
+github.com/lxc/go-lxc v0.0.0-20220627182551-ad3d9f7cb822/go.mod h1:ZSmlCm3aSNy4awYsId5tsGQUrB1MGd1Y8UTsf4DXNbI= +github.com/lxc/lxd v0.0.0-20220920163450-e9b4b514106a/go.mod h1:Y+Ny8KSylQRtfyOxVN0Cha/jAfd2l1AlHiDulGP+GQk= +github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-validate v0.0.0-20180911180927-64fcb82c878e/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/masterzen/azure-sdk-for-go v3.2.0-beta.0.20161014135628-ee4f0065d00c+incompatible/go.mod h1:mf8fjOu33zCqxUjuiU3I8S1lJMyEAlH+0F2+M5xl3hE= +github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc= +github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc= +github.com/masterzen/winrm v0.0.0-20161014151040-7a535cd943fc/go.mod h1:CfZSN7zwz5gJiFhZJz49Uzk7mEBHIceWmbFmYx7Hf7E= +github.com/masterzen/winrm 
v0.0.0-20210623064412-3b76017826b0/go.mod h1:l31LCh9VvG43RJ83A5JLkFPjuz48cZAxBSLQLaIn1p8= +github.com/masterzen/winrm v0.0.0-20211231115050-232efb40349e/go.mod h1:Iju3u6NzoTAvjuhsGCZc+7fReNnr/Bd6DsWj3WTokIU= +github.com/masterzen/xmlpath v0.0.0-20140218185901-13f4951698ad/go.mod h1:A0zPC53iKKKcXYxr4ROjpQRQ5FgJXtelNdSmHHuq/tY= +github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/mattn/go-colorable v0.0.6/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= +github.com/mattn/go-isatty v0.0.0-20160806122752-66b8e73f3f5c/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth 
v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= +github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= +github.com/mdlayher/apcupsd v0.0.0-20220319200143-473c7b5f3c6a/go.mod h1:960H6oqSawdujauTeLX9BOx+ZdYX0TdG9xE9br5bino= +github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= +github.com/mdlayher/ethtool v0.0.0-20211028163843-288d040e9d60/go.mod h1:aYbhishWc4Ai3I2U4Gaa2n3kHWSwzme6EsG/46HRQbE= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/genetlink v1.1.0/go.mod 
h1:1cAHdejIIk9zbWfP3gW30vY1QUlwyuhaqfkyANVVf10= +github.com/mdlayher/genetlink v1.2.0/go.mod h1:ra5LDov2KrUCZJiAtEvXXZBxGMInICMXIwshlJ+qRxQ= +github.com/mdlayher/ndp v0.10.0/go.mod h1:Uv6IWvgvqirNUu2N3ZXJEB86xu6foyUsG0NrClSSfek= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= +github.com/mdlayher/netlink v1.2.0/go.mod h1:kwVW1io0AZy9A1E2YYgaD4Cj+C+GPkU6klXCMzIJ9p8= +github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys= +github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= +github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q= +github.com/mdlayher/netlink v1.4.2/go.mod h1:13VaingaArGUTUxFLf/iEovKxXji32JAtF858jZYEug= +github.com/mdlayher/netlink v1.6.0/go.mod h1:0o3PlBmGst1xve7wQ7j/hwpNaFaH4qCRyWCdcZk8/vA= +github.com/mdlayher/netx v0.0.0-20220422152302-c711c2f8512f/go.mod h1:fwIXeygydjUR2l8Be1+G0PJSwHI4MpG5muNgH3qJqeQ= +github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= +github.com/mdlayher/socket v0.0.0-20211007213009-516dcbdf0267/go.mod h1:nFZ1EtZYK8Gi/k6QNu7z7CgO20i/4ExeQswwWuPmG/g= +github.com/mdlayher/socket v0.0.0-20211102153432-57e3fa563ecb/go.mod h1:nFZ1EtZYK8Gi/k6QNu7z7CgO20i/4ExeQswwWuPmG/g= +github.com/mdlayher/socket v0.1.1/go.mod h1:mYV5YIZAfHh4dzDVzI8x8tWLWCliuX8Mon5Awbj+qDs= +github.com/mdlayher/socket v0.2.0/go.mod h1:QLlNPkFR88mRUNQIzRBMfXxwKal8H7u1h3bL1CV+f0E= +github.com/mdlayher/socket v0.2.3/go.mod h1:bz12/FozYNH/VbvC3q7TRIK/Y6dH1kCKsXaUeXi/FmY= +github.com/mdlayher/vsock v1.1.1/go.mod h1:Y43jzcy7KM3QB+/FK15pfqGxDMCMzUXWegEfIbSM18U= github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8= +github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v1.0.3/go.mod h1:POGGZagSo/0frdr7VeAifzS5Uka0d0GPiM35MsTO8nE= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mhilton/openid v0.0.0-20150511103207-7922a4e937d8/go.mod h1:Alv076OXc0MA78hV0BTU06FTh1Q9sWKk3Ru20SykbTA= +github.com/mholt/archiver/v3 v3.5.0/go.mod h1:qqTTPUK/HZPFgFQ/TJ3BzvTpF/dPtFVJXdQbCmeMxwc= +github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod 
h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/miekg/dns v1.1.46 h1:uzwpxRtSVxtcIZmz/4Uz6/Rn7G11DvsaslXoy5LxQio= github.com/miekg/dns v1.1.46/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/minio/madmin-go v1.4.29/go.mod h1:ez87VmMtsxP7DRxjKJKD4RDNW+nhO2QF9KSzwxBDQ98= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= +github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= +github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.10 h1:1oUKe4EOPUEhw2qnPQaPsJ0lmVTYLFu03SiItauXs94= github.com/minio/minio-go/v7 v7.0.10/go.mod h1:td4gW1ldOsj1PbSNS+WYK43j+P1XVhX/8W8awaYlBFo= +github.com/minio/minio-go/v7 v7.0.23/go.mod h1:ei5JjmxwHaMrgsMrn4U/+Nmg+d8MKS1U2DAn1ou4+Do= +github.com/minio/minio-go/v7 v7.0.37 h1:aJvYMbtpVPSFBck6guyvOkxK03MycxDOCs49ZBuY5M8= +github.com/minio/minio-go/v7 v7.0.37/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v0.0.0-20160804032330-cdac8253d00f/go.mod h1:eOsF2yLPlBBJPvD+nhl5QMTBSOBbOph6N7j/IDUw7PY= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-fs v0.0.0-20180402235330-b7b9ca407fff/go.mod h1:g7SZj7ABpStq3tM4zqHiVEG5un/DZ1+qJJKO7qx1EvU= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -1437,18 +2757,30 @@ github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mjibson/esc v0.2.0/go.mod h1:9Hw9gxxfHulMF5OJKCyhYD7PzlSdhzXyaGEBRPH1OPs= +github.com/moby/ipvs v1.1.0/go.mod 
h1:4VJMWuf098bsUMmZEiD4Tjk/O7mOn3l1PTD3s4OoYAs= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/moby v1.13.1/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1459,49 +2791,81 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mostynb/go-grpc-compression v1.1.16/go.mod h1:xxa6UoYynYS2h+5HB/Hglu81iYAp87ARaNmhhwi0s1s= +github.com/mostynb/go-grpc-compression v1.1.17/go.mod h1:FUSBr0QjKqQgoDG/e0yiqlR6aqyXC39+g/hFLDfSsEY= +github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mozilla/tls-observatory v0.0.0-20201209171846-0547674fceff/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= github.com/mozillazg/go-httpheader v0.2.1/go.mod 
h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= +github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/multiplay/go-ts3 v1.0.0/go.mod h1:14S6cS3fLNT3xOytrA/DkRyAFNuQLMLEqOYAsf87IbQ= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/multiplay/go-ts3 v1.1.0/go.mod h1:OdNmiO3uV++4SldaJDQTIGg8gNAu5MOiccZiAqVqUZA= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= +github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= +github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg= +github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI= github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g= +github.com/nats-io/nats-server/v2 v2.9.9/go.mod h1:AB6hAnGZDlYfqb7CTAm66ZKMZy9DpfierY1/PbpvI2g= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nats.go v1.19.0/go.mod h1:tLqubohF7t4z3du1QDPYJIQQyhb4wl6DhjxEajSI7UA= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= 
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20201221231540-e56b841a3c88/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/ncw/swift v1.0.52 h1:ACF3JufDGgeKp/9mrDgQlEgS8kRYC4XKcuzj/8EJjQU= github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= -github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ= -github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/netsampler/goflow2 v1.1.1/go.mod h1:oNIeGj67SjwrRSTEukErjNT1zZ02W9+8M5mSgQCkZC8= +github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs= +github.com/newrelic/newrelic-telemetry-sdk-go v0.8.1/go.mod h1:2kY6OeOxrJ+RIQlVjWDc/pZlT3MIf30prs6drzMfJ6E= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= +github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= +github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= +github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= +github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= +github.com/nkovacs/streamquote v1.0.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc= +github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= +github.com/nsqio/go-nsq v1.1.0/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= +github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= +github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v0.0.0-20180308005104-6934b124db28/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= @@ -1510,32 +2874,72 @@ github.com/oklog/ulid v1.3.1/go.mod 
h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/olivere/elastic v6.2.35+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/olivere/elastic v6.2.37+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/olivere/elastic/v7 v7.0.12/go.mod h1:14rWX28Pnh3qCKYRVnSGXWLf9MbLonYS/4FDCY3LAPo= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= +github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= 
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= +github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.52.0/go.mod h1:CIX1MWy+2JYdeYhqjK89vrRpCGbz6LTLinp+SM8kZyo= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.54.0/go.mod h1:CSe1wsnLhSAEgAEXLfi8wTyl0VwPVkq7925aV8xm0oM= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.57.2/go.mod h1:xPchY5YNOL9jr6phVkJEvkEakMYr8HMD4uGYEoKXIek= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.70.0/go.mod h1:+AoyQ0tJRgM3yd8YcWQ0Muf42QMl00zwozFeJy7QhQE= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.70.0/go.mod h1:0niG2Iu5gJ6YLm+xrPRfhxYxbBpkJM7j5S2PPiHcFLA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.70.0/go.mod h1:BjDohX5to3ZHNpUZlUmxuboSV8VL7oeJaxHS8BvpyoI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.52.0/go.mod h1:tU7s/ki/QePSIZ7ExYGWLMb9erCAt66brQHBEHxr8HU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.54.0/go.mod h1:uWFgO68sLEqWrQxL8rws1CkB8EUNQaedP3FWI1gMJOU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.57.2/go.mod h1:4rFuWKMbzM9H39RbDvPtJfAp/fxsHydhJhKns6skmK0= +github.com/openconfig/gnmi v0.0.0-20200414194230-1597cc0f2600/go.mod h1:M/EcuapNQgvzxo1DDXHK4tx3QpYM/uG4l591v33jG2A= +github.com/openconfig/gnmi v0.0.0-20200508230933-d19cebf5e7be/go.mod h1:M/EcuapNQgvzxo1DDXHK4tx3QpYM/uG4l591v33jG2A= +github.com/openconfig/gnmi v0.0.0-20220503232738-6eb133c65a13/go.mod h1:h365Ifq35G6kLZDQlRvrccTt2LKK90VpjZLMNGxJRYc= +github.com/openconfig/gnmi v0.0.0-20220920173703-480bf53a74d2/go.mod h1:Y9os75GmSkhHw2wX8sMsxfI7qRGAEcDh8NTa5a8vj6E= +github.com/openconfig/goyang v0.0.0-20200115183954-d0a48929f0ea/go.mod h1:dhXaV0JgHJzdrHi2l+w0fZrwArtXL7jEFoiqLEdmkvU= +github.com/openconfig/goyang v0.2.2/go.mod h1:vX61x01Q46AzbZUzG617vWqh/cB+aisc+RrNkXRd3W8= +github.com/openconfig/goyang v1.0.0/go.mod h1:vX61x01Q46AzbZUzG617vWqh/cB+aisc+RrNkXRd3W8= +github.com/openconfig/gribi v0.1.1-0.20210423184541-ce37eb4ba92f/go.mod h1:OoH46A2kV42cIXGyviYmAlGmn6cHjGduyC2+I9d/iVs= +github.com/openconfig/grpctunnel v0.0.0-20210610163803-fde4a9dc048d/go.mod h1:x9tAZ4EwqCQ0jI8D6S8Yhw9Z0ee7/BxWQX0k0Uib5Q8= +github.com/openconfig/grpctunnel v0.0.0-20220819142823-6f5422b8ca70/go.mod 
h1:OmTWe7RyZj2CIzIgy4ovEBzCLBJzRvWSZmn7u02U9gU= +github.com/openconfig/ygot v0.6.0/go.mod h1:o30svNf7O0xK+R35tlx95odkDmZWS9JyWWQSmIhqwAs= +github.com/openconfig/ygot v0.10.4/go.mod h1:oCQNdXnv7dWc8scTDgoFkauv1wwplJn5HspHcjlxSAQ= +github.com/openconfig/ygot v0.20.0/go.mod h1:7ZiBFNc4n/1Hkv2v2dAEpxisqDznp0JVpLR13Toe4AY= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1544,14 +2948,19 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1562,7 +2971,10 @@ github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mo github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opentracing-contrib/go-grpc 
v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= +github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= @@ -1573,41 +2985,86 @@ github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKw github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.1.1-0.20190913142402-a7454ce5950e/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= -github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= -github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= -github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= +github.com/ory/go-acc v0.2.6/go.mod h1:4Kb/UnPcT8qRAk3IAxta+hvVapdxTLWtrr7bFLlEgpw= +github.com/ory/viper v1.7.5/go.mod h1:ypOuyJmEUb3oENywQZRgeAMwqgOyDqwboO1tj3DjTaM= +github.com/oschwald/geoip2-golang v1.5.0/go.mod h1:xdvYt5xQzB8ORWFqPnqMwZpCpgNagttWdoZLlJQzg7s= +github.com/oschwald/maxminddb-golang v1.8.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis= +github.com/osrg/gobgp/v3 v3.6.0/go.mod h1:fKQPuk7+4qMiDT5viZTXT/aSEn8yYDkEs5p3NjmU2bw= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= github.com/pact-foundation/pact-go v1.0.4/go.mod 
h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/patrickmn/go-cache v0.0.0-20180527043350-9f6ff22cfff8/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= +github.com/pavius/impi v0.0.3/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= +github.com/pborman/ansi v1.0.0/go.mod h1:SgWzwMAx1X/Ez7i90VqF8LRiQtx52pWDiQP+x3iGnzw= +github.com/pborman/getopt v0.0.0-20190409184431-ee0cd42419d3/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= +github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/peterh/liner v1.2.1/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/cmdflag v0.0.2/go.mod h1:a3zKGZ3cdQUfxjd0RGMLZr8xI3nvpJOB+m6o/1X5BmU= +github.com/pierrec/lz4 
v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.12 h1:44l88ehTZAUGW4VlO1QC4zkilL99M6Y9MXNwEs0uzP8= -github.com/pierrec/lz4/v4 v4.1.12/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v3 v3.3.4/go.mod h1:280XNCGS8jAcG++AHdd6SeWnzyJ1w9oow2vbORyey8Q= +github.com/pierrec/lz4/v4 v4.0.3/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.1/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.11/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= +github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pion/dtls/v2 v2.1.5/go.mod h1:BqCE7xPZbPSubGasRoDFJeTsyJtdD1FanJYL0JGheqY= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/transport v0.12.2/go.mod h1:N3+vZQD9HlDP5GWkZ85LohxNsDcNgofQmyL6ojX5d8Q= +github.com/pion/transport v0.13.0/go.mod h1:yxm9uXpK9bpBBWkITk13cLo1y5/ur5VQpG22ny6EP7g= +github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1615,22 +3072,34 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pkg/sftp v1.13.2/go.mod h1:LzqnAvaD5TWeNBsZpfKxSYn1MbjWwOsCIAFFJbpIsK8= +github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= +github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= 
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v0.0.0-20201127212506-19bd8db6546f/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pquerna/cachecontrol v0.0.0-20160421231612-c97913dcbd76/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/prometheus-community/pro-bing v0.1.0/go.mod h1:BpWlHurD9flHtzq8wrh8QGWYz9ka9z9ZJAyOel8ej58= github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= +github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= github.com/prometheus/alertmanager v0.23.0/go.mod h1:0MLTrjQI8EuVmvykEhcfr/7X0xmaDAZrqMgxIq3OXHk= github.com/prometheus/alertmanager v0.23.1-0.20210914172521-e35efbddb66a h1:qroc/F4ygaQ0uc2S+Pyk/exMwnSpokGyN1QjfZ1DiWU= github.com/prometheus/alertmanager v0.23.1-0.20210914172521-e35efbddb66a/go.mod h1:U7pGu+z7A9ZKhK8lq1MvIOp5GdVlZjwOYk+S0h3LSbA= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.0.0-20180328130430-f504d69affe1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= @@ -1638,21 +3107,27 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.6.1-0.20200604110148-03575cad4e55/go.mod h1:25h+Uz1WvXDBZYwqGX8PAb71RBkcjxEVV/R5wGnsq4I= 
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20180326160409-38c53a9f4bfc/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1663,13 +3138,22 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= +github.com/prometheus/common v0.24.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.29.0/go.mod 
h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= +github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= +github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= +github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/exporter-toolkit v0.7.1 h1:c6RXaK8xBVercEeUQ4tRNL8UGWzDHfvj9dseo1FcK1Y= @@ -1677,7 +3161,6 @@ github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQY github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 h1:dTUS1vaLWq+Y6XKOTnrFpoVsQKLCbCp1OLj24TDi7oM= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20180408092902-8b1c2da0d56d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1692,62 +3175,137 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/prometheus v0.0.0-20200609090129-a6600f564e3c/go.mod h1:S5n0C6tSgdnwWshBUceRx5G1OsjLv/EeZ9t3wIfEtsY= +github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2/go.mod h1:5aBj+GpLB+V5MCnrKm5+JAqEJwzDiLugOmDhgt7sDec= github.com/prometheus/prometheus v1.8.2-0.20211119115433-692a54649ed7/go.mod h1:outfylaI89+D5IO87TRPRmxfucIobTO3Rb0l2TKqpj0= github.com/prometheus/prometheus v1.8.2-0.20220303173753-edfe657b5405 h1:nGkR5xtWNTkV1ykDwObBvNPBjZrNQF55Y4b6Jv1YRB8= 
github.com/prometheus/prometheus v1.8.2-0.20220303173753-edfe657b5405/go.mod h1:yHgqW1gjCflLQEPNTdFCYG4rF+xCnp54C+hubRV3lH4= +github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= +github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/protocolbuffers/txtpbfmt v0.0.0-20220608084003-fc78c767cd6a/go.mod h1:KjY0wibdYKc4DYkerHSbguaf3JeIPGhNJBp2BNiFH78= +github.com/pseudomuto/protoc-gen-doc v1.4.1/go.mod h1:exDTOVwqpp30eV/EDPFLZy3Pwr2sn6hBC1WIYH/UbIg= +github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/quasilyte/go-ruleguard v0.3.0/go.mod h1:p2miAhLp6fERzFNbcuQ4bevXs8rgK//uCHsUDkumITg= +github.com/quasilyte/go-ruleguard/dsl v0.0.0-20210106184943-e47d54850b18/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.0.0-20210115110123-c73ee1cbff1f/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/rabbitmq/amqp091-go v1.5.0/go.mod h1:JsV0ofX5f1nwOGafb8L5rBItt9GyhfQfcJj+oyz0dGg= +github.com/rasky/go-xdr v0.0.0-20170217172119-4930550ba2e2/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= +github.com/riemann/riemann-go-client v0.5.1-0.20211206220514-f58f10cdce16/go.mod h1:4rS0vfmzOMwfFPhi6Zve4k/59TsBepqd6WESNULE0ho= +github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/robbiet480/go.nut v0.0.0-20220219091450-bd8f121e1fa1/go.mod h1:pL1huxuIlWub46MsMVJg4p7OXkzbPp/APxh9IH0eJjQ= +github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= +github.com/robfig/cron/v3 v3.0.1/go.mod 
h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a/go.mod h1:4r5QyqhjIWCcK8DO4KMclc5Iknq5qVBAlbYYzAbUScQ= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= +github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= +github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6W5lnUepvuMMgQ= +github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod 
h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= +github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA= +github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= +github.com/sanposhiho/wastedassign v0.1.3/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= +github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I= github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZXH21unn+ltzQSXML0= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/secure-io/sio-go v0.3.1/go.mod h1:+xbkjDzPjwh4Axd07pRKSNriS9SCiYksWnZqdnfpQxs= +github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do= +github.com/securego/gosec v0.0.0-20191119104125-df484bfa9e9f/go.mod h1:H5UrtKXL5BGF4FgRa7p2fyqU/lddaTSCLjexYIfS0Bk= +github.com/securego/gosec v0.0.0-20200203094520-d13bb6d2420c/go.mod h1:gp0gaHj0WlmPh9BdsTmo1aq6C27yIPWdxCKGFGdVKBE= +github.com/securego/gosec/v2 v2.6.1/go.mod h1:I76p3NTHBXsGhybUW+cEQ692q2Vp+A0Z6ZLzDIZy+Ao= github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M= github.com/segmentio/fasthash v1.0.2 h1:86fGDl2hB+iSHYlccB/FP9qRGvLNuH/fhEEFn6gnQUs= github.com/segmentio/fasthash v1.0.2/go.mod 
h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/sensu/sensu-go/api/core/v2 v2.15.0/go.mod h1:QxGKxqQv4rpweFrR4Jkp1tas3amGzAy0wO0fUwq0suU= github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil v0.0.0-20181107111621-48177ef5f880/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= +github.com/shirou/gopsutil/v3 v3.21.1/go.mod h1:igHnfak0qnw1biGeI2qKQvu0ZkwvEkUcCLlYhZzdr/4= +github.com/shirou/gopsutil/v3 v3.21.6/go.mod h1:JfVbDpIBLVzT8oKbvMg9P3wEIMDDpVn+LwHTKj0ST88= +github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= +github.com/shirou/gopsutil/v3 v3.22.5/go.mod h1:so9G9VzeHt/hsd0YwqprnjHnfARAUktauykSbr+y2gA= +github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs= +github.com/shirou/gopsutil/v3 v3.22.8/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI= +github.com/shirou/gopsutil/v3 v3.22.12/go.mod h1:Xd7P1kwZcp5VW52+9XsirIKd/BROzbb2wdX3Kqlz9uI= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/showwin/speedtest-go v1.2.1/go.mod h1:dJugxvC/AQDt4HQQKZ9lKNa2+b1c8nzj9IL0a/F8l1U= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -1755,44 +3313,84 @@ github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJV github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 
h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= +github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3/go.mod h1:gJrXWi7wSGXfiC7+VheQaz+ypdCt5SmZNL+BRxUe7y4= +github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083/go.mod h1:adPDS6s7WaajdFBV9mQ7i0dKfQ8xiDnF9ZNETVPpp7c= +github.com/signalfx/golib/v3 v3.3.46/go.mod h1:N7bdXqA6fGQQn03WHZRDXvyN/GUFI+BXjoJgidfncZU= +github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8KfCSyuwy/VILnROdgCvbQLA5ch0nkbG7lKT0BXw= +github.com/signalfx/sapm-proto v0.7.2/go.mod h1:HLufOh6Gd2altGxbeve+s6hh0EWCWoOM7MmuYuvs5PI= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.0/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sleepinggenius2/gosmi v0.4.4/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bTY2CNivIhsnDT0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.1.1/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey 
v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= +github.com/smartystreets/gunit v1.1.3/go.mod h1:EH5qMBab2UclzXUcpR8b93eHsIlp9u+pDQIRp5DZNzQ= github.com/snowflakedb/gosnowflake v1.3.13/go.mod h1:6nfka9aTXkUNha1p1cjeeyjDvcyh7jfjp0l8kGpDBok= -github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= +github.com/snowflakedb/gosnowflake v1.6.13/go.mod h1:BoZ0gnLERaUEiziH4Dumim10LN8cvoaCKovsAfhxzrE= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= +github.com/soheilhy/cmux v0.1.5-0.20210205191134-5ec6847320e5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= github.com/sony/gobreaker v0.4.1 h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= +github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= +github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.4.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= 
+github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1800,9 +3398,22 @@ github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.5.0/go.mod h1:AkYRkVJF8TkSG/xet6PzXX+l39KhhXa2pdqVSxnTcn4= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= +github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= +github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= +github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= -github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod 
h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1811,110 +3422,210 @@ github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5J github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod 
h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= +github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.194/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.194/go.mod h1:yrBKWhChnDqNz1xuXdSbWXG56XawEq0G5j1lg4VwBD4= github.com/tencentyun/cos-go-sdk-v5 v0.7.31 h1:NujkkOKMJ3IFs1+trCwXOKRCIPQ8qI5Lxul9JkhTg6M= github.com/tencentyun/cos-go-sdk-v5 v0.7.31/go.mod h1:4E4+bQ2gBVJcgEC9Cufwylio4mXOct2iu05WjgEBx1o= -github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9/go.mod h1:RHkNRtSLfOK7qBTHaeSX1D6BNpI3qw7NTxsmNr4RvN8= +github.com/testcontainers/testcontainers-go v0.14.0/go.mod h1:hSRGJ1G8Q5Bw2gXgPulJOLlEBaYJHeBSOkQM5JLG+JQ= +github.com/tetafro/godot v1.4.4/go.mod h1:FVDd4JuKliW3UgjswZfJfHq4vAx0bD/Jd5brJjGeaz4= github.com/thanos-io/thanos v0.19.1-0.20211126105533-c5505f5eaa7d h1:jkXuBsvBkxxDwATzwwho08Xvd/jCu7+E+maw8wrknWk= github.com/thanos-io/thanos v0.19.1-0.20211126105533-c5505f5eaa7d/go.mod h1:sfnKJG7cDA41ixNL4gsTJEa3w9Qt8lwAjw+dqRMSDG0= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= -github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= -github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/thomasklein94/packer-plugin-libvirt v0.3.4/go.mod h1:FLQTTGhVNak3rFgrZCJ2TZR6Cywz7ef/+z5Pg11EvJg= +github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= 
+github.com/tinylib/msgp v1.1.3/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= +github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= +github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= +github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= +github.com/tj/go-buffer v1.0.1/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/tklauser/numcpus v0.5.0/go.mod h1:OGzpTxpcIMNGYQdit2BYL1pvk/dSOaJWjKoflh+RQjo= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomarrell/wrapcheck v0.0.0-20201130113247-1683564d9756/go.mod h1:yiFB6fFoV7saXirUGfuK+cPtUh4NX/Hf5y2WC2lehu0= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= +github.com/tommy-muehle/go-mnd/v2 v2.3.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 h1:hbyjqt5UnyKeOT3rFVxLxi7iTI6XqR2p4TkwEAQdUiw= github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:Q5IRRDY+cjIaiOjTAnXN5LKQV5MPqVx5ofQn85Jy5Yw= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E= github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= 
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.2.6 h1:tGiWC9HENWE2tqYycIqFTNorMmFRVhNwCpDOpWqnk8E= +github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn0= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ= +github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw= +github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/urfave/cli/v2 v2.11.0/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo= +github.com/urfave/cli/v2 v2.23.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= +github.com/uudashr/gocognit v0.0.0-20190926065955-1655d0de0517/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= +github.com/uudashr/gocognit v1.0.0/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= +github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/fasthttp v1.16.0/go.mod 
h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/vapourismo/knx-go v0.0.0-20220829185957-fb5458a5389d/go.mod h1:43Jz/tjx4Ehy/CmohTtTIM33hIwYm/6ccdpddnK8KVY= +github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5/go.mod h1:ppEjwdhyy7Y31EnHRDm1JkChoC7LXIJ7Ex0VYLWtZtQ= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= -github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.0-20220913150850-18c4f4234207/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.2/go.mod h1:yitZXdAVI+yPFSb4QUe+VW3vOVl4PZPNcBgbPxAtJxw= +github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmware/govmomi v0.28.1-0.20220921224932-b4b508abf208/go.mod h1:F7adsVewLNHsW/IIm7ziFURaXDaHEwcc+ym4r3INMdY= +github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= +github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod 
h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= +github.com/wavefronthq/wavefront-sdk-go v0.11.0/go.mod h1:oKJ9Y0y36n+szFm2NiivXI+UubZe3lwfWnN1p+mFNDw= github.com/weaveworks/common v0.0.0-20210913144402-035033b78a78/go.mod h1:YU9FvnS7kUnRt6HY10G+2qHkwzP3n3Vb1XsXDsJTSp8= github.com/weaveworks/common v0.0.0-20211015155308-ebe5bdc2c89e h1:B0gVGyVpjfWJWSRe027EkhmEype0a0Dt2uHVxcPrhfs= github.com/weaveworks/common v0.0.0-20211015155308-ebe5bdc2c89e/go.mod h1:GWX2dQ7yjrgvqH0+d3kCJC5bsY8oOFwqjxFMHaRK4/k= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= +github.com/weppos/publicsuffix-go v0.4.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= +github.com/weppos/publicsuffix-go v0.5.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= +github.com/weppos/publicsuffix-go v0.13.1-0.20210123135404-5fd73613514e/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= +github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= +github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= +github.com/xdg-go/scram v1.1.0/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/scram v1.0.5/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= 
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yohcop/openid-go v1.0.0/go.mod h1:/408xiwkeItSPJZSTPF7+VtZxPkPrRRpRNK2vjGh6yI= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1922,12 +3633,29 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark v1.5.3/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da h1:NimzV1aGyq29m5ukMK0AMWEhFaL/lrEOaephfuoiARg= github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod 
h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= +github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= +github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e/go.mod h1:w7kd3qXHh8FNaczNjslXqvFQiv5mMWRXlL9klTUAHc8= +github.com/zmap/zcrypto v0.0.0-20210123152837-9cf5beac6d91/go.mod h1:R/deQh6+tSWlgI9tb4jNmXxn8nSCabl5ZQsBX9//I/E= +github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc/go.mod h1:FM4U1E3NzlNMRnSUTU3P1UdukWhYGifqEsjk9fn7BCk= +github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb/go.mod h1:29UiAJNsiVdvTBFCJW8e3q6dcDbOoPkhMgttOSCIMMY= +github.com/zmap/zlint/v3 v3.1.0/go.mod h1:L7t8s3sEKkb0A2BxGy1IWrxt1ZATa1R4QfJZaQOD3zU= +gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f/go.mod h1:Tiuhl+njh/JIg0uS/sOJVYi0x2HEa5rc1OAaVsb5tAs= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= go.elastic.co/apm v1.11.0/go.mod h1:qoOSi09pnzJDh5fKnfY7bPmQgl8yl2tULdOu03xhui0= go.elastic.co/apm/module/apmhttp v1.11.0/go.mod h1:5JFMIxdeS4vJy+D1PPPjINuX6hZ3AHalZXoOgyqZAkk= @@ -1940,15 +3668,41 @@ go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd v3.3.13+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= +go.etcd.io/etcd v3.3.17+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= go.etcd.io/etcd v3.3.25+incompatible h1:V1RzkZJj9LqsJRy+TUBgpWSbZXITLB819lstuTFoZOY= go.etcd.io/etcd v3.3.25+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= -go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= +go.etcd.io/etcd/api/v3 v3.5.0-alpha.0/go.mod h1:mPcW6aZJukV6Aa81LSKpBjQXTWlXB5r74ymPoSWa3Sw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= +go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg= +go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0-alpha.0/go.mod h1:kdV+xzCJ3luEBSIeQyB/OEKkWKd8Zkux4sbDeANrosU= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod 
h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= +go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI= +go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= +go.etcd.io/etcd/client/v3 v3.5.0-alpha.0/go.mod h1:wKt7jgDgf/OfKiYmCq5WFGxOFAkVMLxiiXgLDFhECr8= go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4= +go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.etcd.io/etcd/etcdctl/v3 v3.5.0-alpha.0/go.mod h1:YPwSaBciV5G6Gpt435AasAG3ROetZsKNUzibRa/++oo= +go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0/go.mod h1:tV31atvwzcybuqejDoY3oaNRTtlD2l/Ot78Pc9w7DMY= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0/go.mod h1:FAwse6Zlm5v4tEWZaTjmNhe17Int4Oxbu7+2r0DiD3w= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0-alpha.0/go.mod h1:tsKetYpt980ZTpzl/gb+UOJj9RkIyCb1u4wjzMg90BQ= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.etcd.io/etcd/tests/v3 v3.5.0-alpha.0/go.mod h1:HnrHxjyCuZ8YDt8PYVyQQ5d1ZQfzJVEtQWllr5Vp/30= +go.etcd.io/etcd/v3 v3.5.0-alpha.0/go.mod h1:JZ79d3LV6NUfPjUxXrpiFAYcjhT+06qqw+i28snx8To= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -1961,9 +3715,13 @@ go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4S go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= -go.mongodb.org/mongo-driver v1.7.5 h1:ny3p0reEpgsR2cfA5cjgwFZg3Cv/ofFh/8jbhGtz9VI= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= +go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= +go.mongodb.org/mongo-driver v1.11.1 h1:QP0znIRTuL0jf1oBQoAoM0C6ZJfBK4kx0Uumtv1A7w8= +go.mongodb.org/mongo-driver v1.11.1/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1972,39 +3730,125 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io 
v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/collector v0.52.0/go.mod h1:a9GvaOhyc0nVOUzqvdv5mxyWghCSso/WRO2GgRl4I1g= +go.opentelemetry.io/collector v0.54.0/go.mod h1:FgNzyfb4sAGb5cqusB5znETJ8Pz4OQUBGbOeGIZ2rlQ= +go.opentelemetry.io/collector v0.57.2/go.mod h1:9TwWyMRhbFNzaaGLtm/6poWNDJw+etvQMS6Fy+8/8Xs= +go.opentelemetry.io/collector v0.70.0/go.mod h1:cNC9VPU99N8R+cBubX2B29a1y0Ji+Fi9tgAY1LwIMt0= +go.opentelemetry.io/collector/component v0.70.0/go.mod h1:oGcF4cJf1kSNe+heznP1s5JGZhhcE8CjaoJR6M2A0Rg= +go.opentelemetry.io/collector/confmap v0.70.0/go.mod h1:8//JWR2TMChLH35Az0mGFrCskEIP6POgZJK6iRRhzeM= +go.opentelemetry.io/collector/consumer v0.70.0/go.mod h1:JfMJigmMJkAMWoL7fDNirOLY0QTE9wxcFccLyJZU7E0= +go.opentelemetry.io/collector/extension/zpagesextension v0.70.0/go.mod h1:JuIyTXNlXGHlSzVs6HK0NVQkFT/kGHfqn7HQI4rhnqY= +go.opentelemetry.io/collector/featuregate v0.70.0/go.mod h1:ih+oCwrHW3bLac/qnPUzes28yDCDmh8WzsAKKauwCYI= +go.opentelemetry.io/collector/model v0.49.0/go.mod h1:nOYQv9KoFPs6ihJwOi24qB209EOhS9HkwhGj54YiEAw= +go.opentelemetry.io/collector/pdata v0.49.0/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= +go.opentelemetry.io/collector/pdata v0.52.0/go.mod h1:GJUTfTv8mlYpHRjcmHXVbvJr48EW/q/P/HuBvpXAE58= +go.opentelemetry.io/collector/pdata v0.54.0/go.mod h1:1nSelv/YqGwdHHaIKNW9ZOHSMqicDX7W4/7TjNCm6N8= +go.opentelemetry.io/collector/pdata v0.56.0/go.mod h1:mYcCREWiIJyHss0dbU+GSiz2tmGZ6u09vtfkKTciog4= +go.opentelemetry.io/collector/pdata v0.57.2/go.mod h1:RU9I8lwBUxucwOsSYzHEcHi15M9QaX78hgQ2PRdSxV0= +go.opentelemetry.io/collector/pdata v1.0.0-rc4/go.mod h1:ft/11i2R6Ld/DC543bAS4R30/W8heexIvNqtzmQpnLQ= +go.opentelemetry.io/collector/semconv v0.52.0/go.mod h1:SxK0rUnUP7YeDakexzbE/vhimTOHwE6m/4aKKd9e27Q= +go.opentelemetry.io/collector/semconv v0.54.0/go.mod h1:HAGkPKNMhc4kEHevEqVIEtUuvsRQMIbUWBb8yBrqEwk= +go.opentelemetry.io/collector/semconv v0.56.0/go.mod h1:EH1wbDvTyqKpKBBpoMIe0KQk2plCcFS66Mo17WtR7CQ= +go.opentelemetry.io/collector/semconv v0.57.2/go.mod h1:84YnUjmm+nhGu4YTDLnHCbxnL74ooWpismPG79tFD7w= +go.opentelemetry.io/collector/semconv v0.70.0/go.mod h1:UAp+qAMqEXOD0eEBmWJ3IJ5+LkF7zVTgmfufwpHmL8w= +go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.32.0/go.mod h1:J0dBVrt7dPS/lKJyQoW0xzQiUr4r2Ik1VwPjAUWnofI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.33.0/go.mod h1:y/SlJpJQPd2UzfBCj0E9Flk9FDCtTyqUmaCB41qFrWI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0/go.mod h1:HSmzQvagH8pS2/xrK7ScWsk0vAMtRTGbMFgInXCi8Tc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 h1:SLme4Porm+UwX0DdHMxlwRt7FzPSE0sys81bet2o0pU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0/go.mod 
h1:tLYsuf2v8fZreBVwp9gVMhefZlLFZaUiNVSq8QxXRII= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0/go.mod h1:5eCOqeGphOyz6TsY3ZDNjE33SM/TFAK3RGuCL2naTgY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.33.0/go.mod h1:U5rUt7Rw6zuORsWNfpMRy8XMNKLrmIlv/4HgLVW/d5M= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 h1:yt2NKzK7Vyo6h0+X8BA4FpreZQTlVEIarnsBP/H5mzs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U= +go.opentelemetry.io/contrib/propagators/b3 v1.12.0/go.mod h1:0JDB4elfPUWGsCH/qhaMkDzP1l8nB0ANVx8zXuAYEwg= +go.opentelemetry.io/contrib/zpages v0.32.0/go.mod h1:Jx75I61RDcZU3d/1WrP6UdINlzGGLAwFNdQ3bCTb+qw= +go.opentelemetry.io/contrib/zpages v0.33.0/go.mod h1:ddmD63NkBVE29GucaBBCR8/b/TRlY+PkpIbF3m2JF7Y= +go.opentelemetry.io/contrib/zpages v0.37.0/go.mod h1:eENgsW/BBmryW838vx6FVH63sPBIVnUds1oCTSuwCpA= +go.opentelemetry.io/otel v0.7.0/go.mod h1:aZMyHG5TqDOXEgH2tyLiXSUKly1jT3yqE9PmrzIeCdo= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk= -go.opentelemetry.io/otel v1.4.1 h1:QbINgGDDcoQUoMJa2mMaWno49lja9sHwp6aoa2n3a4g= go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= +go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4AC+0Eo= +go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI= +go.opentelemetry.io/otel v1.12.0 h1:IgfC7kqQrRccIKuB7Cl+SRUmsKbEwSGPr0Eu+/ht1SQ= +go.opentelemetry.io/otel v1.12.0/go.mod h1:geaoz0L0r1BEOR81k7/n9W4TCXYCJ7bPO7K374jQHG0= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.35.0/go.mod h1:HKkSo2BOMO2CUdoIUuc/e4aLeMbeZaj+gNgjBj/Qdzk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.35.0/go.mod h1:BTWTNRCV2jdeEaKP+QJWD9g86QnFzxhZfsQZ1w7cSx4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1/go.mod h1:o5RW5o2pKpJLD5dNTCmjF1DorYwMeFJmb/rKr5sLaa8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1/go.mod h1:c6E4V3/U+miqjs/8l950wggHGL1qzlp0Ypj9xoGrPqo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1/go.mod h1:VwYo0Hak6Efuy0TXsZs8o1hnV3dHDPNtDbycG0hI8+M= -go.opentelemetry.io/otel/internal/metric v0.27.0 h1:9dAVGAfFiiEq5NVB9FUJ5et+btbDQAUIJehJ+ikyryk= 
+go.opentelemetry.io/otel/exporters/prometheus v0.30.0/go.mod h1:qN5feW+0/d661KDtJuATEmHtw5bKBK7NSvNEP927zSs= +go.opentelemetry.io/otel/exporters/prometheus v0.31.0/go.mod h1:QarXIB8L79IwIPoNgG3A6zNvBgVmcppeFogV1d8612s= +go.opentelemetry.io/otel/exporters/prometheus v0.34.0/go.mod h1:6gUoJyfhoWqF0tOLaY0ZmKgkQRcvEQx6p5rVlKHp3s4= go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw= -go.opentelemetry.io/otel/metric v0.27.0 h1:HhJPsGhJoKRSegPQILFbODU56NS/L1UE4fS1sC5kIwQ= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/metric v0.27.0/go.mod h1:raXDJ7uP2/Jc0nVZWQjJtzoyssOYWu/+pjZqRzfvZ7g= +go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU= +go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= +go.opentelemetry.io/otel/metric v0.35.0 h1:aPT5jk/w7F9zW51L7WgRqNKDElBdyRLGuBtI5MX34e8= +go.opentelemetry.io/otel/metric v0.35.0/go.mod h1:qAcbhaTRFU6uG8QM7dDo7XvFsWcugziq/5YI065TokQ= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE= +go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU= +go.opentelemetry.io/otel/sdk v1.8.0/go.mod h1:uPSfc+yfDH2StDM/Rm35WE8gXSNdvCg023J6HeGNO0c= +go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU= +go.opentelemetry.io/otel/sdk v1.12.0/go.mod h1:WYcvtgquYvgODEvxOry5owO2y9MyciW7JqMz6cpXShE= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/sdk/metric v0.30.0/go.mod h1:8AKFRi5HyvTR0RRty3paN1aMC9HMT+NzcEhw/BLkLX8= +go.opentelemetry.io/otel/sdk/metric v0.31.0/go.mod h1:fl0SmNnX9mN9xgU6OLYLMBMrNAsaZQi7qBwprwO3abk= +go.opentelemetry.io/otel/sdk/metric v0.34.0/go.mod h1:l4r16BIqiqPy5rd14kkxllPy/fOI4tWo1jkpD9Z3ffQ= +go.opentelemetry.io/otel/sdk/metric v0.35.0/go.mod h1:eDyp1GxSiwV98kr7w4pzrszQh/eze9MqBqPd2bCPmyE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE= -go.opentelemetry.io/otel/trace v1.4.1 h1:O+16qcdTrT7zxv2J6GejTPFinSwA++cYerC5iSiF8EQ= go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= +go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= +go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo= +go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= +go.opentelemetry.io/otel/trace v1.12.0 h1:p28in++7Kd0r2d8gSt931O57fdjUyWxkVbESuILAeUc= +go.opentelemetry.io/otel/trace v1.12.0/go.mod 
h1:pHlgBynn6s25qJ2szD+Bv+iwKJttjHSI3lUAyf0GNuQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ= -go.starlark.net v0.0.0-20200901195727-6e684ef5eeee/go.mod h1:f0znQkUKRrkk36XxWbGjMqQM8wGv/xHBVE2qc3B5oFU= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= +go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -2012,30 +3856,47 @@ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+ go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= 
-go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go4.org/intern v0.0.0-20210108033219-3eb7198706b2 h1:VFTf+jjIgsldaz/Mr00VaCSswHJrI2hIjQygE/W4IMg= go4.org/intern v0.0.0-20210108033219-3eb7198706b2/go.mod h1:vLqJ+12kCw61iCWsPto0EOHhBS+o4rO5VIucbc9g2Cc= go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222175341-b30ae309168e/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222180813-1025295fd063 h1:1tk03FUNpulq2cuWpXZWj649rwJpk0d20rxWiopKRmc= go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222180813-1025295fd063/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180214000028-650f4a345ab4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -2048,49 +3909,84 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U 
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto 
v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp 
v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -2106,6 +4002,7 @@ golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhp golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mobile v0.0.0-20210901025245-1fde1d6c3ca1/go.mod h1:jFTmtFYCV0MFtXBU+J5V/+5AUeVS0ON/0WkE/KSrl6E= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -2116,11 +4013,19 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod 
h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20150829230318-ea47fc708ee3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2130,10 +4035,10 @@ golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190403144856-b630fd6fe46b/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -2150,10 +4055,10 @@ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2163,7 +4068,9 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -2174,34 +4081,70 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211111083644-e5c967477495/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= +golang.org/x/net v0.0.0-20211118161319-6a13c67c3ce4/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net 
v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220421235706-1d1ef9303861/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220615171555-694bf12d69de/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= +golang.org/x/net v0.0.0-20220728211354-c7608f3a8462/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220927171203-f486391704dc/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2215,13 +4158,27 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 
v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= +golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2233,8 +4190,16 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220907140024-f12130a52804/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2243,11 +4208,15 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2265,7 +4234,10 @@ golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2277,7 +4249,6 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2292,6 +4263,7 @@ golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2311,7 +4283,9 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2320,104 +4294,190 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201024232916-9f70ab9862d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220222172238-00053529121e h1:AGLQ2aegkB2Y9RY8YdQk+7MDCW9da7YmizIwNIt8NtQ= golang.org/x/sys v0.0.0-20220222172238-00053529121e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181112210238-4b1f3b6b1646/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -2426,16 +4486,30 @@ golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911151314-feee8acb394c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191101200257-8dbcdeb83d3f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191118222007-07fc4c7f2b98/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2446,8 +4520,10 @@ golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200203023011-6f24f261dadb/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -2457,27 +4533,58 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= 
+golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200422205258-72e4a01eba43/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools 
v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201014170642-d1624618ad65/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210102185154-773b96fafca2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -2486,31 +4593,50 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.9 h1:j9KsMiaP1c3B0OTQGth0/k+miLGTgLsAFUCrF2vLcF8= +golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= -golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.zx2c4.com/go118/netip v0.0.0-20211111135330-a4a02eeacf9d/go.mod h1:5yyfuiqVIJ7t+3MqrpTQ+QqRkMWiESiyDvPNvKYCecg= +golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI= +golang.zx2c4.com/wireguard v0.0.0-20211129173154-2dd424e2d808/go.mod h1:TjUWrnD5ATh7bFvmm/ALEJZQ4ivKbETb6pmyj1vUoNI= +golang.zx2c4.com/wireguard v0.0.0-20211209221555-9c9e7e272434/go.mod h1:TjUWrnD5ATh7bFvmm/ALEJZQ4ivKbETb6pmyj1vUoNI= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671/go.mod h1:Q2XNgour4QSkFj0BWCkVlW0HWJwQgNMsMahpSlI0Eno= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= -gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20180829000535-087779f1d2c9/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.3.2/go.mod 
h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= @@ -2518,6 +4644,7 @@ google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.21.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= @@ -2527,7 +4654,10 @@ google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= @@ -2536,37 +4666,65 @@ google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6 google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0 h1:67zQnAE0T2rB0A3CwLSas0K+SbVzSxP+zTLkQLexeiw= google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod 
h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.104.0 h1:KBfmLRqdZEbwQleFlSLnzpQJwhjpmNOk4cKQIBDZ9mg= +google.golang.org/api v0.104.0/go.mod h1:JCspTXJbBxa5ySXw4UgUqVer7DfVxbvc/CTUFqAED5U= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190605220351-eb0b1bdb6ae6/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -2581,7 +4739,6 @@ google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200420144010-e5e8543f8aeb/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -2589,42 +4746,56 @@ google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200519141106-08726f379972/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= 
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210331142528-b7513248f0ba/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210510173355-fb37daa5cd7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto 
v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210811021853-ddbe55d93216/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210825212027-de86158e7fda/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= @@ -2632,17 +4803,67 @@ google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222154240-daf995802d7b h1:wHqTlwZVR0x5EG2S6vKlCq63+Tl/vBoQELitHxqxDOo= google.golang.org/genproto v0.0.0-20220222154240-daf995802d7b/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod 
h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6/go.mod h1:1dOng4TWOomJrDGhpXjfCD35wQC6jnC7HpRmOFRqEV0= +google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70= +google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v0.0.0-20180920234847-8997b5fa0873/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -2657,6 +4878,7 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= @@ -2670,11 +4892,24 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ= +google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20211119005141-f45e61797429/go.mod h1:gID3PKrg7pWKntu9Ss6zTLJ0ttC0X9IHgREOCZwbCVU= +google.golang.org/grpc/examples v0.0.0-20220401201435-0066bf69deb3/go.mod h1:gxndsbNG1n4TZcHGgsYEfVGnTxqfEdfiDv6/DADXX9o= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2685,16 +4920,21 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= 
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20170511165959-379148ca0225/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2703,6 +4943,10 @@ gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v1 v1.0.0-20161222125816-442357a80af5/go.mod h1:u0ALmqvLRxLI95fkdCEWrE6mhWYZW1aMOJHp5YXLHTg= +gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk= +gopkg.in/errgo.v1 v1.0.1/go.mod h1:3NjfXwocQRYAPTq4/fzX+CwUhPRcR/azYRhj8G+LqMo= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= gopkg.in/fsnotify.v1 v1.2.1/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -2714,30 +4958,57 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/goose.v1 v1.0.0-20161130145116-8f055ce635d6/go.mod h1:ZM14ECObhzpclsfV8uWsmADh80xveEXRV35GG4g+DHY= gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= +gopkg.in/httprequest.v1 v1.1.1/go.mod h1:/CkavNL+g3qLOrpFHVrEx4NKepeqR4XTZWNj4sGGjz0= +gopkg.in/httprequest.v1 v1.1.2/go.mod h1:/CkavNL+g3qLOrpFHVrEx4NKepeqR4XTZWNj4sGGjz0= 
+gopkg.in/httprequest.v1 v1.2.0/go.mod h1:T61ZUaJLpMnzvoJDO03ZD8yRXD4nZzBeDoW5e9sffjg= +gopkg.in/httprequest.v1 v1.2.1/go.mod h1:x2Otw96yda5+8+6ZeWwHIJTFkEHWP/qP8pJOzqEtWPM= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= -gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ= -gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/juju/environschema.v1 v1.0.0/go.mod h1:WTgU3KXKCVoO9bMmG/4KHzoaRvLeoxfjArpgd1MGWFA= +gopkg.in/juju/environschema.v1 v1.0.1/go.mod h1:WTgU3KXKCVoO9bMmG/4KHzoaRvLeoxfjArpgd1MGWFA= +gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= +gopkg.in/macaroon-bakery.v3 v3.0.0/go.mod h1:jc4E407H0WJzjH84QSPeecCdts0XnHUmKhA1AP/w1k8= +gopkg.in/macaroon.v2 v2.1.0/go.mod h1:OUb+TQP/OP0WOerC2Jp/3CwhIKyIa9kQjuc7H24e6/o= +gopkg.in/mgo.v2 v2.0.0-20160818015218-f2b6f6c918c4/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531180850-df99d62fd42d/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= -gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= +gopkg.in/olivere/elastic.v5 v5.0.86/go.mod h1:M3WNlsF+WhYn7api4D87NIflwTV/c0iVs8cqfWhK+68= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/retry.v1 v1.0.2/go.mod h1:tLRIBNXxoKtalyAWBSIbHdWkIBN2x9jVEm5l0Z+BjXs= +gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g= +gopkg.in/sourcemap.v1 v1.0.5/go.mod 
h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170712054546-1be3d31502d6/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -2752,14 +5023,18 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/gotestsum v1.8.2/go.mod h1:6JHCiN6TEjA7Kaz23q1bH0e2Dc3YJjDUZ0DmctFZf+w= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/netdb v0.0.0-20150201073656-a416d700ae39/go.mod h1:rbNo0ST5hSazCG4rGfpHrwnwvzP1QX62WbhzD+ghGzs= +gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= +gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2768,6 +5043,12 @@ honnef.co/go/tools 
v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.1/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.1.2/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.1.4/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= inet.af/netaddr v0.0.0-20210707202901-70468d781e6c h1:ZNUX2CiFwNbN1VFaD4MQFmC8o5Rxc7BQW1P1K8kMpbE= inet.af/netaddr v0.0.0-20210707202901-70468d781e6c/go.mod h1:z0nx+Dh+7N7CC8V5ayHtHGpZpxLQZZxkIaaz6HN65Ls= @@ -2778,37 +5059,86 @@ k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswP k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag= k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.20.0/go.mod h1:Gm8eSIfQN6457haJuPaMxZw4wyP5k+ykPFlrhQDvhvw= 
-k8s.io/klog/v2 v2.40.1 h1:P4RRucWk/lFOlDdkAr3mc7iWFkgKrZY9qZMAgek06S4= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= +k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20220803164354-a70c9af30aea h1:3QOH5+2fGsY8e1qf+GIFpg+zw/JGNrgyZRQR7/m6uWg= +k8s.io/kube-openapi v0.0.0-20220803164354-a70c9af30aea/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -modernc.org/httpfs v1.0.0/go.mod h1:BSkfoMUcahSijQD5J/Vu4UMOxzmEf5SNRwyXC4PJBEw= -modernc.org/libc v1.3.1/go.mod h1:f8sp9GAfEyGYh3lsRIKtBh/XwACdFvGznxm6GJmQvXk= -modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.0.1/go.mod h1:NSjvC08+g3MLOpcAxQbdctcThAEX4YlJ20WWHYEhvRg= -modernc.org/sqlite v1.7.4/go.mod h1:xse4RHCm8Fzw0COf5SJqAyiDrVeDwAQthAS1V/woNIA= -modernc.org/tcl v1.4.1/go.mod h1:8YCvzidU9SIwkz7RZwlCWK61mhV8X9UwfkRDRp7y5e0= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= +launchpad.net/lpad v0.0.0-20131113112110-000000000065/go.mod h1:cItrEkWN+F4fC6+HU9vEne57WkgMNe2aIWFFiYbvJ3M= +launchpad.net/xmlpath v0.0.0-20130614043138-000000000004/go.mod h1:vqyExLOM3qBx7mvYRkoxjSCF945s0mbe7YynlKYXtsA= +lukechampine.com/frand v1.4.2/go.mod h1:4S/TM2ZgrKejMcKMbeLjISpJMO+/eZ1zu3vYX9dtj3s= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= +modernc.org/cc/v3 v3.38.1/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= +modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= +modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI= +modernc.org/ccgo/v3 v3.0.0-20220910160915-348f15de615a/go.mod 
h1:8p47QxPkdugex9J4n9P2tLZ9bK01yngIVp00g4nomW0= +modernc.org/ccgo/v3 v3.16.12/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA= +modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0= +modernc.org/libc v1.19.0/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= +modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.19.2/go.mod h1:fEgebDYAGTFJj2c/ukKmnaq/0ZQZg0PSYxRa/bHyCDs= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.15.0/go.mod h1:xRoGotBZ6dU+Zo2tca+2EqVEeMmOUBzHnhIwq4YrVnE= +modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ= +mvdan.cc/gofumpt v0.1.0/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= +mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= +mvdan.cc/unparam v0.0.0-20191111180625-960b1ec0f2c2/go.mod h1:rCqoQrfAmpTX/h2APczwM7UymU/uvaOluiVPIYCSY/k= +mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= +pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= +pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= @@ -2816,11 +5146,18 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 
h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= +sourcegraph.com/sqs/pbtypes v1.0.0/go.mod h1:3AciMUv4qUuRHRHhOG4TZOB+72GdPVz5k+c648qsFS4= diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json new file mode 100644 index 000000000000..e5ebe1edf35e --- /dev/null +++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json @@ -0,0 +1,13 @@ +{ + "bigquery": "1.43.0", + "bigtable": "1.18.0", + "datastore": "1.9.0", + "errorreporting": "0.2.0", + "firestore": "1.8.0", + "logging": "1.5.0", + "profiler": "0.3.0", + "pubsub": "1.26.0", + "pubsublite": "1.4.1", + "spanner": "1.40.0", + "storage": "1.28.0" +} diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json new file mode 100644 index 000000000000..db91415aa4a8 --- /dev/null +++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json @@ -0,0 +1,115 @@ +{ + "accessapproval": "1.5.0", + "accesscontextmanager": "1.5.0", + "aiplatform": "1.25.0", + "analytics": "0.12.0", + "apigateway": "1.4.0", + "apigeeconnect": "1.4.0", + "apigeeregistry": "0.2.1", + "apikeys": "0.2.0", + "appengine": "1.5.0", + "area120": "0.6.0", + "artifactregistry": "1.9.0", + "asset": "1.10.0", + "assuredworkloads": "1.9.0", + "automl": "1.8.0", + "baremetalsolution": "0.4.0", + "batch": "0.4.0", + "beyondcorp": "0.3.0", + "billing": "1.7.0", + "binaryauthorization": "1.4.0", + "certificatemanager": "1.4.0", + "channel": "1.9.0", + "cloudbuild": "1.5.0", + "clouddms": "1.4.0", + "cloudtasks": "1.8.0", + "compute": "1.12.1", + "compute/metadata": "0.2.1", + "contactcenterinsights": "1.4.0", + "container": "1.8.0", + "containeranalysis": "0.6.0", + "datacatalog": "1.8.1", + "dataflow": "0.7.0", + "dataform": "0.5.0", + "datafusion": "1.5.0", + "datalabeling": "0.6.0", + "dataplex": "1.4.0", + "dataproc": "1.8.0", + "dataqna": "0.6.0", + "datastream": "1.5.0", + "deploy": "1.5.0", + "dialogflow": "1.20.0", + "dlp": "1.7.0", + "documentai": "1.11.0", + "domains": "0.7.0", + "edgecontainer": "0.2.0", + "essentialcontacts": "1.4.0", + "eventarc": "1.9.0", + "filestore": "1.4.0", + "functions": "1.9.0", + "gaming": "1.8.0", + "gkebackup": "0.3.0", + "gkeconnect": "0.6.0", + "gkehub": "0.10.0", + "gkemulticloud": "0.4.0", + "grafeas": "0.2.0", + "gsuiteaddons": "1.4.0", + "iam": "0.7.0", + "iap": "1.5.0", + "ids": "1.2.0", + "iot": "1.4.0", + "kms": "1.6.0", + "language": "1.8.0", + "lifesciences": "0.6.0", + "longrunning": "0.3.0", + "managedidentities": "1.4.0", + "maps": "0.0.0", + "mediatranslation": "0.6.0", + 
"memcache": "1.7.0", + "metastore": "1.8.0", + "monitoring": "1.9.0", + "networkconnectivity": "1.8.0", + "networkmanagement": "1.5.0", + "networksecurity": "0.6.0", + "notebooks": "1.5.0", + "optimization": "1.2.0", + "orchestration": "1.4.0", + "orgpolicy": "1.5.0", + "osconfig": "1.10.0", + "oslogin": "1.7.0", + "phishingprotection": "0.6.0", + "policytroubleshooter": "1.4.0", + "privatecatalog": "0.6.0", + "recaptchaenterprise/v2": "2.5.0", + "recommendationengine": "0.6.0", + "recommender": "1.8.0", + "redis": "1.10.0", + "resourcemanager": "1.4.0", + "resourcesettings": "1.4.0", + "retail": "1.11.0", + "run": "0.4.0", + "scheduler": "1.7.0", + "secretmanager": "1.9.0", + "security": "1.10.0", + "securitycenter": "1.16.0", + "servicecontrol": "1.5.0", + "servicedirectory": "1.7.0", + "servicemanagement": "1.5.0", + "serviceusage": "1.4.0", + "shell": "1.4.0", + "speech": "1.9.0", + "storagetransfer": "1.6.0", + "talent": "1.4.1", + "texttospeech": "1.5.0", + "tpu": "1.4.0", + "trace": "1.4.0", + "translate": "1.4.0", + "video": "1.10.0", + "videointelligence": "1.9.0", + "vision/v2": "2.5.0", + "vmmigration": "1.3.0", + "vpcaccess": "1.5.0", + "webrisk": "1.7.0", + "websecurityscanner": "1.4.0", + "workflows": "1.9.0" +} diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json new file mode 100644 index 000000000000..9eeeeb454538 --- /dev/null +++ b/vendor/cloud.google.com/go/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "0.107.0" +} diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index bac014faa35a..bb55f2e272b0 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,71 @@ # Changes +## [0.107.0](https://github.com/googleapis/google-cloud-go/compare/v0.106.0...v0.107.0) (2022-11-15) + + +### Features + +* **routing:** Start generating apiv2 ([#7011](https://github.com/googleapis/google-cloud-go/issues/7011)) ([66e8e27](https://github.com/googleapis/google-cloud-go/commit/66e8e2717b2593f4e5640ecb97344bb1d5e5fc0b)) + +## [0.106.0](https://github.com/googleapis/google-cloud-go/compare/v0.105.0...v0.106.0) (2022-11-09) + + +### Features + +* **debugger:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + +## [0.104.0](https://github.com/googleapis/google-cloud-go/compare/v0.103.0...v0.104.0) (2022-08-24) + + +### Features + +* **godocfx:** add friendlyAPIName ([#6447](https://github.com/googleapis/google-cloud-go/issues/6447)) ([c6d3ba4](https://github.com/googleapis/google-cloud-go/commit/c6d3ba401b7b3ae9b710a8850c6ec5d49c4c1490)) + +## [0.103.0](https://github.com/googleapis/google-cloud-go/compare/v0.102.1...v0.103.0) (2022-06-29) + + +### Features + +* **privateca:** temporarily remove REGAPIC support ([199b725](https://github.com/googleapis/google-cloud-go/commit/199b7250f474b1a6f53dcf0aac0c2966f4987b68)) + +## [0.102.1](https://github.com/googleapis/google-cloud-go/compare/v0.102.0...v0.102.1) (2022-06-17) + + +### Bug Fixes + +* **longrunning:** regapic remove path params duped as query params ([#6183](https://github.com/googleapis/google-cloud-go/issues/6183)) ([c963be3](https://github.com/googleapis/google-cloud-go/commit/c963be301f074779e6bb8c897d8064fa076e9e35)) + +## [0.102.0](https://github.com/googleapis/google-cloud-go/compare/v0.101.1...v0.102.0) (2022-05-24) + + +### Features + +* **civil:** add 
Before and After methods to civil.Time ([#5703](https://github.com/googleapis/google-cloud-go/issues/5703)) ([7acaaaf](https://github.com/googleapis/google-cloud-go/commit/7acaaafef47668c3e8382b8bc03475598c3db187)) + +### [0.101.1](https://github.com/googleapis/google-cloud-go/compare/v0.101.0...v0.101.1) (2022-05-03) + + +### Bug Fixes + +* **internal/gapicgen:** properly update modules that have no gapic changes ([#5945](https://github.com/googleapis/google-cloud-go/issues/5945)) ([de2befc](https://github.com/googleapis/google-cloud-go/commit/de2befcaa2a886499db9da6d4d04d28398c8d44b)) + +## [0.101.0](https://github.com/googleapis/google-cloud-go/compare/v0.100.2...v0.101.0) (2022-04-20) + + +### Features + +* **all:** bump grpc dep ([#5481](https://github.com/googleapis/google-cloud-go/issues/5481)) ([b12964d](https://github.com/googleapis/google-cloud-go/commit/b12964df5c63c647aaf204e73cfcdfd379d19682)) +* **internal/gapicgen:** change versionClient for gapics ([#5687](https://github.com/googleapis/google-cloud-go/issues/5687)) ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) + + +### Bug Fixes + +* **internal/gapicgen:** add generation of internal/version.go for new client modules ([#5726](https://github.com/googleapis/google-cloud-go/issues/5726)) ([341e0df](https://github.com/googleapis/google-cloud-go/commit/341e0df1e44480706180cc5b07c49b3cee904095)) +* **internal/gapicgen:** don't gen version files for longrunning and debugger ([#5698](https://github.com/googleapis/google-cloud-go/issues/5698)) ([3a81108](https://github.com/googleapis/google-cloud-go/commit/3a81108c74cd8864c56b8ab5939afd864db3c64b)) +* **internal/gapicgen:** don't try to make snippets for non-gapics ([#5919](https://github.com/googleapis/google-cloud-go/issues/5919)) ([c94dddc](https://github.com/googleapis/google-cloud-go/commit/c94dddc60ef83a0584ba8f7dd24589d9db971672)) +* **internal/gapicgen:** move breaking change indicator if present ([#5452](https://github.com/googleapis/google-cloud-go/issues/5452)) ([e712df5](https://github.com/googleapis/google-cloud-go/commit/e712df5ebb45598a1653081d7e11e578bad22ff8)) +* **internal/godocfx:** prevent errors for filtered mods ([#5485](https://github.com/googleapis/google-cloud-go/issues/5485)) ([6cb9b89](https://github.com/googleapis/google-cloud-go/commit/6cb9b89b2d654c695eab00d8fb375cce0cd6e059)) + ## [0.100.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.99.0...v0.100.0) (2022-01-04) diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md index c3a3852c3829..6d6e48b65bd7 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTING.md +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -2,7 +2,7 @@ 1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose). The issue will be used to discuss the bug or feature and should be created - before sending a CL. + before sending a PR. 1. [Install Go](https://golang.org/dl/). 1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`) diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index acde43bc8885..01453cc692a5 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -32,7 +32,14 @@ For an updated list of all of our released APIs please see our ## [Go Versions Supported](#supported-versions) -We currently support Go versions 1.11 and newer. 
+Our libraries are compatible with at least the three most recent, major Go +releases. They are currently compatible with: + +- Go 1.19 +- Go 1.18 +- Go 1.17 +- Go 1.16 +- Go 1.15 ## Authorization diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go new file mode 100644 index 000000000000..c9ba91825c29 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. +const Version = "1.14.0" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md new file mode 100644 index 000000000000..6e3ee8d6ab1c --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -0,0 +1,12 @@ +# Changes + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + +## [0.1.0] (2022-10-26) + +Initial release of metadata being it's own module. diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/LICENSE b/vendor/cloud.google.com/go/compute/metadata/LICENSE similarity index 99% rename from vendor/go.opentelemetry.io/otel/internal/metric/LICENSE rename to vendor/cloud.google.com/go/compute/metadata/LICENSE index 261eeb9e9f8b..d64569567334 100644 --- a/vendor/go.opentelemetry.io/otel/internal/metric/LICENSE +++ b/vendor/cloud.google.com/go/compute/metadata/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/vendor/cloud.google.com/go/compute/metadata/README.md b/vendor/cloud.google.com/go/compute/metadata/README.md new file mode 100644 index 000000000000..f940fb2c85b8 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/README.md @@ -0,0 +1,27 @@ +# Compute API + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) + +This is a utility library for communicating with Google Cloud metadata service +on Google Cloud. + +## Install + +```bash +go get cloud.google.com/go/compute/metadata +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. 
+By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 111309f3d8b4..d4aad9bf395f 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -16,7 +16,7 @@ // metadata and API service accounts. // // This package is a wrapper around the GCE metadata service, -// as documented at https://developers.google.com/compute/docs/metadata. +// as documented at https://cloud.google.com/compute/docs/metadata/overview. package metadata // import "cloud.google.com/go/compute/metadata" import ( @@ -70,7 +70,9 @@ func newDefaultHTTPClient() *http.Client { Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, + IdleConnTimeout: 60 * time.Second, }, + Timeout: 5 * time.Second, } } diff --git a/vendor/cloud.google.com/go/pubsub/go_mod_tidy_hack.go b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go similarity index 80% rename from vendor/cloud.google.com/go/pubsub/go_mod_tidy_hack.go rename to vendor/cloud.google.com/go/compute/metadata/tidyfix.go index 20b865930b9c..4cef48500817 100644 --- a/vendor/cloud.google.com/go/pubsub/go_mod_tidy_hack.go +++ b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the cloud.google.com/go import, won't actually become part of +// This file, and the {{.RootMod}} import, won't actually become part of // the resultant binary. +//go:build modhack // +build modhack -package pubsub +package metadata // Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "cloud.google.com/go" +import _ "cloud.google.com/go/compute/internal" diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go index 746696f37114..833878ec8f31 100644 --- a/vendor/cloud.google.com/go/doc.go +++ b/vendor/cloud.google.com/go/doc.go @@ -17,14 +17,30 @@ Package cloud is the root of the packages used to access Google Cloud Services. See https://godoc.org/cloud.google.com/go for a full list of sub-packages. - -Client Options +# Client Options All clients in sub-packages are configurable via client options. These options are described here: https://godoc.org/google.golang.org/api/option. +## Endpoint Override + +Endpoint configuration is used to specify the URL to which requests are +sent. It is used for services that support or require regional endpoints, as well +as for other use cases such as [testing against fake +servers](https://github.com/googleapis/google-cloud-go/blob/main/testing.md#testing-grpc-services-using-fakes). + +For example, the Vertex AI service recommends that you configure the endpoint to the +location with the features you want that is closest to your physical location or the +location of your users. There is no global endpoint for Vertex AI. 
See +[Vertex AI - Locations](https://cloud.google.com/vertex-ai/docs/general/locations) +for more details. The following example demonstrates configuring a Vertex AI client +with a regional endpoint: + + ctx := context.Background() + endpoint := "us-central1-aiplatform.googleapis.com:443" + client, err := aiplatform.NewDatasetClient(ctx, option.WithEndpoint(endpoint)) -Authentication and Authorization +# Authentication and Authorization All the clients in sub-packages support authentication via Google Application Default Credentials (see https://cloud.google.com/docs/authentication/production), or @@ -35,11 +51,12 @@ and authenticate clients. For information on how to create and obtain Application Default Credentials, see https://cloud.google.com/docs/authentication/production. Here is an example of a client using ADC to authenticate: - client, err := secretmanager.NewClient(context.Background()) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. + + client, err := secretmanager.NewClient(context.Background()) + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. You can use a file with credentials to authenticate and authorize, such as a JSON key file associated with a Google service account. Service Account keys can be @@ -47,12 +64,13 @@ created and downloaded from https://console.cloud.google.com/iam-admin/serviceaccounts. This example uses the Secret Manger client, but the same steps apply to the other client libraries underneath this package. Example: - client, err := secretmanager.NewClient(context.Background(), - option.WithCredentialsFile("/path/to/service-account-key.json")) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. + + client, err := secretmanager.NewClient(context.Background(), + option.WithCredentialsFile("/path/to/service-account-key.json")) + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. In some cases (for instance, you don't want to store secrets on disk), you can create credentials from in-memory JSON and use the WithCredentials option. @@ -62,19 +80,19 @@ the other client libraries underneath this package. Note that scopes can be found at https://developers.google.com/identity/protocols/oauth2/scopes, and are also provided in all auto-generated libraries: for example, cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example: - ctx := context.Background() - creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...) - if err != nil { - // TODO: handle error. - } - client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds)) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. + ctx := context.Background() + creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...) + if err != nil { + // TODO: handle error. + } + client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds)) + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. -Timeouts and Cancellation +# Timeouts and Cancellation By default, non-streaming methods, like Create or Get, will have a default deadline applied to the context provided at call time, unless a context deadline is already set. Streaming @@ -83,40 +101,42 @@ arrange for cancellation, use contexts. Transient errors will be retried when correctness allows. 
Here is an example of how to set a timeout for an RPC, use context.WithTimeout: - ctx := context.Background() - // Do not set a timeout on the context passed to NewClient: dialing happens - // asynchronously, and the context is used to refresh credentials in the - // background. - client, err := secretmanager.NewClient(ctx) - if err != nil { - // TODO: handle error. - } - // Time out if it takes more than 10 seconds to create a dataset. - tctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() // Always call cancel. - - req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"} - if err := client.DeleteSecret(tctx, req); err != nil { - // TODO: handle error. - } + + ctx := context.Background() + // Do not set a timeout on the context passed to NewClient: dialing happens + // asynchronously, and the context is used to refresh credentials in the + // background. + client, err := secretmanager.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Time out if it takes more than 10 seconds to create a dataset. + tctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() // Always call cancel. + + req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"} + if err := client.DeleteSecret(tctx, req); err != nil { + // TODO: handle error. + } Here is an example of how to arrange for an RPC to be canceled, use context.WithCancel: - ctx := context.Background() - // Do not cancel the context passed to NewClient: dialing happens asynchronously, - // and the context is used to refresh credentials in the background. - client, err := secretmanager.NewClient(ctx) - if err != nil { - // TODO: handle error. - } - cctx, cancel := context.WithCancel(ctx) - defer cancel() // Always call cancel. - - // TODO: Make the cancel function available to whatever might want to cancel the - // call--perhaps a GUI button. - req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"} - if err := client.DeleteSecret(cctx, req); err != nil { - // TODO: handle error. - } + + ctx := context.Background() + // Do not cancel the context passed to NewClient: dialing happens asynchronously, + // and the context is used to refresh credentials in the background. + client, err := secretmanager.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + cctx, cancel := context.WithCancel(ctx) + defer cancel() // Always call cancel. + + // TODO: Make the cancel function available to whatever might want to cancel the + // call--perhaps a GUI button. + req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"} + if err := client.DeleteSecret(cctx, req); err != nil { + // TODO: handle error. + } To opt out of default deadlines, set the temporary environment variable GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client @@ -130,8 +150,7 @@ timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts would be ineffective and would only interfere with credential refreshing, which uses the same context. - -Connection Pooling +# Connection Pooling Connection pooling differs in clients based on their transport. Cloud clients either rely on HTTP or gRPC transports to communicate @@ -147,37 +166,65 @@ of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a clie option to NewClient calls. This configures the underlying gRPC connections to be pooled and addressed in a round robin fashion. 
- -Using the Libraries with Docker +# Using the Libraries with Docker Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928 for more information. - -Debugging +# Debugging To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See https://godoc.org/google.golang.org/grpc/grpclog for more information. For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2". +# Inspecting errors + +Most of the errors returned by the generated clients are wrapped in an +[github.com/googleapis/gax-go/v2/apierror.APIError] and can be further unwrapped +into a [google.golang.org/grpc/status.Status] or +[google.golang.org/api/googleapi.Error] depending +on the transport used to make the call (gRPC or REST). Converting your errors to +these types can be a useful way to get more information about what went wrong +while debugging. + +[github.com/googleapis/gax-go/v2/apierror.APIError] gives access to specific +details in the error. The transport-specific errors can still be unwrapped using +the [github.com/googleapis/gax-go/v2/apierror.APIError]. + + if err != nil { + var ae *apierror.APIError + if errors.As(err, &ae) { + log.Println(ae.Reason()) + log.Println(ae.Details().Help.GetLinks()) + } + } + +If the gRPC transport was used, the [google.golang.org/grpc/status.Status] can +still be parsed using the [google.golang.org/grpc/status.FromError] function. + + if err != nil { + if s, ok := status.FromError(err); ok { + log.Println(s.Message()) + for _, d := range s.Proto().Details { + log.Println(d) + } + } + } -Inspecting errors +If the REST transport was used, the [google.golang.org/api/googleapi.Error] can +be parsed in a similar way, allowing access to details such as the HTTP response +code. -Most of the errors returned by the generated clients can be converted into a -`grpc.Status`. Converting your errors to this type can be a useful to get -more information about what went wrong while debugging. - if err != { - if s, ok := status.FromError(err); ok { - log.Println(s.Message()) - for _, d := range s.Proto().Details { - log.Println(d) + if err != nil { + var gerr *googleapi.Error + if errors.As(err, &gerr) { + log.Println(gerr.Message) } } - } -Client Stability +# Client Stability Clients in this repository are considered alpha or beta unless otherwise marked as stable in the README.md. 
Semver is not used to communicate stability diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index deca87dea5bc..ced217827b0d 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,5 +1,61 @@ # Changes +## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05) + + +### Features + +* **iam:** Start generating and refresh some libraries ([#7089](https://github.com/googleapis/google-cloud-go/issues/7089)) ([a9045ff](https://github.com/googleapis/google-cloud-go/commit/a9045ff191a711089c37f1d94a63522d9939ce38)) + +## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03) + + +### Features + +* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25) + + +### Features + +* **iam:** start generating stubs dir ([de2d180](https://github.com/googleapis/google-cloud-go/commit/de2d18066dc613b72f6f8db93ca60146dabcfdcc)) + +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28) + + +### Features + +* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06) + + +### Features + +* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50)) + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23) + + +### Features + +* **iam:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) + +## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.1.1...iam/v0.2.0) (2022-02-14) + + +### Features + +* **iam:** add file for tracking version ([17b36ea](https://github.com/googleapis/google-cloud-go/commit/17b36ead42a96b1a01105122074e65164357519e)) + +### [0.1.1](https://www.github.com/googleapis/google-cloud-go/compare/iam/v0.1.0...iam/v0.1.1) (2022-01-14) + + +### Bug Fixes + +* **iam:** run formatter ([#5277](https://www.github.com/googleapis/google-cloud-go/issues/5277)) ([8682e4e](https://www.github.com/googleapis/google-cloud-go/commit/8682e4ed57a4428a659fbc225f56c91767e2a4a9)) + ## v0.1.0 This is the first tag to carve out iam as its own module. See diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go similarity index 85% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index f62ccc726897..2793098aabcf 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -11,15 +11,14 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.5 // source: google/iam/v1/iam_policy.proto -package iam +package iampb import ( context "context" @@ -32,6 +31,7 @@ import ( status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" ) const ( @@ -55,6 +55,12 @@ type SetIamPolicyRequest struct { // valid policy but certain Cloud Platform services (such as Projects) // might reject them. Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"` + // OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only + // the fields in the mask will be modified. If no mask is provided, the + // following default mask is used: + // + // `paths: "bindings, etag"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } func (x *SetIamPolicyRequest) Reset() { @@ -103,6 +109,13 @@ func (x *SetIamPolicyRequest) GetPolicy() *Policy { return nil } +func (x *SetIamPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + // Request message for `GetIamPolicy` method. type GetIamPolicyRequest struct { state protoimpl.MessageState @@ -113,7 +126,7 @@ type GetIamPolicyRequest struct { // See the operation documentation for the appropriate value for this field. Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` // OPTIONAL: A `GetPolicyOptions` object for specifying options to - // `GetIamPolicy`. This field is only used by Cloud IAM. + // `GetIamPolicy`. 
Options *GetPolicyOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` } @@ -281,24 +294,30 @@ var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, - 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x70, 0x0a, 0x13, 0x53, - 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a, 0x52, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x06, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x77, 0x0a, + 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, + 0x31, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x01, + 0x0a, 0x13, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, + 0x01, 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x06, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x77, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, @@ -375,22 +394,24 @@ var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{ (*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest (*TestIamPermissionsResponse)(nil), // 3: google.iam.v1.TestIamPermissionsResponse (*Policy)(nil), // 4: google.iam.v1.Policy - (*GetPolicyOptions)(nil), // 5: google.iam.v1.GetPolicyOptions + (*fieldmaskpb.FieldMask)(nil), // 5: google.protobuf.FieldMask + (*GetPolicyOptions)(nil), // 6: google.iam.v1.GetPolicyOptions } var file_google_iam_v1_iam_policy_proto_depIdxs = []int32{ 4, // 0: google.iam.v1.SetIamPolicyRequest.policy:type_name -> google.iam.v1.Policy - 5, // 1: google.iam.v1.GetIamPolicyRequest.options:type_name -> google.iam.v1.GetPolicyOptions - 0, // 2: google.iam.v1.IAMPolicy.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 1, // 3: google.iam.v1.IAMPolicy.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 2, // 4: google.iam.v1.IAMPolicy.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 4, // 5: google.iam.v1.IAMPolicy.SetIamPolicy:output_type -> google.iam.v1.Policy - 4, // 6: google.iam.v1.IAMPolicy.GetIamPolicy:output_type -> google.iam.v1.Policy - 3, // 7: google.iam.v1.IAMPolicy.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 5, // [5:8] is the sub-list for method output_type - 2, // [2:5] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 5, // 1: google.iam.v1.SetIamPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask + 6, // 2: google.iam.v1.GetIamPolicyRequest.options:type_name -> google.iam.v1.GetPolicyOptions + 0, // 3: google.iam.v1.IAMPolicy.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 1, // 4: google.iam.v1.IAMPolicy.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 2, // 5: google.iam.v1.IAMPolicy.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 4, // 6: google.iam.v1.IAMPolicy.SetIamPolicy:output_type -> google.iam.v1.Policy + 4, // 7: google.iam.v1.IAMPolicy.GetIamPolicy:output_type -> google.iam.v1.Policy + 3, 
// 8: google.iam.v1.IAMPolicy.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 6, // [6:9] is the sub-list for method output_type + 3, // [3:6] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_google_iam_v1_iam_policy_proto_init() } @@ -484,6 +505,8 @@ const _ = grpc.SupportPackageIsVersion6 type IAMPolicyClient interface { // Sets the access control policy on the specified resource. Replaces any // existing policy. + // + // Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) // Gets the access control policy for a resource. // Returns an empty policy if the resource exists and does not have a policy @@ -491,7 +514,7 @@ type IAMPolicyClient interface { GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) // Returns permissions that a caller has on the specified resource. // If the resource does not exist, this will return an empty set of - // permissions, not a NOT_FOUND error. + // permissions, not a `NOT_FOUND` error. // // Note: This operation is designed to be used for building permission-aware // UIs and command-line tools, not for authorization checking. This operation @@ -538,6 +561,8 @@ func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPer type IAMPolicyServer interface { // Sets the access control policy on the specified resource. Replaces any // existing policy. + // + // Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) // Gets the access control policy for a resource. // Returns an empty policy if the resource exists and does not have a policy @@ -545,7 +570,7 @@ type IAMPolicyServer interface { GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) // Returns permissions that a caller has on the specified resource. // If the resource does not exist, this will return an empty set of - // permissions, not a NOT_FOUND error. + // permissions, not a `NOT_FOUND` error. // // Note: This operation is designed to be used for building permission-aware // UIs and command-line tools, not for authorization checking. This operation diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go similarity index 70% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index 22763ad963b3..835f21719980 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,21 +11,19 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.5 // source: google/iam/v1/options.proto -package iam +package iampb import ( reflect "reflect" sync "sync" - _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) @@ -43,14 +41,24 @@ type GetPolicyOptions struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Optional. The policy format version to be returned. + // Optional. The maximum policy version that will be used to format the + // policy. // // Valid values are 0, 1, and 3. Requests specifying an invalid value will be // rejected. // - // Requests for policies with any conditional bindings must specify version 3. - // Policies without any conditional bindings may specify any valid value or - // leave the field unset. + // Requests for policies with any conditional role bindings must specify + // version 3. Policies with no conditional role bindings may specify any valid + // value or leave the field unset. + // + // The policy in the response might use the policy version that you specified, + // or it might use a lower policy version. For example, if you specify version + // 3, but the policy has no conditional role bindings, the response uses + // version 1. + // + // To learn which resources support conditions in their IAM policies, see the + // [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). RequestedPolicyVersion int32 `protobuf:"varint,1,opt,name=requested_policy_version,json=requestedPolicyVersion,proto3" json:"requested_policy_version,omitempty"` } @@ -98,23 +106,21 @@ var File_google_iam_v1_options_proto protoreflect.FileDescriptor var file_google_iam_v1_options_proto_rawDesc = []byte{ 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4c, 0x0a, 0x10, 0x47, 0x65, - 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0c, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, - 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 
0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x22, 0x4c, 0x0a, 0x10, + 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, + 0x42, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, + 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go new file mode 100644 index 000000000000..ec7777a7687d --- /dev/null +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -0,0 +1,1169 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.5 +// source: google/iam/v1/policy.proto + +package iampb + +import ( + reflect "reflect" + sync "sync" + + expr "google.golang.org/genproto/googleapis/type/expr" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The list of valid permission types for which logging can be configured. +// Admin writes are always logged, and are not configurable. +type AuditLogConfig_LogType int32 + +const ( + // Default case. Should never be this. + AuditLogConfig_LOG_TYPE_UNSPECIFIED AuditLogConfig_LogType = 0 + // Admin reads. Example: CloudIAM getIamPolicy + AuditLogConfig_ADMIN_READ AuditLogConfig_LogType = 1 + // Data writes. 
Example: CloudSQL Users create + AuditLogConfig_DATA_WRITE AuditLogConfig_LogType = 2 + // Data reads. Example: CloudSQL Users list + AuditLogConfig_DATA_READ AuditLogConfig_LogType = 3 +) + +// Enum value maps for AuditLogConfig_LogType. +var ( + AuditLogConfig_LogType_name = map[int32]string{ + 0: "LOG_TYPE_UNSPECIFIED", + 1: "ADMIN_READ", + 2: "DATA_WRITE", + 3: "DATA_READ", + } + AuditLogConfig_LogType_value = map[string]int32{ + "LOG_TYPE_UNSPECIFIED": 0, + "ADMIN_READ": 1, + "DATA_WRITE": 2, + "DATA_READ": 3, + } +) + +func (x AuditLogConfig_LogType) Enum() *AuditLogConfig_LogType { + p := new(AuditLogConfig_LogType) + *p = x + return p +} + +func (x AuditLogConfig_LogType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AuditLogConfig_LogType) Descriptor() protoreflect.EnumDescriptor { + return file_google_iam_v1_policy_proto_enumTypes[0].Descriptor() +} + +func (AuditLogConfig_LogType) Type() protoreflect.EnumType { + return &file_google_iam_v1_policy_proto_enumTypes[0] +} + +func (x AuditLogConfig_LogType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AuditLogConfig_LogType.Descriptor instead. +func (AuditLogConfig_LogType) EnumDescriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3, 0} +} + +// The type of action performed on a Binding in a policy. +type BindingDelta_Action int32 + +const ( + // Unspecified. + BindingDelta_ACTION_UNSPECIFIED BindingDelta_Action = 0 + // Addition of a Binding. + BindingDelta_ADD BindingDelta_Action = 1 + // Removal of a Binding. + BindingDelta_REMOVE BindingDelta_Action = 2 +) + +// Enum value maps for BindingDelta_Action. +var ( + BindingDelta_Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ADD", + 2: "REMOVE", + } + BindingDelta_Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ADD": 1, + "REMOVE": 2, + } +) + +func (x BindingDelta_Action) Enum() *BindingDelta_Action { + p := new(BindingDelta_Action) + *p = x + return p +} + +func (x BindingDelta_Action) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (BindingDelta_Action) Descriptor() protoreflect.EnumDescriptor { + return file_google_iam_v1_policy_proto_enumTypes[1].Descriptor() +} + +func (BindingDelta_Action) Type() protoreflect.EnumType { + return &file_google_iam_v1_policy_proto_enumTypes[1] +} + +func (x BindingDelta_Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use BindingDelta_Action.Descriptor instead. +func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{5, 0} +} + +// The type of action performed on an audit configuration in a policy. +type AuditConfigDelta_Action int32 + +const ( + // Unspecified. + AuditConfigDelta_ACTION_UNSPECIFIED AuditConfigDelta_Action = 0 + // Addition of an audit configuration. + AuditConfigDelta_ADD AuditConfigDelta_Action = 1 + // Removal of an audit configuration. + AuditConfigDelta_REMOVE AuditConfigDelta_Action = 2 +) + +// Enum value maps for AuditConfigDelta_Action. 
+var ( + AuditConfigDelta_Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ADD", + 2: "REMOVE", + } + AuditConfigDelta_Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ADD": 1, + "REMOVE": 2, + } +) + +func (x AuditConfigDelta_Action) Enum() *AuditConfigDelta_Action { + p := new(AuditConfigDelta_Action) + *p = x + return p +} + +func (x AuditConfigDelta_Action) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AuditConfigDelta_Action) Descriptor() protoreflect.EnumDescriptor { + return file_google_iam_v1_policy_proto_enumTypes[2].Descriptor() +} + +func (AuditConfigDelta_Action) Type() protoreflect.EnumType { + return &file_google_iam_v1_policy_proto_enumTypes[2] +} + +func (x AuditConfigDelta_Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AuditConfigDelta_Action.Descriptor instead. +func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{6, 0} +} + +// An Identity and Access Management (IAM) policy, which specifies access +// controls for Google Cloud resources. +// +// A `Policy` is a collection of `bindings`. A `binding` binds one or more +// `members`, or principals, to a single `role`. Principals can be user +// accounts, service accounts, Google groups, and domains (such as G Suite). A +// `role` is a named list of permissions; each `role` can be an IAM predefined +// role or a user-created custom role. +// +// For some types of Google Cloud resources, a `binding` can also specify a +// `condition`, which is a logical expression that allows access to a resource +// only if the expression evaluates to `true`. A condition can add constraints +// based on attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the +// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). +// +// **JSON example:** +// +// { +// "bindings": [ +// { +// "role": "roles/resourcemanager.organizationAdmin", +// "members": [ +// "user:mike@example.com", +// "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" +// ] +// }, +// { +// "role": "roles/resourcemanager.organizationViewer", +// "members": [ +// "user:eve@example.com" +// ], +// "condition": { +// "title": "expirable access", +// "description": "Does not grant access after Sep 2020", +// "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", +// } +// } +// ], +// "etag": "BwWWja0YfJA=", +// "version": 3 +// } +// +// **YAML example:** +// +// bindings: +// - members: +// - user:mike@example.com +// - group:admins@example.com +// - domain:google.com +// - serviceAccount:my-project-id@appspot.gserviceaccount.com +// role: roles/resourcemanager.organizationAdmin +// - members: +// - user:eve@example.com +// role: roles/resourcemanager.organizationViewer +// condition: +// title: expirable access +// description: Does not grant access after Sep 2020 +// expression: request.time < timestamp('2020-10-01T00:00:00.000Z') +// etag: BwWWja0YfJA= +// version: 3 +// +// For a description of IAM and its features, see the +// [IAM documentation](https://cloud.google.com/iam/docs/). +type Policy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the format of the policy. 
+ // + // Valid values are `0`, `1`, and `3`. Requests that specify an invalid value + // are rejected. + // + // Any operation that affects conditional role bindings must specify version + // `3`. This requirement applies to the following operations: + // + // - Getting a policy that includes a conditional role binding + // - Adding a conditional role binding to a policy + // - Changing a conditional role binding in a policy + // - Removing any role binding, with or without a condition, from a policy + // that includes conditions + // + // **Important:** If you use IAM Conditions, you must include the `etag` field + // whenever you call `setIamPolicy`. If you omit this field, then IAM allows + // you to overwrite a version `3` policy with a version `1` policy, and all of + // the conditions in the version `3` policy are lost. + // + // If a policy does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. + // + // To learn which resources support conditions in their IAM policies, see the + // [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Associates a list of `members`, or principals, with a `role`. Optionally, + // may specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one principal. + // + // The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 + // of these principals can be Google groups. Each occurrence of a principal + // counts towards these limits. For example, if the `bindings` grant 50 + // different roles to `user:alice@example.com`, and not to any other + // principal, then you can add another 1,450 principals to the `bindings` in + // the `Policy`. + Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings,proto3" json:"bindings,omitempty"` + // Specifies cloud audit logging configuration for this policy. + AuditConfigs []*AuditConfig `protobuf:"bytes,6,rep,name=audit_configs,json=auditConfigs,proto3" json:"audit_configs,omitempty"` + // `etag` is used for optimistic concurrency control as a way to help + // prevent simultaneous updates of a policy from overwriting each other. + // It is strongly suggested that systems make use of the `etag` in the + // read-modify-write cycle to perform policy updates in order to avoid race + // conditions: An `etag` is returned in the response to `getIamPolicy`, and + // systems are expected to put that etag in the request to `setIamPolicy` to + // ensure that their change will be applied to the same version of the policy. + // + // **Important:** If you use IAM Conditions, you must include the `etag` field + // whenever you call `setIamPolicy`. If you omit this field, then IAM allows + // you to overwrite a version `3` policy with a version `1` policy, and all of + // the conditions in the version `3` policy are lost. 
+ Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` +} + +func (x *Policy) Reset() { + *x = Policy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Policy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Policy) ProtoMessage() {} + +func (x *Policy) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Policy.ProtoReflect.Descriptor instead. +func (*Policy) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{0} +} + +func (x *Policy) GetVersion() int32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Policy) GetBindings() []*Binding { + if x != nil { + return x.Bindings + } + return nil +} + +func (x *Policy) GetAuditConfigs() []*AuditConfig { + if x != nil { + return x.AuditConfigs + } + return nil +} + +func (x *Policy) GetEtag() []byte { + if x != nil { + return x.Etag + } + return nil +} + +// Associates `members`, or principals, with a `role`. +type Binding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Role that is assigned to the list of `members`, or principals. + // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + // Specifies the principals requesting access for a Cloud Platform resource. + // `members` can have the following values: + // + // - `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. + // + // - `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. + // + // - `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . + // + // - `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // + // - `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. + // + // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a user that has been recently deleted. For + // example, `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered user + // retains the role in the binding. + // + // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a service account that has been recently + // deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains the + // role in the binding. + // + // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a Google group that has been recently + // deleted. For example, `admins@example.com?uid=123456789012345678901`. 
If + // the group is recovered, this value reverts to `group:{emailid}` and the + // recovered group retains the role in the binding. + // + // - `domain:{domain}`: The G Suite domain (primary) that represents all the + // users of that domain. For example, `google.com` or `example.com`. + Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` + // The condition that is associated with this binding. + // + // If the condition evaluates to `true`, then this binding applies to the + // current request. + // + // If the condition evaluates to `false`, then this binding does not apply to + // the current request. However, a different role binding might grant the same + // role to one or more of the principals in this binding. + // + // To learn which resources support conditions in their IAM policies, see the + // [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + Condition *expr.Expr `protobuf:"bytes,3,opt,name=condition,proto3" json:"condition,omitempty"` +} + +func (x *Binding) Reset() { + *x = Binding{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Binding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Binding) ProtoMessage() {} + +func (x *Binding) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Binding.ProtoReflect.Descriptor instead. +func (*Binding) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{1} +} + +func (x *Binding) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *Binding) GetMembers() []string { + if x != nil { + return x.Members + } + return nil +} + +func (x *Binding) GetCondition() *expr.Expr { + if x != nil { + return x.Condition + } + return nil +} + +// Specifies the audit configuration for a service. +// The configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. +// An AuditConfig must have one or more AuditLogConfigs. +// +// If there are AuditConfigs for both `allServices` and a specific service, +// the union of the two AuditConfigs is used for that service: the log_types +// specified in each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. +// +// Example Policy with multiple AuditConfigs: +// +// { +// "audit_configs": [ +// { +// "service": "allServices", +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:jose@example.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE" +// }, +// { +// "log_type": "ADMIN_READ" +// } +// ] +// }, +// { +// "service": "sampleservice.googleapis.com", +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ" +// }, +// { +// "log_type": "DATA_WRITE", +// "exempted_members": [ +// "user:aliya@example.com" +// ] +// } +// ] +// } +// ] +// } +// +// For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ +// logging. It also exempts jose@example.com from DATA_READ logging, and +// aliya@example.com from DATA_WRITE logging. 
+type AuditConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. + // `allServices` is a special value that covers all services. + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + // The configuration for logging of each type of permission. + AuditLogConfigs []*AuditLogConfig `protobuf:"bytes,3,rep,name=audit_log_configs,json=auditLogConfigs,proto3" json:"audit_log_configs,omitempty"` +} + +func (x *AuditConfig) Reset() { + *x = AuditConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuditConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuditConfig) ProtoMessage() {} + +func (x *AuditConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuditConfig.ProtoReflect.Descriptor instead. +func (*AuditConfig) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{2} +} + +func (x *AuditConfig) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *AuditConfig) GetAuditLogConfigs() []*AuditLogConfig { + if x != nil { + return x.AuditLogConfigs + } + return nil +} + +// Provides the configuration for logging a type of permissions. +// Example: +// +// { +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:jose@example.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE" +// } +// ] +// } +// +// This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting +// jose@example.com from DATA_READ logging. +type AuditLogConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The log type that this config enables. + LogType AuditLogConfig_LogType `protobuf:"varint,1,opt,name=log_type,json=logType,proto3,enum=google.iam.v1.AuditLogConfig_LogType" json:"log_type,omitempty"` + // Specifies the identities that do not cause logging for this type of + // permission. + // Follows the same format of [Binding.members][google.iam.v1.Binding.members]. + ExemptedMembers []string `protobuf:"bytes,2,rep,name=exempted_members,json=exemptedMembers,proto3" json:"exempted_members,omitempty"` +} + +func (x *AuditLogConfig) Reset() { + *x = AuditLogConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuditLogConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuditLogConfig) ProtoMessage() {} + +func (x *AuditLogConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuditLogConfig.ProtoReflect.Descriptor instead. 
+func (*AuditLogConfig) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3} +} + +func (x *AuditLogConfig) GetLogType() AuditLogConfig_LogType { + if x != nil { + return x.LogType + } + return AuditLogConfig_LOG_TYPE_UNSPECIFIED +} + +func (x *AuditLogConfig) GetExemptedMembers() []string { + if x != nil { + return x.ExemptedMembers + } + return nil +} + +// The difference delta between two policies. +type PolicyDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The delta for Bindings between two policies. + BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas,proto3" json:"binding_deltas,omitempty"` + // The delta for AuditConfigs between two policies. + AuditConfigDeltas []*AuditConfigDelta `protobuf:"bytes,2,rep,name=audit_config_deltas,json=auditConfigDeltas,proto3" json:"audit_config_deltas,omitempty"` +} + +func (x *PolicyDelta) Reset() { + *x = PolicyDelta{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PolicyDelta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PolicyDelta) ProtoMessage() {} + +func (x *PolicyDelta) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PolicyDelta.ProtoReflect.Descriptor instead. +func (*PolicyDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{4} +} + +func (x *PolicyDelta) GetBindingDeltas() []*BindingDelta { + if x != nil { + return x.BindingDeltas + } + return nil +} + +func (x *PolicyDelta) GetAuditConfigDeltas() []*AuditConfigDelta { + if x != nil { + return x.AuditConfigDeltas + } + return nil +} + +// One delta entry for Binding. Each individual change (only one member in each +// entry) to a binding will be a separate entry. +type BindingDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The action that was performed on a Binding. + // Required + Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"` + // Role that is assigned to `members`. + // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Required + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` + // A single identity requesting access for a Cloud Platform resource. + // Follows the same format of Binding.members. + // Required + Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"` + // The condition that is associated with this binding. 
+ Condition *expr.Expr `protobuf:"bytes,4,opt,name=condition,proto3" json:"condition,omitempty"` +} + +func (x *BindingDelta) Reset() { + *x = BindingDelta{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BindingDelta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BindingDelta) ProtoMessage() {} + +func (x *BindingDelta) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BindingDelta.ProtoReflect.Descriptor instead. +func (*BindingDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{5} +} + +func (x *BindingDelta) GetAction() BindingDelta_Action { + if x != nil { + return x.Action + } + return BindingDelta_ACTION_UNSPECIFIED +} + +func (x *BindingDelta) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *BindingDelta) GetMember() string { + if x != nil { + return x.Member + } + return "" +} + +func (x *BindingDelta) GetCondition() *expr.Expr { + if x != nil { + return x.Condition + } + return nil +} + +// One delta entry for AuditConfig. Each individual change (only one +// exempted_member in each entry) to a AuditConfig will be a separate entry. +type AuditConfigDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The action that was performed on an audit configuration in a policy. + // Required + Action AuditConfigDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.AuditConfigDelta_Action" json:"action,omitempty"` + // Specifies a service that was configured for Cloud Audit Logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. + // `allServices` is a special value that covers all services. + // Required + Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` + // A single identity that is exempted from "data access" audit + // logging for the `service` specified above. + // Follows the same format of Binding.members. + ExemptedMember string `protobuf:"bytes,3,opt,name=exempted_member,json=exemptedMember,proto3" json:"exempted_member,omitempty"` + // Specifies the log_type that was be enabled. ADMIN_ACTIVITY is always + // enabled, and cannot be configured. + // Required + LogType string `protobuf:"bytes,4,opt,name=log_type,json=logType,proto3" json:"log_type,omitempty"` +} + +func (x *AuditConfigDelta) Reset() { + *x = AuditConfigDelta{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuditConfigDelta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuditConfigDelta) ProtoMessage() {} + +func (x *AuditConfigDelta) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuditConfigDelta.ProtoReflect.Descriptor instead. 
+func (*AuditConfigDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{6} +} + +func (x *AuditConfigDelta) GetAction() AuditConfigDelta_Action { + if x != nil { + return x.Action + } + return AuditConfigDelta_ACTION_UNSPECIFIED +} + +func (x *AuditConfigDelta) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *AuditConfigDelta) GetExemptedMember() string { + if x != nil { + return x.ExemptedMember + } + return "" +} + +func (x *AuditConfigDelta) GetLogType() string { + if x != nil { + return x.LogType + } + return "" +} + +var File_google_iam_v1_policy_proto protoreflect.FileDescriptor + +var file_google_iam_v1_policy_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x16, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x01, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x08, 0x62, 0x69, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, + 0x6e, 0x67, 0x52, 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3f, 0x0a, 0x0d, + 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, + 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0c, 0x61, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x22, 0x68, 0x0a, 0x07, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, + 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, + 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x72, 0x0a, 0x0b, 0x41, + 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x11, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, + 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, + 0x61, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x73, 0x22, + 0xd1, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x40, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, + 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6c, 0x6f, 0x67, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, + 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x22, + 0x52, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x4c, 0x4f, + 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x44, 0x4d, 0x49, 0x4e, 0x5f, 0x52, 0x45, + 0x41, 0x44, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x57, 0x52, 0x49, + 0x54, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x52, 0x45, 0x41, + 0x44, 0x10, 0x03, 0x22, 0xa2, 0x01, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x44, 0x65, + 0x6c, 0x74, 0x61, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x0d, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x4f, 0x0a, 0x13, 0x61, 0x75, 0x64, 0x69, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, + 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x11, 0x61, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x22, 0xde, 0x01, 0x0a, 0x0c, 0x42, 0x69, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x06, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, + 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 
0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, + 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x02, 0x22, 0xe7, 0x01, 0x0a, 0x10, 0x41, 0x75, + 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x3e, + 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, + 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x78, 0x65, 0x6d, + 0x70, 0x74, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x22, 0x35, 0x0a, 0x06, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, + 0x45, 0x10, 0x02, 0x42, 0x83, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, + 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_iam_v1_policy_proto_rawDescOnce sync.Once + file_google_iam_v1_policy_proto_rawDescData = file_google_iam_v1_policy_proto_rawDesc +) + +func file_google_iam_v1_policy_proto_rawDescGZIP() []byte { + file_google_iam_v1_policy_proto_rawDescOnce.Do(func() { + file_google_iam_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_policy_proto_rawDescData) + }) + return file_google_iam_v1_policy_proto_rawDescData +} + +var file_google_iam_v1_policy_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_google_iam_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_google_iam_v1_policy_proto_goTypes = []interface{}{ + (AuditLogConfig_LogType)(0), // 0: google.iam.v1.AuditLogConfig.LogType + (BindingDelta_Action)(0), // 1: google.iam.v1.BindingDelta.Action + (AuditConfigDelta_Action)(0), // 2: google.iam.v1.AuditConfigDelta.Action + (*Policy)(nil), // 3: google.iam.v1.Policy + (*Binding)(nil), // 4: google.iam.v1.Binding + (*AuditConfig)(nil), // 5: google.iam.v1.AuditConfig + (*AuditLogConfig)(nil), // 6: 
google.iam.v1.AuditLogConfig + (*PolicyDelta)(nil), // 7: google.iam.v1.PolicyDelta + (*BindingDelta)(nil), // 8: google.iam.v1.BindingDelta + (*AuditConfigDelta)(nil), // 9: google.iam.v1.AuditConfigDelta + (*expr.Expr)(nil), // 10: google.type.Expr +} +var file_google_iam_v1_policy_proto_depIdxs = []int32{ + 4, // 0: google.iam.v1.Policy.bindings:type_name -> google.iam.v1.Binding + 5, // 1: google.iam.v1.Policy.audit_configs:type_name -> google.iam.v1.AuditConfig + 10, // 2: google.iam.v1.Binding.condition:type_name -> google.type.Expr + 6, // 3: google.iam.v1.AuditConfig.audit_log_configs:type_name -> google.iam.v1.AuditLogConfig + 0, // 4: google.iam.v1.AuditLogConfig.log_type:type_name -> google.iam.v1.AuditLogConfig.LogType + 8, // 5: google.iam.v1.PolicyDelta.binding_deltas:type_name -> google.iam.v1.BindingDelta + 9, // 6: google.iam.v1.PolicyDelta.audit_config_deltas:type_name -> google.iam.v1.AuditConfigDelta + 1, // 7: google.iam.v1.BindingDelta.action:type_name -> google.iam.v1.BindingDelta.Action + 10, // 8: google.iam.v1.BindingDelta.condition:type_name -> google.type.Expr + 2, // 9: google.iam.v1.AuditConfigDelta.action:type_name -> google.iam.v1.AuditConfigDelta.Action + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_google_iam_v1_policy_proto_init() } +func file_google_iam_v1_policy_proto_init() { + if File_google_iam_v1_policy_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Policy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Binding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuditConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuditLogConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PolicyDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BindingDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuditConfigDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := 
protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_iam_v1_policy_proto_rawDesc, + NumEnums: 3, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_iam_v1_policy_proto_goTypes, + DependencyIndexes: file_google_iam_v1_policy_proto_depIdxs, + EnumInfos: file_google_iam_v1_policy_proto_enumTypes, + MessageInfos: file_google_iam_v1_policy_proto_msgTypes, + }.Build() + File_google_iam_v1_policy_proto = out.File + file_google_iam_v1_policy_proto_rawDesc = nil + file_google_iam_v1_policy_proto_goTypes = nil + file_google_iam_v1_policy_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index e458e7e6c182..ac3f773f8dd6 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -6,7 +6,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accessapproval/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/accesscontextmanager/apiv1": { "distribution_name": "cloud.google.com/go/accesscontextmanager/apiv1", @@ -14,8 +14,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accesscontextmanager/latest/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/aiplatform/apiv1": { "distribution_name": "cloud.google.com/go/aiplatform/apiv1", @@ -24,7 +24,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/aiplatform/apiv1beta1": { + "distribution_name": "cloud.google.com/go/aiplatform/apiv1beta1", + "description": "Vertex AI API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/analytics/admin/apiv1alpha": { "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", @@ -33,7 +42,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/analytics/latest/admin/apiv1alpha", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apigateway/apiv1": { "distribution_name": "cloud.google.com/go/apigateway/apiv1", @@ -42,7 +51,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigateway/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apigeeconnect/apiv1": { "distribution_name": "cloud.google.com/go/apigeeconnect/apiv1", @@ -51,7 +60,25 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeconnect/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apigeeregistry/apiv1": { + "distribution_name": "cloud.google.com/go/apigeeregistry/apiv1", + "description": "Apigee Registry 
API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apikeys/apiv2": { + "distribution_name": "cloud.google.com/go/apikeys/apiv2", + "description": "API Keys API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apikeys/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/appengine/apiv1": { "distribution_name": "cloud.google.com/go/appengine/apiv1", @@ -60,7 +87,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/appengine/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/area120/tables/apiv1alpha1": { "distribution_name": "cloud.google.com/go/area120/tables/apiv1alpha1", @@ -69,7 +96,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/area120/latest/tables/apiv1alpha1", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/artifactregistry/apiv1": { + "distribution_name": "cloud.google.com/go/artifactregistry/apiv1", + "description": "Artifact Registry API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/artifactregistry/apiv1beta2": { "distribution_name": "cloud.google.com/go/artifactregistry/apiv1beta2", @@ -78,7 +114,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1beta2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/asset/apiv1": { "distribution_name": "cloud.google.com/go/asset/apiv1", @@ -87,7 +123,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/asset/apiv1p2beta1": { "distribution_name": "cloud.google.com/go/asset/apiv1p2beta1", @@ -96,7 +132,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p2beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/asset/apiv1p5beta1": { "distribution_name": "cloud.google.com/go/asset/apiv1p5beta1", @@ -105,7 +141,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p5beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/assuredworkloads/apiv1": { + "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1", + "description": "Assured Workloads API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/assuredworkloads/apiv1beta1": { "distribution_name": 
"cloud.google.com/go/assuredworkloads/apiv1beta1", @@ -114,7 +159,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/automl/apiv1": { "distribution_name": "cloud.google.com/go/automl/apiv1", @@ -123,7 +168,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/automl/apiv1beta1": { "distribution_name": "cloud.google.com/go/automl/apiv1beta1", @@ -132,7 +177,70 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/baremetalsolution/apiv2": { + "distribution_name": "cloud.google.com/go/baremetalsolution/apiv2", + "description": "Bare Metal Solution API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/baremetalsolution/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/batch/apiv1": { + "distribution_name": "cloud.google.com/go/batch/apiv1", + "description": "Batch API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/batch/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/appconnections/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/appconnections/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnections/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/appconnectors/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/appconnectors/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnectors/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/appgateways/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/appgateways/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appgateways/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientconnectorservices/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/clientgateways/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/clientgateways/apiv1", + "description": "BeyondCorp API", + 
"language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientgateways/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery": { "distribution_name": "cloud.google.com/go/bigquery", @@ -143,6 +251,15 @@ "release_level": "ga", "library_type": "GAPIC_MANUAL" }, + "cloud.google.com/go/bigquery/analyticshub/apiv1": { + "distribution_name": "cloud.google.com/go/bigquery/analyticshub/apiv1", + "description": "Analytics Hub API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/analyticshub/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/bigquery/connection/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1", "description": "BigQuery Connection API", @@ -150,7 +267,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/connection/apiv1beta1": { "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1beta1", @@ -159,7 +276,25 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/dataexchange/apiv1beta1": { + "distribution_name": "cloud.google.com/go/bigquery/dataexchange/apiv1beta1", + "description": "Analytics Hub API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/dataexchange/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/datapolicies/apiv1beta1": { + "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1beta1", + "description": "BigQuery Data Policy API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/datatransfer/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1", @@ -168,7 +303,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datatransfer/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/migration/apiv2": { + "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2", + "description": "BigQuery Migration API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/migration/apiv2alpha": { "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2alpha", @@ -177,7 +321,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2alpha", "release_level": "alpha", - "library_type": 
"" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/reservation/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1", @@ -186,16 +330,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/reservation/apiv1", "release_level": "ga", - "library_type": "" - }, - "cloud.google.com/go/bigquery/reservation/apiv1beta1": { - "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1beta1", - "description": "BigQuery Reservation API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/reservation/apiv1beta1", - "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/storage/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1", @@ -204,7 +339,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/storage/apiv1beta1": { "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1", @@ -213,7 +348,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/storage/apiv1beta2": { "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta2", @@ -222,7 +357,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta2", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigtable": { "distribution_name": "cloud.google.com/go/bigtable", @@ -240,7 +375,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/billing/budgets/apiv1": { "distribution_name": "cloud.google.com/go/billing/budgets/apiv1", @@ -249,7 +384,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/billing/budgets/apiv1beta1": { "distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1", @@ -258,7 +393,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/binaryauthorization/apiv1": { + "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1", + "description": "Binary Authorization API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/binaryauthorization/apiv1beta1": { "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1beta1", @@ -267,7 +411,16 @@ "client_library_type": 
"generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/certificatemanager/apiv1": { + "distribution_name": "cloud.google.com/go/certificatemanager/apiv1", + "description": "Certificate Manager API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/certificatemanager/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/channel/apiv1": { "distribution_name": "cloud.google.com/go/channel/apiv1", @@ -276,7 +429,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/channel/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudbuild/apiv1/v2": { "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2", @@ -285,7 +438,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv1/v2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/clouddms/apiv1": { "distribution_name": "cloud.google.com/go/clouddms/apiv1", @@ -294,7 +447,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/clouddms/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudtasks/apiv2": { "distribution_name": "cloud.google.com/go/cloudtasks/apiv2", @@ -303,7 +456,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudtasks/apiv2beta2": { "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta2", @@ -312,7 +465,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta2", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudtasks/apiv2beta3": { "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta3", @@ -321,23 +474,23 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta3", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/compute/apiv1": { "distribution_name": "cloud.google.com/go/compute/apiv1", "description": "Google Compute Engine API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/compute/apiv1", - "release_level": "beta", - "library_type": "" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/compute/metadata": { "distribution_name": "cloud.google.com/go/compute/metadata", "description": "Service Metadata API", "language": "Go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/compute/metadata", + "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/metadata", "release_level": "ga", "library_type": "CORE" }, @@ -347,8 +500,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/contactcenterinsights/latest/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/container/apiv1": { "distribution_name": "cloud.google.com/go/container/apiv1", @@ -357,7 +510,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/container/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/containeranalysis/apiv1beta1": { "distribution_name": "cloud.google.com/go/containeranalysis/apiv1beta1", @@ -366,7 +519,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/containeranalysis/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datacatalog/apiv1": { "distribution_name": "cloud.google.com/go/datacatalog/apiv1", @@ -375,7 +528,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datacatalog/apiv1beta1": { "distribution_name": "cloud.google.com/go/datacatalog/apiv1beta1", @@ -384,7 +537,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataflow/apiv1beta3": { "distribution_name": "cloud.google.com/go/dataflow/apiv1beta3", @@ -393,7 +546,25 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataflow/latest/apiv1beta3", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dataform/apiv1alpha2": { + "distribution_name": "cloud.google.com/go/dataform/apiv1alpha2", + "description": "Dataform API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2", + "release_level": "alpha", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dataform/apiv1beta1": { + "distribution_name": "cloud.google.com/go/dataform/apiv1beta1", + "description": "Dataform API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datafusion/apiv1": { "distribution_name": "cloud.google.com/go/datafusion/apiv1", @@ -402,7 +573,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datafusion/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datalabeling/apiv1beta1": { "distribution_name": "cloud.google.com/go/datalabeling/apiv1beta1", @@ -411,7 +582,16 @@ "client_library_type": "generated", "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/datalabeling/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dataplex/apiv1": { + "distribution_name": "cloud.google.com/go/dataplex/apiv1", + "description": "Cloud Dataplex API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataplex/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataproc/apiv1": { "distribution_name": "cloud.google.com/go/dataproc/apiv1", @@ -420,7 +600,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataproc/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataqna/apiv1alpha": { "distribution_name": "cloud.google.com/go/dataqna/apiv1alpha", @@ -429,7 +609,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataqna/latest/apiv1alpha", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datastore": { "distribution_name": "cloud.google.com/go/datastore", @@ -447,7 +627,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest/admin/apiv1", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/datastream/apiv1": { + "distribution_name": "cloud.google.com/go/datastream/apiv1", + "description": "Datastream API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datastream/apiv1alpha1": { "distribution_name": "cloud.google.com/go/datastream/apiv1alpha1", @@ -456,7 +645,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1alpha1", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/debugger/apiv2": { "distribution_name": "cloud.google.com/go/debugger/apiv2", @@ -465,7 +654,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/debugger/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/deploy/apiv1": { "distribution_name": "cloud.google.com/go/deploy/apiv1", @@ -473,8 +662,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/deploy/latest/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/apiv2": { "distribution_name": "cloud.google.com/go/dialogflow/apiv2", @@ -483,7 +672,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dialogflow/apiv2beta1": { + "distribution_name": "cloud.google.com/go/dialogflow/apiv2beta1", + "description": "Dialogflow API", + "language": "Go", + "client_library_type": "generated", + "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/cx/apiv3": { "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3", @@ -492,7 +690,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/cx/apiv3beta1": { "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3beta1", @@ -501,7 +699,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dlp/apiv2": { "distribution_name": "cloud.google.com/go/dlp/apiv2", @@ -510,7 +708,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dlp/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/documentai/apiv1": { "distribution_name": "cloud.google.com/go/documentai/apiv1", @@ -519,7 +717,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/documentai/apiv1beta3": { "distribution_name": "cloud.google.com/go/documentai/apiv1beta3", @@ -528,7 +726,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1beta3", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/domains/apiv1beta1": { "distribution_name": "cloud.google.com/go/domains/apiv1beta1", @@ -537,14 +735,23 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/domains/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/edgecontainer/apiv1": { + "distribution_name": "cloud.google.com/go/edgecontainer/apiv1", + "description": "Distributed Cloud Edge Container API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgecontainer/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/errorreporting": { "distribution_name": "cloud.google.com/go/errorreporting", "description": "Cloud Error Reporting API", "language": "Go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/errorreporting", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/errorreporting/latest", "release_level": "beta", "library_type": "GAPIC_MANUAL" }, @@ -555,7 +762,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/errorreporting/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/essentialcontacts/apiv1": { "distribution_name": "cloud.google.com/go/essentialcontacts/apiv1", @@ -564,7 +771,7 @@ "client_library_type": "generated", "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/essentialcontacts/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/eventarc/apiv1": { "distribution_name": "cloud.google.com/go/eventarc/apiv1", @@ -573,7 +780,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/eventarc/publishing/apiv1": { + "distribution_name": "cloud.google.com/go/eventarc/publishing/apiv1", + "description": "Eventarc Publishing API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/publishing/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/filestore/apiv1": { "distribution_name": "cloud.google.com/go/filestore/apiv1", @@ -581,8 +797,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/filestore/latest/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/firestore": { "distribution_name": "cloud.google.com/go/firestore", @@ -600,7 +816,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/firestore/apiv1/admin": { "distribution_name": "cloud.google.com/go/firestore/apiv1/admin", @@ -609,7 +825,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1/admin", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/functions/apiv1": { "distribution_name": "cloud.google.com/go/functions/apiv1", @@ -618,14 +834,32 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/functions/apiv2": { + "distribution_name": "cloud.google.com/go/functions/apiv2", + "description": "Cloud Functions API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/functions/apiv2beta": { + "distribution_name": "cloud.google.com/go/functions/apiv2beta", + "description": "Cloud Functions API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2beta", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/functions/metadata": { "distribution_name": "cloud.google.com/go/functions/metadata", "description": "Cloud Functions", "language": "Go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/functions/metadata", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/metadata", "release_level": "alpha", "library_type": "CORE" }, @@ -636,7 +870,7 @@ 
"client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gaming/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gaming/apiv1beta": { "distribution_name": "cloud.google.com/go/gaming/apiv1beta", @@ -645,7 +879,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gaming/latest/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/gkebackup/apiv1": { + "distribution_name": "cloud.google.com/go/gkebackup/apiv1", + "description": "Backup for GKE API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkebackup/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gkeconnect/gateway/apiv1beta1": { "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1beta1", @@ -654,7 +897,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkeconnect/latest/gateway/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gkehub/apiv1beta1": { "distribution_name": "cloud.google.com/go/gkehub/apiv1beta1", @@ -663,7 +906,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkehub/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/gkemulticloud/apiv1": { + "distribution_name": "cloud.google.com/go/gkemulticloud/apiv1", + "description": "Anthos Multi-Cloud API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gsuiteaddons/apiv1": { "distribution_name": "cloud.google.com/go/gsuiteaddons/apiv1", @@ -672,25 +924,34 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gsuiteaddons/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iam": { "distribution_name": "cloud.google.com/go/iam", "description": "Cloud IAM", "language": "Go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/iam", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest", "release_level": "ga", "library_type": "CORE" }, + "cloud.google.com/go/iam/apiv2": { + "distribution_name": "cloud.google.com/go/iam/apiv2", + "description": "Identity and Access Management (IAM) API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/iam/credentials/apiv1": { "distribution_name": "cloud.google.com/go/iam/credentials/apiv1", "description": "IAM Service Account Credentials API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/iam/credentials/apiv1", + "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/credentials/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iap/apiv1": { "distribution_name": "cloud.google.com/go/iap/apiv1", @@ -699,7 +960,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iap/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/ids/apiv1": { "distribution_name": "cloud.google.com/go/ids/apiv1", @@ -707,8 +968,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ids/latest/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iot/apiv1": { "distribution_name": "cloud.google.com/go/iot/apiv1", @@ -717,7 +978,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iot/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/kms/apiv1": { "distribution_name": "cloud.google.com/go/kms/apiv1", @@ -726,7 +987,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/kms/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/language/apiv1": { "distribution_name": "cloud.google.com/go/language/apiv1", @@ -735,16 +996,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/language/apiv1beta2": { "distribution_name": "cloud.google.com/go/language/apiv1beta2", - "description": "Google Cloud Natural Language API", + "description": "Cloud Natural Language API", "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1beta2", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/lifesciences/apiv2beta": { "distribution_name": "cloud.google.com/go/lifesciences/apiv2beta", @@ -753,7 +1014,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/lifesciences/latest/apiv2beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/logging": { "distribution_name": "cloud.google.com/go/logging", @@ -771,16 +1032,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/longrunning/autogen": { "distribution_name": "cloud.google.com/go/longrunning/autogen", "description": "Long Running Operations API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/longrunning/autogen", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/longrunning/latest/autogen", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/managedidentities/apiv1": { "distribution_name": 
"cloud.google.com/go/managedidentities/apiv1", @@ -789,7 +1050,25 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedidentities/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/addressvalidation/apiv1": { + "distribution_name": "cloud.google.com/go/maps/addressvalidation/apiv1", + "description": "Address Validation API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/addressvalidation/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/routing/apiv2": { + "distribution_name": "cloud.google.com/go/maps/routing/apiv2", + "description": "Routes API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/routing/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/mediatranslation/apiv1beta1": { "distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1", @@ -798,7 +1077,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/mediatranslation/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/memcache/apiv1": { "distribution_name": "cloud.google.com/go/memcache/apiv1", @@ -807,7 +1086,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/memcache/apiv1beta2": { "distribution_name": "cloud.google.com/go/memcache/apiv1beta2", @@ -816,7 +1095,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1beta2", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/metastore/apiv1": { "distribution_name": "cloud.google.com/go/metastore/apiv1", @@ -825,7 +1104,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/metastore/apiv1alpha": { "distribution_name": "cloud.google.com/go/metastore/apiv1alpha", @@ -834,7 +1113,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1alpha", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/metastore/apiv1beta": { "distribution_name": "cloud.google.com/go/metastore/apiv1beta", @@ -843,7 +1122,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/monitoring/apiv3/v2": { "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", @@ -852,7 +1131,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/apiv3/v2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, 
"cloud.google.com/go/monitoring/dashboard/apiv1": { "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1", @@ -861,7 +1140,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/dashboard/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/monitoring/metricsscope/apiv1": { "distribution_name": "cloud.google.com/go/monitoring/metricsscope/apiv1", @@ -870,7 +1149,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/metricsscope/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkconnectivity/apiv1": { "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1", @@ -878,8 +1157,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkconnectivity/apiv1alpha1": { "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1alpha1", @@ -888,7 +1167,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1alpha1", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkmanagement/apiv1": { "distribution_name": "cloud.google.com/go/networkmanagement/apiv1", @@ -897,7 +1176,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkmanagement/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networksecurity/apiv1beta1": { "distribution_name": "cloud.google.com/go/networksecurity/apiv1beta1", @@ -906,7 +1185,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networksecurity/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/notebooks/apiv1": { + "distribution_name": "cloud.google.com/go/notebooks/apiv1", + "description": "Notebooks API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/notebooks/apiv1beta1": { "distribution_name": "cloud.google.com/go/notebooks/apiv1beta1", @@ -915,7 +1203,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/optimization/apiv1": { + "distribution_name": "cloud.google.com/go/optimization/apiv1", + "description": "Cloud Optimization API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/optimization/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/orchestration/airflow/service/apiv1": { "distribution_name": "cloud.google.com/go/orchestration/airflow/service/apiv1", @@ -923,8 +1220,8 @@ "language": 
"Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orchestration/latest/airflow/service/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/orgpolicy/apiv2": { "distribution_name": "cloud.google.com/go/orgpolicy/apiv2", @@ -933,7 +1230,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orgpolicy/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/agentendpoint/apiv1": { "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1", @@ -942,16 +1239,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/agentendpoint/apiv1beta": { "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1beta", - "description": "Cloud OS Config API", + "description": "OS Config API", "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/apiv1": { "distribution_name": "cloud.google.com/go/osconfig/apiv1", @@ -960,7 +1257,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/apiv1alpha": { "distribution_name": "cloud.google.com/go/osconfig/apiv1alpha", @@ -969,16 +1266,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1alpha", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/apiv1beta": { "distribution_name": "cloud.google.com/go/osconfig/apiv1beta", - "description": "Cloud OS Config API", + "description": "OS Config API", "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/oslogin/apiv1": { "distribution_name": "cloud.google.com/go/oslogin/apiv1", @@ -987,7 +1284,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/oslogin/apiv1beta": { "distribution_name": "cloud.google.com/go/oslogin/apiv1beta", @@ -996,7 +1293,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/phishingprotection/apiv1beta1": { "distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1", @@ -1005,7 +1302,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/phishingprotection/latest/apiv1beta1", "release_level": 
"beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/policytroubleshooter/apiv1": { "distribution_name": "cloud.google.com/go/policytroubleshooter/apiv1", @@ -1014,7 +1311,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/privatecatalog/apiv1beta1": { "distribution_name": "cloud.google.com/go/privatecatalog/apiv1beta1", @@ -1023,14 +1320,14 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/privatecatalog/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/profiler": { "distribution_name": "cloud.google.com/go/profiler", "description": "Cloud Profiler", "language": "Go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/profiler", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/profiler/latest", "release_level": "ga", "library_type": "AGENT" }, @@ -1050,7 +1347,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/pubsublite": { "distribution_name": "cloud.google.com/go/pubsublite", @@ -1058,7 +1355,7 @@ "language": "Go", "client_library_type": "manual", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/pubsublite/apiv1": { @@ -1068,25 +1365,25 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/recaptchaenterprise/apiv1": { - "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1", + "cloud.google.com/go/recaptchaenterprise/v2/apiv1": { + "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1", "description": "reCAPTCHA Enterprise API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/latest/apiv1", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/recaptchaenterprise/apiv1beta1": { - "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1beta1", + "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1": { + "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1", "description": "reCAPTCHA Enterprise API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/latest/apiv1beta1", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommendationengine/apiv1beta1": { "distribution_name": 
"cloud.google.com/go/recommendationengine/apiv1beta1", @@ -1095,7 +1392,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommendationengine/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommender/apiv1": { "distribution_name": "cloud.google.com/go/recommender/apiv1", @@ -1104,7 +1401,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommender/apiv1beta1": { "distribution_name": "cloud.google.com/go/recommender/apiv1beta1", @@ -1113,7 +1410,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/redis/apiv1": { "distribution_name": "cloud.google.com/go/redis/apiv1", @@ -1122,7 +1419,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/redis/apiv1beta1": { "distribution_name": "cloud.google.com/go/redis/apiv1beta1", @@ -1131,7 +1428,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcemanager/apiv2": { "distribution_name": "cloud.google.com/go/resourcemanager/apiv2", @@ -1140,7 +1437,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcemanager/apiv3": { "distribution_name": "cloud.google.com/go/resourcemanager/apiv3", @@ -1149,7 +1446,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv3", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcesettings/apiv1": { "distribution_name": "cloud.google.com/go/resourcesettings/apiv1", @@ -1158,7 +1455,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcesettings/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/retail/apiv2": { "distribution_name": "cloud.google.com/go/retail/apiv2", @@ -1167,7 +1464,25 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/retail/apiv2alpha": { + "distribution_name": "cloud.google.com/go/retail/apiv2alpha", + "description": "Retail API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2alpha", + "release_level": "alpha", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/retail/apiv2beta": { + "distribution_name": 
"cloud.google.com/go/retail/apiv2beta", + "description": "Retail API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2beta", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/rpcreplay": { "distribution_name": "cloud.google.com/go/rpcreplay", @@ -1178,6 +1493,15 @@ "release_level": "ga", "library_type": "OTHER" }, + "cloud.google.com/go/run/apiv2": { + "distribution_name": "cloud.google.com/go/run/apiv2", + "description": "Cloud Run Admin API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/run/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/scheduler/apiv1": { "distribution_name": "cloud.google.com/go/scheduler/apiv1", "description": "Cloud Scheduler API", @@ -1185,7 +1509,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/scheduler/apiv1beta1": { "distribution_name": "cloud.google.com/go/scheduler/apiv1beta1", @@ -1194,7 +1518,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/secretmanager/apiv1": { "distribution_name": "cloud.google.com/go/secretmanager/apiv1", @@ -1203,16 +1527,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1", "release_level": "ga", - "library_type": "" - }, - "cloud.google.com/go/secretmanager/apiv1beta1": { - "distribution_name": "cloud.google.com/go/secretmanager/apiv1beta1", - "description": "Secret Manager API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1beta1", - "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/security/privateca/apiv1": { "distribution_name": "cloud.google.com/go/security/privateca/apiv1", @@ -1221,7 +1536,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/privateca/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/security/privateca/apiv1beta1": { "distribution_name": "cloud.google.com/go/security/privateca/apiv1beta1", @@ -1230,7 +1545,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/privateca/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/security/publicca/apiv1beta1": { + "distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1", + "description": "Public Certificate Authority API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/securitycenter/apiv1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1", 
@@ -1239,7 +1563,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/securitycenter/apiv1beta1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1beta1", @@ -1248,7 +1572,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/securitycenter/apiv1p1beta1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1p1beta1", @@ -1257,7 +1581,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1p1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/securitycenter/settings/apiv1beta1": { "distribution_name": "cloud.google.com/go/securitycenter/settings/apiv1beta1", @@ -1266,7 +1590,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/settings/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicecontrol/apiv1": { "distribution_name": "cloud.google.com/go/servicecontrol/apiv1", @@ -1275,7 +1599,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicecontrol/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicedirectory/apiv1": { "distribution_name": "cloud.google.com/go/servicedirectory/apiv1", @@ -1284,7 +1608,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicedirectory/apiv1beta1": { "distribution_name": "cloud.google.com/go/servicedirectory/apiv1beta1", @@ -1293,7 +1617,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicemanagement/apiv1": { "distribution_name": "cloud.google.com/go/servicemanagement/apiv1", @@ -1302,7 +1626,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicemanagement/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/serviceusage/apiv1": { "distribution_name": "cloud.google.com/go/serviceusage/apiv1", @@ -1311,7 +1635,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/serviceusage/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/shell/apiv1": { "distribution_name": "cloud.google.com/go/shell/apiv1", @@ -1320,7 +1644,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shell/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/spanner": { 
"distribution_name": "cloud.google.com/go/spanner", @@ -1333,12 +1657,12 @@ }, "cloud.google.com/go/spanner/admin/database/apiv1": { "distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1", - "description": "Cloud Spanner Database Admin API", + "description": "Cloud Spanner API", "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/database/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/spanner/admin/instance/apiv1": { "distribution_name": "cloud.google.com/go/spanner/admin/instance/apiv1", @@ -1347,7 +1671,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/instance/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/spanner/apiv1": { "distribution_name": "cloud.google.com/go/spanner/apiv1", @@ -1356,7 +1680,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/speech/apiv1": { "distribution_name": "cloud.google.com/go/speech/apiv1", @@ -1365,7 +1689,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/speech/apiv1p1beta1": { "distribution_name": "cloud.google.com/go/speech/apiv1p1beta1", @@ -1374,7 +1698,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1p1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/speech/apiv2": { + "distribution_name": "cloud.google.com/go/speech/apiv2", + "description": "Cloud Speech-to-Text API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/storage": { "distribution_name": "cloud.google.com/go/storage", @@ -1392,7 +1725,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/internal/apiv2", "release_level": "alpha", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/storagetransfer/apiv1": { "distribution_name": "cloud.google.com/go/storagetransfer/apiv1", @@ -1401,7 +1734,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storagetransfer/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/talent/apiv4": { "distribution_name": "cloud.google.com/go/talent/apiv4", @@ -1409,8 +1742,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/talent/apiv4beta1": { "distribution_name": "cloud.google.com/go/talent/apiv4beta1", @@ -1419,7 +1752,7 @@ "client_library_type": "generated", "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/texttospeech/apiv1": { "distribution_name": "cloud.google.com/go/texttospeech/apiv1", @@ -1428,7 +1761,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/texttospeech/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/tpu/apiv1": { "distribution_name": "cloud.google.com/go/tpu/apiv1", @@ -1437,7 +1770,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/tpu/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/trace/apiv1": { "distribution_name": "cloud.google.com/go/trace/apiv1", @@ -1446,7 +1779,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/trace/apiv2": { "distribution_name": "cloud.google.com/go/trace/apiv2", @@ -1455,7 +1788,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv2", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/translate/apiv3": { "distribution_name": "cloud.google.com/go/translate/apiv3", @@ -1464,25 +1797,34 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/translate/latest/apiv3", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/video/transcoder/apiv1": { - "distribution_name": "cloud.google.com/go/video/transcoder/apiv1", - "description": "Transcoder API", + "cloud.google.com/go/video/livestream/apiv1": { + "distribution_name": "cloud.google.com/go/video/livestream/apiv1", + "description": "Live Stream API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/transcoder/apiv1", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/livestream/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/video/transcoder/apiv1beta1": { - "distribution_name": "cloud.google.com/go/video/transcoder/apiv1beta1", + "cloud.google.com/go/video/stitcher/apiv1": { + "distribution_name": "cloud.google.com/go/video/stitcher/apiv1", + "description": "Video Stitcher API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/stitcher/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/video/transcoder/apiv1": { + "distribution_name": "cloud.google.com/go/video/transcoder/apiv1", "description": "Transcoder API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/transcoder/apiv1beta1", - "release_level": "beta", - "library_type": "" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/transcoder/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, 
"cloud.google.com/go/videointelligence/apiv1": { "distribution_name": "cloud.google.com/go/videointelligence/apiv1", @@ -1491,7 +1833,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/videointelligence/apiv1beta2": { "distribution_name": "cloud.google.com/go/videointelligence/apiv1beta2", @@ -1500,25 +1842,34 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1beta2", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/videointelligence/apiv1p3beta1": { + "distribution_name": "cloud.google.com/go/videointelligence/apiv1p3beta1", + "description": "Cloud Video Intelligence API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1p3beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/vision/apiv1": { - "distribution_name": "cloud.google.com/go/vision/apiv1", + "cloud.google.com/go/vision/v2/apiv1": { + "distribution_name": "cloud.google.com/go/vision/v2/apiv1", "description": "Cloud Vision API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/latest/apiv1", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/vision/apiv1p1beta1": { - "distribution_name": "cloud.google.com/go/vision/apiv1p1beta1", + "cloud.google.com/go/vision/v2/apiv1p1beta1": { + "distribution_name": "cloud.google.com/go/vision/v2/apiv1p1beta1", "description": "Cloud Vision API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/latest/apiv1p1beta1", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1p1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vmmigration/apiv1": { "distribution_name": "cloud.google.com/go/vmmigration/apiv1", @@ -1526,8 +1877,8 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmmigration/latest/apiv1", - "release_level": "beta", - "library_type": "" + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vpcaccess/apiv1": { "distribution_name": "cloud.google.com/go/vpcaccess/apiv1", @@ -1536,7 +1887,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vpcaccess/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/webrisk/apiv1": { "distribution_name": "cloud.google.com/go/webrisk/apiv1", @@ -1545,7 +1896,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/webrisk/apiv1beta1": { "distribution_name": "cloud.google.com/go/webrisk/apiv1beta1", @@ -1554,7 +1905,7 @@ 
"client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1beta1", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/websecurityscanner/apiv1": { "distribution_name": "cloud.google.com/go/websecurityscanner/apiv1", @@ -1563,7 +1914,16 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/websecurityscanner/latest/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/workflows/apiv1": { + "distribution_name": "cloud.google.com/go/workflows/apiv1", + "description": "Workflows API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1", + "release_level": "ga", + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/apiv1beta": { "distribution_name": "cloud.google.com/go/workflows/apiv1beta", @@ -1572,7 +1932,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/executions/apiv1": { "distribution_name": "cloud.google.com/go/workflows/executions/apiv1", @@ -1581,7 +1941,7 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1", "release_level": "ga", - "library_type": "" + "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/executions/apiv1beta": { "distribution_name": "cloud.google.com/go/workflows/executions/apiv1beta", @@ -1590,6 +1950,6 @@ "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1beta", "release_level": "beta", - "library_type": "" + "library_type": "GAPIC_AUTO" } } diff --git a/vendor/cloud.google.com/go/internal/annotate.go b/vendor/cloud.google.com/go/internal/annotate.go index 6435695ba34b..30d7bcf77ac6 100644 --- a/vendor/cloud.google.com/go/internal/annotate.go +++ b/vendor/cloud.google.com/go/internal/annotate.go @@ -31,7 +31,8 @@ import ( // - "google.golang.org/api/googleapi".Error // If the error is not one of these types, Annotate behaves // like -// fmt.Errorf("%s: %v", msg, err) +// +// fmt.Errorf("%s: %v", msg, err) func Annotate(err error, msg string) error { if err == nil { panic("Annotate called with nil") diff --git a/vendor/cloud.google.com/go/internal/pubsub/message.go b/vendor/cloud.google.com/go/internal/pubsub/message.go new file mode 100644 index 000000000000..7d7092b19100 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/pubsub/message.go @@ -0,0 +1,221 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and + +package pubsub + +import ( + "context" + "time" +) + +// AckHandler implements ack/nack handling. +type AckHandler interface { + // OnAck processes a message ack. + OnAck() + + // OnNack processes a message nack. + OnNack() + + // OnAckWithResult processes a message ack and returns + // a result that shows if it succeeded. + OnAckWithResult() *AckResult + + // OnNackWithResult processes a message nack and returns + // a result that shows if it succeeded. + OnNackWithResult() *AckResult +} + +// Message represents a Pub/Sub message. +type Message struct { + // ID identifies this message. This ID is assigned by the server and is + // populated for Messages obtained from a subscription. + // + // This field is read-only. + ID string + + // Data is the actual data in the message. + Data []byte + + // Attributes represents the key-value pairs the current message is + // labelled with. + Attributes map[string]string + + // PublishTime is the time at which the message was published. This is + // populated by the server for Messages obtained from a subscription. + // + // This field is read-only. + PublishTime time.Time + + // DeliveryAttempt is the number of times a message has been delivered. + // This is part of the dead lettering feature that forwards messages that + // fail to be processed (from nack/ack deadline timeout) to a dead letter topic. + // If dead lettering is enabled, this will be set on all attempts, starting + // with value 1. Otherwise, the value will be nil. + // This field is read-only. + DeliveryAttempt *int + + // OrderingKey identifies related messages for which publish order should + // be respected. If empty string is used, message will be sent unordered. + OrderingKey string + + // ackh handles Ack() or Nack(). + ackh AckHandler +} + +// Ack indicates successful processing of a Message passed to the Subscriber.Receive callback. +// It should not be called on any other Message value. +// If message acknowledgement fails, the Message will be redelivered. +// Client code must call Ack or Nack when finished for each received Message. +// Calls to Ack or Nack have no effect after the first call. +func (m *Message) Ack() { + if m.ackh != nil { + m.ackh.OnAck() + } +} + +// Nack indicates that the client will not or cannot process a Message passed to the Subscriber.Receive callback. +// It should not be called on any other Message value. +// Nack will result in the Message being redelivered more quickly than if it were allowed to expire. +// Client code must call Ack or Nack when finished for each received Message. +// Calls to Ack or Nack have no effect after the first call. +func (m *Message) Nack() { + if m.ackh != nil { + m.ackh.OnNack() + } +} + +// AcknowledgeStatus represents the status of an Ack or Nack request. +type AcknowledgeStatus int + +const ( + // AcknowledgeStatusSuccess indicates the request was a success. + AcknowledgeStatusSuccess AcknowledgeStatus = iota + // AcknowledgeStatusPermissionDenied indicates the caller does not have sufficient permissions. + AcknowledgeStatusPermissionDenied + // AcknowledgeStatusFailedPrecondition indicates the request encountered a FailedPrecondition error. + AcknowledgeStatusFailedPrecondition + // AcknowledgeStatusInvalidAckID indicates one or more of the ack IDs sent were invalid. + AcknowledgeStatusInvalidAckID + // AcknowledgeStatusOther indicates another unknown error was returned. 
+ AcknowledgeStatusOther +) + +// AckResult holds the result from a call to Ack or Nack. +type AckResult struct { + ready chan struct{} + res AcknowledgeStatus + err error +} + +// Ready returns a channel that is closed when the result is ready. +// When the Ready channel is closed, Get is guaranteed not to block. +func (r *AckResult) Ready() <-chan struct{} { return r.ready } + +// Get returns the status and/or error result of a Ack, Nack, or Modack call. +// Get blocks until the Ack/Nack completes or the context is done. +func (r *AckResult) Get(ctx context.Context) (res AcknowledgeStatus, err error) { + // If the result is already ready, return it even if the context is done. + select { + case <-r.Ready(): + return r.res, r.err + default: + } + select { + case <-ctx.Done(): + // Explicitly return AcknowledgeStatusOther for context cancelled cases, + // since the default is success. + return AcknowledgeStatusOther, ctx.Err() + case <-r.Ready(): + return r.res, r.err + } +} + +// NewAckResult creates a AckResult. +func NewAckResult() *AckResult { + return &AckResult{ + ready: make(chan struct{}), + } +} + +// SetAckResult sets the ack response and error for a ack result and closes +// the Ready channel. Any call after the first for the same AckResult +// is a no-op. +func SetAckResult(r *AckResult, res AcknowledgeStatus, err error) { + select { + case <-r.Ready(): + return + default: + r.res = res + r.err = err + close(r.ready) + } +} + +// AckWithResult acknowledges a message in Pub/Sub and it will not be +// delivered to this subscription again. +// +// You should avoid acknowledging messages until you have +// *finished* processing them, so that in the event of a failure, +// you receive the message again. +// +// If exactly-once delivery is enabled on the subscription, the +// AckResult returned by this method tracks the state of acknowledgement +// operation. If the operation completes successfully, the message is +// guaranteed NOT to be re-delivered. Otherwise, the result will +// contain an error with more details about the failure and the +// message may be re-delivered. +// +// If exactly-once delivery is NOT enabled on the subscription, or +// if using Pub/Sub Lite, AckResult readies immediately with a AcknowledgeStatus.Success. +// Since acks in Cloud Pub/Sub are best effort when exactly-once +// delivery is disabled, the message may be re-delivered. Because +// re-deliveries are possible, you should ensure that your processing +// code is idempotent, as you may receive any given message more than +// once. +func (m *Message) AckWithResult() *AckResult { + if m.ackh != nil { + return m.ackh.OnAckWithResult() + } + return nil +} + +// NackWithResult declines to acknowledge the message which indicates that +// the client will not or cannot process a Message. This will cause the message +// to be re-delivered to subscribers. Re-deliveries may take place immediately +// or after a delay. +// +// If exactly-once delivery is enabled on the subscription, the +// AckResult returned by this method tracks the state of nack +// operation. If the operation completes successfully, the result will +// contain AckResponse.Success. Otherwise, the result will contain an error +// with more details about the failure. +// +// If exactly-once delivery is NOT enabled on the subscription, or +// if using Pub/Sub Lite, AckResult readies immediately with a AcknowledgeStatus.Success. 
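For readers skimming this hunk: the new AckResult follows a small ready-channel idiom (NewAckResult allocates the result, SetAckResult records the outcome exactly once and closes Ready, and Get prefers a ready result over a cancelled context). The standalone sketch below reproduces that idiom outside the vendored package; the `result` type, the sleeping goroutine, and the error value are illustrative stand-ins, not part of this patch.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// result mirrors the shape of the vendored AckResult/PublishResult types:
// a channel that is closed exactly once when the outcome is known.
type result struct {
	ready chan struct{}
	err   error
}

func newResult() *result { return &result{ready: make(chan struct{})} }

// set records the outcome and closes ready; later calls are no-ops,
// matching the vendored SetAckResult behaviour.
func (r *result) set(err error) {
	select {
	case <-r.ready:
	default:
		r.err = err
		close(r.ready)
	}
}

// get blocks until the outcome is recorded or ctx is done, preferring a
// ready result over a cancelled context, as the vendored Get does.
func (r *result) get(ctx context.Context) error {
	select {
	case <-r.ready:
		return r.err
	default:
	}
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-r.ready:
		return r.err
	}
}

func main() {
	r := newResult()
	go func() {
		time.Sleep(50 * time.Millisecond) // stand-in for the ack RPC
		r.set(errors.New("permission denied"))
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(r.get(ctx)) // prints: permission denied
}
```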
+func (m *Message) NackWithResult() *AckResult { + if m.ackh != nil { + return m.ackh.OnNackWithResult() + } + return nil +} + +// NewMessage creates a message with an AckHandler implementation, which should +// not be nil. +func NewMessage(ackh AckHandler) *Message { + return &Message{ackh: ackh} +} + +// MessageAckHandler provides access to the internal field Message.ackh. +func MessageAckHandler(m *Message) AckHandler { + return m.ackh +} diff --git a/vendor/cloud.google.com/go/internal/pubsub/publish.go b/vendor/cloud.google.com/go/internal/pubsub/publish.go new file mode 100644 index 000000000000..d03ab0f7796f --- /dev/null +++ b/vendor/cloud.google.com/go/internal/pubsub/publish.go @@ -0,0 +1,57 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and + +package pubsub + +import "context" + +// A PublishResult holds the result from a call to Publish. +type PublishResult struct { + ready chan struct{} + serverID string + err error +} + +// Ready returns a channel that is closed when the result is ready. +// When the Ready channel is closed, Get is guaranteed not to block. +func (r *PublishResult) Ready() <-chan struct{} { return r.ready } + +// Get returns the server-generated message ID and/or error result of a Publish call. +// Get blocks until the Publish call completes or the context is done. +func (r *PublishResult) Get(ctx context.Context) (serverID string, err error) { + // If the result is already ready, return it even if the context is done. + select { + case <-r.Ready(): + return r.serverID, r.err + default: + } + select { + case <-ctx.Done(): + return "", ctx.Err() + case <-r.Ready(): + return r.serverID, r.err + } +} + +// NewPublishResult creates a PublishResult. +func NewPublishResult() *PublishResult { + return &PublishResult{ready: make(chan struct{})} +} + +// SetPublishResult sets the server ID and error for a publish result and closes +// the Ready channel. +func SetPublishResult(r *PublishResult, sid string, err error) { + r.serverID = sid + r.err = err + close(r.ready) +} diff --git a/vendor/cloud.google.com/go/internal/testutil/context.go b/vendor/cloud.google.com/go/internal/testutil/context.go index edada10464b4..7ece9928a66d 100644 --- a/vendor/cloud.google.com/go/internal/testutil/context.go +++ b/vendor/cloud.google.com/go/internal/testutil/context.go @@ -26,11 +26,13 @@ import ( "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" + "google.golang.org/api/impersonate" ) const ( - envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" - envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" + envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" + envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" + envImpersonate = "GCLOUD_TESTS_IMPERSONATE_CREDENTIALS" ) // ProjID returns the project ID to use in integration tests, or the empty @@ -52,6 +54,12 @@ func Credentials(ctx context.Context, scopes ...string) *google.Credentials { // will return nil. CredentialsEnv will log.Fatal if the token source is // specified but missing or invalid. 
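The PublishResult introduced in the hunk above is what the public Pub/Sub API hands back from Topic.Publish. A minimal, hedged usage sketch follows; the project and topic names are placeholders, and error handling is kept deliberately terse.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()

	// "my-project" and "my-topic" are placeholders; substitute real resources.
	client, err := pubsub.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	topic := client.Topic("my-topic")
	defer topic.Stop()

	// Publish returns immediately with a PublishResult; Get blocks until the
	// server assigns a message ID or the publish fails.
	res := topic.Publish(ctx, &pubsub.Message{Data: []byte("hello")})
	id, err := res.Get(ctx)
	if err != nil {
		log.Fatalf("publish failed: %v", err)
	}
	fmt.Println("published message", id)
}
```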
func CredentialsEnv(ctx context.Context, envVar string, scopes ...string) *google.Credentials { + if impKey := os.Getenv(envImpersonate); impKey == "true" { + return &google.Credentials{ + TokenSource: impersonatedTokenSource(ctx, scopes), + ProjectID: "dulcet-port-762", + } + } key := os.Getenv(envVar) if key == "" { // Try for application default credentials. creds, err := google.FindDefaultCredentials(ctx, scopes...) @@ -88,6 +96,9 @@ func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource { // return nil. TokenSourceEnv will log.Fatal if the token source is specified but // missing or invalid. func TokenSourceEnv(ctx context.Context, envVar string, scopes ...string) oauth2.TokenSource { + if impKey := os.Getenv(envImpersonate); impKey == "true" { + return impersonatedTokenSource(ctx, scopes) + } key := os.Getenv(envVar) if key == "" { // Try for application default credentials. ts, err := google.DefaultTokenSource(ctx, scopes...) @@ -104,6 +115,17 @@ func TokenSourceEnv(ctx context.Context, envVar string, scopes ...string) oauth2 return conf.TokenSource(ctx) } +func impersonatedTokenSource(ctx context.Context, scopes []string) oauth2.TokenSource { + ts, err := impersonate.CredentialsTokenSource(ctx, impersonate.CredentialsConfig{ + TargetPrincipal: "kokoro@dulcet-port-762.iam.gserviceaccount.com", + Scopes: scopes, + }) + if err != nil { + log.Fatalf("Unable to impersonate credentials, exiting: %v", err) + } + return ts +} + // JWTConfig reads the JSON private key file whose name is in the default // environment variable, and returns the jwt.Config it contains. It ignores // scopes. diff --git a/vendor/cloud.google.com/go/longrunning/CHANGES.md b/vendor/cloud.google.com/go/longrunning/CHANGES.md new file mode 100644 index 000000000000..3f9074b6e045 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/CHANGES.md @@ -0,0 +1,12 @@ +# Changes + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.2.1...longrunning/v0.3.0) (2022-11-03) + + +### Features + +* **longrunning:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + +## v0.1.0 + +Initial release. diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/cloud.google.com/go/longrunning/LICENSE similarity index 93% rename from vendor/github.com/containerd/containerd/LICENSE rename to vendor/cloud.google.com/go/longrunning/LICENSE index 584149b6ee28..d64569567334 100644 --- a/vendor/github.com/containerd/containerd/LICENSE +++ b/vendor/cloud.google.com/go/longrunning/LICENSE @@ -1,7 +1,7 @@ Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -176,13 +176,24 @@ END OF TERMS AND CONDITIONS - Copyright The containerd Authors + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
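The impersonation hook added to the test utilities above is a thin wrapper over google.golang.org/api/impersonate. As a rough sketch of the same call outside the test harness, with a hypothetical target principal and project ID:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/pubsub"
	"google.golang.org/api/impersonate"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// The target principal and project below are placeholders; real values
	// depend on which service account you are permitted to impersonate.
	ts, err := impersonate.CredentialsTokenSource(ctx, impersonate.CredentialsConfig{
		TargetPrincipal: "test-runner@my-project.iam.gserviceaccount.com",
		Scopes:          []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatalf("unable to impersonate credentials: %v", err)
	}

	// Any Cloud client can consume the impersonated token source via options.
	client, err := pubsub.NewClient(ctx, "my-project", option.WithTokenSource(ts))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```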
+ + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/vendor/cloud.google.com/go/longrunning/README.md b/vendor/cloud.google.com/go/longrunning/README.md new file mode 100644 index 000000000000..a07f3093fd38 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/README.md @@ -0,0 +1,26 @@ +# longrunning + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/longrunning.svg)](https://pkg.go.dev/cloud.google.com/go/longrunning) + +A helper library for working with long running operations. + +## Install + +```bash +go get cloud.google.com/go/longrunning +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/vendor/cloud.google.com/go/longrunning/autogen/doc.go index 8d4f10e9f05f..a3c2461ea3b2 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/doc.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/doc.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,53 +17,64 @@ // Package longrunning is an auto-generated package for the // Long Running Operations API. // -// NOTE: This package is in alpha. It is not stable, and is likely to change. +// NOTE: This package is in alpha. It is not stable, and is likely to change. // -// Example usage +// # Example usage // // To get started with this package, create a client. -// ctx := context.Background() -// c, err := longrunning.NewOperationsClient(ctx) -// if err != nil { -// // TODO: Handle error. -// } -// defer c.Close() +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := longrunning.NewOperationsClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() // // The client will use your default application credentials. Clients should be reused instead of created as needed. // The methods of Client are safe for concurrent use by multiple goroutines. // The returned client must be Closed when it is done being used. 
// -// Using the Client +// # Using the Client // // The following is an example of making an API call with the newly created client. // -// ctx := context.Background() -// c, err := longrunning.NewOperationsClient(ctx) -// if err != nil { -// // TODO: Handle error. -// } -// defer c.Close() -// -// req := &longrunningpb.ListOperationsRequest{ -// // TODO: Fill request struct fields. -// // See https://pkg.go.dev/google.golang.org/genproto/googleapis/longrunning#ListOperationsRequest. -// } -// it := c.ListOperations(ctx, req) -// for { -// resp, err := it.Next() -// if err == iterator.Done { -// break -// } -// if err != nil { -// // TODO: Handle error. -// } -// // TODO: Use resp. -// _ = resp -// } -// -// Use of Context -// -// The ctx passed to NewClient is used for authentication requests and +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := longrunning.NewOperationsClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &longrunningpb.ListOperationsRequest{ +// // TODO: Fill request struct fields. +// // See https://pkg.go.dev/cloud.google.com/go/longrunning/autogen/longrunningpb#ListOperationsRequest. +// } +// it := c.ListOperations(ctx, req) +// for { +// resp, err := it.Next() +// if err == iterator.Done { +// break +// } +// if err != nil { +// // TODO: Handle error. +// } +// // TODO: Use resp. +// _ = resp +// } +// +// # Use of Context +// +// The ctx passed to NewOperationsClient is used for authentication requests and // for creating the underlying connection, but is not used for subsequent calls. // Individual methods on the client use the ctx given to them. // @@ -75,6 +86,8 @@ package longrunning // import "cloud.google.com/go/longrunning/autogen" import ( "context" + "fmt" + "net/http" "os" "runtime" "strconv" @@ -90,7 +103,14 @@ import ( type clientHookParams struct{} type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) -const versionClient = "20211208" +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { out, _ := metadata.FromOutgoingContext(ctx) @@ -115,7 +135,9 @@ func checkDisableDeadlines() (bool, error) { // DefaultAuthScopes reports the default set of authentication scopes to use with this package. func DefaultAuthScopes() []string { - return []string{} + return []string{ + "", + } } // versionGo returns the Go runtime version. The returned string @@ -154,3 +176,22 @@ func versionGo() string { } return "UNKNOWN" } + +// maybeUnknownEnum wraps the given proto-JSON parsing error if it is the result +// of receiving an unknown enum value. +func maybeUnknownEnum(err error) error { + if strings.Contains(err.Error(), "invalid value for enum type") { + err = fmt.Errorf("received an unknown enum value; a later version of the library may support it: %w", err) + } + return err +} + +// buildHeaders extracts metadata from the outgoing context, joins it with any other +// given metadata, and converts them into a http.Header. 
+func buildHeaders(ctx context.Context, mds ...metadata.MD) http.Header { + if cmd, ok := metadata.FromOutgoingContext(ctx); ok { + mds = append(mds, cmd) + } + md := metadata.Join(mds...) + return http.Header(md) +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json b/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json index eff1271b207c..527142821665 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json +++ b/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json @@ -36,6 +36,36 @@ ] } } + }, + "rest": { + "libraryClient": "OperationsClient", + "rpcs": { + "CancelOperation": { + "methods": [ + "CancelOperation" + ] + }, + "DeleteOperation": { + "methods": [ + "DeleteOperation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "ListOperations": { + "methods": [ + "ListOperations" + ] + }, + "WaitOperation": { + "methods": [ + "WaitOperation" + ] + } + } } } } diff --git a/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go b/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go rename to vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go index 44b30dca5490..6f3ae542be88 100644 --- a/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.5 // source: google/longrunning/operations.proto -package longrunning +package longrunningpb import ( context "context" @@ -70,6 +70,7 @@ type Operation struct { // If `done` == `true`, exactly one of `error` or `response` is set. // // Types that are assignable to Result: + // // *Operation_Error // *Operation_Response Result isOperation_Result `protobuf_oneof:"result"` @@ -519,13 +520,13 @@ func (x *WaitOperationRequest) GetTimeout() *durationpb.Duration { // // Example: // -// rpc LongRunningRecognize(LongRunningRecognizeRequest) -// returns (google.longrunning.Operation) { -// option (google.longrunning.operation_info) = { -// response_type: "LongRunningRecognizeResponse" -// metadata_type: "LongRunningRecognizeMetadata" -// }; -// } +// rpc LongRunningRecognize(LongRunningRecognizeRequest) +// returns (google.longrunning.Operation) { +// option (google.longrunning.operation_info) = { +// response_type: "LongRunningRecognizeResponse" +// metadata_type: "LongRunningRecognizeMetadata" +// }; +// } type OperationInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go index b90b54cce4e8..a04f1e341f43 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,21 +17,27 @@ package longrunning import ( + "bytes" "context" "fmt" + "io/ioutil" "math" + "net/http" "net/url" "time" + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" - longrunningpb "google.golang.org/genproto/googleapis/longrunning" + httptransport "google.golang.org/api/transport/http" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" ) @@ -108,7 +114,53 @@ func defaultOperationsCallOptions() *OperationsCallOptions { } } -// internalOperationsClient is an interface that defines the methods availaible from Long Running Operations API. +func defaultOperationsRESTCallOptions() *OperationsCallOptions { + return &OperationsCallOptions{ + ListOperations: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + GetOperation: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + DeleteOperation: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + CancelOperation: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + WaitOperation: []gax.CallOption{}, + } +} + +// internalOperationsClient is an interface that defines the methods available from Long Running Operations API. type internalOperationsClient interface { Close() error setGoogleClientInfo(...string) @@ -157,7 +209,8 @@ func (c *OperationsClient) setGoogleClientInfo(keyval ...string) { // Connection returns a connection to the API service. // -// Deprecated. +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. func (c *OperationsClient) Connection() *grpc.ClientConn { return c.internalClient.Connection() } @@ -286,7 +339,8 @@ func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*Ope // Connection returns a connection to the API service. // -// Deprecated. +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. func (c *operationsGRPCClient) Connection() *grpc.ClientConn { return c.connPool.Conn() } @@ -296,7 +350,7 @@ func (c *operationsGRPCClient) Connection() *grpc.ClientConn { // use by Google-written clients. func (c *operationsGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", versionGo()}, keyval...) 
- kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } @@ -306,8 +360,85 @@ func (c *operationsGRPCClient) Close() error { return c.connPool.Close() } +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type operationsRESTClient struct { + // The http endpoint to connect to. + endpoint string + + // The http client. + httpClient *http.Client + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD + + // Points back to the CallOptions field of the containing OperationsClient + CallOptions **OperationsCallOptions +} + +// NewOperationsRESTClient creates a new operations rest client. +// +// Manages long-running operations with an API service. +// +// When an API method normally takes long time to complete, it can be designed +// to return Operation to the client, and the client can use this +// interface to receive the real response asynchronously by polling the +// operation resource, or pass the operation resource to another API (such as +// Google Cloud Pub/Sub API) to receive the response. Any API service that +// returns long-running operations should implement the Operations interface +// so developers can have a consistent client experience. +func NewOperationsRESTClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) { + clientOpts := append(defaultOperationsRESTClientOptions(), opts...) + httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + callOpts := defaultOperationsRESTCallOptions() + c := &operationsRESTClient{ + endpoint: endpoint, + httpClient: httpClient, + CallOptions: &callOpts, + } + c.setGoogleClientInfo() + + return &OperationsClient{internalClient: c, CallOptions: callOpts}, nil +} + +func defaultOperationsRESTClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("https://longrunning.googleapis.com"), + internaloption.WithDefaultMTLSEndpoint("https://longrunning.mtls.googleapis.com"), + internaloption.WithDefaultAudience("https://longrunning.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + } +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *operationsRESTClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *operationsRESTClient) Close() error { + // Replace httpClient with nil to force cleanup. + c.httpClient = nil + return nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: This method always returns nil. 
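The REST constructor introduced above is used the same way as the existing gRPC constructor: both return an *OperationsClient. A short, hedged example of polling an operation over the REST transport, assuming default application credentials and a placeholder operation name:

```go
package main

import (
	"context"
	"fmt"
	"log"

	longrunning "cloud.google.com/go/longrunning/autogen"
	longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
)

func main() {
	ctx := context.Background()

	// NewOperationsRESTClient is the REST counterpart of NewOperationsClient;
	// it wires up an HTTP client instead of a gRPC connection pool.
	c, err := longrunning.NewOperationsRESTClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// "operations/12345" is a placeholder; use the name returned by the API
	// that started the long-running operation.
	op, err := c.GetOperation(ctx, &longrunningpb.GetOperationRequest{
		Name: "operations/12345",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("done:", op.GetDone())
}
```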
+func (c *operationsRESTClient) Connection() *grpc.ClientConn { + return nil +} func (c *operationsGRPCClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).ListOperations[0:len((*c.CallOptions).ListOperations):len((*c.CallOptions).ListOperations)], opts...) it := &OperationIterator{} @@ -357,6 +488,7 @@ func (c *operationsGRPCClient) GetOperation(ctx context.Context, req *longrunnin ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) var resp *longrunningpb.Operation @@ -378,6 +510,7 @@ func (c *operationsGRPCClient) DeleteOperation(ctx context.Context, req *longrun ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).DeleteOperation[0:len((*c.CallOptions).DeleteOperation):len((*c.CallOptions).DeleteOperation)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -395,6 +528,7 @@ func (c *operationsGRPCClient) CancelOperation(ctx context.Context, req *longrun ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).CancelOperation[0:len((*c.CallOptions).CancelOperation):len((*c.CallOptions).CancelOperation)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -420,6 +554,321 @@ func (c *operationsGRPCClient) WaitOperation(ctx context.Context, req *longrunni return resp, nil } +// ListOperations lists operations that match the specified filter in the request. If the +// server doesn’t support this method, it returns UNIMPLEMENTED. +// +// NOTE: the name binding allows API services to override the binding +// to use different resource name schemes, such as users/*/operations. To +// override the binding, API services can add a binding such as +// "/v1/{name=users/*}/operations" to their service configuration. +// For backwards compatibility, the default name includes the operations +// collection id, however overriding users must ensure the name binding +// is the parent resource, without the operations collection id. 
+func (c *operationsRESTClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { + it := &OperationIterator{} + req = proto.Clone(req).(*longrunningpb.ListOperationsRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) { + resp := &longrunningpb.ListOperationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + headers := buildHeaders(ctx, c.xGoogMetadata, metadata.Pairs("Content-Type", "application/json")) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := ioutil.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return maybeUnknownEnum(err) + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetOperations(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// GetOperation gets the latest state of a long-running operation. Clients can use this +// method to poll the operation result at intervals as recommended by the API +// service. +func (c *operationsRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + // Build HTTP headers from client and context metadata. + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &longrunningpb.Operation{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := ioutil.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return maybeUnknownEnum(err) + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// DeleteOperation deletes a long-running operation. This method indicates that the client is +// no longer interested in the operation result. It does not cancel the +// operation. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +func (c *operationsRESTClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + // Build HTTP headers from client and context metadata. + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + // Returns nil if there is no error, otherwise wraps + // the response code and body into a non-nil error + return googleapi.CheckResponse(httpRsp) + }, opts...) +} + +// CancelOperation starts asynchronous cancellation on a long-running operation. The server +// makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. Clients can use +// Operations.GetOperation or +// other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, +// the operation is not deleted; instead, it becomes an operation with +// an Operation.error value with a google.rpc.Status.code of 1, +// corresponding to Code.CANCELLED. +func (c *operationsRESTClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:cancel", req.GetName()) + + // Build HTTP headers from client and context metadata. 
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + // Returns nil if there is no error, otherwise wraps + // the response code and body into a non-nil error + return googleapi.CheckResponse(httpRsp) + }, opts...) +} + +// WaitOperation waits until the specified long-running operation is done or reaches at most +// a specified timeout, returning the latest state. If the operation is +// already done, the latest state is immediately returned. If the timeout +// specified is greater than the default HTTP/RPC timeout, the HTTP/RPC +// timeout is used. If the server does not support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +// Note that this method is on a best-effort basis. It may return the latest +// state before the specified timeout (including immediately), meaning even an +// immediate response is no guarantee that the operation is done. +func (c *operationsRESTClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("") + + params := url.Values{} + if req.GetName() != "" { + params.Add("name", fmt.Sprintf("%v", req.GetName())) + } + if req.GetTimeout() != nil { + timeout, err := protojson.Marshal(req.GetTimeout()) + if err != nil { + return nil, err + } + params.Add("timeout", string(timeout)) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + headers := buildHeaders(ctx, c.xGoogMetadata, metadata.Pairs("Content-Type", "application/json")) + opts = append((*c.CallOptions).WaitOperation[0:len((*c.CallOptions).WaitOperation):len((*c.CallOptions).WaitOperation)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &longrunningpb.Operation{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := ioutil.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return maybeUnknownEnum(err) + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + // OperationIterator manages a stream of *longrunningpb.Operation. 
type OperationIterator struct { items []*longrunningpb.Operation diff --git a/vendor/cloud.google.com/go/iam/go_mod_tidy_hack.go b/vendor/cloud.google.com/go/longrunning/tidyfix.go similarity index 88% rename from vendor/cloud.google.com/go/iam/go_mod_tidy_hack.go rename to vendor/cloud.google.com/go/longrunning/tidyfix.go index fbdd65f60c08..d9a07f99e0da 100644 --- a/vendor/cloud.google.com/go/iam/go_mod_tidy_hack.go +++ b/vendor/cloud.google.com/go/longrunning/tidyfix.go @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the cloud.google.com/go import, won't actually become part of +// This file, and the {{.RootMod}} import, won't actually become part of // the resultant binary. +//go:build modhack // +build modhack -package iam +package longrunning // Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository import _ "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/migration.md b/vendor/cloud.google.com/go/migration.md new file mode 100644 index 000000000000..224dcfa13972 --- /dev/null +++ b/vendor/cloud.google.com/go/migration.md @@ -0,0 +1,50 @@ +# go-genproto to google-cloud-go message type migration + +The message types for all of our client libraries are being migrated from the +`google.golang.org/genproto` [module](https://pkg.go.dev/google.golang.org/genproto) +to their respective product specific module in this repository. For example +this asset request type that was once found in [genproto](https://pkg.go.dev/google.golang.org/genproto@v0.0.0-20220908141613-51c1cc9bc6d0/googleapis/cloud/asset/v1p5beta1#ListAssetsRequest) +can now be found in directly in the [asset module](https://pkg.go.dev/cloud.google.com/go/asset/apiv1p5beta1/assetpb#ListAssetsRequest). + +Although the type definitions have moved, aliases have been left in the old +genproto packages to ensure a smooth non-breaking transition. + +## How do I migrate to the new packages? + +The easiest option is to run a migration tool at the root of our project. It is +like `go fix`, but specifically for this migration. Before running the tool it +is best to make sure any modules that have the prefix of `cloud.google.com/go` +are up to date. To run the tool, do the following: + +```bash +go run cloud.google.com/go/internal/aliasfix/cmd/aliasfix@latest . +go mod tidy +``` + +The tool should only change up to one line in the import statement per file. +This can also be done by hand if you prefer. + +## Do I have to migrate? + +Yes if you wish to keep using the newest versions of our client libraries with +the newest features -- You should migrate by the start of 2023. Until then we +will keep updating the aliases in go-genproto weekly. If you have an existing +workload that uses these client libraries and does not need to update its +dependencies there is no action to take. All existing written code will continue +to work. + +## Why are these types being moved + +1. This change will help simplify dependency trees over time. +2. The types will now be in product specific modules that are versioned + independently with semver. This is especially a benefit for users that rely + on multiple clients in a single application. Because message types are no + longer mono-packaged users are less likely to run into intermediate + dependency conflicts when updating dependencies. +3. 
Having all these types in one repository will help us ensure that unintended + changes are caught before they would be released. + +## Have questions? + +Please reach out to us on our [issue tracker](https://github.com/googleapis/google-cloud-go/issues/new?assignees=&labels=genproto-migration&template=migration-issue.md&title=package%3A+migration+help) +if you have any questions or concerns. diff --git a/vendor/cloud.google.com/go/pubsub/CHANGES.md b/vendor/cloud.google.com/go/pubsub/CHANGES.md index 49de654c5bb4..b044ce3dbbe9 100644 --- a/vendor/cloud.google.com/go/pubsub/CHANGES.md +++ b/vendor/cloud.google.com/go/pubsub/CHANGES.md @@ -1,5 +1,359 @@ # Changes +## [1.27.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.27.0...pubsub/v1.27.1) (2022-12-02) + + +### Bug Fixes + +* **pubsub:** downgrade some dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5)) + +## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.26.0...pubsub/v1.27.0) (2022-11-29) + + +### Features + +* **pubsub:** start generating proto stubs ([cf89415](https://github.com/googleapis/google-cloud-go/commit/cf894154e451a32b431fef2af3781a0d2d8080ff)) + +## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.25.1...pubsub/v1.26.0) (2022-10-24) + + +### Features + +* **pubsub:** Add support for snapshot labels ([#6835](https://github.com/googleapis/google-cloud-go/issues/6835)) ([c17851b](https://github.com/googleapis/google-cloud-go/commit/c17851b5c3d811cd3e6a28162f0e399bb31a1363)) + + +### Bug Fixes + +* **pubsub:** Remove unused AckResult map ([#6656](https://github.com/googleapis/google-cloud-go/issues/6656)) ([5f69002](https://github.com/googleapis/google-cloud-go/commit/5f690022551ac584e5c66af4324a17d7044a898d)) + + +### Documentation + +* **pubsub:** Fix comments on message for exactly once delivery ([#6878](https://github.com/googleapis/google-cloud-go/issues/6878)) ([a8109e2](https://github.com/googleapis/google-cloud-go/commit/a8109e2d3257d1698ce1b751618428ef25cbb859)), refs [#6877](https://github.com/googleapis/google-cloud-go/issues/6877) +* **pubsub:** Update streams section ([#6682](https://github.com/googleapis/google-cloud-go/issues/6682)) ([7b4e2b4](https://github.com/googleapis/google-cloud-go/commit/7b4e2b412058f965a9f9159231afe551a6f58a74)) +* **pubsub:** Update subscription retry policy defaults ([#6909](https://github.com/googleapis/google-cloud-go/issues/6909)) ([c5c2f8f](https://github.com/googleapis/google-cloud-go/commit/c5c2f8f7125034c611edf1d08ca35ece6554c454)), refs [#6903](https://github.com/googleapis/google-cloud-go/issues/6903) + +## [1.25.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.25.0...pubsub/v1.25.1) (2022-08-24) + + +### Bug Fixes + +* **pubsub:** up version of cloud.google.com/go ([#6558](https://github.com/googleapis/google-cloud-go/issues/6558)) ([be9dcfb](https://github.com/googleapis/google-cloud-go/commit/be9dcfbdfa5876a548eb3c60337c38e1d282bb88)), refs [#6555](https://github.com/googleapis/google-cloud-go/issues/6555) + +## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.24.0...pubsub/v1.25.0) (2022-08-23) + + +### Features + +* **pubsub:** support exactly once delivery ([#6506](https://github.com/googleapis/google-cloud-go/issues/6506)) ([74da335](https://github.com/googleapis/google-cloud-go/commit/74da335fea6cd70b27808507f2e58ae53f5f4910)) + + +### Documentation + +* **pubsub:** typo 
([#6453](https://github.com/googleapis/google-cloud-go/issues/6453)) ([34d839e](https://github.com/googleapis/google-cloud-go/commit/34d839ec546633a0fb7f73448337ac8d8c796acd)) + +## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.23.1...pubsub/v1.24.0) (2022-07-18) + + +### Features + +* **pubsub/pstest:** subscription message ordering ([#6257](https://github.com/googleapis/google-cloud-go/issues/6257)) ([71bd273](https://github.com/googleapis/google-cloud-go/commit/71bd273b8a77ed22c41a1284813ee59eb6820bda)) + + +### Bug Fixes + +* **pubsub:** make receipt modack call async ([#6335](https://github.com/googleapis/google-cloud-go/issues/6335)) ([d12ca07](https://github.com/googleapis/google-cloud-go/commit/d12ca07720b6b29360e583c1eea22f001239952f)) + +## [1.23.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.23.0...pubsub/v1.23.1) (2022-06-30) + + +### Bug Fixes + +* **pubsub:** increase modack deadline RPC timeout ([#6289](https://github.com/googleapis/google-cloud-go/issues/6289)) ([d24600f](https://github.com/googleapis/google-cloud-go/commit/d24600fda7e574a388e8898c2ecc1958d07f4224)) + +## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.22.2...pubsub/v1.23.0) (2022-06-23) + + +### Features + +* **pubsub:** report publisher outstanding metrics ([#6187](https://github.com/googleapis/google-cloud-go/issues/6187)) ([cc1528b](https://github.com/googleapis/google-cloud-go/commit/cc1528b2bfebbb48d49bcacd639abf2cf3468c96)) +* **pubsub:** support bigquery subscriptions ([#6119](https://github.com/googleapis/google-cloud-go/issues/6119)) ([81f704a](https://github.com/googleapis/google-cloud-go/commit/81f704a2cdeece8f73d7c09eae730a905afdb870)) + +## [1.22.2](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.22.1...pubsub/v1.22.2) (2022-06-03) + + +### Bug Fixes + +* **pubsub:** fix iterator distribution bound calculations ([#6125](https://github.com/googleapis/google-cloud-go/issues/6125)) ([6c470ff](https://github.com/googleapis/google-cloud-go/commit/6c470ff02072d7af32ee07a772c5d0796b545a45)) + +## [1.22.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.22.0...pubsub/v1.22.1) (2022-06-02) + + +### Bug Fixes + +* **pubsub:** use MaxInt instead of MaxInt64 for BufferedByteLimit ([#6113](https://github.com/googleapis/google-cloud-go/issues/6113)) ([06721e0](https://github.com/googleapis/google-cloud-go/commit/06721e06a16f5c94a31b96809aad02f5eb38147c)) + +## [1.22.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.21.1...pubsub/v1.22.0) (2022-05-31) + + +### Features + +* **pubsub:** add BigQuery configuration for subscriptions ([6ef576e](https://github.com/googleapis/google-cloud-go/commit/6ef576e2d821d079e7b940cd5d49fe3ca64a7ba2)) +* **pubsub:** add min extension period ([#6041](https://github.com/googleapis/google-cloud-go/issues/6041)) ([f2407c7](https://github.com/googleapis/google-cloud-go/commit/f2407c7013bbfdfc0103296accc828b0be674f5d)) + + +### Bug Fixes + +* **pubsub:** disable deprecated BufferedByteLimit when using MaxOutstandingBytes ([#6009](https://github.com/googleapis/google-cloud-go/issues/6009)) ([dbfdf76](https://github.com/googleapis/google-cloud-go/commit/dbfdf762c77f9cfad637c573b06f0a49e01316f3)) + +### [1.21.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.21.0...pubsub/v1.21.1) (2022-05-04) + + +### Bug Fixes + +* **pubsub:** mark ignore option default for publish flow control ([#5983](https://github.com/googleapis/google-cloud-go/issues/5983)) 
([3f41531](https://github.com/googleapis/google-cloud-go/commit/3f41531579b7a55acea66fec8362e9134125c8a0)) + +## [1.21.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.20.0...pubsub/v1.21.0) (2022-04-26) + + +### Features + +* **pubsub:** deprecate synchronous mode ([#5910](https://github.com/googleapis/google-cloud-go/issues/5910)) ([bda5179](https://github.com/googleapis/google-cloud-go/commit/bda5179fa240b1468cd1043128493f634be28986)) + + +### Bug Fixes + +* **pubsub:** enable updating enable_exactly_once_delivery in fake pubsub ([#5940](https://github.com/googleapis/google-cloud-go/issues/5940)) ([ee44bf6](https://github.com/googleapis/google-cloud-go/commit/ee44bf646af1c38ed0943a997051b0225e22a6bf)) +* **pubsub:** nack messages properly with error from receive scheduler ([#5909](https://github.com/googleapis/google-cloud-go/issues/5909)) ([80edea4](https://github.com/googleapis/google-cloud-go/commit/80edea40dd722efb3c15cd3de3f24e0e7ad08ed7)) + +## [1.20.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.19.0...pubsub/v1.20.0) (2022-04-11) + + +### Features + +* **pubsub/pstest:** add topic retention support ([#4790](https://github.com/googleapis/google-cloud-go/issues/4790)) ([0a4ad6a](https://github.com/googleapis/google-cloud-go/commit/0a4ad6a72ddc379a94a88ec70ac678a227843cfd)) + + +### Bug Fixes + +* **pubsub:** ignore grpc errors in ack/modack ([#5796](https://github.com/googleapis/google-cloud-go/issues/5796)) ([4fb9aec](https://github.com/googleapis/google-cloud-go/commit/4fb9aecd2bc415c846e26eb960859e10e1af61f3)) + +## [1.19.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.18.0...pubsub/v1.19.0) (2022-03-07) + + +### Features + +* **pubsub:** add better version metadata to calls ([d1ad921](https://github.com/googleapis/google-cloud-go/commit/d1ad921d0322e7ce728ca9d255a3cf0437d26add)) +* **pubsub:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) + + +### Bug Fixes + +* **pubsub:** prevent infinite retry with publishing invalid utf-8 chars ([#5728](https://github.com/googleapis/google-cloud-go/issues/5728)) ([0a4dab9](https://github.com/googleapis/google-cloud-go/commit/0a4dab9043db81342dc41bd496d35fd4a7b08ad5)) +* **pubsub:** removing misspelled field, add correctly spelled field ([4a223de](https://github.com/googleapis/google-cloud-go/commit/4a223de8eab072d95818c761e41fb3f3f6ac728c)) + +## [1.18.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.17.1...pubsub/v1.18.0) (2022-02-08) + + +### Features + +* **pubsub:** add exactly once delivery flag ([f71dc3d](https://www.github.com/googleapis/google-cloud-go/commit/f71dc3dfefa54ab41861aea15971108850a9f98b)) +* **pubsub:** add exactly once delivery flag ([f71dc3d](https://www.github.com/googleapis/google-cloud-go/commit/f71dc3dfefa54ab41861aea15971108850a9f98b)) + + +### Bug Fixes + +* **pubsub:** add deadletter and retries handling in the fake pubsub ([#5320](https://www.github.com/googleapis/google-cloud-go/issues/5320)) ([116a610](https://www.github.com/googleapis/google-cloud-go/commit/116a61008e174e5d49b9485d78bc13f64461322f)) +* **pubsub:** pass context into checkOrdering to allow cancel ([#5316](https://www.github.com/googleapis/google-cloud-go/issues/5316)) ([fc08c49](https://www.github.com/googleapis/google-cloud-go/commit/fc08c49fc013cbad00642bbba317e02f0ba15a6d)) + +### 
[1.17.1](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.17.0...pubsub/v1.17.1) (2021-10-25) + + +### Bug Fixes + +* **pubsub:** add methods to allow retrieval of topic/sub config names ([#4953](https://www.github.com/googleapis/google-cloud-go/issues/4953)) ([bff5b1c](https://www.github.com/googleapis/google-cloud-go/commit/bff5b1ca331a0d193407a0f3eb501772cbb8ba78)) +* **pubsub:** prevent draining error return for Receive ([#4733](https://www.github.com/googleapis/google-cloud-go/issues/4733)) ([c6d5189](https://www.github.com/googleapis/google-cloud-go/commit/c6d51891649d8169089a0a2b7365ea54f991af56)) +* **pubsub:** tag ctx in iterator with subscription for opencensus ([#5011](https://www.github.com/googleapis/google-cloud-go/issues/5011)) ([cdf9588](https://www.github.com/googleapis/google-cloud-go/commit/cdf958864e278bb394cc548cb5f15ad08859f347)) + +## [1.17.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.16.0...pubsub/v1.17.0) (2021-09-08) + + +### Features + +* **pubsub:** add list configs for topic & sub ([#4607](https://www.github.com/googleapis/google-cloud-go/issues/4607)) ([a6550c5](https://www.github.com/googleapis/google-cloud-go/commit/a6550c5dfb381e286fea6a905dc658c4a865d643)) +* **pubsub:** add publisher flow control support ([#4292](https://www.github.com/googleapis/google-cloud-go/issues/4292)) ([bff24c3](https://www.github.com/googleapis/google-cloud-go/commit/bff24c3a62a2f037c1ccef14986f917e41953734)) + +## [1.16.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.15.0...pubsub/v1.16.0) (2021-08-24) + + +### Features + +* **pubsub:** add topic message retention duration ([#4520](https://www.github.com/googleapis/google-cloud-go/issues/4520)) ([0440336](https://www.github.com/googleapis/google-cloud-go/commit/0440336c988a4401cbdb5d85a8cc7fca388831e5)) + +## [1.15.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.14.0...pubsub/v1.15.0) (2021-08-13) + + +### Features + +* **pubsub:** Add topic retention options ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) + + +### Bug Fixes + +* **pubsub:** always make config check to prevent race ([#4606](https://www.github.com/googleapis/google-cloud-go/issues/4606)) ([8cfcf53](https://www.github.com/googleapis/google-cloud-go/commit/8cfcf53d03b9b442e7f0bc1c1b20c791e31c07b0)), refs [#3626](https://www.github.com/googleapis/google-cloud-go/issues/3626) +* **pubsub:** mitigate race in checking ordering config ([#4602](https://www.github.com/googleapis/google-cloud-go/issues/4602)) ([112eea2](https://www.github.com/googleapis/google-cloud-go/commit/112eea20b46bbc34e5f8f65b9812fb3e60107409)), refs [#3626](https://www.github.com/googleapis/google-cloud-go/issues/3626) +* **pubsub:** replace IAMPolicy in API config ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) + +## [1.14.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.13.0...pubsub/v1.14.0) (2021-08-09) + + +### Features + +* **pubsub:** expose CallOptions for pub/sub retries and timeouts ([#4428](https://www.github.com/googleapis/google-cloud-go/issues/4428)) ([8b99dd3](https://www.github.com/googleapis/google-cloud-go/commit/8b99dd356475a750000c06a44fc7b8423d703967)) + +## [1.13.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.12.2...pubsub/v1.13.0) (2021-07-20) + + +### Features + +* **pubsub/pstest:** add ability to create a pstest 
server listening on ([#4459](https://www.github.com/googleapis/google-cloud-go/issues/4459)) ([f1b7c8b](https://www.github.com/googleapis/google-cloud-go/commit/f1b7c8b33bc135c6cb8f21cdec586b25d81ea214)) + +### [1.12.2](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.12.1...pubsub/v1.12.2) (2021-07-08) + + +### Bug Fixes + +* **pubsub:** retry all goaway errors ([#4384](https://www.github.com/googleapis/google-cloud-go/issues/4384)) ([1eae86f](https://www.github.com/googleapis/google-cloud-go/commit/1eae86f1882660d901b9fb0e8dab6f138a048dbb)), refs [#4257](https://www.github.com/googleapis/google-cloud-go/issues/4257) + +### [1.12.1](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.12.0...pubsub/v1.12.1) (2021-07-01) + + +### Bug Fixes + +* **pubsub:** retry GOAWAY errors ([#4313](https://www.github.com/googleapis/google-cloud-go/issues/4313)) ([7076fef](https://www.github.com/googleapis/google-cloud-go/commit/7076fef5fef81cce47dbfbab3d7257cc7d3776bc)) + +## [1.12.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.11.0...pubsub/v1.12.0) (2021-06-23) + + +### Features + +* **pubsub/pstest:** add channel to support user-defined publish responses ([#4251](https://www.github.com/googleapis/google-cloud-go/issues/4251)) ([e1304f4](https://www.github.com/googleapis/google-cloud-go/commit/e1304f435fed4a767f4a652f32f1386979ff794f)) + + +### Bug Fixes + +* **pubsub:** fix memory leak issue in publish scheduler ([#4282](https://www.github.com/googleapis/google-cloud-go/issues/4282)) ([22ffc18](https://www.github.com/googleapis/google-cloud-go/commit/22ffc18e522c0f943db57f8c943e7356067bedfd)) + +## [1.11.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.10.3...pubsub/v1.11.0) (2021-05-27) + + +### Features + +* **pubsub:** add flush method to topic ([#2863](https://www.github.com/googleapis/google-cloud-go/issues/2863)) ([825ddd6](https://www.github.com/googleapis/google-cloud-go/commit/825ddd692363eb2dd8cd253cc5976867e432f547)) + +### [1.10.3](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.10.2...pubsub/v1.10.3) (2021-04-23) + + +### Bug Fixes + +* **pubsub:** fix failing message storage policy tests ([#4003](https://www.github.com/googleapis/google-cloud-go/issues/4003)) ([8946158](https://www.github.com/googleapis/google-cloud-go/commit/8946158561e1599c164021364e7fcb2a4c4d2f3d)) +* **pubsub:** make config call permission error in Receive transparent ([#3985](https://www.github.com/googleapis/google-cloud-go/issues/3985)) ([a1614db](https://www.github.com/googleapis/google-cloud-go/commit/a1614db35a51d21c52bcba5e805071381d8f5133)) + +### [1.10.2](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.10.1...pubsub/v1.10.2) (2021-04-08) + + +### Bug Fixes + +* **pubsub:** respect subscription message ordering field in scheduler ([#3886](https://www.github.com/googleapis/google-cloud-go/issues/3886)) ([1fcc78a](https://www.github.com/googleapis/google-cloud-go/commit/1fcc78ac6ecb461c3bbede9667436614c9df1535)) +* **pubsub:** update quiescenceDur in failing e2e test ([#3780](https://www.github.com/googleapis/google-cloud-go/issues/3780)) ([97e6c69](https://www.github.com/googleapis/google-cloud-go/commit/97e6c696c39bf4cf49fa5ef51145cfcb2a1a5d71)) + +### [1.10.1](https://www.github.com/googleapis/google-cloud-go/compare/v1.10.0...v1.10.1) (2021-03-04) + + +### Bug Fixes + +* **pubsub:** hide context.Cancelled error in sync pull 
([#3752](https://www.github.com/googleapis/google-cloud-go/issues/3752)) ([f88bdc8](https://www.github.com/googleapis/google-cloud-go/commit/f88bdc85072e5ad511a907d98207ebf7d22e9df7)) + +## [1.10.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.9.1...v1.10.0) (2021-02-10) + + +### Features + +* **pubsub:** add opencensus metrics for outstanding messages/bytes ([#3690](https://www.github.com/googleapis/google-cloud-go/issues/3690)) ([4039b82](https://www.github.com/googleapis/google-cloud-go/commit/4039b82e95b3a8ba2322d1f4fe9e2c21b087a907)) + +### [1.9.1](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.9.0...v1.9.1) (2020-12-10) + +### Bug Fixes + +* **pubsub:** fix default stream ack deadline seconds ([#3430](https://www.github.com/googleapis/google-cloud-go/issues/3430)) ([a10263a](https://www.github.com/googleapis/google-cloud-go/commit/a10263adc2ec9483ecedd0bf0b028863342ea760)) +* **pubsub:** respect streamAckDeadlineSeconds with MaxExtensionPeriod ([#3367](https://www.github.com/googleapis/google-cloud-go/issues/3367)) ([45131b6](https://www.github.com/googleapis/google-cloud-go/commit/45131b6c526ded2964ffd067c4a5420d508f0b1a)) + +## [1.9.0](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.8.3...v1.9.0) (2020-12-03) + +### Features + +- **pubsub:** Enable server side flow control by default with the option to turn it off ([#3154](https://www.github.com/googleapis/google-cloud-go/issues/3154)) ([e392e61](https://www.github.com/googleapis/google-cloud-go/commit/e392e6157ee02a344528de63ab16baba61470b24)) + +### Refactor + +**NOTE**: Several changes were proposed for allowing `Message` and `PublishResult` to be used outside the library. However, the decision was made to only allow packages in `google-cloud-go` to access `NewMessage` and `NewPublishResult` (see #3351). + +- **pubsub:** Allow Message and PublishResult to be used outside the package ([#3200](https://www.github.com/googleapis/google-cloud-go/issues/3200)) ([581bf92](https://www.github.com/googleapis/google-cloud-go/commit/581bf92878dcb52ae8ea3633d4b3fcbb7054ff0f)) +- **pubsub:** Remove NewMessage and NewPublishResult ([#3232](https://www.github.com/googleapis/google-cloud-go/issues/3232)) ([a781a3a](https://www.github.com/googleapis/google-cloud-go/commit/a781a3ad0c626fc0a7aff0ce33b1ef0830ee2259)) + +### [1.8.3](https://www.github.com/googleapis/google-cloud-go/compare/pubsub/v1.8.2...v1.8.3) (2020-11-10) + +### Bug Fixes + +- **pubsub:** retry deadline exceeded errors in Acknowledge ([#3157](https://www.github.com/googleapis/google-cloud-go/issues/3157)) ([ae75b46](https://www.github.com/googleapis/google-cloud-go/commit/ae75b46033d9f14f41c1bde4b9646c93f8e2bbad)) + +## v1.8.2 + +- Fixes: + - fix(pubsub): track errors in published messages opencensus metric (#2970) + - fix(pubsub): do not propagate context deadline exceeded error (#3055) + +## v1.8.1 + +- Suppress connection is closing on error on subscriber close. (#2951) + +## v1.8.0 + +- Add status code to error injection in pstest. This is a BREAKING CHANGE. + +## v1.7.0 + +- Add reactor options to pstest server. (#2916) + +## v1.6.2 + +- Make message.Modacks thread safe in pstest. (#2755) +- Fix issue with closing publisher and subscriber client errors. (#2867) +- Fix updating subscription filtering/retry policy in pstest. (#2901) + +## v1.6.1 + +- Fix issue where EnableMessageOrdering wasn't being parsed properly to `SubscriptionConfig`. 
+ +## v1.6.0 + +- Fix issue where subscriber streams were limited because it was using a single grpc conn. + - As a side effect, publisher and subscriber grpc conns are no longer shared. +- Add fake time function in pstest. +- Add support for server side flow control. + +## v1.5.0 + +- Add support for subscription detachment. +- Add support for message filtering in subscriptions. +- Add support for RetryPolicy (server-side feature). +- Fix publish error path when ordering key is disabled. +- Fix panic on Topic.ResumePublish method. + +## v1.4.0 + +- Add support for upcoming ordering keys feature. + ## v1.3.1 - Fix bug with removing dead letter policy from a subscription diff --git a/vendor/cloud.google.com/go/pubsub/README.md b/vendor/cloud.google.com/go/pubsub/README.md index 59f4cf66d9d2..cc3d2dccb834 100644 --- a/vendor/cloud.google.com/go/pubsub/README.md +++ b/vendor/cloud.google.com/go/pubsub/README.md @@ -1,9 +1,9 @@ -## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) +## Cloud Pub/Sub [![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/pubsub.svg)](https://pkg.go.dev/cloud.google.com/go/pubsub) - [About Cloud Pubsub](https://cloud.google.com/pubsub/) - [API documentation](https://cloud.google.com/pubsub/docs) -- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub) -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub) +- [Go client documentation](https://pkg.go.dev/cloud.google.com/go/pubsub) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/pubsub) ### Example Usage @@ -43,4 +43,4 @@ err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { if err != nil { log.Println(err) } -``` \ No newline at end of file +``` diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go index 378e9b72cc81..e7a00c124e11 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,28 +20,90 @@ // Provides reliable, many-to-many, asynchronous messaging between // applications. // -// Use of Context +// # Example usage // -// The ctx passed to NewClient is used for authentication requests and +// To get started with this package, create a client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := pubsub.NewSchemaClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// The client will use your default application credentials. Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +// The returned client must be Closed when it is done being used. +// +// # Using the Client +// +// The following is an example of making an API call with the newly created client. 
+// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := pubsub.NewSchemaClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &pubsubpb.CreateSchemaRequest{ +// // TODO: Fill request struct fields. +// // See https://pkg.go.dev/google.golang.org/genproto/googleapis/pubsub/v1#CreateSchemaRequest. +// } +// resp, err := c.CreateSchema(ctx, req) +// if err != nil { +// // TODO: Handle error. +// } +// // TODO: Use resp. +// _ = resp +// +// # Use of Context +// +// The ctx passed to NewSchemaClient is used for authentication requests and // for creating the underlying connection, but is not used for subsequent calls. // Individual methods on the client use the ctx given to them. // // To close the open connection, use the Close() method. // // For information about setting deadlines, reusing contexts, and more -// please visit godoc.org/cloud.google.com/go. +// please visit https://pkg.go.dev/cloud.google.com/go. package pubsub // import "cloud.google.com/go/pubsub/apiv1" import ( "context" + "os" "runtime" + "strconv" "strings" "unicode" + "google.golang.org/api/option" "google.golang.org/grpc/metadata" ) -const versionClient = "20200312" +// For more information on implementing a client constructor hook, see +// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. +type clientHookParams struct{} +type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) + +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { out, _ := metadata.FromOutgoingContext(ctx) @@ -54,6 +116,16 @@ func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { return metadata.NewOutgoingContext(ctx, out) } +func checkDisableDeadlines() (bool, error) { + raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") + if !ok { + return false, nil + } + + b, err := strconv.ParseBool(raw) + return b, err +} + // DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
func DefaultAuthScopes() []string { return []string{ diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/gapic_metadata.json b/vendor/cloud.google.com/go/pubsub/apiv1/gapic_metadata.json new file mode 100644 index 000000000000..64b2999668a6 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/gapic_metadata.json @@ -0,0 +1,236 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", + "language": "go", + "protoPackage": "google.pubsub.v1", + "libraryPackage": "cloud.google.com/go/pubsub/apiv1", + "services": { + "Publisher": { + "clients": { + "grpc": { + "libraryClient": "PublisherClient", + "rpcs": { + "CreateTopic": { + "methods": [ + "CreateTopic" + ] + }, + "DeleteTopic": { + "methods": [ + "DeleteTopic" + ] + }, + "DetachSubscription": { + "methods": [ + "DetachSubscription" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetTopic": { + "methods": [ + "GetTopic" + ] + }, + "ListTopicSnapshots": { + "methods": [ + "ListTopicSnapshots" + ] + }, + "ListTopicSubscriptions": { + "methods": [ + "ListTopicSubscriptions" + ] + }, + "ListTopics": { + "methods": [ + "ListTopics" + ] + }, + "Publish": { + "methods": [ + "Publish" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateTopic": { + "methods": [ + "UpdateTopic" + ] + } + } + } + } + }, + "SchemaService": { + "clients": { + "grpc": { + "libraryClient": "SchemaClient", + "rpcs": { + "CreateSchema": { + "methods": [ + "CreateSchema" + ] + }, + "DeleteSchema": { + "methods": [ + "DeleteSchema" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetSchema": { + "methods": [ + "GetSchema" + ] + }, + "ListSchemas": { + "methods": [ + "ListSchemas" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "ValidateMessage": { + "methods": [ + "ValidateMessage" + ] + }, + "ValidateSchema": { + "methods": [ + "ValidateSchema" + ] + } + } + } + } + }, + "Subscriber": { + "clients": { + "grpc": { + "libraryClient": "SubscriberClient", + "rpcs": { + "Acknowledge": { + "methods": [ + "Acknowledge" + ] + }, + "CreateSnapshot": { + "methods": [ + "CreateSnapshot" + ] + }, + "CreateSubscription": { + "methods": [ + "CreateSubscription" + ] + }, + "DeleteSnapshot": { + "methods": [ + "DeleteSnapshot" + ] + }, + "DeleteSubscription": { + "methods": [ + "DeleteSubscription" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetSnapshot": { + "methods": [ + "GetSnapshot" + ] + }, + "GetSubscription": { + "methods": [ + "GetSubscription" + ] + }, + "ListSnapshots": { + "methods": [ + "ListSnapshots" + ] + }, + "ListSubscriptions": { + "methods": [ + "ListSubscriptions" + ] + }, + "ModifyAckDeadline": { + "methods": [ + "ModifyAckDeadline" + ] + }, + "ModifyPushConfig": { + "methods": [ + "ModifyPushConfig" + ] + }, + "Pull": { + "methods": [ + "Pull" + ] + }, + "Seek": { + "methods": [ + "Seek" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "StreamingPull": { + "methods": [ + "StreamingPull" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateSnapshot": { + "methods": [ + "UpdateSnapshot" + ] + }, + "UpdateSubscription": { + "methods": [ + "UpdateSubscription" + ] + } + } + } + } + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/info.go 
b/vendor/cloud.google.com/go/pubsub/apiv1/info.go new file mode 100644 index 000000000000..fde99d8d0c53 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/info.go @@ -0,0 +1,33 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Also passes any +// provided key-value pairs. Intended for use by Google-written clients. +// +// Internal use only. +func (pc *PublisherClient) SetGoogleClientInfo(keyval ...string) { + pc.setGoogleClientInfo(keyval...) +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Also passes any +// provided key-value pairs. Intended for use by Google-written clients. +// +// Internal use only. +func (sc *SubscriberClient) SetGoogleClientInfo(keyval ...string) { + sc.setGoogleClientInfo(keyval...) +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go b/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go index b9ab4848db1b..7416e376d18e 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go @@ -17,7 +17,9 @@ package pubsub // PublisherProjectPath returns the path for the project resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s", project) +// +// fmt.Sprintf("projects/%s", project) +// // instead. func PublisherProjectPath(project string) string { return "" + @@ -29,7 +31,9 @@ func PublisherProjectPath(project string) string { // PublisherTopicPath returns the path for the topic resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s/topics/%s", project, topic) +// +// fmt.Sprintf("projects/%s/topics/%s", project, topic) +// // instead. func PublisherTopicPath(project, topic string) string { return "" + @@ -43,7 +47,9 @@ func PublisherTopicPath(project, topic string) string { // SubscriberProjectPath returns the path for the project resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s", project) +// +// fmt.Sprintf("projects/%s", project) +// // instead. func SubscriberProjectPath(project string) string { return "" + @@ -55,7 +61,9 @@ func SubscriberProjectPath(project string) string { // SubscriberSnapshotPath returns the path for the snapshot resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s/snapshots/%s", project, snapshot) +// +// fmt.Sprintf("projects/%s/snapshots/%s", project, snapshot) +// // instead. func SubscriberSnapshotPath(project, snapshot string) string { return "" + @@ -69,7 +77,9 @@ func SubscriberSnapshotPath(project, snapshot string) string { // SubscriberSubscriptionPath returns the path for the subscription resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription) +// +// fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription) +// // instead. 
func SubscriberSubscriptionPath(project, subscription string) string { return "" + @@ -83,7 +93,9 @@ func SubscriberSubscriptionPath(project, subscription string) string { // SubscriberTopicPath returns the path for the topic resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s/topics/%s", project, topic) +// +// fmt.Sprintf("projects/%s/topics/%s", project, topic) +// // instead. func SubscriberTopicPath(project, topic string) string { return "" + diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go index 96c392f826bc..11b1298de1cc 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,17 +23,21 @@ import ( "net/url" "time" - "github.com/golang/protobuf/proto" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" + iampb "google.golang.org/genproto/googleapis/iam/v1" pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" ) +var newPublisherClientHook clientHook + // PublisherCallOptions contains the retry settings for each method of PublisherClient. type PublisherCallOptions struct { CreateTopic []gax.CallOption @@ -44,13 +48,19 @@ type PublisherCallOptions struct { ListTopicSubscriptions []gax.CallOption ListTopicSnapshots []gax.CallOption DeleteTopic []gax.CallOption + DetachSubscription []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption } -func defaultPublisherClientOptions() []option.ClientOption { +func defaultPublisherGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ - option.WithEndpoint("pubsub.googleapis.com:443"), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), - option.WithScopes(DefaultAuthScopes()...), + internaloption.WithDefaultEndpoint("pubsub.googleapis.com:443"), + internaloption.WithDefaultMTLSEndpoint("pubsub.mtls.googleapis.com:443"), + internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -160,75 +170,258 @@ func defaultPublisherCallOptions() *PublisherCallOptions { }) }), }, + DetachSubscription: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, } } +// internalPublisherClient is an interface that defines the methods available from Cloud Pub/Sub API. 
+type internalPublisherClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateTopic(context.Context, *pubsubpb.Topic, ...gax.CallOption) (*pubsubpb.Topic, error) + UpdateTopic(context.Context, *pubsubpb.UpdateTopicRequest, ...gax.CallOption) (*pubsubpb.Topic, error) + Publish(context.Context, *pubsubpb.PublishRequest, ...gax.CallOption) (*pubsubpb.PublishResponse, error) + GetTopic(context.Context, *pubsubpb.GetTopicRequest, ...gax.CallOption) (*pubsubpb.Topic, error) + ListTopics(context.Context, *pubsubpb.ListTopicsRequest, ...gax.CallOption) *TopicIterator + ListTopicSubscriptions(context.Context, *pubsubpb.ListTopicSubscriptionsRequest, ...gax.CallOption) *StringIterator + ListTopicSnapshots(context.Context, *pubsubpb.ListTopicSnapshotsRequest, ...gax.CallOption) *StringIterator + DeleteTopic(context.Context, *pubsubpb.DeleteTopicRequest, ...gax.CallOption) error + DetachSubscription(context.Context, *pubsubpb.DetachSubscriptionRequest, ...gax.CallOption) (*pubsubpb.DetachSubscriptionResponse, error) + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) +} + // PublisherClient is a client for interacting with Cloud Pub/Sub API. -// // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The service that an application uses to manipulate topics, and to send +// messages to a topic. type PublisherClient struct { + // The internal transport-dependent client. + internalClient internalPublisherClient + + // The call options for this service. + CallOptions *PublisherCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *PublisherClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *PublisherClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *PublisherClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateTopic creates the given topic with the given name. See the [resource name rules] +// (https://cloud.google.com/pubsub/docs/admin#resource_names (at https://cloud.google.com/pubsub/docs/admin#resource_names)). +func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + return c.internalClient.CreateTopic(ctx, req, opts...) +} + +// UpdateTopic updates an existing topic. Note that certain properties of a +// topic are not modifiable. +func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + return c.internalClient.UpdateTopic(ctx, req, opts...) +} + +// Publish adds one or more messages to the topic. 
Returns NOT_FOUND if the topic +// does not exist. +func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) { + return c.internalClient.Publish(ctx, req, opts...) +} + +// GetTopic gets the configuration of a topic. +func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + return c.internalClient.GetTopic(ctx, req, opts...) +} + +// ListTopics lists matching topics. +func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator { + return c.internalClient.ListTopics(ctx, req, opts...) +} + +// ListTopicSubscriptions lists the names of the attached subscriptions on this topic. +func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator { + return c.internalClient.ListTopicSubscriptions(ctx, req, opts...) +} + +// ListTopicSnapshots lists the names of the snapshots on this topic. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) operations, +// which allow you to manage message acknowledgments in bulk. That is, you can +// set the acknowledgment state of messages in an existing subscription to the +// state captured by a snapshot. +func (c *PublisherClient) ListTopicSnapshots(ctx context.Context, req *pubsubpb.ListTopicSnapshotsRequest, opts ...gax.CallOption) *StringIterator { + return c.internalClient.ListTopicSnapshots(ctx, req, opts...) +} + +// DeleteTopic deletes the topic with the given name. Returns NOT_FOUND if the topic +// does not exist. After a topic is deleted, a new topic may be created with +// the same name; this is an entirely new topic with none of the old +// configuration or subscriptions. Existing subscriptions to this topic are +// not deleted, but their topic field is set to _deleted-topic_. +func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteTopic(ctx, req, opts...) +} + +// DetachSubscription detaches a subscription from this topic. All messages retained in the +// subscription are dropped. Subsequent Pull and StreamingPull requests +// will return FAILED_PRECONDITION. If the subscription is a push +// subscription, pushes to the endpoint will stop. +func (c *PublisherClient) DetachSubscription(ctx context.Context, req *pubsubpb.DetachSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.DetachSubscriptionResponse, error) { + return c.internalClient.DetachSubscription(ctx, req, opts...) +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *PublisherClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *PublisherClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) 
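// Illustrative sketch, not part of the vendored diff: the basic publish flow through the
// wrapper methods above (CreateTopic, then Publish). Resource names are placeholders and
// error handling is reduced to log.Fatal for brevity.
package main

import (
	"context"
	"log"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

func main() {
	ctx := context.Background()
	c, err := pubsub.NewPublisherClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Publish returns NOT_FOUND if the topic does not exist, so create it first.
	topic, err := c.CreateTopic(ctx, &pubsubpb.Topic{Name: "projects/my-project/topics/my-topic"})
	if err != nil {
		log.Fatal(err)
	}
	resp, err := c.Publish(ctx, &pubsubpb.PublishRequest{
		Topic:    topic.GetName(),
		Messages: []*pubsubpb.PubsubMessage{{Data: []byte("hello")}},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("published message IDs: %v", resp.GetMessageIds())
}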
+} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. +func (c *PublisherClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// publisherGRPCClient is a client for interacting with Cloud Pub/Sub API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type publisherGRPCClient struct { // Connection pool of gRPC connections to the service. connPool gtransport.ConnPool + // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE + disableDeadlines bool + + // Points back to the CallOptions field of the containing PublisherClient + CallOptions **PublisherCallOptions + // The gRPC API client. publisherClient pubsubpb.PublisherClient - // The call options for this service. - CallOptions *PublisherCallOptions + iamPolicyClient iampb.IAMPolicyClient // The x-goog-* metadata to be sent with each request. xGoogMetadata metadata.MD } -// NewPublisherClient creates a new publisher client. +// NewPublisherClient creates a new publisher client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. // // The service that an application uses to manipulate topics, and to send // messages to a topic. func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*PublisherClient, error) { - connPool, err := gtransport.DialPool(ctx, append(defaultPublisherClientOptions(), opts...)...) + clientOpts := defaultPublisherGRPCClientOptions() + if newPublisherClientHook != nil { + hookOpts, err := newPublisherClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + disableDeadlines, err := checkDisableDeadlines() if err != nil { return nil, err } - c := &PublisherClient{ - connPool: connPool, - CallOptions: defaultPublisherCallOptions(), - publisherClient: pubsubpb.NewPublisherClient(connPool), + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err } - c.SetGoogleClientInfo() + client := PublisherClient{CallOptions: defaultPublisherCallOptions()} + + c := &publisherGRPCClient{ + connPool: connPool, + disableDeadlines: disableDeadlines, + publisherClient: pubsubpb.NewPublisherClient(connPool), + CallOptions: &client.CallOptions, + iamPolicyClient: iampb.NewIAMPolicyClient(connPool), + } + c.setGoogleClientInfo() + + client.internalClient = c - return c, nil + return &client, nil } // Connection returns a connection to the API service. // -// Deprecated. -func (c *PublisherClient) Connection() *grpc.ClientConn { +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *publisherGRPCClient) Connection() *grpc.ClientConn { return c.connPool.Conn() } -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. 
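// Illustrative sketch, not part of the vendored diff: the IAM helpers the client now
// exposes directly (GetIamPolicy/SetIamPolicy/TestIamPermissions). The resource name and
// permission strings are placeholders.
package main

import (
	"context"
	"log"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	iampb "google.golang.org/genproto/googleapis/iam/v1"
)

func main() {
	ctx := context.Background()
	c, err := pubsub.NewPublisherClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Ask which of the requested permissions the caller holds on the topic. A missing
	// resource yields an empty permission set rather than a NOT_FOUND error.
	resp, err := c.TestIamPermissions(ctx, &iampb.TestIamPermissionsRequest{
		Resource:    "projects/my-project/topics/my-topic",
		Permissions: []string{"pubsub.topics.publish", "pubsub.topics.get"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("granted: %v", resp.GetPermissions())
}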
-func (c *PublisherClient) Close() error { - return c.connPool.Close() -} - -// SetGoogleClientInfo sets the name and version of the application in +// setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) { +func (c *publisherGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } -// CreateTopic creates the given topic with the given name. See the -// -// resource name rules (at https://cloud.google.com/pubsub/docs/admin#resource_names). -func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) { +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *publisherGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *publisherGRPCClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateTopic[0:len(c.CallOptions.CreateTopic):len(c.CallOptions.CreateTopic)], opts...) + opts = append((*c.CallOptions).CreateTopic[0:len((*c.CallOptions).CreateTopic):len((*c.CallOptions).CreateTopic)], opts...) var resp *pubsubpb.Topic err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -241,12 +434,16 @@ func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, return resp, nil } -// UpdateTopic updates an existing topic. Note that certain properties of a -// topic are not modifiable. -func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { +func (c *publisherGRPCClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic.name", url.QueryEscape(req.GetTopic().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateTopic[0:len(c.CallOptions.UpdateTopic):len(c.CallOptions.UpdateTopic)], opts...) + opts = append((*c.CallOptions).UpdateTopic[0:len((*c.CallOptions).UpdateTopic):len((*c.CallOptions).UpdateTopic)], opts...) var resp *pubsubpb.Topic err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -259,12 +456,16 @@ func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateT return resp, nil } -// Publish adds one or more messages to the topic. Returns NOT_FOUND if the topic -// does not exist. 
-func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) { +func (c *publisherGRPCClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...) + opts = append((*c.CallOptions).Publish[0:len((*c.CallOptions).Publish):len((*c.CallOptions).Publish)], opts...) var resp *pubsubpb.PublishResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -277,11 +478,16 @@ func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequ return resp, nil } -// GetTopic gets the configuration of a topic. -func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { +func (c *publisherGRPCClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetTopic[0:len(c.CallOptions.GetTopic):len(c.CallOptions.GetTopic)], opts...) + opts = append((*c.CallOptions).GetTopic[0:len((*c.CallOptions).GetTopic):len((*c.CallOptions).GetTopic)], opts...) var resp *pubsubpb.Topic err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -294,19 +500,21 @@ func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRe return resp, nil } -// ListTopics lists matching topics. -func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator { +func (c *publisherGRPCClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListTopics[0:len(c.CallOptions.ListTopics):len(c.CallOptions.ListTopics)], opts...) + opts = append((*c.CallOptions).ListTopics[0:len((*c.CallOptions).ListTopics):len((*c.CallOptions).ListTopics)], opts...) 
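// Illustrative sketch, not part of the vendored diff: the unary methods above apply a 60s
// default timeout when the caller's context carries no deadline (unless opted out via the
// GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE flag checked at construction). A
// caller-supplied deadline, as sketched here, takes precedence over that default. The
// topic name is a placeholder.
package main

import (
	"context"
	"log"
	"time"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

func main() {
	c, err := pubsub.NewPublisherClient(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// With a 5s caller deadline, ctx.Deadline() is set, so the generated wrapper
	// skips its own context.WithTimeout.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if _, err := c.GetTopic(ctx, &pubsubpb.GetTopicRequest{
		Topic: "projects/my-project/topics/my-topic",
	}); err != nil {
		log.Fatal(err)
	}
}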
it := &TopicIterator{} req = proto.Clone(req).(*pubsubpb.ListTopicsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) { - var resp *pubsubpb.ListTopicsResponse - req.PageToken = pageToken + resp := &pubsubpb.ListTopicsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -319,7 +527,7 @@ func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopi } it.Response = resp - return resp.Topics, resp.NextPageToken, nil + return resp.GetTopics(), resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) @@ -329,25 +537,29 @@ func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopi it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + return it } -// ListTopicSubscriptions lists the names of the subscriptions on this topic. -func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator { +func (c *publisherGRPCClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListTopicSubscriptions[0:len(c.CallOptions.ListTopicSubscriptions):len(c.CallOptions.ListTopicSubscriptions)], opts...) + opts = append((*c.CallOptions).ListTopicSubscriptions[0:len((*c.CallOptions).ListTopicSubscriptions):len((*c.CallOptions).ListTopicSubscriptions)], opts...) it := &StringIterator{} req = proto.Clone(req).(*pubsubpb.ListTopicSubscriptionsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { - var resp *pubsubpb.ListTopicSubscriptionsResponse - req.PageToken = pageToken + resp := &pubsubpb.ListTopicSubscriptionsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -360,7 +572,7 @@ func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsu } it.Response = resp - return resp.Subscriptions, resp.NextPageToken, nil + return resp.GetSubscriptions(), resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) @@ -370,30 +582,29 @@ func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsu it.items = append(it.items, items...) 
return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + return it } -// ListTopicSnapshots lists the names of the snapshots on this topic. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow -// you to manage message acknowledgments in bulk. That is, you can set the -// acknowledgment state of messages in an existing subscription to the state -// captured by a snapshot. -func (c *PublisherClient) ListTopicSnapshots(ctx context.Context, req *pubsubpb.ListTopicSnapshotsRequest, opts ...gax.CallOption) *StringIterator { +func (c *publisherGRPCClient) ListTopicSnapshots(ctx context.Context, req *pubsubpb.ListTopicSnapshotsRequest, opts ...gax.CallOption) *StringIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListTopicSnapshots[0:len(c.CallOptions.ListTopicSnapshots):len(c.CallOptions.ListTopicSnapshots)], opts...) + opts = append((*c.CallOptions).ListTopicSnapshots[0:len((*c.CallOptions).ListTopicSnapshots):len((*c.CallOptions).ListTopicSnapshots)], opts...) it := &StringIterator{} req = proto.Clone(req).(*pubsubpb.ListTopicSnapshotsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { - var resp *pubsubpb.ListTopicSnapshotsResponse - req.PageToken = pageToken + resp := &pubsubpb.ListTopicSnapshotsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -406,7 +617,7 @@ func (c *PublisherClient) ListTopicSnapshots(ctx context.Context, req *pubsubpb. } it.Response = resp - return resp.Snapshots, resp.NextPageToken, nil + return resp.GetSnapshots(), resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) @@ -416,21 +627,24 @@ func (c *PublisherClient) ListTopicSnapshots(ctx context.Context, req *pubsubpb. it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + return it } -// DeleteTopic deletes the topic with the given name. Returns NOT_FOUND if the topic -// does not exist. After a topic is deleted, a new topic may be created with -// the same name; this is an entirely new topic with none of the old -// configuration or subscriptions. Existing subscriptions to this topic are -// not deleted, but their topic field is set to _deleted-topic_. 
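// Illustrative sketch, not part of the vendored diff: consuming the TopicIterator whose
// paging internals appear above. iterator.Done comes from google.golang.org/api/iterator;
// the project name is a placeholder.
package main

import (
	"context"
	"log"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	c, err := pubsub.NewPublisherClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	it := c.ListTopics(ctx, &pubsubpb.ListTopicsRequest{Project: "projects/my-project"})
	for {
		topic, err := it.Next()
		if err == iterator.Done {
			break // all pages consumed
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Println(topic.GetName())
	}
}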
-func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error { +func (c *publisherGRPCClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...) + opts = append((*c.CallOptions).DeleteTopic[0:len((*c.CallOptions).DeleteTopic):len((*c.CallOptions).DeleteTopic)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.publisherClient.DeleteTopic(ctx, req, settings.GRPC...) @@ -439,6 +653,79 @@ func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteT return err } +func (c *publisherGRPCClient) DetachSubscription(ctx context.Context, req *pubsubpb.DetachSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.DetachSubscriptionResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).DetachSubscription[0:len((*c.CallOptions).DetachSubscription):len((*c.CallOptions).DetachSubscription)], opts...) + var resp *pubsubpb.DetachSubscriptionResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.DetachSubscription(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *publisherGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *publisherGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *publisherGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + // StringIterator manages a stream of string. type StringIterator struct { items []string diff --git a/vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go similarity index 89% rename from vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go rename to vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go index 1a4a03011c2d..84fd669008c9 100644 --- a/vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/pubsub/v1/pubsub.proto -package pubsub +package pubsubpb import ( context "context" @@ -44,6 +44,122 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// Possible states for a subscription. +type Subscription_State int32 + +const ( + // Default value. This value is unused. + Subscription_STATE_UNSPECIFIED Subscription_State = 0 + // The subscription can actively receive messages + Subscription_ACTIVE Subscription_State = 1 + // The subscription cannot receive messages because of an error with the + // resource to which it pushes messages. See the more detailed error state + // in the corresponding configuration. + Subscription_RESOURCE_ERROR Subscription_State = 2 +) + +// Enum value maps for Subscription_State. +var ( + Subscription_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "RESOURCE_ERROR", + } + Subscription_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "RESOURCE_ERROR": 2, + } +) + +func (x Subscription_State) Enum() *Subscription_State { + p := new(Subscription_State) + *p = x + return p +} + +func (x Subscription_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Subscription_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[0].Descriptor() +} + +func (Subscription_State) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[0] +} + +func (x Subscription_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Subscription_State.Descriptor instead. 
+func (Subscription_State) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{17, 0} +} + +// Possible states for a BigQuery subscription. +type BigQueryConfig_State int32 + +const ( + // Default value. This value is unused. + BigQueryConfig_STATE_UNSPECIFIED BigQueryConfig_State = 0 + // The subscription can actively send messages to BigQuery + BigQueryConfig_ACTIVE BigQueryConfig_State = 1 + // Cannot write to the BigQuery table because of permission denied errors. + BigQueryConfig_PERMISSION_DENIED BigQueryConfig_State = 2 + // Cannot write to the BigQuery table because it does not exist. + BigQueryConfig_NOT_FOUND BigQueryConfig_State = 3 + // Cannot write to the BigQuery table due to a schema mismatch. + BigQueryConfig_SCHEMA_MISMATCH BigQueryConfig_State = 4 +) + +// Enum value maps for BigQueryConfig_State. +var ( + BigQueryConfig_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "PERMISSION_DENIED", + 3: "NOT_FOUND", + 4: "SCHEMA_MISMATCH", + } + BigQueryConfig_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "PERMISSION_DENIED": 2, + "NOT_FOUND": 3, + "SCHEMA_MISMATCH": 4, + } +) + +func (x BigQueryConfig_State) Enum() *BigQueryConfig_State { + p := new(BigQueryConfig_State) + *p = x + return p +} + +func (x BigQueryConfig_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (BigQueryConfig_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[1].Descriptor() +} + +func (BigQueryConfig_State) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[1] +} + +func (x BigQueryConfig_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use BigQueryConfig_State.Descriptor instead. +func (BigQueryConfig_State) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22, 0} +} + // A policy constraining the storage of messages published to the topic. type MessageStoragePolicy struct { state protoimpl.MessageState @@ -1154,9 +1270,15 @@ type Subscription struct { // field will be `_deleted-topic_` if the topic has been deleted. Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` // If push delivery is used with this subscription, this field is - // used to configure it. An empty `pushConfig` signifies that the subscriber - // will pull and ack messages using API methods. + // used to configure it. Either `pushConfig` or `bigQueryConfig` can be set, + // but not both. If both are empty, then the subscriber will pull and ack + // messages using API methods. PushConfig *PushConfig `protobuf:"bytes,4,opt,name=push_config,json=pushConfig,proto3" json:"push_config,omitempty"` + // If delivery to BigQuery is used with this subscription, this field is + // used to configure it. Either `pushConfig` or `bigQueryConfig` can be set, + // but not both. If both are empty, then the subscriber will pull and ack + // messages using API methods. + BigqueryConfig *BigQueryConfig `protobuf:"bytes,18,opt,name=bigquery_config,json=bigqueryConfig,proto3" json:"bigquery_config,omitempty"` // The approximate amount of time (on a best-effort basis) Pub/Sub waits for // the subscriber to acknowledge receipt before resending the message. 
In the // interval after the message is delivered and before it is acknowledged, it @@ -1255,6 +1377,9 @@ type Subscription struct { // the `message_retention_duration` field in `Topic`. This field is set only // in responses from the server; it is ignored if it is set in any requests. TopicMessageRetentionDuration *durationpb.Duration `protobuf:"bytes,17,opt,name=topic_message_retention_duration,json=topicMessageRetentionDuration,proto3" json:"topic_message_retention_duration,omitempty"` + // Output only. An output-only field indicating whether or not the subscription can receive + // messages. + State Subscription_State `protobuf:"varint,19,opt,name=state,proto3,enum=google.pubsub.v1.Subscription_State" json:"state,omitempty"` } func (x *Subscription) Reset() { @@ -1310,6 +1435,13 @@ func (x *Subscription) GetPushConfig() *PushConfig { return nil } +func (x *Subscription) GetBigqueryConfig() *BigQueryConfig { + if x != nil { + return x.BigqueryConfig + } + return nil +} + func (x *Subscription) GetAckDeadlineSeconds() int32 { if x != nil { return x.AckDeadlineSeconds @@ -1394,6 +1526,13 @@ func (x *Subscription) GetTopicMessageRetentionDuration() *durationpb.Duration { return nil } +func (x *Subscription) GetState() Subscription_State { + if x != nil { + return x.State + } + return Subscription_STATE_UNSPECIFIED +} + // A policy that specifies how Cloud Pub/Sub retries message delivery. // // Retry delay will be exponential based on provided minimum and maximum @@ -1639,6 +1778,7 @@ type PushConfig struct { // authenticated push. // // Types that are assignable to AuthenticationMethod: + // // *PushConfig_OidcToken_ AuthenticationMethod isPushConfig_AuthenticationMethod `protobuf_oneof:"authentication_method"` } @@ -1715,6 +1855,102 @@ type PushConfig_OidcToken_ struct { func (*PushConfig_OidcToken_) isPushConfig_AuthenticationMethod() {} +// Configuration for a BigQuery subscription. +type BigQueryConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the table to which to write data, of the form + // {projectId}:{datasetId}.{tableId} + Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` + // When true, use the topic's schema as the columns to write to in BigQuery, + // if it exists. + UseTopicSchema bool `protobuf:"varint,2,opt,name=use_topic_schema,json=useTopicSchema,proto3" json:"use_topic_schema,omitempty"` + // When true, write the subscription name, message_id, publish_time, + // attributes, and ordering_key to additional columns in the table. The + // subscription name, message_id, and publish_time fields are put in their own + // columns while all other message properties (other than data) are written to + // a JSON object in the attributes column. + WriteMetadata bool `protobuf:"varint,3,opt,name=write_metadata,json=writeMetadata,proto3" json:"write_metadata,omitempty"` + // When true and use_topic_schema is true, any fields that are a part of the + // topic schema that are not part of the BigQuery table schema are dropped + // when writing to BigQuery. Otherwise, the schemas must be kept in sync and + // any messages with extra fields are not written and remain in the + // subscription's backlog. + DropUnknownFields bool `protobuf:"varint,4,opt,name=drop_unknown_fields,json=dropUnknownFields,proto3" json:"drop_unknown_fields,omitempty"` + // Output only. An output-only field that indicates whether or not the subscription can + // receive messages. 
+ State BigQueryConfig_State `protobuf:"varint,5,opt,name=state,proto3,enum=google.pubsub.v1.BigQueryConfig_State" json:"state,omitempty"` +} + +func (x *BigQueryConfig) Reset() { + *x = BigQueryConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BigQueryConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BigQueryConfig) ProtoMessage() {} + +func (x *BigQueryConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BigQueryConfig.ProtoReflect.Descriptor instead. +func (*BigQueryConfig) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22} +} + +func (x *BigQueryConfig) GetTable() string { + if x != nil { + return x.Table + } + return "" +} + +func (x *BigQueryConfig) GetUseTopicSchema() bool { + if x != nil { + return x.UseTopicSchema + } + return false +} + +func (x *BigQueryConfig) GetWriteMetadata() bool { + if x != nil { + return x.WriteMetadata + } + return false +} + +func (x *BigQueryConfig) GetDropUnknownFields() bool { + if x != nil { + return x.DropUnknownFields + } + return false +} + +func (x *BigQueryConfig) GetState() BigQueryConfig_State { + if x != nil { + return x.State + } + return BigQueryConfig_STATE_UNSPECIFIED +} + // A message and its corresponding acknowledgment ID. type ReceivedMessage struct { state protoimpl.MessageState @@ -1747,7 +1983,7 @@ type ReceivedMessage struct { func (x *ReceivedMessage) Reset() { *x = ReceivedMessage{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1760,7 +1996,7 @@ func (x *ReceivedMessage) String() string { func (*ReceivedMessage) ProtoMessage() {} func (x *ReceivedMessage) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1773,7 +2009,7 @@ func (x *ReceivedMessage) ProtoReflect() protoreflect.Message { // Deprecated: Use ReceivedMessage.ProtoReflect.Descriptor instead. 
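// Illustrative sketch, not part of the vendored diff: wiring up the new BigQueryConfig on
// a subscription. It assumes the companion SubscriberClient from this same apiv1 package
// (whose diff is not in this hunk); project, topic, subscription, and table names are
// placeholders.
package main

import (
	"context"
	"log"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

func main() {
	ctx := context.Background()
	c, err := pubsub.NewSubscriberClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Either PushConfig or BigqueryConfig may be set, not both; with BigqueryConfig
	// set, Pub/Sub writes messages directly to the BigQuery table.
	sub, err := c.CreateSubscription(ctx, &pubsubpb.Subscription{
		Name:  "projects/my-project/subscriptions/my-bq-sub",
		Topic: "projects/my-project/topics/my-topic",
		BigqueryConfig: &pubsubpb.BigQueryConfig{
			Table:          "my-project:my_dataset.my_table",
			UseTopicSchema: true,
			WriteMetadata:  true,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// The output-only state reports e.g. PERMISSION_DENIED or SCHEMA_MISMATCH when
	// delivery to BigQuery is failing.
	log.Printf("subscription %s bigquery state: %s", sub.GetName(), sub.GetBigqueryConfig().GetState())
}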
func (*ReceivedMessage) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{23} } func (x *ReceivedMessage) GetAckId() string { @@ -1811,7 +2047,7 @@ type GetSubscriptionRequest struct { func (x *GetSubscriptionRequest) Reset() { *x = GetSubscriptionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1824,7 +2060,7 @@ func (x *GetSubscriptionRequest) String() string { func (*GetSubscriptionRequest) ProtoMessage() {} func (x *GetSubscriptionRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1837,7 +2073,7 @@ func (x *GetSubscriptionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSubscriptionRequest.ProtoReflect.Descriptor instead. func (*GetSubscriptionRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{23} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24} } func (x *GetSubscriptionRequest) GetSubscription() string { @@ -1863,7 +2099,7 @@ type UpdateSubscriptionRequest struct { func (x *UpdateSubscriptionRequest) Reset() { *x = UpdateSubscriptionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1876,7 +2112,7 @@ func (x *UpdateSubscriptionRequest) String() string { func (*UpdateSubscriptionRequest) ProtoMessage() {} func (x *UpdateSubscriptionRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1889,7 +2125,7 @@ func (x *UpdateSubscriptionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateSubscriptionRequest.ProtoReflect.Descriptor instead. 
func (*UpdateSubscriptionRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{25} } func (x *UpdateSubscriptionRequest) GetSubscription() *Subscription { @@ -1926,7 +2162,7 @@ type ListSubscriptionsRequest struct { func (x *ListSubscriptionsRequest) Reset() { *x = ListSubscriptionsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1939,7 +2175,7 @@ func (x *ListSubscriptionsRequest) String() string { func (*ListSubscriptionsRequest) ProtoMessage() {} func (x *ListSubscriptionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1952,7 +2188,7 @@ func (x *ListSubscriptionsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSubscriptionsRequest.ProtoReflect.Descriptor instead. func (*ListSubscriptionsRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{25} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26} } func (x *ListSubscriptionsRequest) GetProject() string { @@ -1993,7 +2229,7 @@ type ListSubscriptionsResponse struct { func (x *ListSubscriptionsResponse) Reset() { *x = ListSubscriptionsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2006,7 +2242,7 @@ func (x *ListSubscriptionsResponse) String() string { func (*ListSubscriptionsResponse) ProtoMessage() {} func (x *ListSubscriptionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2019,7 +2255,7 @@ func (x *ListSubscriptionsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSubscriptionsResponse.ProtoReflect.Descriptor instead. 
func (*ListSubscriptionsResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{27} } func (x *ListSubscriptionsResponse) GetSubscriptions() []*Subscription { @@ -2050,7 +2286,7 @@ type DeleteSubscriptionRequest struct { func (x *DeleteSubscriptionRequest) Reset() { *x = DeleteSubscriptionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2063,7 +2299,7 @@ func (x *DeleteSubscriptionRequest) String() string { func (*DeleteSubscriptionRequest) ProtoMessage() {} func (x *DeleteSubscriptionRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2076,7 +2312,7 @@ func (x *DeleteSubscriptionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteSubscriptionRequest.ProtoReflect.Descriptor instead. func (*DeleteSubscriptionRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{27} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{28} } func (x *DeleteSubscriptionRequest) GetSubscription() string { @@ -2107,7 +2343,7 @@ type ModifyPushConfigRequest struct { func (x *ModifyPushConfigRequest) Reset() { *x = ModifyPushConfigRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2120,7 +2356,7 @@ func (x *ModifyPushConfigRequest) String() string { func (*ModifyPushConfigRequest) ProtoMessage() {} func (x *ModifyPushConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2133,7 +2369,7 @@ func (x *ModifyPushConfigRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ModifyPushConfigRequest.ProtoReflect.Descriptor instead. 
func (*ModifyPushConfigRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{28} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{29} } func (x *ModifyPushConfigRequest) GetSubscription() string { @@ -2178,7 +2414,7 @@ type PullRequest struct { func (x *PullRequest) Reset() { *x = PullRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2191,7 +2427,7 @@ func (x *PullRequest) String() string { func (*PullRequest) ProtoMessage() {} func (x *PullRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2204,7 +2440,7 @@ func (x *PullRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PullRequest.ProtoReflect.Descriptor instead. func (*PullRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{29} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{30} } func (x *PullRequest) GetSubscription() string { @@ -2245,7 +2481,7 @@ type PullResponse struct { func (x *PullResponse) Reset() { *x = PullResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2258,7 +2494,7 @@ func (x *PullResponse) String() string { func (*PullResponse) ProtoMessage() {} func (x *PullResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2271,7 +2507,7 @@ func (x *PullResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PullResponse.ProtoReflect.Descriptor instead. func (*PullResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{30} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{31} } func (x *PullResponse) GetReceivedMessages() []*ReceivedMessage { @@ -2306,7 +2542,7 @@ type ModifyAckDeadlineRequest struct { func (x *ModifyAckDeadlineRequest) Reset() { *x = ModifyAckDeadlineRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2319,7 +2555,7 @@ func (x *ModifyAckDeadlineRequest) String() string { func (*ModifyAckDeadlineRequest) ProtoMessage() {} func (x *ModifyAckDeadlineRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2332,7 +2568,7 @@ func (x *ModifyAckDeadlineRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ModifyAckDeadlineRequest.ProtoReflect.Descriptor instead. 
func (*ModifyAckDeadlineRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{31} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{32} } func (x *ModifyAckDeadlineRequest) GetSubscription() string { @@ -2374,7 +2610,7 @@ type AcknowledgeRequest struct { func (x *AcknowledgeRequest) Reset() { *x = AcknowledgeRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2387,7 +2623,7 @@ func (x *AcknowledgeRequest) String() string { func (*AcknowledgeRequest) ProtoMessage() {} func (x *AcknowledgeRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2400,7 +2636,7 @@ func (x *AcknowledgeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AcknowledgeRequest.ProtoReflect.Descriptor instead. func (*AcknowledgeRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{32} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{33} } func (x *AcknowledgeRequest) GetSubscription() string { @@ -2491,7 +2727,7 @@ type StreamingPullRequest struct { func (x *StreamingPullRequest) Reset() { *x = StreamingPullRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2504,7 +2740,7 @@ func (x *StreamingPullRequest) String() string { func (*StreamingPullRequest) ProtoMessage() {} func (x *StreamingPullRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2517,7 +2753,7 @@ func (x *StreamingPullRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamingPullRequest.ProtoReflect.Descriptor instead. 
func (*StreamingPullRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{33} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{34} } func (x *StreamingPullRequest) GetSubscription() string { @@ -2598,7 +2834,7 @@ type StreamingPullResponse struct { func (x *StreamingPullResponse) Reset() { *x = StreamingPullResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2611,7 +2847,7 @@ func (x *StreamingPullResponse) String() string { func (*StreamingPullResponse) ProtoMessage() {} func (x *StreamingPullResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2624,7 +2860,7 @@ func (x *StreamingPullResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamingPullResponse.ProtoReflect.Descriptor instead. func (*StreamingPullResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{34} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{35} } func (x *StreamingPullResponse) GetReceivedMessages() []*ReceivedMessage { @@ -2670,12 +2906,14 @@ type CreateSnapshotRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Required. The subscription whose backlog the snapshot retains. // Specifically, the created snapshot is guaranteed to retain: - // (a) The existing backlog on the subscription. More precisely, this is - // defined as the messages in the subscription's backlog that are - // unacknowledged upon the successful completion of the - // `CreateSnapshot` request; as well as: - // (b) Any messages published to the subscription's topic following the - // successful completion of the CreateSnapshot request. + // + // (a) The existing backlog on the subscription. More precisely, this is + // defined as the messages in the subscription's backlog that are + // unacknowledged upon the successful completion of the + // `CreateSnapshot` request; as well as: + // (b) Any messages published to the subscription's topic following the + // successful completion of the CreateSnapshot request. + // // Format is `projects/{project}/subscriptions/{sub}`. 
Subscription string `protobuf:"bytes,2,opt,name=subscription,proto3" json:"subscription,omitempty"` // See Creating and @@ -2686,7 +2924,7 @@ type CreateSnapshotRequest struct { func (x *CreateSnapshotRequest) Reset() { *x = CreateSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2699,7 +2937,7 @@ func (x *CreateSnapshotRequest) String() string { func (*CreateSnapshotRequest) ProtoMessage() {} func (x *CreateSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2712,7 +2950,7 @@ func (x *CreateSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateSnapshotRequest.ProtoReflect.Descriptor instead. func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{35} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{36} } func (x *CreateSnapshotRequest) GetName() string { @@ -2752,7 +2990,7 @@ type UpdateSnapshotRequest struct { func (x *UpdateSnapshotRequest) Reset() { *x = UpdateSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2765,7 +3003,7 @@ func (x *UpdateSnapshotRequest) String() string { func (*UpdateSnapshotRequest) ProtoMessage() {} func (x *UpdateSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2778,7 +3016,7 @@ func (x *UpdateSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateSnapshotRequest.ProtoReflect.Descriptor instead. func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{36} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37} } func (x *UpdateSnapshotRequest) GetSnapshot() *Snapshot { @@ -2828,7 +3066,7 @@ type Snapshot struct { func (x *Snapshot) Reset() { *x = Snapshot{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2841,7 +3079,7 @@ func (x *Snapshot) String() string { func (*Snapshot) ProtoMessage() {} func (x *Snapshot) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2854,7 +3092,7 @@ func (x *Snapshot) ProtoReflect() protoreflect.Message { // Deprecated: Use Snapshot.ProtoReflect.Descriptor instead. 
func (*Snapshot) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{38} } func (x *Snapshot) GetName() string { @@ -2899,7 +3137,7 @@ type GetSnapshotRequest struct { func (x *GetSnapshotRequest) Reset() { *x = GetSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2912,7 +3150,7 @@ func (x *GetSnapshotRequest) String() string { func (*GetSnapshotRequest) ProtoMessage() {} func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2925,7 +3163,7 @@ func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSnapshotRequest.ProtoReflect.Descriptor instead. func (*GetSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{38} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39} } func (x *GetSnapshotRequest) GetSnapshot() string { @@ -2955,7 +3193,7 @@ type ListSnapshotsRequest struct { func (x *ListSnapshotsRequest) Reset() { *x = ListSnapshotsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2968,7 +3206,7 @@ func (x *ListSnapshotsRequest) String() string { func (*ListSnapshotsRequest) ProtoMessage() {} func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2981,7 +3219,7 @@ func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSnapshotsRequest.ProtoReflect.Descriptor instead. func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{40} } func (x *ListSnapshotsRequest) GetProject() string { @@ -3021,7 +3259,7 @@ type ListSnapshotsResponse struct { func (x *ListSnapshotsResponse) Reset() { *x = ListSnapshotsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3034,7 +3272,7 @@ func (x *ListSnapshotsResponse) String() string { func (*ListSnapshotsResponse) ProtoMessage() {} func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3047,7 +3285,7 @@ func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSnapshotsResponse.ProtoReflect.Descriptor instead. 
func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{40} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{41} } func (x *ListSnapshotsResponse) GetSnapshots() []*Snapshot { @@ -3078,7 +3316,7 @@ type DeleteSnapshotRequest struct { func (x *DeleteSnapshotRequest) Reset() { *x = DeleteSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3091,7 +3329,7 @@ func (x *DeleteSnapshotRequest) String() string { func (*DeleteSnapshotRequest) ProtoMessage() {} func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3104,7 +3342,7 @@ func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteSnapshotRequest.ProtoReflect.Descriptor instead. func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{41} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{42} } func (x *DeleteSnapshotRequest) GetSnapshot() string { @@ -3123,6 +3361,7 @@ type SeekRequest struct { // Required. The subscription to affect. Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` // Types that are assignable to Target: + // // *SeekRequest_Time // *SeekRequest_Snapshot Target isSeekRequest_Target `protobuf_oneof:"target"` @@ -3131,7 +3370,7 @@ type SeekRequest struct { func (x *SeekRequest) Reset() { *x = SeekRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3144,7 +3383,7 @@ func (x *SeekRequest) String() string { func (*SeekRequest) ProtoMessage() {} func (x *SeekRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3157,7 +3396,7 @@ func (x *SeekRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SeekRequest.ProtoReflect.Descriptor instead. 
func (*SeekRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{42} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{43} } func (x *SeekRequest) GetSubscription() string { @@ -3228,7 +3467,7 @@ type SeekResponse struct { func (x *SeekResponse) Reset() { *x = SeekResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3241,7 +3480,7 @@ func (x *SeekResponse) String() string { func (*SeekResponse) ProtoMessage() {} func (x *SeekResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3254,7 +3493,7 @@ func (x *SeekResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SeekResponse.ProtoReflect.Descriptor instead. func (*SeekResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{43} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{44} } // Contains information needed for generating an @@ -3283,7 +3522,7 @@ type PushConfig_OidcToken struct { func (x *PushConfig_OidcToken) Reset() { *x = PushConfig_OidcToken{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3296,7 +3535,7 @@ func (x *PushConfig_OidcToken) String() string { func (*PushConfig_OidcToken) ProtoMessage() {} func (x *PushConfig_OidcToken) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3345,7 +3584,7 @@ type StreamingPullResponse_AcknowledgeConfirmation struct { func (x *StreamingPullResponse_AcknowledgeConfirmation) Reset() { *x = StreamingPullResponse_AcknowledgeConfirmation{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[49] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3358,7 +3597,7 @@ func (x *StreamingPullResponse_AcknowledgeConfirmation) String() string { func (*StreamingPullResponse_AcknowledgeConfirmation) ProtoMessage() {} func (x *StreamingPullResponse_AcknowledgeConfirmation) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[49] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3371,7 +3610,7 @@ func (x *StreamingPullResponse_AcknowledgeConfirmation) ProtoReflect() protorefl // Deprecated: Use StreamingPullResponse_AcknowledgeConfirmation.ProtoReflect.Descriptor instead. 
func (*StreamingPullResponse_AcknowledgeConfirmation) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{34, 0} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{35, 0} } func (x *StreamingPullResponse_AcknowledgeConfirmation) GetAckIds() []string { @@ -3412,7 +3651,7 @@ type StreamingPullResponse_ModifyAckDeadlineConfirmation struct { func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) Reset() { *x = StreamingPullResponse_ModifyAckDeadlineConfirmation{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[50] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3425,7 +3664,7 @@ func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) String() string { func (*StreamingPullResponse_ModifyAckDeadlineConfirmation) ProtoMessage() {} func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[50] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3438,7 +3677,7 @@ func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) ProtoReflect() pro // Deprecated: Use StreamingPullResponse_ModifyAckDeadlineConfirmation.ProtoReflect.Descriptor instead. func (*StreamingPullResponse_ModifyAckDeadlineConfirmation) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{34, 1} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{35, 1} } func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) GetAckIds() []string { @@ -3470,7 +3709,7 @@ type StreamingPullResponse_SubscriptionProperties struct { func (x *StreamingPullResponse_SubscriptionProperties) Reset() { *x = StreamingPullResponse_SubscriptionProperties{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3483,7 +3722,7 @@ func (x *StreamingPullResponse_SubscriptionProperties) String() string { func (*StreamingPullResponse_SubscriptionProperties) ProtoMessage() {} func (x *StreamingPullResponse_SubscriptionProperties) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3496,7 +3735,7 @@ func (x *StreamingPullResponse_SubscriptionProperties) ProtoReflect() protorefle // Deprecated: Use StreamingPullResponse_SubscriptionProperties.ProtoReflect.Descriptor instead. 
func (*StreamingPullResponse_SubscriptionProperties) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{34, 2} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{35, 2} } func (x *StreamingPullResponse_SubscriptionProperties) GetExactlyOnceDeliveryEnabled() bool { @@ -3696,7 +3935,7 @@ var file_google_pubsub_v1_pubsub_proto_rawDesc = []byte{ 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb4, 0x08, 0x0a, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x80, 0x0a, 0x0a, 0x0c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, @@ -3707,108 +3946,142 @@ var file_google_pubsub_v1_pubsub_proto_rawDesc = []byte{ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x30, 0x0a, 0x14, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, - 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, - 0x61, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, - 0x64, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x5f, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x13, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x41, 0x63, 0x6b, 0x65, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x57, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x12, 0x49, 0x0a, 0x0f, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x62, 0x69, 0x67, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x61, + 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x61, 0x63, 0x6b, 0x44, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x32, 0x0a, + 0x15, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x5f, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x72, 0x65, + 0x74, 0x61, 0x69, 0x6e, 0x41, 0x63, 0x6b, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x73, 0x12, 0x57, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x36, + 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x72, + 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x4f, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x10, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x50, 0x0a, 0x12, 0x64, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x10, 0x64, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x12, + 0x3f, 0x0a, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x6c, + 0x79, 0x5f, 0x6f, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x18, + 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x78, 0x61, + 0x63, 0x74, 0x6c, 0x79, 0x4f, 0x6e, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, + 0x12, 0x67, 0x0a, 0x20, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x42, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x4f, 0x0a, 0x11, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x10, 0x65, 0x78, 0x70, 0x69, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x16, 0x0a, 0x06, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x12, 0x50, 0x0a, 0x12, 0x64, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x74, - 0x74, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, - 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x52, 0x10, 0x64, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, - 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, - 0x63, 0x68, 0x65, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x65, 0x74, 0x61, - 0x63, 0x68, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x65, - 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, 0x5f, 0x6f, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x69, - 0x76, 0x65, 0x72, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x45, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, 0x4f, 0x6e, 0x63, 0x65, 0x44, 0x65, 0x6c, - 0x69, 0x76, 0x65, 0x72, 0x79, 0x12, 0x67, 0x0a, 0x20, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x1d, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x39, - 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x58, 0xea, 0x41, 0x55, 0x0a, 0x22, - 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x7d, 0x22, 0x95, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x62, - 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1d, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3e, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, + 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, + 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x10, 0x02, 0x3a, 0x58, 0xea, 0x41, 0x55, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x22, + 0x95, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x42, 0x0a, 0x0f, 
0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, + 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x42, 0x61, 0x63, 0x6b, + 0x6f, 0x66, 0x66, 0x12, 0x42, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x62, + 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, - 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x12, 0x42, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, - 0x69, 0x6d, 0x75, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x22, 0x72, 0x0a, 0x10, 0x44, - 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x2a, 0x0a, 0x11, 0x64, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x74, - 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x61, 0x64, - 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x32, 0x0a, 0x15, 0x6d, - 0x61, 0x78, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, - 0x6d, 0x70, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x44, - 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x22, - 0x3f, 0x0a, 0x10, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x74, 0x74, 0x6c, - 0x22, 0xfd, 0x02, 0x0a, 0x0a, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x23, 0x0a, 0x0d, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x75, 0x73, 0x68, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x4c, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x12, 0x47, 0x0a, 0x0a, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x48, 0x00, - 0x52, 0x09, 0x6f, 0x69, 0x64, 0x63, 0x54, 
0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0x5b, 0x0a, 0x09, 0x4f, - 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1a, 0x0a, 0x08, - 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x17, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x68, 0x65, - 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x22, 0x72, 0x0a, 0x10, 0x44, 0x65, 0x61, 0x64, 0x4c, + 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x64, + 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, + 0x65, 0x72, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x64, + 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x44, 0x65, 0x6c, 0x69, 0x76, + 0x65, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x22, 0x3f, 0x0a, 0x10, 0x45, + 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x22, 0xfd, 0x02, 0x0a, + 0x0a, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, + 0x75, 0x73, 0x68, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x70, 0x75, 0x73, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x4c, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x47, + 0x0a, 0x0a, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x4f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x48, 
0x00, 0x52, 0x09, 0x6f, 0x69, + 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0x5b, 0x0a, 0x09, 0x4f, 0x69, 0x64, 0x63, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, + 0x65, 0x6e, 0x63, 0x65, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x42, 0x17, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0xd1, 0x02, 0x0a, + 0x0e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0e, 0x75, 0x73, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, + 0x25, 0x0a, 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x75, + 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x65, 0x0a, 0x05, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, + 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, + 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x53, + 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x04, 0x22, 0x8e, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6d, @@ 
-4377,164 +4650,171 @@ func file_google_pubsub_v1_pubsub_proto_rawDescGZIP() []byte { return file_google_pubsub_v1_pubsub_proto_rawDescData } -var file_google_pubsub_v1_pubsub_proto_msgTypes = make([]protoimpl.MessageInfo, 54) +var file_google_pubsub_v1_pubsub_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_pubsub_v1_pubsub_proto_msgTypes = make([]protoimpl.MessageInfo, 55) var file_google_pubsub_v1_pubsub_proto_goTypes = []interface{}{ - (*MessageStoragePolicy)(nil), // 0: google.pubsub.v1.MessageStoragePolicy - (*SchemaSettings)(nil), // 1: google.pubsub.v1.SchemaSettings - (*Topic)(nil), // 2: google.pubsub.v1.Topic - (*PubsubMessage)(nil), // 3: google.pubsub.v1.PubsubMessage - (*GetTopicRequest)(nil), // 4: google.pubsub.v1.GetTopicRequest - (*UpdateTopicRequest)(nil), // 5: google.pubsub.v1.UpdateTopicRequest - (*PublishRequest)(nil), // 6: google.pubsub.v1.PublishRequest - (*PublishResponse)(nil), // 7: google.pubsub.v1.PublishResponse - (*ListTopicsRequest)(nil), // 8: google.pubsub.v1.ListTopicsRequest - (*ListTopicsResponse)(nil), // 9: google.pubsub.v1.ListTopicsResponse - (*ListTopicSubscriptionsRequest)(nil), // 10: google.pubsub.v1.ListTopicSubscriptionsRequest - (*ListTopicSubscriptionsResponse)(nil), // 11: google.pubsub.v1.ListTopicSubscriptionsResponse - (*ListTopicSnapshotsRequest)(nil), // 12: google.pubsub.v1.ListTopicSnapshotsRequest - (*ListTopicSnapshotsResponse)(nil), // 13: google.pubsub.v1.ListTopicSnapshotsResponse - (*DeleteTopicRequest)(nil), // 14: google.pubsub.v1.DeleteTopicRequest - (*DetachSubscriptionRequest)(nil), // 15: google.pubsub.v1.DetachSubscriptionRequest - (*DetachSubscriptionResponse)(nil), // 16: google.pubsub.v1.DetachSubscriptionResponse - (*Subscription)(nil), // 17: google.pubsub.v1.Subscription - (*RetryPolicy)(nil), // 18: google.pubsub.v1.RetryPolicy - (*DeadLetterPolicy)(nil), // 19: google.pubsub.v1.DeadLetterPolicy - (*ExpirationPolicy)(nil), // 20: google.pubsub.v1.ExpirationPolicy - (*PushConfig)(nil), // 21: google.pubsub.v1.PushConfig - (*ReceivedMessage)(nil), // 22: google.pubsub.v1.ReceivedMessage - (*GetSubscriptionRequest)(nil), // 23: google.pubsub.v1.GetSubscriptionRequest - (*UpdateSubscriptionRequest)(nil), // 24: google.pubsub.v1.UpdateSubscriptionRequest - (*ListSubscriptionsRequest)(nil), // 25: google.pubsub.v1.ListSubscriptionsRequest - (*ListSubscriptionsResponse)(nil), // 26: google.pubsub.v1.ListSubscriptionsResponse - (*DeleteSubscriptionRequest)(nil), // 27: google.pubsub.v1.DeleteSubscriptionRequest - (*ModifyPushConfigRequest)(nil), // 28: google.pubsub.v1.ModifyPushConfigRequest - (*PullRequest)(nil), // 29: google.pubsub.v1.PullRequest - (*PullResponse)(nil), // 30: google.pubsub.v1.PullResponse - (*ModifyAckDeadlineRequest)(nil), // 31: google.pubsub.v1.ModifyAckDeadlineRequest - (*AcknowledgeRequest)(nil), // 32: google.pubsub.v1.AcknowledgeRequest - (*StreamingPullRequest)(nil), // 33: google.pubsub.v1.StreamingPullRequest - (*StreamingPullResponse)(nil), // 34: google.pubsub.v1.StreamingPullResponse - (*CreateSnapshotRequest)(nil), // 35: google.pubsub.v1.CreateSnapshotRequest - (*UpdateSnapshotRequest)(nil), // 36: google.pubsub.v1.UpdateSnapshotRequest - (*Snapshot)(nil), // 37: google.pubsub.v1.Snapshot - (*GetSnapshotRequest)(nil), // 38: google.pubsub.v1.GetSnapshotRequest - (*ListSnapshotsRequest)(nil), // 39: google.pubsub.v1.ListSnapshotsRequest - (*ListSnapshotsResponse)(nil), // 40: google.pubsub.v1.ListSnapshotsResponse - (*DeleteSnapshotRequest)(nil), // 41: 
google.pubsub.v1.DeleteSnapshotRequest - (*SeekRequest)(nil), // 42: google.pubsub.v1.SeekRequest - (*SeekResponse)(nil), // 43: google.pubsub.v1.SeekResponse - nil, // 44: google.pubsub.v1.Topic.LabelsEntry - nil, // 45: google.pubsub.v1.PubsubMessage.AttributesEntry - nil, // 46: google.pubsub.v1.Subscription.LabelsEntry - (*PushConfig_OidcToken)(nil), // 47: google.pubsub.v1.PushConfig.OidcToken - nil, // 48: google.pubsub.v1.PushConfig.AttributesEntry - (*StreamingPullResponse_AcknowledgeConfirmation)(nil), // 49: google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation - (*StreamingPullResponse_ModifyAckDeadlineConfirmation)(nil), // 50: google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation - (*StreamingPullResponse_SubscriptionProperties)(nil), // 51: google.pubsub.v1.StreamingPullResponse.SubscriptionProperties - nil, // 52: google.pubsub.v1.CreateSnapshotRequest.LabelsEntry - nil, // 53: google.pubsub.v1.Snapshot.LabelsEntry - (Encoding)(0), // 54: google.pubsub.v1.Encoding - (*durationpb.Duration)(nil), // 55: google.protobuf.Duration - (*timestamppb.Timestamp)(nil), // 56: google.protobuf.Timestamp - (*fieldmaskpb.FieldMask)(nil), // 57: google.protobuf.FieldMask - (*emptypb.Empty)(nil), // 58: google.protobuf.Empty + (Subscription_State)(0), // 0: google.pubsub.v1.Subscription.State + (BigQueryConfig_State)(0), // 1: google.pubsub.v1.BigQueryConfig.State + (*MessageStoragePolicy)(nil), // 2: google.pubsub.v1.MessageStoragePolicy + (*SchemaSettings)(nil), // 3: google.pubsub.v1.SchemaSettings + (*Topic)(nil), // 4: google.pubsub.v1.Topic + (*PubsubMessage)(nil), // 5: google.pubsub.v1.PubsubMessage + (*GetTopicRequest)(nil), // 6: google.pubsub.v1.GetTopicRequest + (*UpdateTopicRequest)(nil), // 7: google.pubsub.v1.UpdateTopicRequest + (*PublishRequest)(nil), // 8: google.pubsub.v1.PublishRequest + (*PublishResponse)(nil), // 9: google.pubsub.v1.PublishResponse + (*ListTopicsRequest)(nil), // 10: google.pubsub.v1.ListTopicsRequest + (*ListTopicsResponse)(nil), // 11: google.pubsub.v1.ListTopicsResponse + (*ListTopicSubscriptionsRequest)(nil), // 12: google.pubsub.v1.ListTopicSubscriptionsRequest + (*ListTopicSubscriptionsResponse)(nil), // 13: google.pubsub.v1.ListTopicSubscriptionsResponse + (*ListTopicSnapshotsRequest)(nil), // 14: google.pubsub.v1.ListTopicSnapshotsRequest + (*ListTopicSnapshotsResponse)(nil), // 15: google.pubsub.v1.ListTopicSnapshotsResponse + (*DeleteTopicRequest)(nil), // 16: google.pubsub.v1.DeleteTopicRequest + (*DetachSubscriptionRequest)(nil), // 17: google.pubsub.v1.DetachSubscriptionRequest + (*DetachSubscriptionResponse)(nil), // 18: google.pubsub.v1.DetachSubscriptionResponse + (*Subscription)(nil), // 19: google.pubsub.v1.Subscription + (*RetryPolicy)(nil), // 20: google.pubsub.v1.RetryPolicy + (*DeadLetterPolicy)(nil), // 21: google.pubsub.v1.DeadLetterPolicy + (*ExpirationPolicy)(nil), // 22: google.pubsub.v1.ExpirationPolicy + (*PushConfig)(nil), // 23: google.pubsub.v1.PushConfig + (*BigQueryConfig)(nil), // 24: google.pubsub.v1.BigQueryConfig + (*ReceivedMessage)(nil), // 25: google.pubsub.v1.ReceivedMessage + (*GetSubscriptionRequest)(nil), // 26: google.pubsub.v1.GetSubscriptionRequest + (*UpdateSubscriptionRequest)(nil), // 27: google.pubsub.v1.UpdateSubscriptionRequest + (*ListSubscriptionsRequest)(nil), // 28: google.pubsub.v1.ListSubscriptionsRequest + (*ListSubscriptionsResponse)(nil), // 29: google.pubsub.v1.ListSubscriptionsResponse + (*DeleteSubscriptionRequest)(nil), // 30: 
google.pubsub.v1.DeleteSubscriptionRequest + (*ModifyPushConfigRequest)(nil), // 31: google.pubsub.v1.ModifyPushConfigRequest + (*PullRequest)(nil), // 32: google.pubsub.v1.PullRequest + (*PullResponse)(nil), // 33: google.pubsub.v1.PullResponse + (*ModifyAckDeadlineRequest)(nil), // 34: google.pubsub.v1.ModifyAckDeadlineRequest + (*AcknowledgeRequest)(nil), // 35: google.pubsub.v1.AcknowledgeRequest + (*StreamingPullRequest)(nil), // 36: google.pubsub.v1.StreamingPullRequest + (*StreamingPullResponse)(nil), // 37: google.pubsub.v1.StreamingPullResponse + (*CreateSnapshotRequest)(nil), // 38: google.pubsub.v1.CreateSnapshotRequest + (*UpdateSnapshotRequest)(nil), // 39: google.pubsub.v1.UpdateSnapshotRequest + (*Snapshot)(nil), // 40: google.pubsub.v1.Snapshot + (*GetSnapshotRequest)(nil), // 41: google.pubsub.v1.GetSnapshotRequest + (*ListSnapshotsRequest)(nil), // 42: google.pubsub.v1.ListSnapshotsRequest + (*ListSnapshotsResponse)(nil), // 43: google.pubsub.v1.ListSnapshotsResponse + (*DeleteSnapshotRequest)(nil), // 44: google.pubsub.v1.DeleteSnapshotRequest + (*SeekRequest)(nil), // 45: google.pubsub.v1.SeekRequest + (*SeekResponse)(nil), // 46: google.pubsub.v1.SeekResponse + nil, // 47: google.pubsub.v1.Topic.LabelsEntry + nil, // 48: google.pubsub.v1.PubsubMessage.AttributesEntry + nil, // 49: google.pubsub.v1.Subscription.LabelsEntry + (*PushConfig_OidcToken)(nil), // 50: google.pubsub.v1.PushConfig.OidcToken + nil, // 51: google.pubsub.v1.PushConfig.AttributesEntry + (*StreamingPullResponse_AcknowledgeConfirmation)(nil), // 52: google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation + (*StreamingPullResponse_ModifyAckDeadlineConfirmation)(nil), // 53: google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation + (*StreamingPullResponse_SubscriptionProperties)(nil), // 54: google.pubsub.v1.StreamingPullResponse.SubscriptionProperties + nil, // 55: google.pubsub.v1.CreateSnapshotRequest.LabelsEntry + nil, // 56: google.pubsub.v1.Snapshot.LabelsEntry + (Encoding)(0), // 57: google.pubsub.v1.Encoding + (*durationpb.Duration)(nil), // 58: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 59: google.protobuf.Timestamp + (*fieldmaskpb.FieldMask)(nil), // 60: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 61: google.protobuf.Empty } var file_google_pubsub_v1_pubsub_proto_depIdxs = []int32{ - 54, // 0: google.pubsub.v1.SchemaSettings.encoding:type_name -> google.pubsub.v1.Encoding - 44, // 1: google.pubsub.v1.Topic.labels:type_name -> google.pubsub.v1.Topic.LabelsEntry - 0, // 2: google.pubsub.v1.Topic.message_storage_policy:type_name -> google.pubsub.v1.MessageStoragePolicy - 1, // 3: google.pubsub.v1.Topic.schema_settings:type_name -> google.pubsub.v1.SchemaSettings - 55, // 4: google.pubsub.v1.Topic.message_retention_duration:type_name -> google.protobuf.Duration - 45, // 5: google.pubsub.v1.PubsubMessage.attributes:type_name -> google.pubsub.v1.PubsubMessage.AttributesEntry - 56, // 6: google.pubsub.v1.PubsubMessage.publish_time:type_name -> google.protobuf.Timestamp - 2, // 7: google.pubsub.v1.UpdateTopicRequest.topic:type_name -> google.pubsub.v1.Topic - 57, // 8: google.pubsub.v1.UpdateTopicRequest.update_mask:type_name -> google.protobuf.FieldMask - 3, // 9: google.pubsub.v1.PublishRequest.messages:type_name -> google.pubsub.v1.PubsubMessage - 2, // 10: google.pubsub.v1.ListTopicsResponse.topics:type_name -> google.pubsub.v1.Topic - 21, // 11: google.pubsub.v1.Subscription.push_config:type_name -> google.pubsub.v1.PushConfig - 55, // 12: 
google.pubsub.v1.Subscription.message_retention_duration:type_name -> google.protobuf.Duration - 46, // 13: google.pubsub.v1.Subscription.labels:type_name -> google.pubsub.v1.Subscription.LabelsEntry - 20, // 14: google.pubsub.v1.Subscription.expiration_policy:type_name -> google.pubsub.v1.ExpirationPolicy - 19, // 15: google.pubsub.v1.Subscription.dead_letter_policy:type_name -> google.pubsub.v1.DeadLetterPolicy - 18, // 16: google.pubsub.v1.Subscription.retry_policy:type_name -> google.pubsub.v1.RetryPolicy - 55, // 17: google.pubsub.v1.Subscription.topic_message_retention_duration:type_name -> google.protobuf.Duration - 55, // 18: google.pubsub.v1.RetryPolicy.minimum_backoff:type_name -> google.protobuf.Duration - 55, // 19: google.pubsub.v1.RetryPolicy.maximum_backoff:type_name -> google.protobuf.Duration - 55, // 20: google.pubsub.v1.ExpirationPolicy.ttl:type_name -> google.protobuf.Duration - 48, // 21: google.pubsub.v1.PushConfig.attributes:type_name -> google.pubsub.v1.PushConfig.AttributesEntry - 47, // 22: google.pubsub.v1.PushConfig.oidc_token:type_name -> google.pubsub.v1.PushConfig.OidcToken - 3, // 23: google.pubsub.v1.ReceivedMessage.message:type_name -> google.pubsub.v1.PubsubMessage - 17, // 24: google.pubsub.v1.UpdateSubscriptionRequest.subscription:type_name -> google.pubsub.v1.Subscription - 57, // 25: google.pubsub.v1.UpdateSubscriptionRequest.update_mask:type_name -> google.protobuf.FieldMask - 17, // 26: google.pubsub.v1.ListSubscriptionsResponse.subscriptions:type_name -> google.pubsub.v1.Subscription - 21, // 27: google.pubsub.v1.ModifyPushConfigRequest.push_config:type_name -> google.pubsub.v1.PushConfig - 22, // 28: google.pubsub.v1.PullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage - 22, // 29: google.pubsub.v1.StreamingPullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage - 49, // 30: google.pubsub.v1.StreamingPullResponse.acknowledge_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation - 50, // 31: google.pubsub.v1.StreamingPullResponse.modify_ack_deadline_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation - 51, // 32: google.pubsub.v1.StreamingPullResponse.subscription_properties:type_name -> google.pubsub.v1.StreamingPullResponse.SubscriptionProperties - 52, // 33: google.pubsub.v1.CreateSnapshotRequest.labels:type_name -> google.pubsub.v1.CreateSnapshotRequest.LabelsEntry - 37, // 34: google.pubsub.v1.UpdateSnapshotRequest.snapshot:type_name -> google.pubsub.v1.Snapshot - 57, // 35: google.pubsub.v1.UpdateSnapshotRequest.update_mask:type_name -> google.protobuf.FieldMask - 56, // 36: google.pubsub.v1.Snapshot.expire_time:type_name -> google.protobuf.Timestamp - 53, // 37: google.pubsub.v1.Snapshot.labels:type_name -> google.pubsub.v1.Snapshot.LabelsEntry - 37, // 38: google.pubsub.v1.ListSnapshotsResponse.snapshots:type_name -> google.pubsub.v1.Snapshot - 56, // 39: google.pubsub.v1.SeekRequest.time:type_name -> google.protobuf.Timestamp - 2, // 40: google.pubsub.v1.Publisher.CreateTopic:input_type -> google.pubsub.v1.Topic - 5, // 41: google.pubsub.v1.Publisher.UpdateTopic:input_type -> google.pubsub.v1.UpdateTopicRequest - 6, // 42: google.pubsub.v1.Publisher.Publish:input_type -> google.pubsub.v1.PublishRequest - 4, // 43: google.pubsub.v1.Publisher.GetTopic:input_type -> google.pubsub.v1.GetTopicRequest - 8, // 44: google.pubsub.v1.Publisher.ListTopics:input_type -> google.pubsub.v1.ListTopicsRequest - 10, // 45: 
google.pubsub.v1.Publisher.ListTopicSubscriptions:input_type -> google.pubsub.v1.ListTopicSubscriptionsRequest - 12, // 46: google.pubsub.v1.Publisher.ListTopicSnapshots:input_type -> google.pubsub.v1.ListTopicSnapshotsRequest - 14, // 47: google.pubsub.v1.Publisher.DeleteTopic:input_type -> google.pubsub.v1.DeleteTopicRequest - 15, // 48: google.pubsub.v1.Publisher.DetachSubscription:input_type -> google.pubsub.v1.DetachSubscriptionRequest - 17, // 49: google.pubsub.v1.Subscriber.CreateSubscription:input_type -> google.pubsub.v1.Subscription - 23, // 50: google.pubsub.v1.Subscriber.GetSubscription:input_type -> google.pubsub.v1.GetSubscriptionRequest - 24, // 51: google.pubsub.v1.Subscriber.UpdateSubscription:input_type -> google.pubsub.v1.UpdateSubscriptionRequest - 25, // 52: google.pubsub.v1.Subscriber.ListSubscriptions:input_type -> google.pubsub.v1.ListSubscriptionsRequest - 27, // 53: google.pubsub.v1.Subscriber.DeleteSubscription:input_type -> google.pubsub.v1.DeleteSubscriptionRequest - 31, // 54: google.pubsub.v1.Subscriber.ModifyAckDeadline:input_type -> google.pubsub.v1.ModifyAckDeadlineRequest - 32, // 55: google.pubsub.v1.Subscriber.Acknowledge:input_type -> google.pubsub.v1.AcknowledgeRequest - 29, // 56: google.pubsub.v1.Subscriber.Pull:input_type -> google.pubsub.v1.PullRequest - 33, // 57: google.pubsub.v1.Subscriber.StreamingPull:input_type -> google.pubsub.v1.StreamingPullRequest - 28, // 58: google.pubsub.v1.Subscriber.ModifyPushConfig:input_type -> google.pubsub.v1.ModifyPushConfigRequest - 38, // 59: google.pubsub.v1.Subscriber.GetSnapshot:input_type -> google.pubsub.v1.GetSnapshotRequest - 39, // 60: google.pubsub.v1.Subscriber.ListSnapshots:input_type -> google.pubsub.v1.ListSnapshotsRequest - 35, // 61: google.pubsub.v1.Subscriber.CreateSnapshot:input_type -> google.pubsub.v1.CreateSnapshotRequest - 36, // 62: google.pubsub.v1.Subscriber.UpdateSnapshot:input_type -> google.pubsub.v1.UpdateSnapshotRequest - 41, // 63: google.pubsub.v1.Subscriber.DeleteSnapshot:input_type -> google.pubsub.v1.DeleteSnapshotRequest - 42, // 64: google.pubsub.v1.Subscriber.Seek:input_type -> google.pubsub.v1.SeekRequest - 2, // 65: google.pubsub.v1.Publisher.CreateTopic:output_type -> google.pubsub.v1.Topic - 2, // 66: google.pubsub.v1.Publisher.UpdateTopic:output_type -> google.pubsub.v1.Topic - 7, // 67: google.pubsub.v1.Publisher.Publish:output_type -> google.pubsub.v1.PublishResponse - 2, // 68: google.pubsub.v1.Publisher.GetTopic:output_type -> google.pubsub.v1.Topic - 9, // 69: google.pubsub.v1.Publisher.ListTopics:output_type -> google.pubsub.v1.ListTopicsResponse - 11, // 70: google.pubsub.v1.Publisher.ListTopicSubscriptions:output_type -> google.pubsub.v1.ListTopicSubscriptionsResponse - 13, // 71: google.pubsub.v1.Publisher.ListTopicSnapshots:output_type -> google.pubsub.v1.ListTopicSnapshotsResponse - 58, // 72: google.pubsub.v1.Publisher.DeleteTopic:output_type -> google.protobuf.Empty - 16, // 73: google.pubsub.v1.Publisher.DetachSubscription:output_type -> google.pubsub.v1.DetachSubscriptionResponse - 17, // 74: google.pubsub.v1.Subscriber.CreateSubscription:output_type -> google.pubsub.v1.Subscription - 17, // 75: google.pubsub.v1.Subscriber.GetSubscription:output_type -> google.pubsub.v1.Subscription - 17, // 76: google.pubsub.v1.Subscriber.UpdateSubscription:output_type -> google.pubsub.v1.Subscription - 26, // 77: google.pubsub.v1.Subscriber.ListSubscriptions:output_type -> google.pubsub.v1.ListSubscriptionsResponse - 58, // 78: 
google.pubsub.v1.Subscriber.DeleteSubscription:output_type -> google.protobuf.Empty - 58, // 79: google.pubsub.v1.Subscriber.ModifyAckDeadline:output_type -> google.protobuf.Empty - 58, // 80: google.pubsub.v1.Subscriber.Acknowledge:output_type -> google.protobuf.Empty - 30, // 81: google.pubsub.v1.Subscriber.Pull:output_type -> google.pubsub.v1.PullResponse - 34, // 82: google.pubsub.v1.Subscriber.StreamingPull:output_type -> google.pubsub.v1.StreamingPullResponse - 58, // 83: google.pubsub.v1.Subscriber.ModifyPushConfig:output_type -> google.protobuf.Empty - 37, // 84: google.pubsub.v1.Subscriber.GetSnapshot:output_type -> google.pubsub.v1.Snapshot - 40, // 85: google.pubsub.v1.Subscriber.ListSnapshots:output_type -> google.pubsub.v1.ListSnapshotsResponse - 37, // 86: google.pubsub.v1.Subscriber.CreateSnapshot:output_type -> google.pubsub.v1.Snapshot - 37, // 87: google.pubsub.v1.Subscriber.UpdateSnapshot:output_type -> google.pubsub.v1.Snapshot - 58, // 88: google.pubsub.v1.Subscriber.DeleteSnapshot:output_type -> google.protobuf.Empty - 43, // 89: google.pubsub.v1.Subscriber.Seek:output_type -> google.pubsub.v1.SeekResponse - 65, // [65:90] is the sub-list for method output_type - 40, // [40:65] is the sub-list for method input_type - 40, // [40:40] is the sub-list for extension type_name - 40, // [40:40] is the sub-list for extension extendee - 0, // [0:40] is the sub-list for field type_name + 57, // 0: google.pubsub.v1.SchemaSettings.encoding:type_name -> google.pubsub.v1.Encoding + 47, // 1: google.pubsub.v1.Topic.labels:type_name -> google.pubsub.v1.Topic.LabelsEntry + 2, // 2: google.pubsub.v1.Topic.message_storage_policy:type_name -> google.pubsub.v1.MessageStoragePolicy + 3, // 3: google.pubsub.v1.Topic.schema_settings:type_name -> google.pubsub.v1.SchemaSettings + 58, // 4: google.pubsub.v1.Topic.message_retention_duration:type_name -> google.protobuf.Duration + 48, // 5: google.pubsub.v1.PubsubMessage.attributes:type_name -> google.pubsub.v1.PubsubMessage.AttributesEntry + 59, // 6: google.pubsub.v1.PubsubMessage.publish_time:type_name -> google.protobuf.Timestamp + 4, // 7: google.pubsub.v1.UpdateTopicRequest.topic:type_name -> google.pubsub.v1.Topic + 60, // 8: google.pubsub.v1.UpdateTopicRequest.update_mask:type_name -> google.protobuf.FieldMask + 5, // 9: google.pubsub.v1.PublishRequest.messages:type_name -> google.pubsub.v1.PubsubMessage + 4, // 10: google.pubsub.v1.ListTopicsResponse.topics:type_name -> google.pubsub.v1.Topic + 23, // 11: google.pubsub.v1.Subscription.push_config:type_name -> google.pubsub.v1.PushConfig + 24, // 12: google.pubsub.v1.Subscription.bigquery_config:type_name -> google.pubsub.v1.BigQueryConfig + 58, // 13: google.pubsub.v1.Subscription.message_retention_duration:type_name -> google.protobuf.Duration + 49, // 14: google.pubsub.v1.Subscription.labels:type_name -> google.pubsub.v1.Subscription.LabelsEntry + 22, // 15: google.pubsub.v1.Subscription.expiration_policy:type_name -> google.pubsub.v1.ExpirationPolicy + 21, // 16: google.pubsub.v1.Subscription.dead_letter_policy:type_name -> google.pubsub.v1.DeadLetterPolicy + 20, // 17: google.pubsub.v1.Subscription.retry_policy:type_name -> google.pubsub.v1.RetryPolicy + 58, // 18: google.pubsub.v1.Subscription.topic_message_retention_duration:type_name -> google.protobuf.Duration + 0, // 19: google.pubsub.v1.Subscription.state:type_name -> google.pubsub.v1.Subscription.State + 58, // 20: google.pubsub.v1.RetryPolicy.minimum_backoff:type_name -> google.protobuf.Duration + 58, // 21: 
google.pubsub.v1.RetryPolicy.maximum_backoff:type_name -> google.protobuf.Duration + 58, // 22: google.pubsub.v1.ExpirationPolicy.ttl:type_name -> google.protobuf.Duration + 51, // 23: google.pubsub.v1.PushConfig.attributes:type_name -> google.pubsub.v1.PushConfig.AttributesEntry + 50, // 24: google.pubsub.v1.PushConfig.oidc_token:type_name -> google.pubsub.v1.PushConfig.OidcToken + 1, // 25: google.pubsub.v1.BigQueryConfig.state:type_name -> google.pubsub.v1.BigQueryConfig.State + 5, // 26: google.pubsub.v1.ReceivedMessage.message:type_name -> google.pubsub.v1.PubsubMessage + 19, // 27: google.pubsub.v1.UpdateSubscriptionRequest.subscription:type_name -> google.pubsub.v1.Subscription + 60, // 28: google.pubsub.v1.UpdateSubscriptionRequest.update_mask:type_name -> google.protobuf.FieldMask + 19, // 29: google.pubsub.v1.ListSubscriptionsResponse.subscriptions:type_name -> google.pubsub.v1.Subscription + 23, // 30: google.pubsub.v1.ModifyPushConfigRequest.push_config:type_name -> google.pubsub.v1.PushConfig + 25, // 31: google.pubsub.v1.PullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage + 25, // 32: google.pubsub.v1.StreamingPullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage + 52, // 33: google.pubsub.v1.StreamingPullResponse.acknowledge_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation + 53, // 34: google.pubsub.v1.StreamingPullResponse.modify_ack_deadline_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation + 54, // 35: google.pubsub.v1.StreamingPullResponse.subscription_properties:type_name -> google.pubsub.v1.StreamingPullResponse.SubscriptionProperties + 55, // 36: google.pubsub.v1.CreateSnapshotRequest.labels:type_name -> google.pubsub.v1.CreateSnapshotRequest.LabelsEntry + 40, // 37: google.pubsub.v1.UpdateSnapshotRequest.snapshot:type_name -> google.pubsub.v1.Snapshot + 60, // 38: google.pubsub.v1.UpdateSnapshotRequest.update_mask:type_name -> google.protobuf.FieldMask + 59, // 39: google.pubsub.v1.Snapshot.expire_time:type_name -> google.protobuf.Timestamp + 56, // 40: google.pubsub.v1.Snapshot.labels:type_name -> google.pubsub.v1.Snapshot.LabelsEntry + 40, // 41: google.pubsub.v1.ListSnapshotsResponse.snapshots:type_name -> google.pubsub.v1.Snapshot + 59, // 42: google.pubsub.v1.SeekRequest.time:type_name -> google.protobuf.Timestamp + 4, // 43: google.pubsub.v1.Publisher.CreateTopic:input_type -> google.pubsub.v1.Topic + 7, // 44: google.pubsub.v1.Publisher.UpdateTopic:input_type -> google.pubsub.v1.UpdateTopicRequest + 8, // 45: google.pubsub.v1.Publisher.Publish:input_type -> google.pubsub.v1.PublishRequest + 6, // 46: google.pubsub.v1.Publisher.GetTopic:input_type -> google.pubsub.v1.GetTopicRequest + 10, // 47: google.pubsub.v1.Publisher.ListTopics:input_type -> google.pubsub.v1.ListTopicsRequest + 12, // 48: google.pubsub.v1.Publisher.ListTopicSubscriptions:input_type -> google.pubsub.v1.ListTopicSubscriptionsRequest + 14, // 49: google.pubsub.v1.Publisher.ListTopicSnapshots:input_type -> google.pubsub.v1.ListTopicSnapshotsRequest + 16, // 50: google.pubsub.v1.Publisher.DeleteTopic:input_type -> google.pubsub.v1.DeleteTopicRequest + 17, // 51: google.pubsub.v1.Publisher.DetachSubscription:input_type -> google.pubsub.v1.DetachSubscriptionRequest + 19, // 52: google.pubsub.v1.Subscriber.CreateSubscription:input_type -> google.pubsub.v1.Subscription + 26, // 53: google.pubsub.v1.Subscriber.GetSubscription:input_type -> 
google.pubsub.v1.GetSubscriptionRequest + 27, // 54: google.pubsub.v1.Subscriber.UpdateSubscription:input_type -> google.pubsub.v1.UpdateSubscriptionRequest + 28, // 55: google.pubsub.v1.Subscriber.ListSubscriptions:input_type -> google.pubsub.v1.ListSubscriptionsRequest + 30, // 56: google.pubsub.v1.Subscriber.DeleteSubscription:input_type -> google.pubsub.v1.DeleteSubscriptionRequest + 34, // 57: google.pubsub.v1.Subscriber.ModifyAckDeadline:input_type -> google.pubsub.v1.ModifyAckDeadlineRequest + 35, // 58: google.pubsub.v1.Subscriber.Acknowledge:input_type -> google.pubsub.v1.AcknowledgeRequest + 32, // 59: google.pubsub.v1.Subscriber.Pull:input_type -> google.pubsub.v1.PullRequest + 36, // 60: google.pubsub.v1.Subscriber.StreamingPull:input_type -> google.pubsub.v1.StreamingPullRequest + 31, // 61: google.pubsub.v1.Subscriber.ModifyPushConfig:input_type -> google.pubsub.v1.ModifyPushConfigRequest + 41, // 62: google.pubsub.v1.Subscriber.GetSnapshot:input_type -> google.pubsub.v1.GetSnapshotRequest + 42, // 63: google.pubsub.v1.Subscriber.ListSnapshots:input_type -> google.pubsub.v1.ListSnapshotsRequest + 38, // 64: google.pubsub.v1.Subscriber.CreateSnapshot:input_type -> google.pubsub.v1.CreateSnapshotRequest + 39, // 65: google.pubsub.v1.Subscriber.UpdateSnapshot:input_type -> google.pubsub.v1.UpdateSnapshotRequest + 44, // 66: google.pubsub.v1.Subscriber.DeleteSnapshot:input_type -> google.pubsub.v1.DeleteSnapshotRequest + 45, // 67: google.pubsub.v1.Subscriber.Seek:input_type -> google.pubsub.v1.SeekRequest + 4, // 68: google.pubsub.v1.Publisher.CreateTopic:output_type -> google.pubsub.v1.Topic + 4, // 69: google.pubsub.v1.Publisher.UpdateTopic:output_type -> google.pubsub.v1.Topic + 9, // 70: google.pubsub.v1.Publisher.Publish:output_type -> google.pubsub.v1.PublishResponse + 4, // 71: google.pubsub.v1.Publisher.GetTopic:output_type -> google.pubsub.v1.Topic + 11, // 72: google.pubsub.v1.Publisher.ListTopics:output_type -> google.pubsub.v1.ListTopicsResponse + 13, // 73: google.pubsub.v1.Publisher.ListTopicSubscriptions:output_type -> google.pubsub.v1.ListTopicSubscriptionsResponse + 15, // 74: google.pubsub.v1.Publisher.ListTopicSnapshots:output_type -> google.pubsub.v1.ListTopicSnapshotsResponse + 61, // 75: google.pubsub.v1.Publisher.DeleteTopic:output_type -> google.protobuf.Empty + 18, // 76: google.pubsub.v1.Publisher.DetachSubscription:output_type -> google.pubsub.v1.DetachSubscriptionResponse + 19, // 77: google.pubsub.v1.Subscriber.CreateSubscription:output_type -> google.pubsub.v1.Subscription + 19, // 78: google.pubsub.v1.Subscriber.GetSubscription:output_type -> google.pubsub.v1.Subscription + 19, // 79: google.pubsub.v1.Subscriber.UpdateSubscription:output_type -> google.pubsub.v1.Subscription + 29, // 80: google.pubsub.v1.Subscriber.ListSubscriptions:output_type -> google.pubsub.v1.ListSubscriptionsResponse + 61, // 81: google.pubsub.v1.Subscriber.DeleteSubscription:output_type -> google.protobuf.Empty + 61, // 82: google.pubsub.v1.Subscriber.ModifyAckDeadline:output_type -> google.protobuf.Empty + 61, // 83: google.pubsub.v1.Subscriber.Acknowledge:output_type -> google.protobuf.Empty + 33, // 84: google.pubsub.v1.Subscriber.Pull:output_type -> google.pubsub.v1.PullResponse + 37, // 85: google.pubsub.v1.Subscriber.StreamingPull:output_type -> google.pubsub.v1.StreamingPullResponse + 61, // 86: google.pubsub.v1.Subscriber.ModifyPushConfig:output_type -> google.protobuf.Empty + 40, // 87: google.pubsub.v1.Subscriber.GetSnapshot:output_type -> google.pubsub.v1.Snapshot 
+ 43, // 88: google.pubsub.v1.Subscriber.ListSnapshots:output_type -> google.pubsub.v1.ListSnapshotsResponse + 40, // 89: google.pubsub.v1.Subscriber.CreateSnapshot:output_type -> google.pubsub.v1.Snapshot + 40, // 90: google.pubsub.v1.Subscriber.UpdateSnapshot:output_type -> google.pubsub.v1.Snapshot + 61, // 91: google.pubsub.v1.Subscriber.DeleteSnapshot:output_type -> google.protobuf.Empty + 46, // 92: google.pubsub.v1.Subscriber.Seek:output_type -> google.pubsub.v1.SeekResponse + 68, // [68:93] is the sub-list for method output_type + 43, // [43:68] is the sub-list for method input_type + 43, // [43:43] is the sub-list for extension type_name + 43, // [43:43] is the sub-list for extension extendee + 0, // [0:43] is the sub-list for field type_name } func init() { file_google_pubsub_v1_pubsub_proto_init() } @@ -4809,7 +5089,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReceivedMessage); i { + switch v := v.(*BigQueryConfig); i { case 0: return &v.state case 1: @@ -4821,7 +5101,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSubscriptionRequest); i { + switch v := v.(*ReceivedMessage); i { case 0: return &v.state case 1: @@ -4833,7 +5113,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateSubscriptionRequest); i { + switch v := v.(*GetSubscriptionRequest); i { case 0: return &v.state case 1: @@ -4845,7 +5125,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListSubscriptionsRequest); i { + switch v := v.(*UpdateSubscriptionRequest); i { case 0: return &v.state case 1: @@ -4857,7 +5137,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListSubscriptionsResponse); i { + switch v := v.(*ListSubscriptionsRequest); i { case 0: return &v.state case 1: @@ -4869,7 +5149,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteSubscriptionRequest); i { + switch v := v.(*ListSubscriptionsResponse); i { case 0: return &v.state case 1: @@ -4881,7 +5161,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ModifyPushConfigRequest); i { + switch v := v.(*DeleteSubscriptionRequest); i { case 0: return &v.state case 1: @@ -4893,7 +5173,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PullRequest); i { + switch v := v.(*ModifyPushConfigRequest); i { case 0: return &v.state case 1: @@ -4905,7 +5185,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PullResponse); i { + switch v := v.(*PullRequest); i { case 0: return &v.state case 1: @@ -4917,7 +5197,7 @@ func 
file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ModifyAckDeadlineRequest); i { + switch v := v.(*PullResponse); i { case 0: return &v.state case 1: @@ -4929,7 +5209,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AcknowledgeRequest); i { + switch v := v.(*ModifyAckDeadlineRequest); i { case 0: return &v.state case 1: @@ -4941,7 +5221,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamingPullRequest); i { + switch v := v.(*AcknowledgeRequest); i { case 0: return &v.state case 1: @@ -4953,7 +5233,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamingPullResponse); i { + switch v := v.(*StreamingPullRequest); i { case 0: return &v.state case 1: @@ -4965,7 +5245,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateSnapshotRequest); i { + switch v := v.(*StreamingPullResponse); i { case 0: return &v.state case 1: @@ -4977,7 +5257,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateSnapshotRequest); i { + switch v := v.(*CreateSnapshotRequest); i { case 0: return &v.state case 1: @@ -4989,7 +5269,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Snapshot); i { + switch v := v.(*UpdateSnapshotRequest); i { case 0: return &v.state case 1: @@ -5001,7 +5281,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSnapshotRequest); i { + switch v := v.(*Snapshot); i { case 0: return &v.state case 1: @@ -5013,7 +5293,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListSnapshotsRequest); i { + switch v := v.(*GetSnapshotRequest); i { case 0: return &v.state case 1: @@ -5025,7 +5305,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListSnapshotsResponse); i { + switch v := v.(*ListSnapshotsRequest); i { case 0: return &v.state case 1: @@ -5037,7 +5317,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteSnapshotRequest); i { + switch v := v.(*ListSnapshotsResponse); i { case 0: return &v.state case 1: @@ -5049,7 +5329,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SeekRequest); i { + switch v := v.(*DeleteSnapshotRequest); i { case 0: return &v.state case 1: @@ -5061,6 +5341,18 @@ func 
file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SeekRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SeekResponse); i { case 0: return &v.state @@ -5072,7 +5364,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { return nil } } - file_google_pubsub_v1_pubsub_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + file_google_pubsub_v1_pubsub_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PushConfig_OidcToken); i { case 0: return &v.state @@ -5084,7 +5376,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { return nil } } - file_google_pubsub_v1_pubsub_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + file_google_pubsub_v1_pubsub_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StreamingPullResponse_AcknowledgeConfirmation); i { case 0: return &v.state @@ -5096,7 +5388,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { return nil } } - file_google_pubsub_v1_pubsub_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + file_google_pubsub_v1_pubsub_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StreamingPullResponse_ModifyAckDeadlineConfirmation); i { case 0: return &v.state @@ -5108,7 +5400,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { return nil } } - file_google_pubsub_v1_pubsub_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + file_google_pubsub_v1_pubsub_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StreamingPullResponse_SubscriptionProperties); i { case 0: return &v.state @@ -5124,7 +5416,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { file_google_pubsub_v1_pubsub_proto_msgTypes[21].OneofWrappers = []interface{}{ (*PushConfig_OidcToken_)(nil), } - file_google_pubsub_v1_pubsub_proto_msgTypes[42].OneofWrappers = []interface{}{ + file_google_pubsub_v1_pubsub_proto_msgTypes[43].OneofWrappers = []interface{}{ (*SeekRequest_Time)(nil), (*SeekRequest_Snapshot)(nil), } @@ -5133,13 +5425,14 @@ func file_google_pubsub_v1_pubsub_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_pubsub_v1_pubsub_proto_rawDesc, - NumEnums: 0, - NumMessages: 54, + NumEnums: 2, + NumMessages: 55, NumExtensions: 0, NumServices: 2, }, GoTypes: file_google_pubsub_v1_pubsub_proto_goTypes, DependencyIndexes: file_google_pubsub_v1_pubsub_proto_depIdxs, + EnumInfos: file_google_pubsub_v1_pubsub_proto_enumTypes, MessageInfos: file_google_pubsub_v1_pubsub_proto_msgTypes, }.Build() File_google_pubsub_v1_pubsub_proto = out.File diff --git a/vendor/google.golang.org/genproto/googleapis/pubsub/v1/schema.pb.go b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/schema.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/pubsub/v1/schema.pb.go rename to vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/schema.pb.go index 502989b1dfa7..814159324a5a 100644 --- a/vendor/google.golang.org/genproto/googleapis/pubsub/v1/schema.pb.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/schema.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/pubsub/v1/schema.proto -package pubsub +package pubsubpb import ( context "context" @@ -708,6 +708,7 @@ type ValidateMessageRequest struct { // Format is `projects/{project-id}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Types that are assignable to SchemaSpec: + // // *ValidateMessageRequest_Name // *ValidateMessageRequest_Schema SchemaSpec isValidateMessageRequest_SchemaSpec `protobuf_oneof:"schema_spec"` diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go new file mode 100644 index 000000000000..b905d25d4a5a --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go @@ -0,0 +1,491 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package pubsub + +import ( + "context" + "fmt" + "math" + "net/url" + + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + iampb "google.golang.org/genproto/googleapis/iam/v1" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" +) + +var newSchemaClientHook clientHook + +// SchemaCallOptions contains the retry settings for each method of SchemaClient. +type SchemaCallOptions struct { + CreateSchema []gax.CallOption + GetSchema []gax.CallOption + ListSchemas []gax.CallOption + DeleteSchema []gax.CallOption + ValidateSchema []gax.CallOption + ValidateMessage []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption +} + +func defaultSchemaGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("pubsub.googleapis.com:443"), + internaloption.WithDefaultMTLSEndpoint("pubsub.mtls.googleapis.com:443"), + internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultSchemaCallOptions() *SchemaCallOptions { + return &SchemaCallOptions{ + CreateSchema: []gax.CallOption{}, + GetSchema: []gax.CallOption{}, + ListSchemas: []gax.CallOption{}, + DeleteSchema: []gax.CallOption{}, + ValidateSchema: []gax.CallOption{}, + ValidateMessage: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + } +} + +// internalSchemaClient is an interface that defines the methods available from Cloud Pub/Sub API. 
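The hunks above also record the package move for the generated Pub/Sub protobuf types: schema.pb.go (like pubsub.pb.go before it) is renamed from google.golang.org/genproto/googleapis/pubsub/v1 (package pubsub) to cloud.google.com/go/pubsub/apiv1/pubsubpb (package pubsubpb). For downstream code that imported the old path directly, this is an import-path change only; the sketch below is illustrative, uses an arbitrary alias, and is not part of the patch.

    import (
        // Pre-bump import path for the generated Pub/Sub messages:
        //   pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
        // Location after the renames recorded in this patch:
        pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
    )

    // Type names are unchanged, so references such as pubsubpb.Topic or
    // pubsubpb.Subscription compile as before once the path is updated.
    var _ *pubsubpb.Topic

Note that the GAPIC clients added later in this patch still import the genproto path themselves, so both spellings appear within this vendor tree.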
+type internalSchemaClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateSchema(context.Context, *pubsubpb.CreateSchemaRequest, ...gax.CallOption) (*pubsubpb.Schema, error) + GetSchema(context.Context, *pubsubpb.GetSchemaRequest, ...gax.CallOption) (*pubsubpb.Schema, error) + ListSchemas(context.Context, *pubsubpb.ListSchemasRequest, ...gax.CallOption) *SchemaIterator + DeleteSchema(context.Context, *pubsubpb.DeleteSchemaRequest, ...gax.CallOption) error + ValidateSchema(context.Context, *pubsubpb.ValidateSchemaRequest, ...gax.CallOption) (*pubsubpb.ValidateSchemaResponse, error) + ValidateMessage(context.Context, *pubsubpb.ValidateMessageRequest, ...gax.CallOption) (*pubsubpb.ValidateMessageResponse, error) + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) +} + +// SchemaClient is a client for interacting with Cloud Pub/Sub API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// Service for doing schema-related operations. +type SchemaClient struct { + // The internal transport-dependent client. + internalClient internalSchemaClient + + // The call options for this service. + CallOptions *SchemaCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *SchemaClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *SchemaClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *SchemaClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateSchema creates a schema. +func (c *SchemaClient) CreateSchema(ctx context.Context, req *pubsubpb.CreateSchemaRequest, opts ...gax.CallOption) (*pubsubpb.Schema, error) { + return c.internalClient.CreateSchema(ctx, req, opts...) +} + +// GetSchema gets a schema. +func (c *SchemaClient) GetSchema(ctx context.Context, req *pubsubpb.GetSchemaRequest, opts ...gax.CallOption) (*pubsubpb.Schema, error) { + return c.internalClient.GetSchema(ctx, req, opts...) +} + +// ListSchemas lists schemas in a project. +func (c *SchemaClient) ListSchemas(ctx context.Context, req *pubsubpb.ListSchemasRequest, opts ...gax.CallOption) *SchemaIterator { + return c.internalClient.ListSchemas(ctx, req, opts...) +} + +// DeleteSchema deletes a schema. +func (c *SchemaClient) DeleteSchema(ctx context.Context, req *pubsubpb.DeleteSchemaRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteSchema(ctx, req, opts...) +} + +// ValidateSchema validates a schema. 
+func (c *SchemaClient) ValidateSchema(ctx context.Context, req *pubsubpb.ValidateSchemaRequest, opts ...gax.CallOption) (*pubsubpb.ValidateSchemaResponse, error) { + return c.internalClient.ValidateSchema(ctx, req, opts...) +} + +// ValidateMessage validates a message against a schema. +func (c *SchemaClient) ValidateMessage(ctx context.Context, req *pubsubpb.ValidateMessageRequest, opts ...gax.CallOption) (*pubsubpb.ValidateMessageResponse, error) { + return c.internalClient.ValidateMessage(ctx, req, opts...) +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *SchemaClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *SchemaClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) +} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. +func (c *SchemaClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// schemaGRPCClient is a client for interacting with Cloud Pub/Sub API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type schemaGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE + disableDeadlines bool + + // Points back to the CallOptions field of the containing SchemaClient + CallOptions **SchemaCallOptions + + // The gRPC API client. + schemaClient pubsubpb.SchemaServiceClient + + iamPolicyClient iampb.IAMPolicyClient + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewSchemaClient creates a new schema service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// Service for doing schema-related operations. +func NewSchemaClient(ctx context.Context, opts ...option.ClientOption) (*SchemaClient, error) { + clientOpts := defaultSchemaGRPCClientOptions() + if newSchemaClientHook != nil { + hookOpts, err := newSchemaClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + disableDeadlines, err := checkDisableDeadlines() + if err != nil { + return nil, err + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) 
+ if err != nil { + return nil, err + } + client := SchemaClient{CallOptions: defaultSchemaCallOptions()} + + c := &schemaGRPCClient{ + connPool: connPool, + disableDeadlines: disableDeadlines, + schemaClient: pubsubpb.NewSchemaServiceClient(connPool), + CallOptions: &client.CallOptions, + iamPolicyClient: iampb.NewIAMPolicyClient(connPool), + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *schemaGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *schemaGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *schemaGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *schemaGRPCClient) CreateSchema(ctx context.Context, req *pubsubpb.CreateSchemaRequest, opts ...gax.CallOption) (*pubsubpb.Schema, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CreateSchema[0:len((*c.CallOptions).CreateSchema):len((*c.CallOptions).CreateSchema)], opts...) + var resp *pubsubpb.Schema + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.schemaClient.CreateSchema(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *schemaGRPCClient) GetSchema(ctx context.Context, req *pubsubpb.GetSchemaRequest, opts ...gax.CallOption) (*pubsubpb.Schema, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetSchema[0:len((*c.CallOptions).GetSchema):len((*c.CallOptions).GetSchema)], opts...) + var resp *pubsubpb.Schema + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.schemaClient.GetSchema(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *schemaGRPCClient) ListSchemas(ctx context.Context, req *pubsubpb.ListSchemasRequest, opts ...gax.CallOption) *SchemaIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListSchemas[0:len((*c.CallOptions).ListSchemas):len((*c.CallOptions).ListSchemas)], opts...) 
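NewSchemaClient above wires a pooled gRPC connection, an IAM policy stub, and the default call options into the new SchemaClient. A minimal usage sketch (not part of the vendored code) follows; it imports the same genproto-based pubsubpb package that schema_client.go itself uses, and the project and schema names are placeholders.

    package main

    import (
        "context"
        "log"

        pubsub "cloud.google.com/go/pubsub/apiv1"
        pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
    )

    func main() {
        ctx := context.Background()

        // Create the schema client introduced by this bump.
        schemaClient, err := pubsub.NewSchemaClient(ctx)
        if err != nil {
            log.Fatalf("NewSchemaClient: %v", err)
        }
        defer schemaClient.Close()

        // Fetch a single schema by its resource name (placeholder).
        schema, err := schemaClient.GetSchema(ctx, &pubsubpb.GetSchemaRequest{
            Name: "projects/my-project/schemas/my-schema",
        })
        if err != nil {
            log.Fatalf("GetSchema: %v", err)
        }
        log.Printf("fetched schema %s", schema.GetName())
    }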
+ it := &SchemaIterator{} + req = proto.Clone(req).(*pubsubpb.ListSchemasRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Schema, string, error) { + resp := &pubsubpb.ListSchemasResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.schemaClient.ListSchemas(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetSchemas(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *schemaGRPCClient) DeleteSchema(ctx context.Context, req *pubsubpb.DeleteSchemaRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).DeleteSchema[0:len((*c.CallOptions).DeleteSchema):len((*c.CallOptions).DeleteSchema)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.schemaClient.DeleteSchema(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *schemaGRPCClient) ValidateSchema(ctx context.Context, req *pubsubpb.ValidateSchemaRequest, opts ...gax.CallOption) (*pubsubpb.ValidateSchemaResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ValidateSchema[0:len((*c.CallOptions).ValidateSchema):len((*c.CallOptions).ValidateSchema)], opts...) + var resp *pubsubpb.ValidateSchemaResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.schemaClient.ValidateSchema(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *schemaGRPCClient) ValidateMessage(ctx context.Context, req *pubsubpb.ValidateMessageRequest, opts ...gax.CallOption) (*pubsubpb.ValidateMessageResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ValidateMessage[0:len((*c.CallOptions).ValidateMessage):len((*c.CallOptions).ValidateMessage)], opts...) + var resp *pubsubpb.ValidateMessageResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.schemaClient.ValidateMessage(ctx, req, settings.GRPC...) + return err + }, opts...) 
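ValidateMessage, whose wrapper and gRPC implementation appear above, checks an encoded payload against either a named schema or an inline one via the SchemaSpec oneof from the schema.pb.go hunk earlier. A hedged sketch follows; it reuses ctx and schemaClient from the previous example, and the Message and Encoding field names are taken from the v1 API rather than from this patch, so treat them as assumptions.

    // Validate a JSON payload against an existing schema by name.
    // Resource names and the payload are placeholders.
    resp, err := schemaClient.ValidateMessage(ctx, &pubsubpb.ValidateMessageRequest{
        Parent: "projects/my-project",
        SchemaSpec: &pubsubpb.ValidateMessageRequest_Name{
            Name: "projects/my-project/schemas/my-schema",
        },
        Message:  []byte(`{"id": 1}`),
        Encoding: pubsubpb.Encoding_JSON,
    })
    if err != nil {
        // Both transport failures and validation failures surface as errors.
        log.Fatalf("ValidateMessage: %v", err)
    }
    _ = resp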
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *schemaGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *schemaGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *schemaGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SchemaIterator manages a stream of *pubsubpb.Schema. +type SchemaIterator struct { + items []*pubsubpb.Schema + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Schema, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SchemaIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
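ListSchemas returns the SchemaIterator declared above; its Next method, which follows immediately below, drains the buffered page and refills it through InternalFetch. A typical consumption loop might look like this sketch, again reusing ctx and schemaClient from the earlier examples and google.golang.org/api/iterator for the Done sentinel.

    // Page through every schema in a project (parent is a placeholder).
    it := schemaClient.ListSchemas(ctx, &pubsubpb.ListSchemasRequest{
        Parent: "projects/my-project",
    })
    for {
        s, err := it.Next()
        if err == iterator.Done {
            break // no more schemas
        }
        if err != nil {
            log.Fatalf("ListSchemas: %v", err)
        }
        log.Println(s.GetName())
    }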
+func (it *SchemaIterator) Next() (*pubsubpb.Schema, error) { + var item *pubsubpb.Schema + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SchemaIterator) bufLen() int { + return len(it.items) +} + +func (it *SchemaIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go index 5bbf8908059b..e5ea42364a7a 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,17 +23,21 @@ import ( "net/url" "time" - "github.com/golang/protobuf/proto" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" + iampb "google.golang.org/genproto/googleapis/iam/v1" pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" ) +var newSubscriberClientHook clientHook + // SubscriberCallOptions contains the retry settings for each method of SubscriberClient. type SubscriberCallOptions struct { CreateSubscription []gax.CallOption @@ -52,13 +56,18 @@ type SubscriberCallOptions struct { UpdateSnapshot []gax.CallOption DeleteSnapshot []gax.CallOption Seek []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption } -func defaultSubscriberClientOptions() []option.ClientOption { +func defaultSubscriberGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ - option.WithEndpoint("pubsub.googleapis.com:443"), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), - option.WithScopes(DefaultAuthScopes()...), + internaloption.WithDefaultEndpoint("pubsub.googleapis.com:443"), + internaloption.WithDefaultMTLSEndpoint("pubsub.mtls.googleapis.com:443"), + internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -260,86 +269,356 @@ func defaultSubscriberCallOptions() *SubscriberCallOptions { }) }), }, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, } } +// internalSubscriberClient is an interface that defines the methods available from Cloud Pub/Sub API. 
+type internalSubscriberClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateSubscription(context.Context, *pubsubpb.Subscription, ...gax.CallOption) (*pubsubpb.Subscription, error) + GetSubscription(context.Context, *pubsubpb.GetSubscriptionRequest, ...gax.CallOption) (*pubsubpb.Subscription, error) + UpdateSubscription(context.Context, *pubsubpb.UpdateSubscriptionRequest, ...gax.CallOption) (*pubsubpb.Subscription, error) + ListSubscriptions(context.Context, *pubsubpb.ListSubscriptionsRequest, ...gax.CallOption) *SubscriptionIterator + DeleteSubscription(context.Context, *pubsubpb.DeleteSubscriptionRequest, ...gax.CallOption) error + ModifyAckDeadline(context.Context, *pubsubpb.ModifyAckDeadlineRequest, ...gax.CallOption) error + Acknowledge(context.Context, *pubsubpb.AcknowledgeRequest, ...gax.CallOption) error + Pull(context.Context, *pubsubpb.PullRequest, ...gax.CallOption) (*pubsubpb.PullResponse, error) + StreamingPull(context.Context, ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) + ModifyPushConfig(context.Context, *pubsubpb.ModifyPushConfigRequest, ...gax.CallOption) error + GetSnapshot(context.Context, *pubsubpb.GetSnapshotRequest, ...gax.CallOption) (*pubsubpb.Snapshot, error) + ListSnapshots(context.Context, *pubsubpb.ListSnapshotsRequest, ...gax.CallOption) *SnapshotIterator + CreateSnapshot(context.Context, *pubsubpb.CreateSnapshotRequest, ...gax.CallOption) (*pubsubpb.Snapshot, error) + UpdateSnapshot(context.Context, *pubsubpb.UpdateSnapshotRequest, ...gax.CallOption) (*pubsubpb.Snapshot, error) + DeleteSnapshot(context.Context, *pubsubpb.DeleteSnapshotRequest, ...gax.CallOption) error + Seek(context.Context, *pubsubpb.SeekRequest, ...gax.CallOption) (*pubsubpb.SeekResponse, error) + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) +} + // SubscriberClient is a client for interacting with Cloud Pub/Sub API. -// // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The service that an application uses to manipulate subscriptions and to +// consume messages from a subscription via the Pull method or by +// establishing a bi-directional stream using the StreamingPull method. type SubscriberClient struct { + // The internal transport-dependent client. + internalClient internalSubscriberClient + + // The call options for this service. + CallOptions *SubscriberCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *SubscriberClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *SubscriberClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. 
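The subscriber client is rewritten on the same pattern as the schema client: SubscriberClient is now a thin wrapper that delegates to an internalSubscriberClient implementation and exposes its retry configuration through the exported CallOptions field. One practical consequence, sketched below with arbitrary backoff numbers, is that per-method retry policies can be replaced after construction using the gax-go v2 helpers (WithRetry, OnCodes, Backoff) that the generated defaults themselves rely on.

    // Assumes ctx is a context.Context; imports gax "github.com/googleapis/gax-go/v2",
    // "google.golang.org/grpc/codes", and "time".
    subClient, err := pubsub.NewSubscriberClient(ctx)
    if err != nil {
        log.Fatalf("NewSubscriberClient: %v", err)
    }
    defer subClient.Close()

    // Replace the retry policy for CreateSubscription only.
    subClient.CallOptions.CreateSubscription = []gax.CallOption{
        gax.WithRetry(func() gax.Retryer {
            return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
                Initial:    100 * time.Millisecond,
                Max:        60 * time.Second,
                Multiplier: 1.3,
            })
        }),
    }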
+func (c *SubscriberClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateSubscription creates a subscription to a given topic. See the [resource name rules] +// (https://cloud.google.com/pubsub/docs/admin#resource_names (at https://cloud.google.com/pubsub/docs/admin#resource_names)). +// If the subscription already exists, returns ALREADY_EXISTS. +// If the corresponding topic doesn’t exist, returns NOT_FOUND. +// +// If the name is not provided in the request, the server will assign a random +// name for this subscription on the same project as the topic, conforming +// to the [resource name format] +// (https://cloud.google.com/pubsub/docs/admin#resource_names (at https://cloud.google.com/pubsub/docs/admin#resource_names)). The generated +// name is populated in the returned Subscription object. Note that for REST +// API requests, you must specify a name in the request. +func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + return c.internalClient.CreateSubscription(ctx, req, opts...) +} + +// GetSubscription gets the configuration details of a subscription. +func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + return c.internalClient.GetSubscription(ctx, req, opts...) +} + +// UpdateSubscription updates an existing subscription. Note that certain properties of a +// subscription, such as its topic, are not modifiable. +func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + return c.internalClient.UpdateSubscription(ctx, req, opts...) +} + +// ListSubscriptions lists matching subscriptions. +func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator { + return c.internalClient.ListSubscriptions(ctx, req, opts...) +} + +// DeleteSubscription deletes an existing subscription. All messages retained in the subscription +// are immediately dropped. Calls to Pull after deletion will return +// NOT_FOUND. After a subscription is deleted, a new one may be created with +// the same name, but the new one has no association with the old +// subscription or its topic unless the same topic is specified. +func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteSubscription(ctx, req, opts...) +} + +// ModifyAckDeadline modifies the ack deadline for a specific message. This method is useful +// to indicate that more time is needed to process a message by the +// subscriber, or to make the message available for redelivery if the +// processing was interrupted. Note that this does not modify the +// subscription-level ackDeadlineSeconds used for subsequent messages. +func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error { + return c.internalClient.ModifyAckDeadline(ctx, req, opts...) +} + +// Acknowledge acknowledges the messages associated with the ack_ids in the +// AcknowledgeRequest. The Pub/Sub system can remove the relevant messages +// from the subscription. 
+// +// Acknowledging a message whose ack deadline has expired may succeed, +// but such a message may be redelivered later. Acknowledging a message more +// than once will not result in an error. +func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error { + return c.internalClient.Acknowledge(ctx, req, opts...) +} + +// Pull pulls messages from the server. The server may return UNAVAILABLE if +// there are too many concurrent pull requests pending for the given +// subscription. +func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) { + return c.internalClient.Pull(ctx, req, opts...) +} + +// StreamingPull establishes a stream with the server, which sends messages down to the +// client. The client streams acknowledgements and ack deadline modifications +// back to the server. The server will close the stream and return the status +// on any error. The server may close the stream with status UNAVAILABLE to +// reassign server-side resources, in which case, the client should +// re-establish the stream. Flow control can be achieved by configuring the +// underlying RPC channel. +func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) { + return c.internalClient.StreamingPull(ctx, opts...) +} + +// ModifyPushConfig modifies the PushConfig for a specified subscription. +// +// This may be used to change a push subscription to a pull one (signified by +// an empty PushConfig) or vice versa, or change the endpoint URL and other +// attributes of a push subscription. Messages will accumulate for delivery +// continuously through the call regardless of changes to the PushConfig. +func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error { + return c.internalClient.ModifyPushConfig(ctx, req, opts...) +} + +// GetSnapshot gets the configuration details of a snapshot. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow you to manage message acknowledgments in bulk. That +// is, you can set the acknowledgment state of messages in an existing +// subscription to the state captured by a snapshot. +func (c *SubscriberClient) GetSnapshot(ctx context.Context, req *pubsubpb.GetSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + return c.internalClient.GetSnapshot(ctx, req, opts...) +} + +// ListSnapshots lists the existing snapshots. Snapshots are used in Seek (at https://cloud.google.com/pubsub/docs/replay-overview) operations, which +// allow you to manage message acknowledgments in bulk. That is, you can set +// the acknowledgment state of messages in an existing subscription to the +// state captured by a snapshot. +func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator { + return c.internalClient.ListSnapshots(ctx, req, opts...) +} + +// CreateSnapshot creates a snapshot from the requested subscription. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) operations, +// which allow you to manage message acknowledgments in bulk. That is, you can +// set the acknowledgment state of messages in an existing subscription to the +// state captured by a snapshot. 
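The Pull and Acknowledge wrappers documented above are the raw building blocks for synchronous consumption (most applications use the higher-level cloud.google.com/go/pubsub package instead). A one-shot pull-then-ack sketch follows, reusing ctx and subClient from the previous example; the subscription name is a placeholder and the MaxMessages/AckIds field names follow the v1 request messages.

    sub := "projects/my-project/subscriptions/my-sub"

    // Pull up to ten messages synchronously.
    resp, err := subClient.Pull(ctx, &pubsubpb.PullRequest{
        Subscription: sub,
        MaxMessages:  10,
    })
    if err != nil {
        log.Fatalf("Pull: %v", err)
    }

    // Process the batch and collect ack IDs.
    var ackIDs []string
    for _, rm := range resp.GetReceivedMessages() {
        log.Printf("received: %s", rm.GetMessage().GetData())
        ackIDs = append(ackIDs, rm.GetAckId())
    }

    // Acknowledge everything that was processed.
    if len(ackIDs) > 0 {
        if err := subClient.Acknowledge(ctx, &pubsubpb.AcknowledgeRequest{
            Subscription: sub,
            AckIds:       ackIDs,
        }); err != nil {
            log.Fatalf("Acknowledge: %v", err)
        }
    }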
+// If the snapshot already exists, returns ALREADY_EXISTS. +// If the requested subscription doesn’t exist, returns NOT_FOUND. +// If the backlog in the subscription is too old – and the resulting snapshot +// would expire in less than 1 hour – then FAILED_PRECONDITION is returned. +// See also the Snapshot.expire_time field. If the name is not provided in +// the request, the server will assign a random +// name for this snapshot on the same project as the subscription, conforming +// to the [resource name format] +// (https://cloud.google.com/pubsub/docs/admin#resource_names (at https://cloud.google.com/pubsub/docs/admin#resource_names)). The +// generated name is populated in the returned Snapshot object. Note that for +// REST API requests, you must specify a name in the request. +func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + return c.internalClient.CreateSnapshot(ctx, req, opts...) +} + +// UpdateSnapshot updates an existing snapshot. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow +// you to manage message acknowledgments in bulk. That is, you can set the +// acknowledgment state of messages in an existing subscription to the state +// captured by a snapshot. +func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + return c.internalClient.UpdateSnapshot(ctx, req, opts...) +} + +// DeleteSnapshot removes an existing snapshot. Snapshots are used in [Seek] +// (https://cloud.google.com/pubsub/docs/replay-overview (at https://cloud.google.com/pubsub/docs/replay-overview)) operations, which +// allow you to manage message acknowledgments in bulk. That is, you can set +// the acknowledgment state of messages in an existing subscription to the +// state captured by a snapshot. +// When the snapshot is deleted, all messages retained in the snapshot +// are immediately dropped. After a snapshot is deleted, a new one may be +// created with the same name, but the new one has no association with the old +// snapshot or its subscription, unless the same subscription is specified. +func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteSnapshot(ctx, req, opts...) +} + +// Seek seeks an existing subscription to a point in time or to a given snapshot, +// whichever is provided in the request. Snapshots are used in [Seek] +// (https://cloud.google.com/pubsub/docs/replay-overview (at https://cloud.google.com/pubsub/docs/replay-overview)) operations, which +// allow you to manage message acknowledgments in bulk. That is, you can set +// the acknowledgment state of messages in an existing subscription to the +// state captured by a snapshot. Note that both the subscription and the +// snapshot must be on the same topic. +func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) { + return c.internalClient.Seek(ctx, req, opts...) +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. 
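CreateSnapshot and Seek, documented above, pair naturally: a snapshot captures a subscription's acknowledgment state and a later Seek restores it. The sketch below reuses ctx, subClient, and sub from the earlier examples; the snapshot name is a placeholder, and the Target oneof field name on SeekRequest is assumed from the v1 API (its Snapshot/Time wrapper types are the ones registered in the pubsub.pb.go OneofWrappers hunk earlier in this patch).

    // Capture the subscription's current acknowledgment state.
    snap, err := subClient.CreateSnapshot(ctx, &pubsubpb.CreateSnapshotRequest{
        Name:         "projects/my-project/snapshots/my-snap",
        Subscription: sub,
    })
    if err != nil {
        log.Fatalf("CreateSnapshot: %v", err)
    }

    // Later: rewind the subscription to that snapshot.
    if _, err := subClient.Seek(ctx, &pubsubpb.SeekRequest{
        Subscription: sub,
        Target: &pubsubpb.SeekRequest_Snapshot{
            Snapshot: snap.GetName(),
        },
    }); err != nil {
        log.Fatalf("Seek: %v", err)
    }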
+func (c *SubscriberClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *SubscriberClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) +} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. +func (c *SubscriberClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// subscriberGRPCClient is a client for interacting with Cloud Pub/Sub API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type subscriberGRPCClient struct { // Connection pool of gRPC connections to the service. connPool gtransport.ConnPool + // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE + disableDeadlines bool + + // Points back to the CallOptions field of the containing SubscriberClient + CallOptions **SubscriberCallOptions + // The gRPC API client. subscriberClient pubsubpb.SubscriberClient - // The call options for this service. - CallOptions *SubscriberCallOptions + iamPolicyClient iampb.IAMPolicyClient // The x-goog-* metadata to be sent with each request. xGoogMetadata metadata.MD } -// NewSubscriberClient creates a new subscriber client. +// NewSubscriberClient creates a new subscriber client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. // // The service that an application uses to manipulate subscriptions and to // consume messages from a subscription via the Pull method or by // establishing a bi-directional stream using the StreamingPull method. func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) { - connPool, err := gtransport.DialPool(ctx, append(defaultSubscriberClientOptions(), opts...)...) + clientOpts := defaultSubscriberGRPCClientOptions() + if newSubscriberClientHook != nil { + hookOpts, err := newSubscriberClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + disableDeadlines, err := checkDisableDeadlines() if err != nil { return nil, err } - c := &SubscriberClient{ - connPool: connPool, - CallOptions: defaultSubscriberCallOptions(), + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) 
+ if err != nil { + return nil, err + } + client := SubscriberClient{CallOptions: defaultSubscriberCallOptions()} + + c := &subscriberGRPCClient{ + connPool: connPool, + disableDeadlines: disableDeadlines, subscriberClient: pubsubpb.NewSubscriberClient(connPool), + CallOptions: &client.CallOptions, + iamPolicyClient: iampb.NewIAMPolicyClient(connPool), } - c.SetGoogleClientInfo() + c.setGoogleClientInfo() - return c, nil + client.internalClient = c + + return &client, nil } // Connection returns a connection to the API service. // -// Deprecated. -func (c *SubscriberClient) Connection() *grpc.ClientConn { +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *subscriberGRPCClient) Connection() *grpc.ClientConn { return c.connPool.Conn() } -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *SubscriberClient) Close() error { - return c.connPool.Close() -} - -// SetGoogleClientInfo sets the name and version of the application in +// setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) { +func (c *subscriberGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } -// CreateSubscription creates a subscription to a given topic. See the -// -// resource name rules (at https://cloud.google.com/pubsub/docs/admin#resource_names). -// If the subscription already exists, returns ALREADY_EXISTS. -// If the corresponding topic doesn’t exist, returns NOT_FOUND. -// -// If the name is not provided in the request, the server will assign a random -// name for this subscription on the same project as the topic, conforming -// to the -// resource name -// format (at https://cloud.google.com/pubsub/docs/admin#resource_names). The -// generated name is populated in the returned Subscription object. Note that -// for REST API requests, you must specify a name in the request. -func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *subscriberGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *subscriberGRPCClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateSubscription[0:len(c.CallOptions.CreateSubscription):len(c.CallOptions.CreateSubscription)], opts...) + opts = append((*c.CallOptions).CreateSubscription[0:len((*c.CallOptions).CreateSubscription):len((*c.CallOptions).CreateSubscription)], opts...) 
var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -352,11 +631,16 @@ func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb return resp, nil } -// GetSubscription gets the configuration details of a subscription. -func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { +func (c *subscriberGRPCClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetSubscription[0:len(c.CallOptions.GetSubscription):len(c.CallOptions.GetSubscription)], opts...) + opts = append((*c.CallOptions).GetSubscription[0:len((*c.CallOptions).GetSubscription):len((*c.CallOptions).GetSubscription)], opts...) var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -369,12 +653,16 @@ func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.Ge return resp, nil } -// UpdateSubscription updates an existing subscription. Note that certain properties of a -// subscription, such as its topic, are not modifiable. -func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { +func (c *subscriberGRPCClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription.name", url.QueryEscape(req.GetSubscription().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...) + opts = append((*c.CallOptions).UpdateSubscription[0:len((*c.CallOptions).UpdateSubscription):len((*c.CallOptions).UpdateSubscription)], opts...) var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -387,19 +675,21 @@ func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb return resp, nil } -// ListSubscriptions lists matching subscriptions. -func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator { +func (c *subscriberGRPCClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListSubscriptions[0:len(c.CallOptions.ListSubscriptions):len(c.CallOptions.ListSubscriptions)], opts...) 
+ opts = append((*c.CallOptions).ListSubscriptions[0:len((*c.CallOptions).ListSubscriptions):len((*c.CallOptions).ListSubscriptions)], opts...) it := &SubscriptionIterator{} req = proto.Clone(req).(*pubsubpb.ListSubscriptionsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) { - var resp *pubsubpb.ListSubscriptionsResponse - req.PageToken = pageToken + resp := &pubsubpb.ListSubscriptionsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -412,7 +702,7 @@ func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb. } it.Response = resp - return resp.Subscriptions, resp.NextPageToken, nil + return resp.GetSubscriptions(), resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) @@ -422,21 +712,24 @@ func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb. it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + return it } -// DeleteSubscription deletes an existing subscription. All messages retained in the subscription -// are immediately dropped. Calls to Pull after deletion will return -// NOT_FOUND. After a subscription is deleted, a new one may be created with -// the same name, but the new one has no association with the old -// subscription or its topic unless the same topic is specified. -func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error { +func (c *subscriberGRPCClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteSubscription[0:len(c.CallOptions.DeleteSubscription):len(c.CallOptions.DeleteSubscription)], opts...) + opts = append((*c.CallOptions).DeleteSubscription[0:len((*c.CallOptions).DeleteSubscription):len((*c.CallOptions).DeleteSubscription)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.DeleteSubscription(ctx, req, settings.GRPC...) @@ -445,15 +738,16 @@ func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb return err } -// ModifyAckDeadline modifies the ack deadline for a specific message. This method is useful -// to indicate that more time is needed to process a message by the -// subscriber, or to make the message available for redelivery if the -// processing was interrupted. Note that this does not modify the -// subscription-level ackDeadlineSeconds used for subsequent messages. 
-func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error { +func (c *subscriberGRPCClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...) + opts = append((*c.CallOptions).ModifyAckDeadline[0:len((*c.CallOptions).ModifyAckDeadline):len((*c.CallOptions).ModifyAckDeadline)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.ModifyAckDeadline(ctx, req, settings.GRPC...) @@ -462,17 +756,16 @@ func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb. return err } -// Acknowledge acknowledges the messages associated with the ack_ids in the -// AcknowledgeRequest. The Pub/Sub system can remove the relevant messages -// from the subscription. -// -// Acknowledging a message whose ack deadline has expired may succeed, -// but such a message may be redelivered later. Acknowledging a message more -// than once will not result in an error. -func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error { +func (c *subscriberGRPCClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.Acknowledge[0:len(c.CallOptions.Acknowledge):len(c.CallOptions.Acknowledge)], opts...) + opts = append((*c.CallOptions).Acknowledge[0:len((*c.CallOptions).Acknowledge):len((*c.CallOptions).Acknowledge)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.Acknowledge(ctx, req, settings.GRPC...) @@ -481,13 +774,16 @@ func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.Acknow return err } -// Pull pulls messages from the server. The server may return UNAVAILABLE if -// there are too many concurrent pull requests pending for the given -// subscription. -func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) { +func (c *subscriberGRPCClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.Pull[0:len(c.CallOptions.Pull):len(c.CallOptions.Pull)], opts...) 
+ opts = append((*c.CallOptions).Pull[0:len((*c.CallOptions).Pull):len((*c.CallOptions).Pull)], opts...) var resp *pubsubpb.PullResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -500,17 +796,10 @@ func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, return resp, nil } -// StreamingPull establishes a stream with the server, which sends messages down to the -// client. The client streams acknowledgements and ack deadline modifications -// back to the server. The server will close the stream and return the status -// on any error. The server may close the stream with status UNAVAILABLE to -// reassign server-side resources, in which case, the client should -// re-establish the stream. Flow control can be achieved by configuring the -// underlying RPC channel. -func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) { +func (c *subscriberGRPCClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) { ctx = insertMetadata(ctx, c.xGoogMetadata) - opts = append(c.CallOptions.StreamingPull[0:len(c.CallOptions.StreamingPull):len(c.CallOptions.StreamingPull)], opts...) var resp pubsubpb.Subscriber_StreamingPullClient + opts = append((*c.CallOptions).StreamingPull[0:len((*c.CallOptions).StreamingPull):len((*c.CallOptions).StreamingPull)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.subscriberClient.StreamingPull(ctx, settings.GRPC...) @@ -522,16 +811,16 @@ func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOp return resp, nil } -// ModifyPushConfig modifies the PushConfig for a specified subscription. -// -// This may be used to change a push subscription to a pull one (signified by -// an empty PushConfig) or vice versa, or change the endpoint URL and other -// attributes of a push subscription. Messages will accumulate for delivery -// continuously through the call regardless of changes to the PushConfig. -func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error { +func (c *subscriberGRPCClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...) + opts = append((*c.CallOptions).ModifyPushConfig[0:len((*c.CallOptions).ModifyPushConfig):len((*c.CallOptions).ModifyPushConfig)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.ModifyPushConfig(ctx, req, settings.GRPC...) @@ -540,15 +829,16 @@ func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.M return err } -// GetSnapshot gets the configuration details of a snapshot. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow you to manage message acknowledgments in bulk. 
That -// is, you can set the acknowledgment state of messages in an existing -// subscription to the state captured by a snapshot. -func (c *SubscriberClient) GetSnapshot(ctx context.Context, req *pubsubpb.GetSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { +func (c *subscriberGRPCClient) GetSnapshot(ctx context.Context, req *pubsubpb.GetSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot", url.QueryEscape(req.GetSnapshot()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetSnapshot[0:len(c.CallOptions.GetSnapshot):len(c.CallOptions.GetSnapshot)], opts...) + opts = append((*c.CallOptions).GetSnapshot[0:len((*c.CallOptions).GetSnapshot):len((*c.CallOptions).GetSnapshot)], opts...) var resp *pubsubpb.Snapshot err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -561,24 +851,21 @@ func (c *SubscriberClient) GetSnapshot(ctx context.Context, req *pubsubpb.GetSna return resp, nil } -// ListSnapshots lists the existing snapshots. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow -// you to manage message acknowledgments in bulk. That is, you can set the -// acknowledgment state of messages in an existing subscription to the state -// captured by a snapshot. -func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator { +func (c *subscriberGRPCClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListSnapshots[0:len(c.CallOptions.ListSnapshots):len(c.CallOptions.ListSnapshots)], opts...) + opts = append((*c.CallOptions).ListSnapshots[0:len((*c.CallOptions).ListSnapshots):len((*c.CallOptions).ListSnapshots)], opts...) it := &SnapshotIterator{} req = proto.Clone(req).(*pubsubpb.ListSnapshotsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Snapshot, string, error) { - var resp *pubsubpb.ListSnapshotsResponse - req.PageToken = pageToken + resp := &pubsubpb.ListSnapshotsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -591,7 +878,7 @@ func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.List } it.Response = resp - return resp.Snapshots, resp.NextPageToken, nil + return resp.GetSnapshots(), resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) @@ -601,36 +888,24 @@ func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.List it.items = append(it.items, items...) 
return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.PageSize) - it.pageInfo.Token = req.PageToken + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + return it } -// CreateSnapshot creates a snapshot from the requested subscription. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow -// you to manage message acknowledgments in bulk. That is, you can set the -// acknowledgment state of messages in an existing subscription to the state -// captured by a snapshot. -// -// -// If the snapshot already exists, returns ALREADY_EXISTS. -// If the requested subscription doesn’t exist, returns NOT_FOUND. -// If the backlog in the subscription is too old – and the resulting snapshot -// would expire in less than 1 hour – then FAILED_PRECONDITION is returned. -// See also the Snapshot.expire_time field. If the name is not provided in -// the request, the server will assign a random -// name for this snapshot on the same project as the subscription, conforming -// to the -// resource name -// format (at https://cloud.google.com/pubsub/docs/admin#resource_names). The -// generated name is populated in the returned Snapshot object. Note that for -// REST API requests, you must specify a name in the request. -func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { +func (c *subscriberGRPCClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateSnapshot[0:len(c.CallOptions.CreateSnapshot):len(c.CallOptions.CreateSnapshot)], opts...) + opts = append((*c.CallOptions).CreateSnapshot[0:len((*c.CallOptions).CreateSnapshot):len((*c.CallOptions).CreateSnapshot)], opts...) var resp *pubsubpb.Snapshot err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -643,16 +918,16 @@ func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.Cre return resp, nil } -// UpdateSnapshot updates an existing snapshot. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow -// you to manage message acknowledgments in bulk. That is, you can set the -// acknowledgment state of messages in an existing subscription to the state -// captured by a snapshot. 
-func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { +func (c *subscriberGRPCClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot.name", url.QueryEscape(req.GetSnapshot().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateSnapshot[0:len(c.CallOptions.UpdateSnapshot):len(c.CallOptions.UpdateSnapshot)], opts...) + opts = append((*c.CallOptions).UpdateSnapshot[0:len((*c.CallOptions).UpdateSnapshot):len((*c.CallOptions).UpdateSnapshot)], opts...) var resp *pubsubpb.Snapshot err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -665,22 +940,16 @@ func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.Upd return resp, nil } -// DeleteSnapshot removes an existing snapshot. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow -// you to manage message acknowledgments in bulk. That is, you can set the -// acknowledgment state of messages in an existing subscription to the state -// captured by a snapshot. -// -// -// When the snapshot is deleted, all messages retained in the snapshot -// are immediately dropped. After a snapshot is deleted, a new one may be -// created with the same name, but the new one has no association with the old -// snapshot or its subscription, unless the same subscription is specified. -func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error { +func (c *subscriberGRPCClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot", url.QueryEscape(req.GetSnapshot()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteSnapshot[0:len(c.CallOptions.DeleteSnapshot):len(c.CallOptions.DeleteSnapshot)], opts...) + opts = append((*c.CallOptions).DeleteSnapshot[0:len((*c.CallOptions).DeleteSnapshot):len((*c.CallOptions).DeleteSnapshot)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.DeleteSnapshot(ctx, req, settings.GRPC...) @@ -689,18 +958,16 @@ func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.Del return err } -// Seek seeks an existing subscription to a point in time or to a given snapshot, -// whichever is provided in the request. Snapshots are used in -// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow -// you to manage message acknowledgments in bulk. That is, you can set the -// acknowledgment state of messages in an existing subscription to the state -// captured by a snapshot. Note that both the subscription and the snapshot -// must be on the same topic. 
-func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) { +func (c *subscriberGRPCClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) + defer cancel() + ctx = cctx + } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.Seek[0:len(c.CallOptions.Seek):len(c.CallOptions.Seek)], opts...) + opts = append((*c.CallOptions).Seek[0:len((*c.CallOptions).Seek):len((*c.CallOptions).Seek)], opts...) var resp *pubsubpb.SeekResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -713,6 +980,57 @@ func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, return resp, nil } +func (c *subscriberGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *subscriberGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *subscriberGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + // SnapshotIterator manages a stream of *pubsubpb.Snapshot. 
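A minimal sketch of consuming the SnapshotIterator below (illustrative only, not from the vendored file; it assumes a SubscriberClient named c, a context ctx, and the google.golang.org/api/iterator package for the Done sentinel):

	it := c.ListSnapshots(ctx, &pubsubpb.ListSnapshotsRequest{Project: "projects/my-project"})
	for {
		snap, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
			break
		}
		_ = snap // use the snapshot
	}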
type SnapshotIterator struct { items []*pubsubpb.Snapshot diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/version.go b/vendor/cloud.google.com/go/pubsub/apiv1/version.go new file mode 100644 index 000000000000..7ba352b539a4 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/version.go @@ -0,0 +1,23 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapicgen. DO NOT EDIT. + +package pubsub + +import "cloud.google.com/go/pubsub/internal" + +func init() { + versionClient = internal.Version +} diff --git a/vendor/cloud.google.com/go/pubsub/debug.go b/vendor/cloud.google.com/go/pubsub/debug.go index 977ae577f7e7..056bd6342d4f 100644 --- a/vendor/cloud.google.com/go/pubsub/debug.go +++ b/vendor/cloud.google.com/go/pubsub/debug.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build psdebug // +build psdebug package pubsub diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go index a86fc3d4a594..b7f5f94fce67 100644 --- a/vendor/cloud.google.com/go/pubsub/doc.go +++ b/vendor/cloud.google.com/go/pubsub/doc.go @@ -24,17 +24,16 @@ https://cloud.google.com/pubsub/docs See https://godoc.org/cloud.google.com/go for authentication, timeouts, connection pooling and similar aspects of this package. - -Publishing +# Publishing Google Cloud Pub/Sub messages are published to topics. Topics may be created using the pubsub package like so: - topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name") + topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name") Messages may then be published to a topic: - res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")}) + res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")}) Publish queues the message for publishing and returns immediately. When enough messages have accumulated, or enough time has elapsed, the batch of messages is @@ -46,82 +45,102 @@ blocks until the message has been sent to the service. The first time you call Publish on a topic, goroutines are started in the background. To clean up these goroutines, call Stop: - topic.Stop() - + topic.Stop() -Receiving +# Receiving To receive messages published to a topic, clients create subscriptions to the topic. There may be more than one subscription per topic; each message that is published to the topic will be delivered to all of its subscriptions. -Subsciptions may be created like so: +Subscriptions may be created like so: - sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name", - pubsub.SubscriptionConfig{Topic: topic}) + sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name", + pubsub.SubscriptionConfig{Topic: topic}) Messages are then consumed from a subscription via callback. 
- err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) { - log.Printf("Got message: %s", m.Data) - m.Ack() - }) - if err != nil { - // Handle error. - } + err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) { + log.Printf("Got message: %s", m.Data) + m.Ack() + }) + if err != nil { + // Handle error. + } The callback is invoked concurrently by multiple goroutines, maximizing throughput. To terminate a call to Receive, cancel its context. Once client code has processed the message, it must call Message.Ack or -message.Nack, otherwise the message will eventually be redelivered. If the -client cannot or doesn't want to process the message, it can call Message.Nack +Message.Nack; otherwise the message will eventually be redelivered. Ack/Nack +MUST be called within the Receive handler function, and not from a goroutine. +Otherwise, flow control (e.g. ReceiveSettings.MaxOutstandingMessages) will +not be respected, and messages can get orphaned when cancelling Receive. + +If the client cannot or doesn't want to process the message, it can call Message.Nack to speed redelivery. For more information and configuration options, see -"Deadlines" below. +"Ack Deadlines" below. -Note: It is possible for Messages to be redelivered, even if Message.Ack has +Note: It is possible for Messages to be redelivered even if Message.Ack has been called. Client code must be robust to multiple deliveries of messages. -Note: This uses pubsub's streaming pull feature. This feature properties that +Note: This uses pubsub's streaming pull feature. This feature has properties that may be surprising. Please take a look at https://cloud.google.com/pubsub/docs/pull#streamingpull for more details on how streaming pull behaves compared to the synchronous pull method. +# Streams Management + +Streams used for streaming pull are configured by setting sub.ReceiveSettings.NumGoroutines. +However, the total number of streams possible is capped by the gRPC connection pool setting. +By default, the number of connections in the pool is min(4,GOMAXPROCS). -Deadlines +If you have 4 or more CPU cores, the default setting allows a maximum of 400 streams which is still a good default for most cases. +If you want to have more open streams (such as for low CPU core machines), you should pass in the grpc option as described below: + + opts := []option.ClientOption{ + option.WithGRPCConnectionPool(8), + } + client, err := pubsub.NewClient(ctx, projID, opts...) + +# Ack Deadlines The default pubsub deadlines are suitable for most use cases, but may be overridden. This section describes the tradeoffs that should be considered when overriding the defaults. Behind the scenes, each message returned by the Pub/Sub server has an -associated lease, known as an "ACK deadline". Unless a message is -acknowledged within the ACK deadline, or the client requests that -the ACK deadline be extended, the message will become eligible for redelivery. +associated lease, known as an "ack deadline". Unless a message is +acknowledged within the ack deadline, or the client requests that +the ack deadline be extended, the message will become eligible for redelivery. As a convenience, the pubsub client will automatically extend deadlines until either: - * Message.Ack or Message.Nack is called, or - * The "MaxExtension" period elapses from the time the message is fetched from the server. - -ACK deadlines are extended periodically by the client. The initial ACK -deadline given to messages is 10s. 
The period between extensions, as well as the -length of the extension, automatically adjust depending on the time it takes to ack -messages, up to 10m. This has the effect that subscribers that process messages -quickly have their message ack deadlines extended for a short amount, whereas + - Message.Ack or Message.Nack is called, or + - The "MaxExtension" duration elapses from the time the message is fetched from + the server. This defaults to 60m. + +Ack deadlines are extended periodically by the client. The initial ack +deadline given to messages is based on the subscription's AckDeadline property, +which defaults to 10s. The period between extensions, as well as the +length of the extension, automatically adjusts based on the time it takes the +subscriber application to ack messages (based on the 99th percentile of ack latency). +By default, this extension period is capped at 10m, but this limit can be configured +by the "MaxExtensionPeriod" setting. This has the effect that subscribers that process +messages quickly have their message ack deadlines extended for a short amount, whereas subscribers that process message slowly have their message ack deadlines extended for a large amount. The net effect is fewer RPCs sent from the client library. For example, consider a subscriber that takes 3 minutes to process each message. -Since the library has already recorded several 3 minute "time to ack"s in a +Since the library has already recorded several 3-minute "ack latencies"s in a percentile distribution, future message extensions are sent with a value of 3 minutes, every 3 minutes. Suppose the application crashes 5 seconds after the library sends such an extension: the Pub/Sub server would wait the remaining 2m55s before re-sending the messages out to other subscribers. -Please note that the client library does not use the subscription's AckDeadline -by default. To enforce the subscription AckDeadline, set MaxExtension to the -subscription's AckDeadline: +Please note that by default, the client library does not use the subscription's +AckDeadline for the MaxExtension value. To enforce the subscription's AckDeadline, +set MaxExtension to the subscription's AckDeadline: cfg, err := sub.Config(ctx) if err != nil { @@ -130,11 +149,29 @@ subscription's AckDeadline: sub.ReceiveSettings.MaxExtension = cfg.AckDeadline - -Slow Message Processing +# Slow Message Processing For use cases where message processing exceeds 30 minutes, we recommend using the base client in a pull model, since long-lived streams are periodically killed by firewalls. See the example at https://godoc.org/cloud.google.com/go/pubsub/apiv1#example-SubscriberClient-Pull-LengthyClientProcessing + +# Emulator + +To use an emulator with this library, you can set the PUBSUB_EMULATOR_HOST +environment variable to the address at which your emulator is running. This will +send requests to that address instead of to Cloud Pub/Sub. You can then create +and use a client as usual: + + // Set PUBSUB_EMULATOR_HOST environment variable. + err := os.Setenv("PUBSUB_EMULATOR_HOST", "localhost:9000") + if err != nil { + // TODO: Handle error. + } + // Create client as usual. + client, err := pubsub.NewClient(ctx, "my-project-id") + if err != nil { + // TODO: Handle error. 
+ } + defer client.Close() */ package pubsub // import "cloud.google.com/go/pubsub" diff --git a/vendor/cloud.google.com/go/pubsub/flow_controller.go b/vendor/cloud.google.com/go/pubsub/flow_controller.go index 3f165a0ac18c..8b3525081556 100644 --- a/vendor/cloud.google.com/go/pubsub/flow_controller.go +++ b/vendor/cloud.google.com/go/pubsub/flow_controller.go @@ -16,12 +16,60 @@ package pubsub import ( "context" + "errors" "sync/atomic" "golang.org/x/sync/semaphore" ) -// flowController implements flow control for Subscription.Receive. +// LimitExceededBehavior configures the behavior that flowController can use in case +// the flow control limits are exceeded. +type LimitExceededBehavior int + +const ( + // FlowControlIgnore disables flow control. + FlowControlIgnore LimitExceededBehavior = iota + // FlowControlBlock signals to wait until the request can be made without exceeding the limit. + FlowControlBlock + // FlowControlSignalError signals an error to the caller of acquire. + FlowControlSignalError +) + +// flowControllerPurpose indicates whether a flowController is for a topic or a +// subscription. +type flowControllerPurpose int + +const ( + flowControllerPurposeSubscription flowControllerPurpose = iota + flowControllerPurposeTopic +) + +// FlowControlSettings controls flow control for messages while publishing or subscribing. +type FlowControlSettings struct { + // MaxOutstandingMessages is the maximum number of buffered messages to be published. + // If less than or equal to zero, this is disabled. + MaxOutstandingMessages int + + // MaxOutstandingBytes is the maximum size of buffered messages to be published. + // If less than or equal to zero, this is disabled. + MaxOutstandingBytes int + + // LimitExceededBehavior configures the behavior when trying to publish + // additional messages while the flow controller is full. The available options + // are Ignore (disable, default), Block, and SignalError (publish + // results will return an error). + LimitExceededBehavior LimitExceededBehavior +} + +var ( + // ErrFlowControllerMaxOutstandingMessages indicates that outstanding messages exceeds MaxOutstandingMessages. + ErrFlowControllerMaxOutstandingMessages = errors.New("pubsub: MaxOutstandingMessages flow controller limit exceeded") + + // ErrFlowControllerMaxOutstandingBytes indicates that outstanding bytes of messages exceeds MaxOutstandingBytes. + ErrFlowControllerMaxOutstandingBytes = errors.New("pubsub: MaxOutstandingBytes flow control limit exceeded") +) + +// flowController implements flow control for publishing and subscribing. type flowController struct { maxCount int maxSize int // max total size of messages @@ -31,81 +79,111 @@ type flowController struct { // small releases. // Atomic. countRemaining int64 + // Number of outstanding bytes remaining. Atomic. + bytesRemaining int64 + limitBehavior LimitExceededBehavior + purpose flowControllerPurpose } // newFlowController creates a new flowController that ensures no more than // maxCount messages or maxSize bytes are outstanding at once. If maxCount or // maxSize is < 1, then an unlimited number of messages or bytes is permitted, // respectively. 
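Illustrative only (not from the vendored file): the FlowControlSettings type above is the public knob that feeds this flow controller on the publish side. A rough sketch, assuming a pubsub.Client named client and a context ctx:

	t := client.Topic("topic-name")
	t.PublishSettings.FlowControlSettings = pubsub.FlowControlSettings{
		MaxOutstandingMessages: 1000,                    // cap on buffered, unpublished messages
		MaxOutstandingBytes:    100 * 1024 * 1024,       // cap on buffered, unpublished bytes
		LimitExceededBehavior:  pubsub.FlowControlBlock, // block Publish instead of returning an error
	}
	res := t.Publish(ctx, &pubsub.Message{Data: []byte("payload")})
	if _, err := res.Get(ctx); err != nil {
		// TODO: Handle error (in SignalError mode this includes flow control errors).
	}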
-func newFlowController(maxCount, maxSize int) *flowController { - fc := &flowController{ - maxCount: maxCount, - maxSize: maxSize, - semCount: nil, - semSize: nil, +func newFlowController(fc FlowControlSettings) flowController { + f := flowController{ + maxCount: fc.MaxOutstandingMessages, + maxSize: fc.MaxOutstandingBytes, + semCount: nil, + semSize: nil, + limitBehavior: fc.LimitExceededBehavior, } - if maxCount > 0 { - fc.semCount = semaphore.NewWeighted(int64(maxCount)) + if fc.MaxOutstandingMessages > 0 { + f.semCount = semaphore.NewWeighted(int64(fc.MaxOutstandingMessages)) } - if maxSize > 0 { - fc.semSize = semaphore.NewWeighted(int64(maxSize)) + if fc.MaxOutstandingBytes > 0 { + f.semSize = semaphore.NewWeighted(int64(fc.MaxOutstandingBytes)) } - return fc + return f +} + +func newTopicFlowController(fc FlowControlSettings) flowController { + f := newFlowController(fc) + f.purpose = flowControllerPurposeTopic + return f +} + +func newSubscriptionFlowController(fc FlowControlSettings) flowController { + f := newFlowController(fc) + f.purpose = flowControllerPurposeSubscription + return f } -// acquire blocks until one message of size bytes can proceed or ctx is done. -// It returns nil in the first case, or ctx.Err() in the second. +// acquire allocates space for a message: the message count and its size. // -// acquire allows large messages to proceed by treating a size greater than maxSize +// In FlowControlSignalError mode, large messages greater than maxSize +// will be result in an error. In other modes, large messages will be treated // as if it were equal to maxSize. func (f *flowController) acquire(ctx context.Context, size int) error { - if f.semCount != nil { - if err := f.semCount.Acquire(ctx, 1); err != nil { - return err + switch f.limitBehavior { + case FlowControlIgnore: + return nil + case FlowControlBlock: + if f.semCount != nil { + if err := f.semCount.Acquire(ctx, 1); err != nil { + return err + } } - } - if f.semSize != nil { - if err := f.semSize.Acquire(ctx, f.bound(size)); err != nil { - if f.semCount != nil { - f.semCount.Release(1) + if f.semSize != nil { + if err := f.semSize.Acquire(ctx, f.bound(size)); err != nil { + if f.semCount != nil { + f.semCount.Release(1) + } + return err + } + } + case FlowControlSignalError: + if f.semCount != nil { + if !f.semCount.TryAcquire(1) { + return ErrFlowControllerMaxOutstandingMessages + } + } + if f.semSize != nil { + // Try to acquire the full size of the message here. + if !f.semSize.TryAcquire(int64(size)) { + if f.semCount != nil { + f.semCount.Release(1) + } + return ErrFlowControllerMaxOutstandingBytes } - return err } } - atomic.AddInt64(&f.countRemaining, 1) - return nil -} -// tryAcquire returns false if acquire would block. Otherwise, it behaves like -// acquire and returns true. -// -// tryAcquire allows large messages to proceed by treating a size greater than -// maxSize as if it were equal to maxSize. 
-func (f *flowController) tryAcquire(size int) bool { if f.semCount != nil { - if !f.semCount.TryAcquire(1) { - return false - } + outstandingMessages := atomic.AddInt64(&f.countRemaining, 1) + f.recordOutstandingMessages(ctx, outstandingMessages) } + if f.semSize != nil { - if !f.semSize.TryAcquire(f.bound(size)) { - if f.semCount != nil { - f.semCount.Release(1) - } - return false - } + outstandingBytes := atomic.AddInt64(&f.bytesRemaining, f.bound(size)) + f.recordOutstandingBytes(ctx, outstandingBytes) } - atomic.AddInt64(&f.countRemaining, 1) - return true + return nil } // release notes that one message of size bytes is no longer outstanding. -func (f *flowController) release(size int) { - atomic.AddInt64(&f.countRemaining, -1) +func (f *flowController) release(ctx context.Context, size int) { + if f.limitBehavior == FlowControlIgnore { + return + } + if f.semCount != nil { + outstandingMessages := atomic.AddInt64(&f.countRemaining, -1) + f.recordOutstandingMessages(ctx, outstandingMessages) f.semCount.Release(1) } if f.semSize != nil { + outstandingBytes := atomic.AddInt64(&f.bytesRemaining, -1*f.bound(size)) + f.recordOutstandingBytes(ctx, outstandingBytes) f.semSize.Release(f.bound(size)) } } @@ -117,6 +195,26 @@ func (f *flowController) bound(size int) int64 { return int64(size) } +// count returns the number of outstanding messages. +// if maxCount is 0, this will always return 0. func (f *flowController) count() int { return int(atomic.LoadInt64(&f.countRemaining)) } + +func (f *flowController) recordOutstandingMessages(ctx context.Context, n int64) { + if f.purpose == flowControllerPurposeTopic { + recordStat(ctx, PublisherOutstandingMessages, n) + return + } + + recordStat(ctx, OutstandingMessages, n) +} + +func (f *flowController) recordOutstandingBytes(ctx context.Context, n int64) { + if f.purpose == flowControllerPurposeTopic { + recordStat(ctx, PublisherOutstandingBytes, n) + return + } + + recordStat(ctx, OutstandingBytes, n) +} diff --git a/vendor/cloud.google.com/go/pubsub/internal/scheduler/publish_scheduler.go b/vendor/cloud.google.com/go/pubsub/internal/scheduler/publish_scheduler.go new file mode 100644 index 000000000000..cba172b79db2 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/internal/scheduler/publish_scheduler.go @@ -0,0 +1,207 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scheduler + +import ( + "errors" + "reflect" + "sync" + "time" + + "google.golang.org/api/support/bundler" +) + +// PublishScheduler is a scheduler which is designed for Pub/Sub's Publish flow. +// It bundles items before handling them. All items in this PublishScheduler use +// the same handler. +// +// Each item is added with a given key. Items added to the empty string key are +// handled in random order. Items added to any other key are handled +// sequentially. +type PublishScheduler struct { + // Settings passed down to each bundler that gets created. 
+ DelayThreshold time.Duration + BundleCountThreshold int + BundleByteThreshold int + BundleByteLimit int + BufferedByteLimit int + + mu sync.Mutex + bundlers sync.Map // keys -> *bundler.Bundler + outstanding sync.Map // keys -> num outstanding messages + + keysMu sync.RWMutex + // keysWithErrors tracks ordering keys that cannot accept new messages. + // A bundler might not accept new messages if publishing has failed + // for a specific ordering key, and can be resumed with topic.ResumePublish(). + keysWithErrors map[string]struct{} + + // workers is a channel that represents workers. Rather than a pool, where + // worker are "removed" until the pool is empty, the channel is more like a + // set of work desks, where workers are "added" until all the desks are full. + // + // workers does not restrict the amount of goroutines in the bundlers. + // Rather, it acts as the flow control for completion of bundler work. + workers chan struct{} + handle func(bundle interface{}) + done chan struct{} +} + +// NewPublishScheduler returns a new PublishScheduler. +// +// The workers arg is the number of workers that will operate on the queue of +// work. A reasonably large number of workers is highly recommended. If the +// workers arg is 0, then a healthy default of 10 workers is used. +// +// The scheduler does not use a parent context. If it did, canceling that +// context would immediately stop the scheduler without waiting for +// undelivered messages. +// +// The scheduler should be stopped only with FlushAndStop. +func NewPublishScheduler(workers int, handle func(bundle interface{})) *PublishScheduler { + if workers == 0 { + workers = 10 + } + + s := PublishScheduler{ + keysWithErrors: make(map[string]struct{}), + workers: make(chan struct{}, workers), + handle: handle, + done: make(chan struct{}), + } + + return &s +} + +// Add adds an item to the scheduler at a given key. +// +// Add never blocks. Buffering happens in the scheduler's publishers. There is +// no flow control. +// +// Since ordered keys require only a single outstanding RPC at once, it is +// possible to send ordered key messages to Topic.Publish (and subsequently to +// PublishScheduler.Add) faster than the bundler can publish them to the +// Pub/Sub service, resulting in a backed up queue of Pub/Sub bundles. Each +// item in the bundler queue is a goroutine. +func (s *PublishScheduler) Add(key string, item interface{}, size int) error { + select { + case <-s.done: + return errors.New("draining") + default: + } + + s.mu.Lock() + defer s.mu.Unlock() + var b *bundler.Bundler + bInterface, ok := s.bundlers.Load(key) + + if !ok { + s.outstanding.Store(key, 1) + b = bundler.NewBundler(item, func(bundle interface{}) { + s.workers <- struct{}{} + s.handle(bundle) + <-s.workers + + nlen := reflect.ValueOf(bundle).Len() + s.mu.Lock() + outsInterface, _ := s.outstanding.Load(key) + s.outstanding.Store(key, outsInterface.(int)-nlen) + if v, _ := s.outstanding.Load(key); v == 0 { + s.outstanding.Delete(key) + s.bundlers.Delete(key) + } + s.mu.Unlock() + }) + b.DelayThreshold = s.DelayThreshold + b.BundleCountThreshold = s.BundleCountThreshold + b.BundleByteThreshold = s.BundleByteThreshold + b.BundleByteLimit = s.BundleByteLimit + b.BufferedByteLimit = s.BufferedByteLimit + + if b.BufferedByteLimit == 0 { + b.BufferedByteLimit = 1e9 + } + + if key == "" { + // There's no way to express "unlimited" in the bundler, so we use + // some high number. 
+ b.HandlerLimit = 1e9 + } else { + // HandlerLimit=1 causes the bundler to act as a sequential queue. + b.HandlerLimit = 1 + } + + s.bundlers.Store(key, b) + } else { + b = bInterface.(*bundler.Bundler) + oi, _ := s.outstanding.Load(key) + s.outstanding.Store(key, oi.(int)+1) + } + + return b.Add(item, size) +} + +// FlushAndStop begins flushing items from bundlers and from the scheduler. It +// blocks until all items have been flushed. +func (s *PublishScheduler) FlushAndStop() { + close(s.done) + s.bundlers.Range(func(_, bi interface{}) bool { + bi.(*bundler.Bundler).Flush() + return true + }) +} + +// Flush waits until all bundlers are sent. +func (s *PublishScheduler) Flush() { + var wg sync.WaitGroup + s.bundlers.Range(func(_, bi interface{}) bool { + wg.Add(1) + go func(b *bundler.Bundler) { + defer wg.Done() + b.Flush() + }(bi.(*bundler.Bundler)) + return true + }) + wg.Wait() + +} + +// IsPaused checks if the bundler associated with an ordering keys is +// paused. +func (s *PublishScheduler) IsPaused(orderingKey string) bool { + s.keysMu.RLock() + defer s.keysMu.RUnlock() + _, ok := s.keysWithErrors[orderingKey] + return ok +} + +// Pause pauses the bundler associated with the provided ordering key, +// preventing it from accepting new messages. Any outstanding messages +// that haven't been published will error. If orderingKey is empty, +// this is a no-op. +func (s *PublishScheduler) Pause(orderingKey string) { + if orderingKey != "" { + s.keysMu.Lock() + defer s.keysMu.Unlock() + s.keysWithErrors[orderingKey] = struct{}{} + } +} + +// Resume resumes accepting message with the provided ordering key. +func (s *PublishScheduler) Resume(orderingKey string) { + s.keysMu.Lock() + defer s.keysMu.Unlock() + delete(s.keysWithErrors, orderingKey) +} diff --git a/vendor/cloud.google.com/go/pubsub/internal/scheduler/receive_scheduler.go b/vendor/cloud.google.com/go/pubsub/internal/scheduler/receive_scheduler.go new file mode 100644 index 000000000000..dafccdd1cee3 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/internal/scheduler/receive_scheduler.go @@ -0,0 +1,141 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scheduler + +import ( + "errors" + "sync" +) + +// ErrReceiveDraining indicates the scheduler has shutdown and is draining. +var ErrReceiveDraining error = errors.New("pubsub: receive scheduler draining") + +// ReceiveScheduler is a scheduler which is designed for Pub/Sub's Receive flow. +// +// Each item is added with a given key. Items added to the empty string key are +// handled in random order. Items added to any other key are handled +// sequentially. +type ReceiveScheduler struct { + // workers is a channel that represents workers. Rather than a pool, where + // worker are "removed" until the pool is empty, the channel is more like a + // set of work desks, where workers are "added" until all the desks are full. 
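The "set of work desks" wording in the scheduler comments describes a buffered channel used as a counting semaphore; a generic sketch of that idiom (illustrative only, with hypothetical items and a fixed desk count):

	workers := make(chan struct{}, 10) // capacity = number of desks
	var wg sync.WaitGroup
	for _, item := range []string{"a", "b", "c"} {
		workers <- struct{}{} // claim a desk; blocks while all desks are busy
		wg.Add(1)
		go func(it string) {
			defer wg.Done()
			defer func() { <-workers }() // free the desk once the item is handled
			fmt.Println("handled", it)
		}(item)
	}
	wg.Wait()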
+ // + // A worker taking an item from the unordered queue (key="") completes a + // single item and then goes back to the pool. + // + // A worker taking an item from an ordered queue (key="something") completes + // all work in that queue until the queue is empty, then deletes the queue, + // then goes back to the pool. + workers chan struct{} + done chan struct{} + + mu sync.Mutex + m map[string][]func() +} + +// NewReceiveScheduler creates a new ReceiveScheduler. +// +// The workers arg is the number of concurrent calls to handle. If the workers +// arg is 0, then a healthy default of 10 workers is used. If less than 0, this +// will be set to an large number, similar to PublishScheduler's handler limit. +func NewReceiveScheduler(workers int) *ReceiveScheduler { + if workers == 0 { + workers = 10 + } else if workers < 0 { + workers = 1e9 + } + + return &ReceiveScheduler{ + workers: make(chan struct{}, workers), + done: make(chan struct{}), + m: make(map[string][]func()), + } +} + +// Add adds the item to be handled. Add may block. +// +// Buffering happens above the ReceiveScheduler in the form of a flow controller +// that requests batches of messages to pull. A backed up ReceiveScheduler.Add +// call causes pushback to the pubsub service (less Receive calls on the +// long-lived stream), which keeps memory footprint stable. +func (s *ReceiveScheduler) Add(key string, item interface{}, handle func(item interface{})) error { + select { + case <-s.done: + return ErrReceiveDraining + default: + } + if key == "" { + // Spawn a worker. + s.workers <- struct{}{} + go func() { + // Unordered keys can be handled immediately. + handle(item) + <-s.workers + }() + return nil + } + + // Add it to the queue. This has to happen before we enter the goroutine + // below to prevent a race from the next iteration of the key-loop + // adding another item before this one gets queued. + + s.mu.Lock() + _, ok := s.m[key] + s.m[key] = append(s.m[key], func() { + handle(item) + }) + s.mu.Unlock() + if ok { + // Someone is already working on this key. + return nil + } + + // Spawn a worker. + s.workers <- struct{}{} + + go func() { + defer func() { <-s.workers }() + + // Key-Loop: loop through the available items in the key's queue. + for { + s.mu.Lock() + if len(s.m[key]) == 0 { + // We're done processing items - the queue is empty. Delete + // the queue from the map and free up the worker. + delete(s.m, key) + s.mu.Unlock() + return + } + // Pop an item from the queue. + next := s.m[key][0] + s.m[key] = s.m[key][1:] + s.mu.Unlock() + + next() // Handle next in queue. + } + }() + + return nil +} + +// Shutdown begins flushing messages and stops accepting new Add calls. Shutdown +// does not block, or wait for all messages to be flushed. +func (s *ReceiveScheduler) Shutdown() { + select { + case <-s.done: + default: + close(s.done) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/internal/version.go b/vendor/cloud.google.com/go/pubsub/internal/version.go new file mode 100644 index 000000000000..c828210e2d61 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. +const Version = "1.27.1" diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go index c42fb02d71b1..ced6a5c90089 100644 --- a/vendor/cloud.google.com/go/pubsub/iterator.go +++ b/vendor/cloud.google.com/go/pubsub/iterator.go @@ -16,18 +16,23 @@ package pubsub import ( "context" + "errors" "io" + "log" + "strings" "sync" "time" + ipubsub "cloud.google.com/go/internal/pubsub" vkit "cloud.google.com/go/pubsub/apiv1" "cloud.google.com/go/pubsub/internal/distribution" - "github.com/golang/protobuf/proto" gax "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/apierror" pb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protowire" ) // Between message receipt and ack (that is, the time spent processing a message) we want to extend the message @@ -36,21 +41,40 @@ import ( // of the actual deadline. const gracePeriod = 5 * time.Second +// ackIDBatchSize is the maximum number of ACK IDs to send in a single Ack/Modack RPC. +// The backend imposes a maximum request size limit of 524288 bytes (512 KiB) per +// acknowledge / modifyAckDeadline request. ACK IDs have a maximum size of 164 +// bytes, thus we cannot send more than 524288/176 ~= 2979 ACK IDs in an Ack/ModAc + +// Accounting for some overhead, we should thus only send a maximum of 2500 ACK +// IDs at a time. +// This is a var such that it can be modified for tests. +const ackIDBatchSize int = 2500 + +// These are vars so tests can change them. +var ( + maxDurationPerLeaseExtension = 10 * time.Minute + minDurationPerLeaseExtension = 10 * time.Second + minDurationPerLeaseExtensionExactlyOnce = 1 * time.Minute + + // The total amount of time to retry acks/modacks with exactly once delivery enabled subscriptions. 
+ exactlyOnceDeliveryRetryDeadline = 600 * time.Second +) + type messageIterator struct { - ctx context.Context - cancel func() // the function that will cancel ctx; called in stop - po *pullOptions - ps *pullStream - subc *vkit.SubscriberClient - subName string - maxExtensionPeriod *time.Duration - kaTick <-chan time.Time // keep-alive (deadline extensions) - ackTicker *time.Ticker // message acks - nackTicker *time.Ticker // message nacks (more frequent than acks) - pingTicker *time.Ticker // sends to the stream to keep it open - failed chan struct{} // closed on stream error - drained chan struct{} // closed when stopped && no more pending messages - wg sync.WaitGroup + ctx context.Context + cancel func() // the function that will cancel ctx; called in stop + po *pullOptions + ps *pullStream + subc *vkit.SubscriberClient + subName string + kaTick <-chan time.Time // keep-alive (deadline extensions) + ackTicker *time.Ticker // message acks + nackTicker *time.Ticker // message nacks + pingTicker *time.Ticker // sends to the stream to keep it open + failed chan struct{} // closed on stream error + drained chan struct{} // closed when stopped && no more pending messages + wg sync.WaitGroup mu sync.Mutex ackTimeDist *distribution.D // dist uses seconds @@ -62,30 +86,43 @@ type messageIterator struct { // to update ack deadlines (via modack), we'll consult this table and only include IDs // that are not beyond their deadline. keepAliveDeadlines map[string]time.Time - pendingAcks map[string]bool - pendingNacks map[string]bool - pendingModAcks map[string]bool // ack IDs whose ack deadline is to be modified - err error // error from stream failure + pendingAcks map[string]*AckResult + pendingNacks map[string]*AckResult + // ack IDs whose ack deadline is to be modified + // ModAcks don't have AckResults but allows reuse of the SendModAck function. + pendingModAcks map[string]*AckResult + err error // error from stream failure + + eoMu sync.RWMutex + enableExactlyOnceDelivery bool + sendNewAckDeadline bool } // newMessageIterator starts and returns a new messageIterator. // subName is the full name of the subscription to pull messages from. // Stop must be called on the messageIterator when it is no longer needed. // The iterator always uses the background context for acking messages and extending message deadlines. -func newMessageIterator(subc *vkit.SubscriberClient, subName string, maxExtensionPeriod *time.Duration, po *pullOptions) *messageIterator { +func newMessageIterator(subc *vkit.SubscriberClient, subName string, po *pullOptions) *messageIterator { var ps *pullStream if !po.synchronous { - ps = newPullStream(context.Background(), subc.StreamingPull, subName) + maxMessages := po.maxOutstandingMessages + maxBytes := po.maxOutstandingBytes + if po.useLegacyFlowControl { + maxMessages = 0 + maxBytes = 0 + } + ps = newPullStream(context.Background(), subc.StreamingPull, subName, maxMessages, maxBytes, po.maxExtensionPeriod) } // The period will update each tick based on the distribution of acks. We'll start by arbitrarily sending // the first keepAlive halfway towards the minimum ack deadline. - keepAlivePeriod := minAckDeadline / 2 + keepAlivePeriod := minDurationPerLeaseExtension / 2 // Ack promptly so users don't lose work if client crashes. 
ackTicker := time.NewTicker(100 * time.Millisecond) nackTicker := time.NewTicker(100 * time.Millisecond) pingTicker := time.NewTicker(30 * time.Second) cctx, cancel := context.WithCancel(context.Background()) + cctx = withSubscriptionKey(cctx, subName) it := &messageIterator{ ctx: cctx, cancel: cancel, @@ -93,18 +130,17 @@ func newMessageIterator(subc *vkit.SubscriberClient, subName string, maxExtensio po: po, subc: subc, subName: subName, - maxExtensionPeriod: maxExtensionPeriod, kaTick: time.After(keepAlivePeriod), ackTicker: ackTicker, nackTicker: nackTicker, pingTicker: pingTicker, failed: make(chan struct{}), drained: make(chan struct{}), - ackTimeDist: distribution.New(int(maxAckDeadline/time.Second) + 1), + ackTimeDist: distribution.New(int(maxDurationPerLeaseExtension/time.Second) + 1), keepAliveDeadlines: map[string]time.Time{}, - pendingAcks: map[string]bool{}, - pendingNacks: map[string]bool{}, - pendingModAcks: map[string]bool{}, + pendingAcks: map[string]*AckResult{}, + pendingNacks: map[string]*AckResult{}, + pendingModAcks: map[string]*AckResult{}, } it.wg.Add(1) go it.sender() @@ -142,16 +178,26 @@ func (it *messageIterator) checkDrained() { } } +// Given a receiveTime, add the elapsed time to the iterator's ack distribution. +// These values are bounded by the ModifyAckDeadline limits, which are +// min/maxDurationPerLeaseExtension. +func (it *messageIterator) addToDistribution(receiveTime time.Time) { + d := time.Since(receiveTime) + d = maxDuration(d, minDurationPerLeaseExtension) + d = minDuration(d, maxDurationPerLeaseExtension) + it.ackTimeDist.Record(int(d / time.Second)) +} + // Called when a message is acked/nacked. -func (it *messageIterator) done(ackID string, ack bool, receiveTime time.Time) { - it.ackTimeDist.Record(int(time.Since(receiveTime) / time.Second)) +func (it *messageIterator) done(ackID string, ack bool, r *AckResult, receiveTime time.Time) { + it.addToDistribution(receiveTime) it.mu.Lock() defer it.mu.Unlock() delete(it.keepAliveDeadlines, ackID) if ack { - it.pendingAcks[ackID] = true + it.pendingAcks[ackID] = r } else { - it.pendingNacks[ackID] = true + it.pendingNacks[ackID] = r } it.checkDrained() } @@ -200,34 +246,39 @@ func (it *messageIterator) receive(maxToPull int32) ([]*Message, error) { return nil, it.fail(err) } recordStat(it.ctx, PullCount, int64(len(rmsgs))) - msgs, err := convertMessages(rmsgs) + now := time.Now() + msgs, err := convertMessages(rmsgs, now, it.done) if err != nil { return nil, it.fail(err) } // We received some messages. Remember them so we can keep them alive. Also, // do a receipt mod-ack when streaming. maxExt := time.Now().Add(it.po.maxExtension) - ackIDs := map[string]bool{} + ackIDs := map[string]*AckResult{} it.mu.Lock() - now := time.Now() for _, m := range msgs { - m.receiveTime = now - addRecv(m.ID, m.ackID, now) - m.doneFunc = it.done - it.keepAliveDeadlines[m.ackID] = maxExt + ackID := msgAckID(m) + addRecv(m.ID, ackID, now) + it.keepAliveDeadlines[ackID] = maxExt // Don't change the mod-ack if the message is going to be nacked. This is // possible if there are retries. - if !it.pendingNacks[m.ackID] { - ackIDs[m.ackID] = true + if _, ok := it.pendingNacks[ackID]; !ok { + // Don't use the message's AckResult here since these are only for receipt modacks. + // ModAckResults are transparent to the user anyway so these can automatically succeed. + // We can't use an empty AckResult here either since SetAckResult will try to + // close the channel without checking if it exists. 
+ ackIDs[ackID] = newSuccessAckResult() } } deadline := it.ackDeadline() it.mu.Unlock() - if len(ackIDs) > 0 { - if !it.sendModAck(ackIDs, deadline) { - return nil, it.err + go func() { + if len(ackIDs) > 0 { + // Don't check the return value of this since modacks are fire and forget, + // meaning errors should not be propagated to the client. + it.sendModAck(ackIDs, deadline) } - } + }() return msgs, nil } @@ -243,6 +294,8 @@ func (it *messageIterator) pullMessages(maxToPull int32) ([]*pb.ReceivedMessage, switch { case err == context.Canceled: return nil, nil + case status.Code(err) == codes.Canceled: + return nil, nil case err != nil: return nil, err default: @@ -255,6 +308,12 @@ func (it *messageIterator) recvMessages() ([]*pb.ReceivedMessage, error) { if err != nil { return nil, err } + it.eoMu.Lock() + if got := res.GetSubscriptionProperties().GetExactlyOnceDeliveryEnabled(); got != it.enableExactlyOnceDelivery { + it.sendNewAckDeadline = true + it.enableExactlyOnceDelivery = got + } + it.eoMu.Unlock() return res.ReceivedMessages, nil } @@ -320,36 +379,30 @@ func (it *messageIterator) sender() { sendPing = !it.po.synchronous } // Lock is held here. - var acks, nacks, modAcks map[string]bool + var acks, nacks, modAcks map[string]*AckResult if sendAcks { acks = it.pendingAcks - it.pendingAcks = map[string]bool{} + it.pendingAcks = map[string]*AckResult{} } if sendNacks { nacks = it.pendingNacks - it.pendingNacks = map[string]bool{} + it.pendingNacks = map[string]*AckResult{} } if sendModAcks { modAcks = it.pendingModAcks - it.pendingModAcks = map[string]bool{} + it.pendingModAcks = map[string]*AckResult{} } it.mu.Unlock() // Make Ack and ModAck RPCs. if sendAcks { - if !it.sendAck(acks) { - return - } + it.sendAck(acks) } if sendNacks { // Nack indicated by modifying the deadline to zero. - if !it.sendModAck(nacks, 0) { - return - } + it.sendModAck(nacks, 0) } if sendModAcks { - if !it.sendModAck(modAcks, dl) { - return - } + it.sendModAck(modAcks, dl) } if sendPing { it.pingStream() @@ -371,96 +424,192 @@ func (it *messageIterator) handleKeepAlives() { // https://groups.google.com/forum/#!msg/golang-nuts/UciASUb03Js/pzSq5iVFAQAJ. delete(it.keepAliveDeadlines, id) } else { - // This will not conflict with a nack, because nacking removes the ID from keepAliveDeadlines. - it.pendingModAcks[id] = true + // Use a success AckResult since we don't propagate ModAcks back to the user. + it.pendingModAcks[id] = newSuccessAckResult() } } it.checkDrained() } -func (it *messageIterator) sendAck(m map[string]bool) bool { - // Account for the Subscription field. - overhead := calcFieldSizeString(it.subName) - return it.sendAckIDRPC(m, maxPayload-overhead, func(ids []string) error { - recordStat(it.ctx, AckCount, int64(len(ids))) - addAcks(ids) +// sendAck is used to confirm acknowledgement of a message. If exactly once delivery is +// enabled, we'll retry these messages for a short duration in a goroutine. +func (it *messageIterator) sendAck(m map[string]*AckResult) { + ackIDs := make([]string, 0, len(m)) + for k := range m { + ackIDs = append(ackIDs, k) + } + it.eoMu.RLock() + exactlyOnceDelivery := it.enableExactlyOnceDelivery + it.eoMu.RUnlock() + + var toSend []string + for len(ackIDs) > 0 { + toSend, ackIDs = splitRequestIDs(ackIDs, ackIDBatchSize) + + recordStat(it.ctx, AckCount, int64(len(toSend))) + addAcks(toSend) // Use context.Background() as the call's context, not it.ctx. We don't // want to cancel this RPC when the iterator is stopped. 
- return it.subc.Acknowledge(context.Background(), &pb.AcknowledgeRequest{ + cctx2, cancel2 := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel2() + err := it.subc.Acknowledge(cctx2, &pb.AcknowledgeRequest{ Subscription: it.subName, - AckIds: ids, + AckIds: toSend, }) - }) + if exactlyOnceDelivery { + resultsByAckID := make(map[string]*AckResult) + for _, ackID := range toSend { + resultsByAckID[ackID] = m[ackID] + } + st, md := extractMetadata(err) + _, toRetry := processResults(st, resultsByAckID, md) + if len(toRetry) > 0 { + // Retry acks in a separate goroutine. + go func() { + it.retryAcks(toRetry) + }() + } + } + } } +// sendModAck is used to extend the lease of messages or nack them. // The receipt mod-ack amount is derived from a percentile distribution based // on the time it takes to process messages. The percentile chosen is the 99%th // percentile in order to capture the highest amount of time necessary without -// considering 1% outliers. -func (it *messageIterator) sendModAck(m map[string]bool, deadline time.Duration) bool { +// considering 1% outliers. If the ModAck RPC fails and exactly once delivery is +// enabled, we retry it in a separate goroutine for a short duration. +func (it *messageIterator) sendModAck(m map[string]*AckResult, deadline time.Duration) { deadlineSec := int32(deadline / time.Second) - // Account for the Subscription and AckDeadlineSeconds fields. - overhead := calcFieldSizeString(it.subName) + calcFieldSizeInt(int(deadlineSec)) - return it.sendAckIDRPC(m, maxPayload-overhead, func(ids []string) error { + ackIDs := make([]string, 0, len(m)) + for k := range m { + ackIDs = append(ackIDs, k) + } + it.eoMu.RLock() + exactlyOnceDelivery := it.enableExactlyOnceDelivery + it.eoMu.RUnlock() + var toSend []string + for len(ackIDs) > 0 { + toSend, ackIDs = splitRequestIDs(ackIDs, ackIDBatchSize) if deadline == 0 { - recordStat(it.ctx, NackCount, int64(len(ids))) + recordStat(it.ctx, NackCount, int64(len(toSend))) } else { - recordStat(it.ctx, ModAckCount, int64(len(ids))) + recordStat(it.ctx, ModAckCount, int64(len(toSend))) } - addModAcks(ids, deadlineSec) - // Retry this RPC on Unavailable for a short amount of time, then give up - // without returning a fatal error. The utility of this RPC is by nature - // transient (since the deadline is relative to the current time) and it - // isn't crucial for correctness (since expired messages will just be - // resent). - cctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - bo := gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: time.Second, - Multiplier: 2, - } - for { - err := it.subc.ModifyAckDeadline(cctx, &pb.ModifyAckDeadlineRequest{ - Subscription: it.subName, - AckDeadlineSeconds: deadlineSec, - AckIds: ids, - }) - switch status.Code(err) { - case codes.Unavailable: - if err := gax.Sleep(cctx, bo.Pause()); err == nil { - continue - } - // Treat sleep timeout like RPC timeout. - fallthrough - case codes.DeadlineExceeded: - // Timeout. Not a fatal error, but note that it happened. - recordStat(it.ctx, ModAckTimeoutCount, 1) - return nil - default: - // Any other error is fatal. - return err + addModAcks(toSend, deadlineSec) + // Use context.Background() as the call's context, not it.ctx. We don't + // want to cancel this RPC when the iterator is stopped. 
+ cctx, cancel2 := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel2() + err := it.subc.ModifyAckDeadline(cctx, &pb.ModifyAckDeadlineRequest{ + Subscription: it.subName, + AckDeadlineSeconds: deadlineSec, + AckIds: toSend, + }) + if exactlyOnceDelivery { + resultsByAckID := make(map[string]*AckResult) + for _, ackID := range toSend { + resultsByAckID[ackID] = m[ackID] + } + + st, md := extractMetadata(err) + _, toRetry := processResults(st, resultsByAckID, md) + if len(toRetry) > 0 { + // Retry modacks/nacks in a separate goroutine. + go func() { + it.retryModAcks(toRetry, deadlineSec) + }() } } - }) + } } -func (it *messageIterator) sendAckIDRPC(ackIDSet map[string]bool, maxSize int, call func([]string) error) bool { - ackIDs := make([]string, 0, len(ackIDSet)) - for k := range ackIDSet { - ackIDs = append(ackIDs, k) +// retryAcks retries the ack RPC with backoff. This must be called in a goroutine +// in it.sendAck(), with a max of 2500 ackIDs. +func (it *messageIterator) retryAcks(m map[string]*AckResult) { + ctx, cancel := context.WithTimeout(context.Background(), exactlyOnceDeliveryRetryDeadline) + defer cancel() + bo := newExactlyOnceBackoff() + for { + if ctx.Err() != nil { + for _, r := range m { + ipubsub.SetAckResult(r, AcknowledgeStatusOther, ctx.Err()) + } + return + } + // Don't need to split map since this is the retry function and + // there is already a max of 2500 ackIDs here. + ackIDs := make([]string, 0, len(m)) + for k := range m { + ackIDs = append(ackIDs, k) + } + cctx2, cancel2 := context.WithTimeout(ctx, 60*time.Second) + defer cancel2() + err := it.subc.Acknowledge(cctx2, &pb.AcknowledgeRequest{ + Subscription: it.subName, + AckIds: ackIDs, + }) + st, md := extractMetadata(err) + _, toRetry := processResults(st, m, md) + if len(toRetry) == 0 { + return + } + time.Sleep(bo.Pause()) + m = toRetry } - var toSend []string - for len(ackIDs) > 0 { - toSend, ackIDs = splitRequestIDs(ackIDs, maxSize) - if err := call(toSend); err != nil { - // The underlying client handles retries, so any error is fatal to the - // iterator. - it.fail(err) - return false +} + +// retryModAcks retries the modack RPC with backoff. This must be called in a goroutine +// in it.sendModAck(), with a max of 2500 ackIDs. Modacks are retried up to 3 times +// since after that, the message will have expired. Nacks are retried up until the default +// deadline of 10 minutes. +func (it *messageIterator) retryModAcks(m map[string]*AckResult, deadlineSec int32) { + bo := newExactlyOnceBackoff() + retryCount := 0 + ctx, cancel := context.WithTimeout(context.Background(), exactlyOnceDeliveryRetryDeadline) + defer cancel() + for { + // If context is done, complete all remaining Nacks with DeadlineExceeded + // ModAcks are not exposed to the user so these don't need to be modified. + if ctx.Err() != nil { + if deadlineSec == 0 { + for _, r := range m { + ipubsub.SetAckResult(r, AcknowledgeStatusOther, ctx.Err()) + } + } + return + } + // Only retry modack requests up to 3 times. + if deadlineSec != 0 && retryCount > 3 { + ackIDs := make([]string, 0, len(m)) + for k := range m { + ackIDs = append(ackIDs, k) + } + log.Printf("automatic lease modack retry failed for following IDs: %v", ackIDs) + return + } + // Don't need to split map since this is the retry function and + // there is already a max of 2500 ackIDs here. 
+ ackIDs := make([]string, 0, len(m)) + for k := range m { + ackIDs = append(ackIDs, k) } + cctx2, cancel2 := context.WithTimeout(ctx, 60*time.Second) + defer cancel2() + err := it.subc.ModifyAckDeadline(cctx2, &pb.ModifyAckDeadlineRequest{ + Subscription: it.subName, + AckIds: ackIDs, + AckDeadlineSeconds: deadlineSec, + }) + st, md := extractMetadata(err) + _, toRetry := processResults(st, m, md) + if len(toRetry) == 0 { + return + } + time.Sleep(bo.Pause()) + m = toRetry + retryCount++ } - return true } // Send a message to the stream to keep it open. The stream will close if there's no @@ -470,8 +619,14 @@ func (it *messageIterator) sendAckIDRPC(ackIDSet map[string]bool, maxSize int, c // default ack deadline, and if the messages are small enough so that many can fit // into the buffer. func (it *messageIterator) pingStream() { - // Ignore error; if the stream is broken, this doesn't matter anyway. - _ = it.ps.Send(&pb.StreamingPullRequest{}) + spr := &pb.StreamingPullRequest{} + it.eoMu.RLock() + if it.sendNewAckDeadline { + spr.StreamAckDeadlineSeconds = int32(it.ackDeadline()) + it.sendNewAckDeadline = false + } + it.eoMu.RUnlock() + it.ps.Send(spr) } // calcFieldSizeString returns the number of bytes string fields @@ -479,7 +634,7 @@ func (it *messageIterator) pingStream() { func calcFieldSizeString(fields ...string) int { overhead := 0 for _, field := range fields { - overhead += 1 + len(field) + proto.SizeVarint(uint64(len(field))) + overhead += 1 + len(field) + protowire.SizeVarint(uint64(len(field))) } return overhead } @@ -489,26 +644,18 @@ func calcFieldSizeString(fields ...string) int { func calcFieldSizeInt(fields ...int) int { overhead := 0 for _, field := range fields { - overhead += 1 + proto.SizeVarint(uint64(field)) + overhead += 1 + protowire.SizeVarint(uint64(field)) } return overhead } // splitRequestIDs takes a slice of ackIDs and returns two slices such that the first -// ackID slice can be used in a request where the payload does not exceed maxSize. -func splitRequestIDs(ids []string, maxSize int) (prefix, remainder []string) { - size := 0 - i := 0 - // TODO(hongalex): Use binary search to find split index, since ackIDs are - // fairly constant. - for size < maxSize && i < len(ids) { - size += calcFieldSizeString(ids[i]) - i++ - } - if size > maxSize { - i-- - } - return ids[:i], ids[i:] +// ackID slice can be used in a request where the payload does not exceed ackIDBatchSize. +func splitRequestIDs(ids []string, maxBatchSize int) (prefix, remainder []string) { + if len(ids) < maxBatchSize { + return ids, []string{} + } + return ids[:maxBatchSize], ids[maxBatchSize:] } // The deadline to ack is derived from a percentile distribution based @@ -519,15 +666,107 @@ func splitRequestIDs(ids []string, maxSize int) (prefix, remainder []string) { // expiration. func (it *messageIterator) ackDeadline() time.Duration { pt := time.Duration(it.ackTimeDist.Percentile(.99)) * time.Second + it.eoMu.RLock() + enableExactlyOnce := it.enableExactlyOnceDelivery + it.eoMu.RUnlock() + return boundedDuration(pt, it.po.minExtensionPeriod, it.po.maxExtensionPeriod, enableExactlyOnce) +} - if *it.maxExtensionPeriod > 0 && pt > *it.maxExtensionPeriod { - return *it.maxExtensionPeriod +func boundedDuration(ackDeadline, minExtension, maxExtension time.Duration, exactlyOnce bool) time.Duration { + // If the user explicitly sets a maxExtensionPeriod, respect it. 
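The new splitRequestIDs above drops the old payload-size accounting and simply caps each batch at ackIDBatchSize (2500) IDs, which sendAck and sendModAck then drain in a loop. A self-contained sketch of the same chunking, using a hypothetical splitBatch helper that mirrors the unexported function, is:

package main

import "fmt"

// splitBatch mirrors the unexported splitRequestIDs helper: it returns a prefix
// of at most maxBatchSize IDs plus the remainder. Name and demo values are
// illustrative only.
func splitBatch(ids []string, maxBatchSize int) (prefix, remainder []string) {
    if len(ids) < maxBatchSize {
        return ids, []string{}
    }
    return ids[:maxBatchSize], ids[maxBatchSize:]
}

func main() {
    ids := make([]string, 0, 6000)
    for i := 0; i < 6000; i++ {
        ids = append(ids, fmt.Sprintf("ack-%d", i))
    }
    const batchSize = 2500 // same cap as ackIDBatchSize
    var batch []string
    for len(ids) > 0 {
        batch, ids = splitBatch(ids, batchSize)
        fmt.Println("sending", len(batch), "ack IDs") // 2500, 2500, 1000
    }
}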
+ if maxExtension > 0 { + ackDeadline = minDuration(ackDeadline, maxExtension) } - if pt > maxAckDeadline { - return maxAckDeadline + + // If the user explicitly sets a minExtensionPeriod, respect it. + if minExtension > 0 { + ackDeadline = maxDuration(ackDeadline, minExtension) + } else if exactlyOnce { + // Higher minimum ack_deadline for subscriptions with + // exactly-once delivery enabled. + ackDeadline = maxDuration(ackDeadline, minDurationPerLeaseExtensionExactlyOnce) + } else if ackDeadline < minDurationPerLeaseExtension { + // Otherwise, lower bound is min ack extension. This is normally bounded + // when adding datapoints to the distribution, but this is needed for + // the initial few calls to ackDeadline. + ackDeadline = minDurationPerLeaseExtension + } + + return ackDeadline +} + +func minDuration(x, y time.Duration) time.Duration { + if x < y { + return x } - if pt < minAckDeadline { - return minAckDeadline + return y +} + +func maxDuration(x, y time.Duration) time.Duration { + if x > y { + return x + } + return y +} + +const ( + transientErrStringPrefix = "TRANSIENT_" + permanentInvalidAckErrString = "PERMANENT_FAILURE_INVALID_ACK_ID" +) + +// extracts information from an API error for exactly once delivery's ack/modack err responses. +func extractMetadata(err error) (*status.Status, map[string]string) { + apiErr, ok := apierror.FromError(err) + if ok { + return apiErr.GRPCStatus(), apiErr.Metadata() + } + return nil, nil +} + +// processResults processes AckResults by referring to errorStatus and errorsMap. +// The errors returned by the server in `errorStatus` or in `errorsByAckID` +// are used to complete the AckResults in `ackResMap` (with a success +// or error) or to return requests for further retries. +// This function returns two maps of ackID to ack results, one for completed results and the other for ones to retry. +// Logic is derived from python-pubsub: https://github.com/googleapis/python-pubsub/blob/main/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py#L161-L220 +func processResults(errorStatus *status.Status, ackResMap map[string]*AckResult, errorsByAckID map[string]string) (map[string]*AckResult, map[string]*AckResult) { + completedResults := make(map[string]*AckResult) + retryResults := make(map[string]*AckResult) + for ackID, ar := range ackResMap { + // Handle special errors returned for ack/modack RPCs via the ErrorInfo + // sidecar metadata when exactly-once delivery is enabled. + if errAckID, ok := errorsByAckID[ackID]; ok { + if strings.HasPrefix(errAckID, transientErrStringPrefix) { + retryResults[ackID] = ar + } else { + if errAckID == permanentInvalidAckErrString { + ipubsub.SetAckResult(ar, AcknowledgeStatusInvalidAckID, errors.New(errAckID)) + } else { + ipubsub.SetAckResult(ar, AcknowledgeStatusOther, errors.New(errAckID)) + } + completedResults[ackID] = ar + } + } else if errorStatus != nil && contains(errorStatus.Code(), exactlyOnceDeliveryTemporaryRetryErrors) { + retryResults[ackID] = ar + } else if errorStatus != nil { + // Other gRPC errors are not retried. 
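boundedDuration above clamps the 99th-percentile ack estimate between the user-configured extension periods, with a one-minute floor for exactly-once subscriptions and a ten-second floor otherwise. The following sketch restates that clamping with illustrative constant names copied from this file and prints a few worked values:

package main

import (
    "fmt"
    "time"
)

// Illustrative copies of the lease-extension bounds defined in this file.
const (
    minLeaseExtension            = 10 * time.Second
    minLeaseExtensionExactlyOnce = 1 * time.Minute
)

// bounded mirrors the unexported boundedDuration helper.
func bounded(ackDeadline, minExt, maxExt time.Duration, exactlyOnce bool) time.Duration {
    if maxExt > 0 && ackDeadline > maxExt {
        ackDeadline = maxExt
    }
    switch {
    case minExt > 0:
        if ackDeadline < minExt {
            ackDeadline = minExt
        }
    case exactlyOnce:
        if ackDeadline < minLeaseExtensionExactlyOnce {
            ackDeadline = minLeaseExtensionExactlyOnce
        }
    case ackDeadline < minLeaseExtension:
        ackDeadline = minLeaseExtension
    }
    return ackDeadline
}

func main() {
    fmt.Println(bounded(3*time.Second, 0, 0, false))              // 10s: raised to the default floor
    fmt.Println(bounded(3*time.Second, 0, 0, true))               // 1m0s: exactly-once floor
    fmt.Println(bounded(20*time.Minute, 0, 5*time.Minute, false)) // 5m0s: capped by MaxExtensionPeriod
}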
+ switch errorStatus.Code() { + case codes.PermissionDenied: + ipubsub.SetAckResult(ar, AcknowledgeStatusPermissionDenied, errorStatus.Err()) + case codes.FailedPrecondition: + ipubsub.SetAckResult(ar, AcknowledgeStatusFailedPrecondition, errorStatus.Err()) + default: + ipubsub.SetAckResult(ar, AcknowledgeStatusOther, errorStatus.Err()) + } + completedResults[ackID] = ar + } else if ar != nil { + // Since no error occurred, requests with AckResults are completed successfully. + ipubsub.SetAckResult(ar, AcknowledgeStatusSuccess, nil) + completedResults[ackID] = ar + } else { + // All other requests are considered completed. + completedResults[ackID] = ar + } } - return pt + return completedResults, retryResults } diff --git a/vendor/cloud.google.com/go/pubsub/message.go b/vendor/cloud.google.com/go/pubsub/message.go index 051c1df18883..1bebb9fc526b 100644 --- a/vendor/cloud.google.com/go/pubsub/message.go +++ b/vendor/cloud.google.com/go/pubsub/message.go @@ -15,106 +15,178 @@ package pubsub import ( + "fmt" "time" - "github.com/golang/protobuf/ptypes" + ipubsub "cloud.google.com/go/internal/pubsub" pb "google.golang.org/genproto/googleapis/pubsub/v1" ) // Message represents a Pub/Sub message. -type Message struct { - // ID identifies this message. - // This ID is assigned by the server and is populated for Messages obtained from a subscription. - // This field is read-only. - ID string +// +// Message can be passed to Topic.Publish for publishing. +// +// If received in the callback passed to Subscription.Receive, client code must +// call Message.Ack or Message.Nack when finished processing the Message. Calls +// to Ack or Nack have no effect after the first call. +// +// Ack indicates successful processing of a Message. If message acknowledgement +// fails, the Message will be redelivered. Nack indicates that the client will +// not or cannot process a Message. Nack will result in the Message being +// redelivered more quickly than if it were allowed to expire. +// +// If using exactly once delivery, you should call Message.AckWithResult and +// Message.NackWithResult instead. These methods will return an AckResult, +// which tracks the state of acknowledgement operation. If the AckResult returns +// successful, the message is guaranteed NOT to be re-delivered. Otherwise, +// the AckResult will return an error with more details about the failure +// and the message may be re-delivered. +type Message = ipubsub.Message + +// msgAckHandler performs a safe cast of the message's ack handler to psAckHandler. +func msgAckHandler(m *Message, eod bool) (*psAckHandler, bool) { + ackh, ok := ipubsub.MessageAckHandler(m).(*psAckHandler) + ackh.exactlyOnceDelivery = eod + return ackh, ok +} + +func msgAckID(m *Message) string { + if ackh, ok := msgAckHandler(m, false); ok { + return ackh.ackID + } + return "" +} + +// The done method of the iterator that created a Message. +type iterDoneFunc func(string, bool, *AckResult, time.Time) + +func convertMessages(rms []*pb.ReceivedMessage, receiveTime time.Time, doneFunc iterDoneFunc) ([]*Message, error) { + msgs := make([]*Message, 0, len(rms)) + for i, m := range rms { + msg, err := toMessage(m, receiveTime, doneFunc) + if err != nil { + return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m) + } + msgs = append(msgs, msg) + } + return msgs, nil +} - // Data is the actual data in the message. 
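processResults above turns per-ack-ID error strings from the ErrorInfo metadata into either retries (TRANSIENT_ prefix) or completed results (invalid ack ID versus any other permanent error). A stripped-down sketch of just that classification, without the AckResult plumbing and with made-up error values, could be:

package main

import (
    "fmt"
    "strings"
)

const (
    transientPrefix    = "TRANSIENT_"
    permanentInvalidID = "PERMANENT_FAILURE_INVALID_ACK_ID"
)

// classify splits per-ack-ID error strings (as delivered in ErrorInfo metadata)
// into IDs to retry and IDs whose AckResults would be completed. Illustrative only.
func classify(errorsByAckID map[string]string) (retry, invalid, other []string) {
    for ackID, errStr := range errorsByAckID {
        switch {
        case strings.HasPrefix(errStr, transientPrefix):
            retry = append(retry, ackID)
        case errStr == permanentInvalidID:
            invalid = append(invalid, ackID) // AcknowledgeStatusInvalidAckID
        default:
            other = append(other, ackID) // AcknowledgeStatusOther
        }
    }
    return retry, invalid, other
}

func main() {
    retry, invalid, other := classify(map[string]string{
        "ack-1": "TRANSIENT_FAILURE_ACK_DEADLINE_EXCEEDED", // hypothetical transient value
        "ack-2": "PERMANENT_FAILURE_INVALID_ACK_ID",
        "ack-3": "PERMANENT_FAILURE_OTHER", // hypothetical permanent value
    })
    fmt.Println("retry:", retry, "invalid:", invalid, "other:", other)
}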
- Data []byte +func toMessage(resp *pb.ReceivedMessage, receiveTime time.Time, doneFunc iterDoneFunc) (*Message, error) { + ackh := &psAckHandler{ackID: resp.AckId} + msg := ipubsub.NewMessage(ackh) + if resp.Message == nil { + return msg, nil + } - // Attributes represents the key-value pairs the current message - // is labelled with. - Attributes map[string]string + pubTime := resp.Message.PublishTime.AsTime() + var deliveryAttempt *int + if resp.DeliveryAttempt > 0 { + da := int(resp.DeliveryAttempt) + deliveryAttempt = &da + } + + msg.Data = resp.Message.Data + msg.Attributes = resp.Message.Attributes + msg.ID = resp.Message.MessageId + msg.PublishTime = pubTime + msg.DeliveryAttempt = deliveryAttempt + msg.OrderingKey = resp.Message.OrderingKey + ackh.receiveTime = receiveTime + ackh.doneFunc = doneFunc + ackh.ackResult = ipubsub.NewAckResult() + return msg, nil +} + +// AckResult holds the result from a call to Ack or Nack. +// +// Call Get to obtain the result of the Ack/NackWithResult call. Example: +// +// // Get blocks until Ack/NackWithResult completes or ctx is done. +// ackStatus, err := r.Get(ctx) +// if err != nil { +// // TODO: Handle error. +// } +type AckResult = ipubsub.AckResult + +// AcknowledgeStatus represents the status of an Ack or Nack request. +type AcknowledgeStatus = ipubsub.AcknowledgeStatus + +const ( + // AcknowledgeStatusSuccess indicates the request was a success. + AcknowledgeStatusSuccess AcknowledgeStatus = iota + // AcknowledgeStatusPermissionDenied indicates the caller does not have sufficient permissions. + AcknowledgeStatusPermissionDenied + // AcknowledgeStatusFailedPrecondition indicates the request encountered a FailedPrecondition error. + AcknowledgeStatusFailedPrecondition + // AcknowledgeStatusInvalidAckID indicates one or more of the ack IDs sent were invalid. + AcknowledgeStatusInvalidAckID + // AcknowledgeStatusOther indicates another unknown error was returned. + AcknowledgeStatusOther +) + +// psAckHandler handles ack/nack for the pubsub package. +type psAckHandler struct { // ackID is the identifier to acknowledge this message. ackID string - // The time at which the message was published. - // This is populated by the server for Messages obtained from a subscription. - // This field is read-only. - PublishTime time.Time - // receiveTime is the time the message was received by the client. receiveTime time.Time - // DeliveryAttempt is the number of times a message has been delivered. - // This is part of the dead lettering feature that forwards messages that - // fail to be processed (from nack/ack deadline timeout) to a dead letter topic. - // If dead lettering is enabled, this will be set on all attempts, starting - // with value 1. Otherwise, the value will be nil. - // This field is read-only. - // - // It is EXPERIMENTAL and a part of a closed alpha that may not be - // accessible to all users. This field is subject to change or removal - // without notice. - DeliveryAttempt *int - - // size is the approximate size of the message's data and attributes. - size int - calledDone bool // The done method of the iterator that created this Message. - doneFunc func(string, bool, time.Time) -} + doneFunc iterDoneFunc -func toMessage(resp *pb.ReceivedMessage) (*Message, error) { - if resp.Message == nil { - return &Message{ackID: resp.AckId}, nil - } + // the ack result that will be returned for this ack handler + // if AckWithResult or NackWithResult is called. 
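From the caller's side, the AckResult documented above is what the public Message.AckWithResult returns; for subscriptions without exactly-once delivery it resolves to success immediately, otherwise Get reports whether the ack was durably recorded. A minimal receive loop using placeholder project and subscription names might look like:

package main

import (
    "context"
    "log"

    "cloud.google.com/go/pubsub"
)

func main() {
    ctx := context.Background()
    client, err := pubsub.NewClient(ctx, "my-project") // placeholder project ID
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    sub := client.Subscription("my-exactly-once-sub") // placeholder subscription ID
    err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
        r := m.AckWithResult()
        // Get blocks until the service has recorded the ack (or the ack fails).
        ackStatus, err := r.Get(ctx)
        if err != nil {
            log.Printf("ack of %s failed with status %v: %v", m.ID, ackStatus, err)
            return
        }
        if ackStatus == pubsub.AcknowledgeStatusSuccess {
            log.Printf("message %s will not be redelivered", m.ID)
        }
    })
    if err != nil {
        log.Fatal(err)
    }
}

Per OnAckWithResult above, subscriptions without exactly-once delivery get a pre-completed success result, so the same loop works on ordinary subscriptions at the cost of a no-op Get.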
+ ackResult *AckResult - pubTime, err := ptypes.Timestamp(resp.Message.PublishTime) - if err != nil { - return nil, err - } + // exactlyOnceDelivery determines if the message needs to be delivered + // exactly once. + exactlyOnceDelivery bool +} - var deliveryAttempt *int - if resp.DeliveryAttempt > 0 { - da := int(resp.DeliveryAttempt) - deliveryAttempt = &da - } +func (ah *psAckHandler) OnAck() { + ah.done(true) +} - return &Message{ - ackID: resp.AckId, - Data: resp.Message.Data, - Attributes: resp.Message.Attributes, - ID: resp.Message.MessageId, - PublishTime: pubTime, - DeliveryAttempt: deliveryAttempt, - }, nil +func (ah *psAckHandler) OnNack() { + ah.done(false) } -// Ack indicates successful processing of a Message passed to the Subscriber.Receive callback. -// It should not be called on any other Message value. -// If message acknowledgement fails, the Message will be redelivered. -// Client code must call Ack or Nack when finished for each received Message. -// Calls to Ack or Nack have no effect after the first call. -func (m *Message) Ack() { - m.done(true) +func (ah *psAckHandler) OnAckWithResult() *AckResult { + if !ah.exactlyOnceDelivery { + return newSuccessAckResult() + } + // call done with true to indicate ack. + ah.done(true) + return ah.ackResult } -// Nack indicates that the client will not or cannot process a Message passed to the Subscriber.Receive callback. -// It should not be called on any other Message value. -// Nack will result in the Message being redelivered more quickly than if it were allowed to expire. -// Client code must call Ack or Nack when finished for each received Message. -// Calls to Ack or Nack have no effect after the first call. -func (m *Message) Nack() { - m.done(false) +func (ah *psAckHandler) OnNackWithResult() *AckResult { + if !ah.exactlyOnceDelivery { + return newSuccessAckResult() + } + // call done with false to indicate nack. + ah.done(false) + return ah.ackResult } -func (m *Message) done(ack bool) { - if m.calledDone { +func (ah *psAckHandler) done(ack bool) { + if ah.calledDone { return } - m.calledDone = true - m.doneFunc(m.ackID, ack, m.receiveTime) + ah.calledDone = true + if ah.doneFunc != nil { + ah.doneFunc(ah.ackID, ack, ah.ackResult, ah.receiveTime) + } +} + +// newSuccessAckResult returns an AckResult that resolves to success immediately. +func newSuccessAckResult() *AckResult { + ar := ipubsub.NewAckResult() + ipubsub.SetAckResult(ar, AcknowledgeStatusSuccess, nil) + return ar } diff --git a/vendor/cloud.google.com/go/pubsub/nodebug.go b/vendor/cloud.google.com/go/pubsub/nodebug.go index 774a74a58dfd..92760220afc1 100644 --- a/vendor/cloud.google.com/go/pubsub/nodebug.go +++ b/vendor/cloud.google.com/go/pubsub/nodebug.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !psdebug // +build !psdebug package pubsub diff --git a/vendor/cloud.google.com/go/pubsub/pstest/fake.go b/vendor/cloud.google.com/go/pubsub/pstest/fake.go index be53bcf67eea..cae0f2db1b91 100644 --- a/vendor/cloud.google.com/go/pubsub/pstest/fake.go +++ b/vendor/cloud.google.com/go/pubsub/pstest/fake.go @@ -34,14 +34,37 @@ import ( "time" "cloud.google.com/go/internal/testutil" - "github.com/golang/protobuf/ptypes" - durpb "github.com/golang/protobuf/ptypes/duration" - emptypb "github.com/golang/protobuf/ptypes/empty" pb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + durpb "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" ) +// ReactorOptions is a map that Server uses to look up reactors. +// Key is the function name, value is array of reactor for the function. +type ReactorOptions map[string][]Reactor + +// Reactor is an interface to allow reaction function to a certain call. +type Reactor interface { + // React handles the message types and returns results. If "handled" is false, + // then the test server will ignore the results and continue to the next reactor + // or the original handler. + React(_ interface{}) (handled bool, ret interface{}, err error) +} + +// ServerReactorOption is options passed to the server for reactor creation. +type ServerReactorOption struct { + FuncName string + Reactor Reactor +} + +type publishResponse struct { + resp *pb.PublishResponse + err error +} + // For testing. Note that even though changes to the now variable are atomic, a call // to the stored function can race with a change to that function. This could be a // problem if tests are run in parallel, or even if concurrent parts of the same test @@ -70,43 +93,83 @@ type GServer struct { pb.PublisherServer pb.SubscriberServer - mu sync.Mutex - topics map[string]*topic - subs map[string]*subscription - msgs []*Message // all messages ever published - msgsByID map[string]*Message - wg sync.WaitGroup - nextID int - streamTimeout time.Duration + mu sync.Mutex + topics map[string]*topic + subs map[string]*subscription + msgs []*Message // all messages ever published + msgsByID map[string]*Message + wg sync.WaitGroup + nextID int + streamTimeout time.Duration + timeNowFunc func() time.Time + reactorOptions ReactorOptions + schemas map[string]*pb.Schema + + // PublishResponses is a channel of responses to use for Publish. + publishResponses chan *publishResponse + // autoPublishResponse enables the server to automatically generate + // PublishResponse when publish is called. Otherwise, responses + // are generated from the publishResponses channel. + autoPublishResponse bool } // NewServer creates a new fake server running in the current process. -func NewServer() *Server { - srv, err := testutil.NewServer() +func NewServer(opts ...ServerReactorOption) *Server { + return NewServerWithPort(0, opts...) +} + +// NewServerWithPort creates a new fake server running in the current process at the specified port. 
+func NewServerWithPort(port int, opts ...ServerReactorOption) *Server { + srv, err := testutil.NewServerWithPort(port) if err != nil { - panic(fmt.Sprintf("pstest.NewServer: %v", err)) + panic(fmt.Sprintf("pstest.NewServerWithPort: %v", err)) + } + reactorOptions := ReactorOptions{} + for _, opt := range opts { + reactorOptions[opt.FuncName] = append(reactorOptions[opt.FuncName], opt.Reactor) } s := &Server{ srv: srv, Addr: srv.Addr, GServer: GServer{ - topics: map[string]*topic{}, - subs: map[string]*subscription{}, - msgsByID: map[string]*Message{}, + topics: map[string]*topic{}, + subs: map[string]*subscription{}, + msgsByID: map[string]*Message{}, + timeNowFunc: timeNow, + reactorOptions: reactorOptions, + publishResponses: make(chan *publishResponse, 100), + autoPublishResponse: true, + schemas: map[string]*pb.Schema{}, }, } pb.RegisterPublisherServer(srv.Gsrv, &s.GServer) pb.RegisterSubscriberServer(srv.Gsrv, &s.GServer) + pb.RegisterSchemaServiceServer(srv.Gsrv, &s.GServer) srv.Start() return s } +// SetTimeNowFunc registers f as a function to +// be used instead of time.Now for this server. +func (s *Server) SetTimeNowFunc(f func() time.Time) { + s.GServer.timeNowFunc = f +} + // Publish behaves as if the Publish RPC was called with a message with the given // data and attrs. It returns the ID of the message. // The topic will be created if it doesn't exist. // // Publish panics if there is an error, which is appropriate for testing. func (s *Server) Publish(topic string, data []byte, attrs map[string]string) string { + return s.PublishOrdered(topic, data, attrs, "") +} + +// PublishOrdered behaves as if the Publish RPC was called with a message with the given +// data, attrs and ordering key. It returns the ID of the message. +// The topic will be created if it doesn't exist. +// +// PublishOrdered panics if there is an error, which is appropriate for testing. +func (s *Server) PublishOrdered(topic string, data []byte, attrs map[string]string, orderingKey string) string { const topicPattern = "projects/*/topics/*" ok, err := path.Match(topicPattern, topic) if err != nil { @@ -115,10 +178,10 @@ func (s *Server) Publish(topic string, data []byte, attrs map[string]string) str if !ok { panic(fmt.Sprintf("topic name must be of the form %q", topicPattern)) } - _, _ = s.GServer.CreateTopic(context.TODO(), &pb.Topic{Name: topic}) + s.GServer.CreateTopic(context.TODO(), &pb.Topic{Name: topic}) req := &pb.PublishRequest{ Topic: topic, - Messages: []*pb.PubsubMessage{{Data: data, Attributes: attrs}}, + Messages: []*pb.PubsubMessage{{Data: data, Attributes: attrs, OrderingKey: orderingKey}}, } res, err := s.GServer.Publish(context.TODO(), req) if err != nil { @@ -127,6 +190,35 @@ func (s *Server) Publish(topic string, data []byte, attrs map[string]string) str return res.MessageIds[0] } +// AddPublishResponse adds a new publish response to the channel used for +// responding to publish requests. +func (s *Server) AddPublishResponse(pbr *pb.PublishResponse, err error) { + pr := &publishResponse{} + if err != nil { + pr.err = err + } else { + pr.resp = pbr + } + s.GServer.publishResponses <- pr +} + +// SetAutoPublishResponse controls whether to automatically respond +// to messages published or to use user-added responses from the +// publishResponses channel. 
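The fake-server additions above (ordered publish, canned publish responses, reactors) are all exercised through pstest.NewServer; a typical test wires a real pubsub.Client to the in-process fake over a plaintext gRPC connection. Project and topic names below are placeholders:

package main

import (
    "context"
    "fmt"
    "log"

    "cloud.google.com/go/pubsub"
    "cloud.google.com/go/pubsub/pstest"
    "google.golang.org/api/option"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    ctx := context.Background()

    // In-process fake Pub/Sub server.
    srv := pstest.NewServer()
    defer srv.Close()

    // Point a real client at the fake over a plaintext gRPC connection.
    conn, err := grpc.Dial(srv.Addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client, err := pubsub.NewClient(ctx, "test-project", option.WithGRPCConn(conn))
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    topic, err := client.CreateTopic(ctx, "test-topic")
    if err != nil {
        log.Fatal(err)
    }
    id, err := topic.Publish(ctx, &pubsub.Message{Data: []byte("hello")}).Get(ctx)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("published", id, "and the fake recorded", len(srv.Messages()), "message(s)")
}

Error injection for a specific RPC is then a matter of passing pstest.ServerReactorOption{FuncName: "Publish", Reactor: r} to NewServer for some Reactor implementation r.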
+func (s *Server) SetAutoPublishResponse(autoPublishResponse bool) { + s.GServer.mu.Lock() + defer s.GServer.mu.Unlock() + s.GServer.autoPublishResponse = autoPublishResponse +} + +// ResetPublishResponses resets the buffered publishResponses channel +// with a new buffered channel with the given size. +func (s *Server) ResetPublishResponses(size int) { + s.GServer.mu.Lock() + defer s.GServer.mu.Unlock() + s.GServer.publishResponses = make(chan *publishResponse, size) +} + // SetStreamTimeout sets the amount of time a stream will be active before it shuts // itself down. This mimics the real service's behavior of closing streams after 30 // minutes. If SetStreamTimeout is never called or is passed zero, streams never shut @@ -143,14 +235,15 @@ type Message struct { Data []byte Attributes map[string]string PublishTime time.Time - Deliveries int // number of times delivery of the message was attempted - Acks int // number of acks received from clients + Deliveries int // number of times delivery of the message was attempted + Acks int // number of acks received from clients + Modacks []Modack // modacks received by server for this message + OrderingKey string // protected by server mutex deliveries int acks int - Modacks []Modack // modacks received by server for this message - + modacks []Modack } // Modack represents a modack sent to the server. @@ -169,6 +262,7 @@ func (s *Server) Messages() []*Message { for _, m := range s.GServer.msgs { m.Deliveries = m.deliveries m.Acks = m.acks + m.Modacks = append([]Modack(nil), m.modacks...) msgs = append(msgs, m) } return msgs @@ -184,6 +278,7 @@ func (s *Server) Message(id string) *Message { if m != nil { m.Deliveries = m.deliveries m.Acks = m.acks + m.Modacks = append([]Modack(nil), m.modacks...) } return m } @@ -217,9 +312,16 @@ func (s *GServer) CreateTopic(_ context.Context, t *pb.Topic) (*pb.Topic, error) s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(t, "CreateTopic", &pb.Topic{}); handled || err != nil { + return ret.(*pb.Topic), err + } + if s.topics[t.Name] != nil { return nil, status.Errorf(codes.AlreadyExists, "topic %q", t.Name) } + if err := checkMRD(t.MessageRetentionDuration); err != nil { + return nil, err + } top := newTopic(t) s.topics[t.Name] = top return top.proto, nil @@ -229,6 +331,10 @@ func (s *GServer) GetTopic(_ context.Context, req *pb.GetTopicRequest) (*pb.Topi s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "GetTopic", &pb.Topic{}); handled || err != nil { + return ret.(*pb.Topic), err + } + if t := s.topics[req.Topic]; t != nil { return t.proto, nil } @@ -239,6 +345,10 @@ func (s *GServer) UpdateTopic(_ context.Context, req *pb.UpdateTopicRequest) (*p s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "UpdateTopic", &pb.Topic{}); handled || err != nil { + return ret.(*pb.Topic), err + } + t := s.topics[req.Topic.Name] if t == nil { return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic.Name) @@ -249,6 +359,11 @@ func (s *GServer) UpdateTopic(_ context.Context, req *pb.UpdateTopicRequest) (*p t.proto.Labels = req.Topic.Labels case "message_storage_policy": t.proto.MessageStoragePolicy = req.Topic.MessageStoragePolicy + case "message_retention_duration": + if err := checkMRD(req.Topic.MessageRetentionDuration); err != nil { + return nil, err + } + t.proto.MessageRetentionDuration = req.Topic.MessageRetentionDuration default: return nil, status.Errorf(codes.InvalidArgument, "unknown field name %q", path) } @@ -260,6 +375,10 @@ func (s 
*GServer) ListTopics(_ context.Context, req *pb.ListTopicsRequest) (*pb. s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "ListTopics", &pb.ListTopicsResponse{}); handled || err != nil { + return ret.(*pb.ListTopicsResponse), err + } + var names []string for n := range s.topics { if strings.HasPrefix(n, req.Project) { @@ -282,6 +401,10 @@ func (s *GServer) ListTopicSubscriptions(_ context.Context, req *pb.ListTopicSub s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "ListTopicSubscriptions", &pb.ListTopicSubscriptionsResponse{}); handled || err != nil { + return ret.(*pb.ListTopicSubscriptionsResponse), err + } + var names []string for name, sub := range s.subs { if sub.topic.proto.Name == req.Topic { @@ -303,10 +426,22 @@ func (s *GServer) DeleteTopic(_ context.Context, req *pb.DeleteTopicRequest) (*e s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "DeleteTopic", &emptypb.Empty{}); handled || err != nil { + return ret.(*emptypb.Empty), err + } + t := s.topics[req.Topic] if t == nil { return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) } + for _, sub := range s.subs { + if sub.deadLetterTopic == nil { + continue + } + if req.Topic == sub.deadLetterTopic.proto.Name { + return nil, status.Errorf(codes.FailedPrecondition, "topic %q used as deadLetter for %s", req.Topic, sub.proto.Name) + } + } t.stop() delete(s.topics, req.Topic) return &emptypb.Empty{}, nil @@ -316,6 +451,10 @@ func (s *GServer) CreateSubscription(_ context.Context, ps *pb.Subscription) (*p s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(ps, "CreateSubscription", &pb.Subscription{}); handled || err != nil { + return ret.(*pb.Subscription), err + } + if ps.Name == "" { return nil, status.Errorf(codes.InvalidArgument, "missing name") } @@ -341,8 +480,22 @@ func (s *GServer) CreateSubscription(_ context.Context, ps *pb.Subscription) (*p if ps.PushConfig == nil { ps.PushConfig = &pb.PushConfig{} } + if ps.BigqueryConfig == nil { + ps.BigqueryConfig = &pb.BigQueryConfig{} + } else if ps.BigqueryConfig.Table != "" { + ps.BigqueryConfig.State = pb.BigQueryConfig_ACTIVE + } + ps.TopicMessageRetentionDuration = top.proto.MessageRetentionDuration + var deadLetterTopic *topic + if ps.DeadLetterPolicy != nil { + dlTopic, ok := s.topics[ps.DeadLetterPolicy.DeadLetterTopic] + if !ok { + return nil, status.Errorf(codes.NotFound, "deadLetter topic %q", ps.DeadLetterPolicy.DeadLetterTopic) + } + deadLetterTopic = dlTopic + } - sub := newSubscription(top, &s.mu, ps) + sub := newSubscription(top, &s.mu, s.timeNowFunc, deadLetterTopic, ps) top.subs[ps.Name] = sub s.subs[ps.Name] = sub sub.start(&s.wg) @@ -355,8 +508,9 @@ var minAckDeadlineSecs int32 // SetMinAckDeadline changes the minack deadline to n. Must be // greater than or equal to 1 second. Remember to reset this value // to the default after your test changes it. 
Example usage: -// pstest.SetMinAckDeadlineSecs(1) -// defer pstest.ResetMinAckDeadlineSecs() +// +// pstest.SetMinAckDeadlineSecs(1) +// defer pstest.ResetMinAckDeadlineSecs() func SetMinAckDeadline(n time.Duration) { if n < time.Second { panic("SetMinAckDeadline expects a value greater than 1 second") @@ -383,11 +537,14 @@ const ( maxMessageRetentionDuration = 168 * time.Hour ) -var defaultMessageRetentionDuration = ptypes.DurationProto(maxMessageRetentionDuration) +var defaultMessageRetentionDuration = durpb.New(maxMessageRetentionDuration) func checkMRD(pmrd *durpb.Duration) error { - mrd, err := ptypes.Duration(pmrd) - if err != nil || mrd < minMessageRetentionDuration || mrd > maxMessageRetentionDuration { + if pmrd == nil { + return nil + } + mrd := pmrd.AsDuration() + if mrd < minMessageRetentionDuration || mrd > maxMessageRetentionDuration { return status.Errorf(codes.InvalidArgument, "bad message_retention_duration %+v", pmrd) } return nil @@ -396,6 +553,11 @@ func checkMRD(pmrd *durpb.Duration) error { func (s *GServer) GetSubscription(_ context.Context, req *pb.GetSubscriptionRequest) (*pb.Subscription, error) { s.mu.Lock() defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "GetSubscription", &pb.Subscription{}); handled || err != nil { + return ret.(*pb.Subscription), err + } + sub, err := s.findSubscription(req.Subscription) if err != nil { return nil, err @@ -409,6 +571,11 @@ func (s *GServer) UpdateSubscription(_ context.Context, req *pb.UpdateSubscripti } s.mu.Lock() defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "UpdateSubscription", &pb.Subscription{}); handled || err != nil { + return ret.(*pb.Subscription), err + } + sub, err := s.findSubscription(req.Subscription.Name) if err != nil { return nil, err @@ -418,6 +585,12 @@ func (s *GServer) UpdateSubscription(_ context.Context, req *pb.UpdateSubscripti case "push_config": sub.proto.PushConfig = req.Subscription.PushConfig + case "bigquery_config": + sub.proto.BigqueryConfig = req.GetSubscription().GetBigqueryConfig() + if sub.proto.GetBigqueryConfig().GetTable() != "" { + sub.proto.GetBigqueryConfig().State = pb.BigQueryConfig_ACTIVE + } + case "ack_deadline_seconds": a := req.Subscription.AckDeadlineSeconds if err := checkAckDeadline(a); err != nil { @@ -440,6 +613,28 @@ func (s *GServer) UpdateSubscription(_ context.Context, req *pb.UpdateSubscripti case "expiration_policy": sub.proto.ExpirationPolicy = req.Subscription.ExpirationPolicy + case "dead_letter_policy": + sub.proto.DeadLetterPolicy = req.Subscription.DeadLetterPolicy + if sub.proto.DeadLetterPolicy != nil { + dlTopic, ok := s.topics[sub.proto.DeadLetterPolicy.DeadLetterTopic] + if !ok { + return nil, status.Errorf(codes.NotFound, "topic %q", sub.proto.DeadLetterPolicy.DeadLetterTopic) + } + sub.deadLetterTopic = dlTopic + } + + case "retry_policy": + sub.proto.RetryPolicy = req.Subscription.RetryPolicy + + case "filter": + sub.proto.Filter = req.Subscription.Filter + + case "enable_exactly_once_delivery": + sub.proto.EnableExactlyOnceDelivery = req.Subscription.EnableExactlyOnceDelivery + for _, st := range sub.streams { + st.enableExactlyOnceDelivery = req.Subscription.EnableExactlyOnceDelivery + } + default: return nil, status.Errorf(codes.InvalidArgument, "unknown field name %q", path) } @@ -451,6 +646,10 @@ func (s *GServer) ListSubscriptions(_ context.Context, req *pb.ListSubscriptions s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "ListSubscriptions", &pb.ListSubscriptionsResponse{}); 
handled || err != nil { + return ret.(*pb.ListSubscriptionsResponse), err + } + var names []string for name := range s.subs { if strings.HasPrefix(name, req.Project) { @@ -472,6 +671,11 @@ func (s *GServer) ListSubscriptions(_ context.Context, req *pb.ListSubscriptions func (s *GServer) DeleteSubscription(_ context.Context, req *pb.DeleteSubscriptionRequest) (*emptypb.Empty, error) { s.mu.Lock() defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "DeleteSubscription", &emptypb.Empty{}); handled || err != nil { + return ret.(*emptypb.Empty), err + } + sub, err := s.findSubscription(req.Subscription) if err != nil { return nil, err @@ -482,10 +686,30 @@ func (s *GServer) DeleteSubscription(_ context.Context, req *pb.DeleteSubscripti return &emptypb.Empty{}, nil } +func (s *GServer) DetachSubscription(_ context.Context, req *pb.DetachSubscriptionRequest) (*pb.DetachSubscriptionResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "DetachSubscription", &pb.DetachSubscriptionResponse{}); handled || err != nil { + return ret.(*pb.DetachSubscriptionResponse), err + } + + sub, err := s.findSubscription(req.Subscription) + if err != nil { + return nil, err + } + sub.topic.deleteSub(sub) + return &pb.DetachSubscriptionResponse{}, nil +} + func (s *GServer) Publish(_ context.Context, req *pb.PublishRequest) (*pb.PublishResponse, error) { s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "Publish", &pb.PublishResponse{}); handled || err != nil { + return ret.(*pb.PublishResponse), err + } + if req.Topic == "" { return nil, status.Errorf(codes.InvalidArgument, "missing topic") } @@ -493,22 +717,29 @@ func (s *GServer) Publish(_ context.Context, req *pb.PublishRequest) (*pb.Publis if top == nil { return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) } + + if !s.autoPublishResponse { + r := <-s.publishResponses + if r.err != nil { + return nil, r.err + } + return r.resp, nil + } + var ids []string for _, pm := range req.Messages { id := fmt.Sprintf("m%d", s.nextID) s.nextID++ pm.MessageId = id - pubTime := timeNow() - tsPubTime, err := ptypes.TimestampProto(pubTime) - if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) - } + pubTime := s.timeNowFunc() + tsPubTime := timestamppb.New(pubTime) pm.PublishTime = tsPubTime m := &Message{ ID: id, Data: pm.Data, Attributes: pm.Attributes, PublishTime: pubTime, + OrderingKey: pm.OrderingKey, } top.publish(pm, m) ids = append(ids, id) @@ -533,7 +764,6 @@ func newTopic(pt *pb.Topic) *topic { func (t *topic) stop() { for _, sub := range t.subs { sub.proto.Topic = "_deleted-topic_" - sub.stop() } } @@ -557,27 +787,32 @@ func (t *topic) publish(pm *pb.PubsubMessage, m *Message) { } type subscription struct { - topic *topic - mu *sync.Mutex // the server mutex, here for convenience - proto *pb.Subscription - ackTimeout time.Duration - msgs map[string]*message // unacked messages by message ID - streams []*stream - done chan struct{} + topic *topic + deadLetterTopic *topic + mu *sync.Mutex // the server mutex, here for convenience + proto *pb.Subscription + ackTimeout time.Duration + msgs map[string]*message // unacked messages by message ID + streams []*stream + done chan struct{} + timeNowFunc func() time.Time } -func newSubscription(t *topic, mu *sync.Mutex, ps *pb.Subscription) *subscription { +func newSubscription(t *topic, mu *sync.Mutex, timeNowFunc func() time.Time, deadLetterTopic *topic, ps *pb.Subscription) *subscription { at := 
time.Duration(ps.AckDeadlineSeconds) * time.Second if at == 0 { at = 10 * time.Second } + ps.State = pb.Subscription_ACTIVE return &subscription{ - topic: t, - mu: mu, - proto: ps, - ackTimeout: at, - msgs: map[string]*message{}, - done: make(chan struct{}), + topic: t, + deadLetterTopic: deadLetterTopic, + mu: mu, + proto: ps, + ackTimeout: at, + msgs: map[string]*message{}, + done: make(chan struct{}), + timeNowFunc: timeNowFunc, } } @@ -604,6 +839,10 @@ func (s *GServer) Acknowledge(_ context.Context, req *pb.AcknowledgeRequest) (*e s.mu.Lock() defer s.mu.Unlock() + if handled, ret, err := s.runReactor(req, "Acknowledge", &emptypb.Empty{}); handled || err != nil { + return ret.(*emptypb.Empty), err + } + sub, err := s.findSubscription(req.Subscription) if err != nil { return nil, err @@ -617,13 +856,18 @@ func (s *GServer) Acknowledge(_ context.Context, req *pb.AcknowledgeRequest) (*e func (s *GServer) ModifyAckDeadline(_ context.Context, req *pb.ModifyAckDeadlineRequest) (*emptypb.Empty, error) { s.mu.Lock() defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "ModifyAckDeadline", &emptypb.Empty{}); handled || err != nil { + return ret.(*emptypb.Empty), err + } + sub, err := s.findSubscription(req.Subscription) if err != nil { return nil, err } now := time.Now() for _, id := range req.AckIds { - s.msgsByID[id].Modacks = append(s.msgsByID[id].Modacks, Modack{AckID: id, AckDeadline: req.AckDeadlineSeconds, ReceivedAt: now}) + s.msgsByID[id].modacks = append(s.msgsByID[id].modacks, Modack{AckID: id, AckDeadline: req.AckDeadlineSeconds, ReceivedAt: now}) } dur := secsToDur(req.AckDeadlineSeconds) for _, id := range req.AckIds { @@ -634,6 +878,12 @@ func (s *GServer) ModifyAckDeadline(_ context.Context, req *pb.ModifyAckDeadline func (s *GServer) Pull(ctx context.Context, req *pb.PullRequest) (*pb.PullResponse, error) { s.mu.Lock() + + if handled, ret, err := s.runReactor(req, "Pull", &pb.PullResponse{}); handled || err != nil { + s.mu.Unlock() + return ret.(*pb.PullResponse), err + } + sub, err := s.findSubscription(req.Subscription) if err != nil { s.mu.Unlock() @@ -683,6 +933,7 @@ func (s *GServer) StreamingPull(sps pb.Subscriber_StreamingPullServer) error { } // Create a new stream to handle the pull. st := sub.newStream(sps, s.streamTimeout) + st.ackTimeout = time.Duration(req.StreamAckDeadlineSeconds) * time.Second err = st.pull(&s.wg) sub.deleteStream(st) return err @@ -696,11 +947,7 @@ func (s *GServer) Seek(ctx context.Context, req *pb.SeekRequest) (*pb.SeekRespon case nil: return nil, status.Errorf(codes.InvalidArgument, "missing Seek target type") case *pb.SeekRequest_Time: - var err error - target, err = ptypes.Timestamp(v.Time) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "bad Time target: %v", err) - } + target = v.Time.AsTime() default: return nil, status.Errorf(codes.Unimplemented, "unhandled Seek target type %T", v) } @@ -709,6 +956,11 @@ func (s *GServer) Seek(ctx context.Context, req *pb.SeekRequest) (*pb.SeekRespon // because the messages don't have any other synchronization. s.mu.Lock() defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "Seek", &pb.SeekResponse{}); handled || err != nil { + return ret.(*pb.SeekResponse), err + } + sub, err := s.findSubscription(req.Subscription) if err != nil { return nil, err @@ -756,13 +1008,21 @@ func (s *GServer) findSubscription(name string) (*subscription, error) { // Must be called with the lock held. 
func (s *subscription) pull(max int) []*pb.ReceivedMessage { - now := timeNow() + now := s.timeNowFunc() s.maintainMessages(now) var msgs []*pb.ReceivedMessage - for _, m := range s.msgs { + for id, m := range filterMsgs(s.msgs, s.proto.EnableMessageOrdering) { if m.outstanding() { continue } + if s.deadLetterCandidate(m) { + s.ack(id) + s.publishToDeadLetter(m) + continue + } + if s.proto.DeadLetterPolicy != nil { + m.proto.DeliveryAttempt = int32(*m.deliveries) + } (*m.deliveries)++ m.ackDeadline = now.Add(s.ackTimeout) msgs = append(msgs, m.proto) @@ -773,18 +1033,49 @@ func (s *subscription) pull(max int) []*pb.ReceivedMessage { return msgs } +func filterMsgs(msgs map[string]*message, enableMessageOrdering bool) map[string]*message { + if !enableMessageOrdering { + return msgs + } + result := make(map[string]*message) + + type msg struct { + id string + m *message + } + orderingKeyMap := make(map[string]msg) + for id, m := range msgs { + orderingKey := m.proto.Message.OrderingKey + if orderingKey == "" { + orderingKey = id + } + if val, ok := orderingKeyMap[orderingKey]; !ok || m.proto.Message.PublishTime.AsTime().Before(val.m.proto.Message.PublishTime.AsTime()) { + orderingKeyMap[orderingKey] = msg{m: m, id: id} + } + } + for _, val := range orderingKeyMap { + result[val.id] = val.m + } + return result +} + func (s *subscription) deliver() { s.mu.Lock() defer s.mu.Unlock() - now := timeNow() + now := s.timeNowFunc() s.maintainMessages(now) // Try to deliver each remaining message. curIndex := 0 - for _, m := range s.msgs { + for id, m := range filterMsgs(s.msgs, s.proto.EnableMessageOrdering) { if m.outstanding() { continue } + if s.deadLetterCandidate(m) { + s.ack(id) + s.publishToDeadLetter(m) + continue + } // If the message was never delivered before, start with the stream at // curIndex. If it was delivered before, start with the stream after the one // that owned it. @@ -843,12 +1134,10 @@ func (s *subscription) maintainMessages(now time.Time) { if m.outstanding() && now.After(m.ackDeadline) { m.makeAvailable() } - pubTime, err := ptypes.Timestamp(m.proto.Message.PublishTime) - if err != nil { - panic(err) - } + pubTime := m.proto.Message.PublishTime.AsTime() // Remove messages that have been undelivered for a long time. 
if !m.outstanding() && now.Sub(pubTime) > retentionDuration { + s.publishToDeadLetter(m) delete(s.msgs, id) } } @@ -856,12 +1145,14 @@ func (s *subscription) maintainMessages(now time.Time) { func (s *subscription) newStream(gs pb.Subscriber_StreamingPullServer, timeout time.Duration) *stream { st := &stream{ - sub: s, - done: make(chan struct{}), - msgc: make(chan *pb.ReceivedMessage), - gstream: gs, - ackTimeout: s.ackTimeout, - timeout: timeout, + sub: s, + done: make(chan struct{}), + msgc: make(chan *pb.ReceivedMessage), + gstream: gs, + ackTimeout: s.ackTimeout, + timeout: timeout, + enableExactlyOnceDelivery: s.proto.EnableExactlyOnceDelivery, + enableOrdering: s.proto.EnableMessageOrdering, } s.mu.Lock() s.streams = append(s.streams, st) @@ -882,6 +1173,33 @@ func (s *subscription) deleteStream(st *stream) { s.streams = deleteStreamAt(s.streams, i) } } + +func (s *subscription) deadLetterCandidate(m *message) bool { + if s.proto.DeadLetterPolicy == nil { + return false + } + if m.retriesDone(s.proto.DeadLetterPolicy.MaxDeliveryAttempts) { + return true + } + return false +} + +func (s *subscription) publishToDeadLetter(m *message) { + acks := 0 + if m.acks != nil { + acks = *m.acks + } + deliveries := 0 + if m.deliveries != nil { + deliveries = *m.deliveries + } + s.deadLetterTopic.publish(m.proto.Message, &Message{ + PublishTime: m.publishTime, + Acks: acks, + Deliveries: deliveries, + }) +} + func deleteStreamAt(s []*stream, i int) []*stream { // Preserve order for round-robin delivery. return append(s[:i], s[i+1:]...) @@ -901,17 +1219,24 @@ func (m *message) outstanding() bool { return !m.ackDeadline.IsZero() } +// A message is outstanding if it is owned by some stream. +func (m *message) retriesDone(maxRetries int32) bool { + return m.deliveries != nil && int32(*m.deliveries) >= maxRetries +} + func (m *message) makeAvailable() { m.ackDeadline = time.Time{} } type stream struct { - sub *subscription - done chan struct{} // closed when the stream is finished - msgc chan *pb.ReceivedMessage - gstream pb.Subscriber_StreamingPullServer - ackTimeout time.Duration - timeout time.Duration + sub *subscription + done chan struct{} // closed when the stream is finished + msgc chan *pb.ReceivedMessage + gstream pb.Subscriber_StreamingPullServer + ackTimeout time.Duration + timeout time.Duration + enableExactlyOnceDelivery bool + enableOrdering bool } // pull manages the StreamingPull interaction for the life of the stream. @@ -949,7 +1274,13 @@ func (st *stream) sendLoop() error { case <-st.done: return nil case rm := <-st.msgc: - res := &pb.StreamingPullResponse{ReceivedMessages: []*pb.ReceivedMessage{rm}} + res := &pb.StreamingPullResponse{ + ReceivedMessages: []*pb.ReceivedMessage{rm}, + SubscriptionProperties: &pb.StreamingPullResponse_SubscriptionProperties{ + ExactlyOnceDeliveryEnabled: st.enableExactlyOnceDelivery, + MessageOrderingEnabled: st.enableOrdering, + }, + } if err := st.gstream.Send(res); err != nil { return err } @@ -1001,10 +1332,162 @@ func (s *subscription) modifyAckDeadline(id string, d time.Duration) { if d == 0 { // nack m.makeAvailable() } else { // extend the deadline by d - m.ackDeadline = timeNow().Add(d) + m.ackDeadline = s.timeNowFunc().Add(d) } } func secsToDur(secs int32) time.Duration { return time.Duration(secs) * time.Second } + +// runReactor looks up the reactors for a function, then launches them until handled=true +// or err is returned. If the reactor returns nil, the function returns defaultObj instead. 
+func (s *GServer) runReactor(req interface{}, funcName string, defaultObj interface{}) (bool, interface{}, error) { + if val, ok := s.reactorOptions[funcName]; ok { + for _, reactor := range val { + handled, ret, err := reactor.React(req) + // If handled=true, that means the reactor has successfully reacted to the request, + // so use the output directly. If err occurs, that means the request is invalidated + // by the reactor somehow. + if handled || err != nil { + if ret == nil { + ret = defaultObj + } + return true, ret, err + } + } + } + return false, nil, nil +} + +// errorInjectionReactor is a reactor to inject an error message with status code. +type errorInjectionReactor struct { + code codes.Code + msg string +} + +// React simply returns an error with defined error message and status code. +func (e *errorInjectionReactor) React(_ interface{}) (handled bool, ret interface{}, err error) { + return true, nil, status.Errorf(e.code, e.msg) +} + +// WithErrorInjection creates a ServerReactorOption that injects error with defined status code and +// message for a certain function. +func WithErrorInjection(funcName string, code codes.Code, msg string) ServerReactorOption { + return ServerReactorOption{ + FuncName: funcName, + Reactor: &errorInjectionReactor{code: code, msg: msg}, + } +} + +func (s *GServer) CreateSchema(_ context.Context, req *pb.CreateSchemaRequest) (*pb.Schema, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "CreateSchema", &pb.Schema{}); handled || err != nil { + return ret.(*pb.Schema), err + } + + name := fmt.Sprintf("%s/schemas/%s", req.Parent, req.SchemaId) + sc := &pb.Schema{ + Name: name, + Type: req.Schema.Type, + Definition: req.Schema.Definition, + } + s.schemas[name] = sc + + return sc, nil +} + +func (s *GServer) GetSchema(_ context.Context, req *pb.GetSchemaRequest) (*pb.Schema, error) { + + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "GetSchema", &pb.Schema{}); handled || err != nil { + return ret.(*pb.Schema), err + } + + sc, ok := s.schemas[req.Name] + if !ok { + return nil, status.Errorf(codes.NotFound, "schema(%q) not found", req.Name) + } + return sc, nil +} + +func (s *GServer) ListSchemas(_ context.Context, req *pb.ListSchemasRequest) (*pb.ListSchemasResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "ListSchemas", &pb.ListSchemasResponse{}); handled || err != nil { + return ret.(*pb.ListSchemasResponse), err + } + ss := make([]*pb.Schema, 0) + for _, sc := range s.schemas { + ss = append(ss, sc) + } + return &pb.ListSchemasResponse{ + Schemas: ss, + }, nil +} + +func (s *GServer) DeleteSchema(_ context.Context, req *pb.DeleteSchemaRequest) (*emptypb.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "DeleteSchema", &emptypb.Empty{}); handled || err != nil { + return ret.(*emptypb.Empty), err + } + + schema := s.schemas[req.Name] + if schema == nil { + return nil, status.Errorf(codes.NotFound, "schema %q", req.Name) + } + + delete(s.schemas, req.Name) + return &emptypb.Empty{}, nil +} + +// ValidateSchema mocks the ValidateSchema call but only checks that the schema definition is not empty. 
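The reactor plumbing above (runReactor, errorInjectionReactor, WithErrorInjection) lets a test force any fake-server RPC to fail before the normal handling runs. A minimal test sketch, assuming pstest.NewServer accepts the ServerReactorOption values defined above and that the CreateTopic handler is wired through runReactor (both are outside this excerpt); project and topic names are placeholders:

package example_test

import (
	"context"
	"testing"

	"cloud.google.com/go/pubsub"
	"cloud.google.com/go/pubsub/pstest"
	"google.golang.org/api/option"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

func TestCreateTopicErrorInjection(t *testing.T) {
	ctx := context.Background()

	// Every CreateTopic call is answered by the injected error instead of the
	// fake's normal handling. PermissionDenied is used because it is not retried.
	srv := pstest.NewServer(pstest.WithErrorInjection("CreateTopic", codes.PermissionDenied, "injected failure"))
	defer srv.Close()

	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	client, err := pubsub.NewClient(ctx, "fake-project", option.WithGRPCConn(conn))
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	if _, err := client.CreateTopic(ctx, "orders"); err == nil {
		t.Fatal("expected injected PermissionDenied error, got nil")
	}
}
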
+func (s *GServer) ValidateSchema(_ context.Context, req *pb.ValidateSchemaRequest) (*pb.ValidateSchemaResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "ValidateSchema", &pb.ValidateSchemaResponse{}); handled || err != nil { + return ret.(*pb.ValidateSchemaResponse), err + } + + if req.Schema.Definition == "" { + return nil, status.Error(codes.InvalidArgument, "schema definition cannot be empty") + } + return &pb.ValidateSchemaResponse{}, nil +} + +// ValidateMessage mocks the ValidateMessage call but only checks that the schema definition to validate the +// message against is not empty. +func (s *GServer) ValidateMessage(_ context.Context, req *pb.ValidateMessageRequest) (*pb.ValidateMessageResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if handled, ret, err := s.runReactor(req, "ValidateMessage", &pb.ValidateMessageResponse{}); handled || err != nil { + return ret.(*pb.ValidateMessageResponse), err + } + + spec := req.GetSchemaSpec() + if valReq, ok := spec.(*pb.ValidateMessageRequest_Name); ok { + sc, ok := s.schemas[valReq.Name] + if !ok { + return nil, status.Errorf(codes.NotFound, "schema(%q) not found", valReq.Name) + } + if sc.Definition == "" { + return nil, status.Error(codes.InvalidArgument, "schema definition cannot be empty") + } + } + if valReq, ok := spec.(*pb.ValidateMessageRequest_Schema); ok { + if valReq.Schema.Definition == "" { + return nil, status.Error(codes.InvalidArgument, "schema definition cannot be empty") + } + } + + return &pb.ValidateMessageResponse{}, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/pubsub.go b/vendor/cloud.google.com/go/pubsub/pubsub.go index a14f6699784b..0b44ea5583e4 100644 --- a/vendor/cloud.google.com/go/pubsub/pubsub.go +++ b/vendor/cloud.google.com/go/pubsub/pubsub.go @@ -18,11 +18,14 @@ import ( "context" "fmt" "os" + "reflect" "runtime" + "strings" "time" - "cloud.google.com/go/internal/version" vkit "cloud.google.com/go/pubsub/apiv1" + "cloud.google.com/go/pubsub/internal" + gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/option" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" @@ -36,8 +39,6 @@ const ( // ScopeCloudPlatform grants permissions to view and manage your data // across Google Cloud Platform services. ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" - - maxAckDeadline = 10 * time.Minute ) // Client is a Google Pub/Sub client scoped to a single project. @@ -50,17 +51,85 @@ type Client struct { subc *vkit.SubscriberClient } -// NewClient creates a new PubSub client. +// ClientConfig has configurations for the client. +type ClientConfig struct { + PublisherCallOptions *vkit.PublisherCallOptions + SubscriberCallOptions *vkit.SubscriberCallOptions +} + +// mergePublisherCallOptions merges two PublisherCallOptions into one and the first argument has +// a lower order of precedence than the second one. If either is nil, return the other. 
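ClientConfig above feeds the merge helpers that follow: caller-supplied call options are appended after the generated defaults, so for gax they take precedence. A hedged sketch of overriding the Publish retry behavior through NewClientWithConfig (the retry codes and backoff values below are illustrative, not the library defaults):

package example

import (
	"context"
	"time"

	"cloud.google.com/go/pubsub"
	vkit "cloud.google.com/go/pubsub/apiv1"
	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
)

// newClientWithPublishRetry builds a client whose Publish RPCs retry on
// Unavailable/Aborted with a custom backoff, layered on top of the defaults.
func newClientWithPublishRetry(ctx context.Context, projectID string) (*pubsub.Client, error) {
	cfg := &pubsub.ClientConfig{
		PublisherCallOptions: &vkit.PublisherCallOptions{
			Publish: []gax.CallOption{
				gax.WithRetry(func() gax.Retryer {
					return gax.OnCodes([]codes.Code{codes.Unavailable, codes.Aborted}, gax.Backoff{
						Initial:    250 * time.Millisecond,
						Max:        10 * time.Second,
						Multiplier: 2,
					})
				}),
			},
		},
	}
	return pubsub.NewClientWithConfig(ctx, projectID, cfg)
}
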
+func mergePublisherCallOptions(a *vkit.PublisherCallOptions, b *vkit.PublisherCallOptions) *vkit.PublisherCallOptions { + if a == nil { + return b + } + if b == nil { + return a + } + res := &vkit.PublisherCallOptions{} + resVal := reflect.ValueOf(res).Elem() + aVal := reflect.ValueOf(a).Elem() + bVal := reflect.ValueOf(b).Elem() + + t := aVal.Type() + + for i := 0; i < aVal.NumField(); i++ { + fieldName := t.Field(i).Name + + aFieldVal := aVal.Field(i).Interface().([]gax.CallOption) + bFieldVal := bVal.Field(i).Interface().([]gax.CallOption) + + merged := append(aFieldVal, bFieldVal...) + resVal.FieldByName(fieldName).Set(reflect.ValueOf(merged)) + } + return res +} + +// mergeSubscribercallOptions merges two SubscriberCallOptions into one and the first argument has +// a lower order of precedence than the second one. If either is nil, the other is used. +func mergeSubscriberCallOptions(a *vkit.SubscriberCallOptions, b *vkit.SubscriberCallOptions) *vkit.SubscriberCallOptions { + if a == nil { + return b + } + if b == nil { + return a + } + res := &vkit.SubscriberCallOptions{} + resVal := reflect.ValueOf(res).Elem() + aVal := reflect.ValueOf(a).Elem() + bVal := reflect.ValueOf(b).Elem() + + t := aVal.Type() + + for i := 0; i < aVal.NumField(); i++ { + fieldName := t.Field(i).Name + + aFieldVal := aVal.Field(i).Interface().([]gax.CallOption) + bFieldVal := bVal.Field(i).Interface().([]gax.CallOption) + + merged := append(aFieldVal, bFieldVal...) + resVal.FieldByName(fieldName).Set(reflect.ValueOf(merged)) + } + return res +} + +// NewClient creates a new PubSub client. It uses a default configuration. func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (c *Client, err error) { + return NewClientWithConfig(ctx, projectID, nil, opts...) +} + +// NewClientWithConfig creates a new PubSub client. +func NewClientWithConfig(ctx context.Context, projectID string, config *ClientConfig, opts ...option.ClientOption) (c *Client, err error) { var o []option.ClientOption // Environment variables for gcloud emulator: // https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/ if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" { conn, err := grpc.Dial(addr, grpc.WithInsecure()) if err != nil { - return nil, fmt.Errorf("grpc.Dial: %v", err) + return nil, fmt.Errorf("grpc.Dial: %w", err) } o = []option.ClientOption{option.WithGRPCConn(conn)} + o = append(o, option.WithTelemetryDisabled()) } else { numConns := runtime.GOMAXPROCS(0) if numConns > 4 { @@ -73,21 +142,21 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio Time: 5 * time.Minute, })), } - o = append(o, openCensusOptions()...) } o = append(o, opts...) pubc, err := vkit.NewPublisherClient(ctx, o...) if err != nil { - return nil, fmt.Errorf("pubsub: %v", err) + return nil, fmt.Errorf("pubsub(publisher): %w", err) } - subc, err := vkit.NewSubscriberClient(ctx, option.WithGRPCConn(pubc.Connection())) + subc, err := vkit.NewSubscriberClient(ctx, o...) if err != nil { - // Should never happen, since we are passing in the connection. - // If it does, we cannot close, because the user may have passed in their - // own connection originally. 
- return nil, fmt.Errorf("pubsub: %v", err) + return nil, fmt.Errorf("pubsub(subscriber): %w", err) + } + if config != nil { + pubc.CallOptions = mergePublisherCallOptions(pubc.CallOptions, config.PublisherCallOptions) + subc.CallOptions = mergeSubscriberCallOptions(subc.CallOptions, config.SubscriberCallOptions) } - pubc.SetGoogleClientInfo("gccl", version.Repo) + pubc.SetGoogleClientInfo("gccl", internal.Version) return &Client{ projectID: projectID, pubc: pubc, @@ -101,10 +170,22 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio // If the client is available for the lifetime of the program, then Close need not be // called at exit. func (c *Client) Close() error { - // Return the first error, because the first call closes the connection. - err := c.pubc.Close() - _ = c.subc.Close() - return err + pubErr := c.pubc.Close() + subErr := c.subc.Close() + if pubErr != nil { + return fmt.Errorf("pubsub publisher closing error: %w", pubErr) + } + if subErr != nil { + // Suppress client connection closing errors. This will only happen + // when using the client in conjunction with the Pub/Sub emulator + // or fake (pstest). Closing both clients separately will never + // return this error against the live Pub/Sub service. + if strings.Contains(subErr.Error(), "the client connection is closing") { + return nil + } + return fmt.Errorf("pubsub subscriber closing error: %w", subErr) + } + return nil } func (c *Client) fullyQualifiedProjectName() string { diff --git a/vendor/cloud.google.com/go/pubsub/pullstream.go b/vendor/cloud.google.com/go/pubsub/pullstream.go index dd0e6c6805be..2137cdb9f6eb 100644 --- a/vendor/cloud.google.com/go/pubsub/pullstream.go +++ b/vendor/cloud.google.com/go/pubsub/pullstream.go @@ -28,8 +28,9 @@ import ( // A pullStream supports the methods of a StreamingPullClient, but re-opens // the stream on a retryable error. type pullStream struct { - ctx context.Context - open func() (pb.Subscriber_StreamingPullClient, error) + ctx context.Context + open func() (pb.Subscriber_StreamingPullClient, error) + cancel context.CancelFunc mu sync.Mutex spc *pb.Subscriber_StreamingPullClient @@ -39,18 +40,27 @@ type pullStream struct { // for testing type streamingPullFunc func(context.Context, ...gax.CallOption) (pb.Subscriber_StreamingPullClient, error) -func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName string) *pullStream { +func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName string, maxOutstandingMessages, maxOutstandingBytes int, maxDurationPerLeaseExtension time.Duration) *pullStream { ctx = withSubscriptionKey(ctx, subName) + ctx, cancel := context.WithCancel(ctx) return &pullStream{ - ctx: ctx, + ctx: ctx, + cancel: cancel, open: func() (pb.Subscriber_StreamingPullClient, error) { spc, err := streamingPull(ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) if err == nil { recordStat(ctx, StreamRequestCount, 1) + streamAckDeadline := int32(maxDurationPerLeaseExtension / time.Second) + // By default, maxDurationPerLeaseExtension, aka MaxExtensionPeriod, is disabled, + // so in these cases, use a healthy default of 60 seconds. + if streamAckDeadline <= 0 { + streamAckDeadline = 60 + } err = spc.Send(&pb.StreamingPullRequest{ - Subscription: subName, - // We modack messages when we receive them, so this value doesn't matter too much. 
- StreamAckDeadlineSeconds: 60, + Subscription: subName, + StreamAckDeadlineSeconds: streamAckDeadline, + MaxOutstandingMessages: int64(maxOutstandingMessages), + MaxOutstandingBytes: int64(maxOutstandingBytes), }) } if err != nil { diff --git a/vendor/cloud.google.com/go/pubsub/schema.go b/vendor/cloud.google.com/go/pubsub/schema.go new file mode 100644 index 000000000000..1d3ff03af6bc --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/schema.go @@ -0,0 +1,263 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "context" + "fmt" + + "google.golang.org/api/option" + + vkit "cloud.google.com/go/pubsub/apiv1" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +// SchemaClient is a Pub/Sub schema client scoped to a single project. +type SchemaClient struct { + sc *vkit.SchemaClient + projectID string +} + +// Close closes the schema client and frees up resources. +func (s *SchemaClient) Close() error { + return s.sc.Close() +} + +// NewSchemaClient creates a new Pub/Sub Schema client. +func NewSchemaClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*SchemaClient, error) { + sc, err := vkit.NewSchemaClient(ctx, opts...) + if err != nil { + return nil, err + } + return &SchemaClient{sc: sc, projectID: projectID}, nil +} + +// SchemaConfig is a reference to a PubSub schema. +type SchemaConfig struct { + // The name of the schema populated by the server. This field is read-only. + Name string + + // The type of the schema definition. + Type SchemaType + + // The definition of the schema. This should contain a string representing + // the full definition of the schema that is a valid schema definition of + // the type specified in `type`. + Definition string +} + +// SchemaType is the possible shcema definition types. +type SchemaType pb.Schema_Type + +const ( + // SchemaTypeUnspecified is the unused default value. + SchemaTypeUnspecified SchemaType = 0 + // SchemaProtocolBuffer is a protobuf schema definition. + SchemaProtocolBuffer SchemaType = 1 + // SchemaAvro is an Avro schema definition. + SchemaAvro SchemaType = 2 +) + +// SchemaView is a view of Schema object fields to be returned +// by GetSchema and ListSchemas. +type SchemaView pb.SchemaView + +const ( + // SchemaViewUnspecified is the default/unset value. + SchemaViewUnspecified SchemaView = 0 + // SchemaViewBasic includes the name and type of the schema, but not the definition. + SchemaViewBasic SchemaView = 1 + // SchemaViewFull includes all Schema object fields. + SchemaViewFull SchemaView = 2 +) + +// SchemaSettings are settings for validating messages +// published against a schema. 
+type SchemaSettings struct { + Schema string + Encoding SchemaEncoding +} + +func schemaSettingsToProto(schema *SchemaSettings) *pb.SchemaSettings { + if schema == nil { + return nil + } + return &pb.SchemaSettings{ + Schema: schema.Schema, + Encoding: pb.Encoding(schema.Encoding), + } +} + +func protoToSchemaSettings(pbs *pb.SchemaSettings) *SchemaSettings { + if pbs == nil { + return nil + } + return &SchemaSettings{ + Schema: pbs.Schema, + Encoding: SchemaEncoding(pbs.Encoding), + } +} + +// SchemaEncoding is the encoding expected for messages. +type SchemaEncoding pb.Encoding + +const ( + // EncodingUnspecified is the default unused value. + EncodingUnspecified SchemaEncoding = 0 + // EncodingJSON is the JSON encoding type for a message. + EncodingJSON SchemaEncoding = 1 + // EncodingBinary is the binary encoding type for a message. + // For some schema types, binary encoding may not be available. + EncodingBinary SchemaEncoding = 2 +) + +func (s *SchemaConfig) toProto() *pb.Schema { + pbs := &pb.Schema{ + Name: s.Name, + Type: pb.Schema_Type(s.Type), + Definition: s.Definition, + } + return pbs +} + +func protoToSchemaConfig(pbs *pb.Schema) *SchemaConfig { + return &SchemaConfig{ + Name: pbs.Name, + Type: SchemaType(pbs.Type), + Definition: pbs.Definition, + } +} + +// CreateSchema creates a new schema with the given schemaID +// and config. Schemas cannot be updated after creation. +func (c *SchemaClient) CreateSchema(ctx context.Context, schemaID string, s SchemaConfig) (*SchemaConfig, error) { + req := &pb.CreateSchemaRequest{ + Parent: fmt.Sprintf("projects/%s", c.projectID), + Schema: s.toProto(), + SchemaId: schemaID, + } + pbs, err := c.sc.CreateSchema(ctx, req) + if err != nil { + return nil, err + } + return protoToSchemaConfig(pbs), nil +} + +// Schema retrieves the configuration of a schema given a schemaID and a view. +func (c *SchemaClient) Schema(ctx context.Context, schemaID string, view SchemaView) (*SchemaConfig, error) { + schemaPath := fmt.Sprintf("projects/%s/schemas/%s", c.projectID, schemaID) + req := &pb.GetSchemaRequest{ + Name: schemaPath, + View: pb.SchemaView(view), + } + s, err := c.sc.GetSchema(ctx, req) + if err != nil { + return nil, err + } + return protoToSchemaConfig(s), nil +} + +// Schemas returns an iterator which returns all of the schemas for the client's project. +func (c *SchemaClient) Schemas(ctx context.Context, view SchemaView) *SchemaIterator { + return &SchemaIterator{ + it: c.sc.ListSchemas(ctx, &pb.ListSchemasRequest{ + Parent: fmt.Sprintf("projects/%s", c.projectID), + View: pb.SchemaView(view), + }), + } +} + +// SchemaIterator is a struct used to iterate over schemas. +type SchemaIterator struct { + it *vkit.SchemaIterator + err error +} + +// Next returns the next schema. If there are no more schemas, iterator.Done will be returned. +func (s *SchemaIterator) Next() (*SchemaConfig, error) { + if s.err != nil { + return nil, s.err + } + pbs, err := s.it.Next() + if err != nil { + return nil, err + } + return protoToSchemaConfig(pbs), nil +} + +// DeleteSchema deletes an existing schema given a schema ID. +func (s *SchemaClient) DeleteSchema(ctx context.Context, schemaID string) error { + schemaPath := fmt.Sprintf("projects/%s/schemas/%s", s.projectID, schemaID) + return s.sc.DeleteSchema(ctx, &pb.DeleteSchemaRequest{ + Name: schemaPath, + }) +} + +// ValidateSchemaResult is the response for the ValidateSchema method. +// Reserved for future use. 
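The SchemaClient surface added in schema.go above is enough for basic schema lifecycle management. A short sketch; the project ID, schema ID, and Avro definition are placeholders:

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/pubsub"
	"google.golang.org/api/iterator"
)

func createAndListSchemas(ctx context.Context, projectID, schemaID, avroDefinition string) error {
	sc, err := pubsub.NewSchemaClient(ctx, projectID)
	if err != nil {
		return err
	}
	defer sc.Close()

	// Schemas are immutable once created; changes require a new schema ID.
	if _, err := sc.CreateSchema(ctx, schemaID, pubsub.SchemaConfig{
		Type:       pubsub.SchemaAvro,
		Definition: avroDefinition,
	}); err != nil {
		return err
	}

	// SchemaViewBasic omits the (potentially large) definition field.
	it := sc.Schemas(ctx, pubsub.SchemaViewBasic)
	for {
		cfg, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(cfg.Name, cfg.Type)
	}
}
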
+type ValidateSchemaResult struct{} + +// ValidateSchema validates a schema config and returns an error if invalid. +func (s *SchemaClient) ValidateSchema(ctx context.Context, schema SchemaConfig) (*ValidateSchemaResult, error) { + req := &pb.ValidateSchemaRequest{ + Parent: fmt.Sprintf("projects/%s", s.projectID), + Schema: schema.toProto(), + } + _, err := s.sc.ValidateSchema(ctx, req) + if err != nil { + return nil, err + } + return &ValidateSchemaResult{}, nil +} + +// ValidateMessageResult is the response for the ValidateMessage method. +// Reserved for future use. +type ValidateMessageResult struct{} + +// ValidateMessageWithConfig validates a message against an schema specified +// by a schema config. +func (s *SchemaClient) ValidateMessageWithConfig(ctx context.Context, msg []byte, encoding SchemaEncoding, config SchemaConfig) (*ValidateMessageResult, error) { + req := &pb.ValidateMessageRequest{ + Parent: fmt.Sprintf("projects/%s", s.projectID), + SchemaSpec: &pb.ValidateMessageRequest_Schema{ + Schema: config.toProto(), + }, + Message: msg, + Encoding: pb.Encoding(encoding), + } + _, err := s.sc.ValidateMessage(ctx, req) + if err != nil { + return nil, err + } + return &ValidateMessageResult{}, nil +} + +// ValidateMessageWithID validates a message against an schema specified +// by the schema ID of an existing schema. +func (s *SchemaClient) ValidateMessageWithID(ctx context.Context, msg []byte, encoding SchemaEncoding, schemaID string) (*ValidateMessageResult, error) { + req := &pb.ValidateMessageRequest{ + Parent: fmt.Sprintf("projects/%s", s.projectID), + SchemaSpec: &pb.ValidateMessageRequest_Name{ + Name: fmt.Sprintf("projects/%s/schemas/%s", s.projectID, schemaID), + }, + Message: msg, + Encoding: pb.Encoding(encoding), + } + _, err := s.sc.ValidateMessage(ctx, req) + if err != nil { + return nil, err + } + return &ValidateMessageResult{}, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/service.go b/vendor/cloud.google.com/go/pubsub/service.go index a22b9147fccc..928a77d3b1c5 100644 --- a/vendor/cloud.google.com/go/pubsub/service.go +++ b/vendor/cloud.google.com/go/pubsub/service.go @@ -15,13 +15,11 @@ package pubsub import ( - "fmt" "math" "strings" "time" gax "github.com/googleapis/gax-go/v2" - pb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -37,18 +35,6 @@ const ( maxSendRecvBytes = 20 * 1024 * 1024 // 20M ) -func convertMessages(rms []*pb.ReceivedMessage) ([]*Message, error) { - msgs := make([]*Message, 0, len(rms)) - for i, m := range rms { - msg, err := toMessage(m) - if err != nil { - return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m) - } - msgs = append(msgs, msg) - } - return msgs, nil -} - func trunc32(i int64) int32 { if i > math.MaxInt32 { i = math.MaxInt32 @@ -61,7 +47,7 @@ type defaultRetryer struct { } // Logic originally from -// https://github.com/GoogleCloudPlatform/google-cloud-java/blob/master/google-cloud-clients/google-cloud-pubsub/src/main/java/com/google/cloud/pubsub/v1/StatusUtil.java +// https://github.com/googleapis/java-pubsub/blob/main/google-cloud-pubsub/src/main/java/com/google/cloud/pubsub/v1/StatusUtil.java func (r *defaultRetryer) Retry(err error) (pause time.Duration, shouldRetry bool) { s, ok := status.FromError(err) if !ok { // includes io.EOF, normal stream close, which causes us to reopen @@ -76,6 +62,13 @@ func (r *defaultRetryer) Retry(err error) (pause time.Duration, shouldRetry bool return 
r.bo.Pause(), true } return 0, false + case codes.Unknown: + // Retry GOAWAY, see https://github.com/googleapis/google-cloud-go/issues/4257. + isGoaway := strings.Contains(s.Message(), "received prior goaway: code: NO_ERROR") + if isGoaway { + return r.bo.Pause(), true + } + return 0, false default: return 0, false } @@ -98,3 +91,42 @@ func (r *streamingPullRetryer) Retry(err error) (pause time.Duration, shouldRetr return r.defaultRetryer.Retry(err) } } + +type publishRetryer struct { + defaultRetryer gax.Retryer +} + +func (r *publishRetryer) Retry(err error) (pause time.Duration, shouldRetry bool) { + s, ok := status.FromError(err) + if !ok { + return r.defaultRetryer.Retry(err) + } + if s.Code() == codes.Internal && strings.Contains(s.Message(), "string field contains invalid UTF-8") { + return 0, false + } + return r.defaultRetryer.Retry(err) +} + +var ( + exactlyOnceDeliveryTemporaryRetryErrors = map[codes.Code]struct{}{ + codes.DeadlineExceeded: {}, + codes.ResourceExhausted: {}, + codes.Aborted: {}, + codes.Internal: {}, + codes.Unavailable: {}, + } +) + +// contains checks if grpc code v is in t, a set of retryable error codes. +func contains(v codes.Code, t map[codes.Code]struct{}) bool { + _, ok := t[v] + return ok +} + +func newExactlyOnceBackoff() gax.Backoff { + return gax.Backoff{ + Initial: 1 * time.Second, + Max: 64 * time.Second, + Multiplier: 2, + } +} diff --git a/vendor/cloud.google.com/go/pubsub/snapshot.go b/vendor/cloud.google.com/go/pubsub/snapshot.go index c2a28d781990..78797f36475d 100644 --- a/vendor/cloud.google.com/go/pubsub/snapshot.go +++ b/vendor/cloud.google.com/go/pubsub/snapshot.go @@ -20,8 +20,9 @@ import ( "strings" "time" - "github.com/golang/protobuf/ptypes" pb "google.golang.org/genproto/googleapis/pubsub/v1" + fmpb "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/protobuf/types/known/timestamppb" ) // Snapshot is a reference to a PubSub snapshot. @@ -42,11 +43,30 @@ func (s *Snapshot) ID() string { return s.name[slash+1:] } +// SetLabels sets or replaces the labels on a given snapshot. +func (s *Snapshot) SetLabels(ctx context.Context, label map[string]string) (*SnapshotConfig, error) { + sc, err := s.c.subc.UpdateSnapshot(ctx, &pb.UpdateSnapshotRequest{ + Snapshot: &pb.Snapshot{ + Name: s.name, + Labels: label, + }, + UpdateMask: &fmpb.FieldMask{ + Paths: []string{"labels"}, + }, + }) + if err != nil { + return nil, err + } + return toSnapshotConfig(sc, s.c) +} + // SnapshotConfig contains the details of a Snapshot. type SnapshotConfig struct { *Snapshot Topic *Topic Expiration time.Time + // The set of labels for the snapshot. + Labels map[string]string } // Snapshot creates a reference to a snapshot. @@ -100,11 +120,8 @@ func (s *Snapshot) Delete(ctx context.Context) error { // creation time), only retained messages will be marked as unacknowledged, // and already-expunged messages will not be restored. func (s *Subscription) SeekToTime(ctx context.Context, t time.Time) error { - ts, err := ptypes.TimestampProto(t) - if err != nil { - return err - } - _, err = s.c.subc.Seek(ctx, &pb.SeekRequest{ + ts := timestamppb.New(t) + _, err := s.c.subc.Seek(ctx, &pb.SeekRequest{ Subscription: s.name, Target: &pb.SeekRequest_Time{Time: ts}, }) @@ -116,11 +133,12 @@ func (s *Subscription) SeekToTime(ctx context.Context, t time.Time) error { // If the name is empty string, a unique name is assigned. // // The created snapshot is guaranteed to retain: -// (a) The existing backlog on the subscription. 
More precisely, this is -// defined as the messages in the subscription's backlog that are -// unacknowledged when Snapshot returns without error. -// (b) Any messages published to the subscription's topic following -// Snapshot returning without error. +// +// (a) The existing backlog on the subscription. More precisely, this is +// defined as the messages in the subscription's backlog that are +// unacknowledged when Snapshot returns without error. +// (b) Any messages published to the subscription's topic following +// Snapshot returning without error. func (s *Subscription) CreateSnapshot(ctx context.Context, name string) (*SnapshotConfig, error) { if name != "" { name = fmt.Sprintf("projects/%s/snapshots/%s", strings.Split(s.name, "/")[1], name) @@ -148,13 +166,11 @@ func (s *Subscription) SeekToSnapshot(ctx context.Context, snap *Snapshot) error } func toSnapshotConfig(snap *pb.Snapshot, c *Client) (*SnapshotConfig, error) { - exp, err := ptypes.Timestamp(snap.ExpireTime) - if err != nil { - return nil, err - } + exp := snap.ExpireTime.AsTime() return &SnapshotConfig{ Snapshot: &Snapshot{c: c, name: snap.Name}, Topic: newTopic(c, snap.Topic), Expiration: exp, + Labels: snap.Labels, }, nil } diff --git a/vendor/cloud.google.com/go/pubsub/subscription.go b/vendor/cloud.google.com/go/pubsub/subscription.go index 045ddef6109c..f325e65e3147 100644 --- a/vendor/cloud.google.com/go/pubsub/subscription.go +++ b/vendor/cloud.google.com/go/pubsub/subscription.go @@ -25,14 +25,17 @@ import ( "cloud.google.com/go/iam" "cloud.google.com/go/internal/optional" - "github.com/golang/protobuf/ptypes" - durpb "github.com/golang/protobuf/ptypes/duration" + ipubsub "cloud.google.com/go/internal/pubsub" + "cloud.google.com/go/pubsub/internal/scheduler" gax "github.com/googleapis/gax-go/v2" "golang.org/x/sync/errgroup" pb "google.golang.org/genproto/googleapis/pubsub/v1" fmpb "google.golang.org/genproto/protobuf/field_mask" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + durpb "google.golang.org/protobuf/types/known/durationpb" + + vkit "cloud.google.com/go/pubsub/apiv1" ) // Subscription is a reference to a PubSub subscription. @@ -47,6 +50,8 @@ type Subscription struct { mu sync.Mutex receiveActive bool + + enableOrdering bool } // Subscription creates a reference to a subscription. @@ -83,7 +88,8 @@ func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator { Project: c.fullyQualifiedProjectName(), }) return &SubscriptionIterator{ - c: c, + c: c, + it: it, next: func() (string, error) { sub, err := it.Next() if err != nil { @@ -97,6 +103,7 @@ func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator { // SubscriptionIterator is an iterator that returns a series of subscriptions. type SubscriptionIterator struct { c *Client + it *vkit.SubscriptionIterator next func() (string, error) } @@ -109,6 +116,22 @@ func (subs *SubscriptionIterator) Next() (*Subscription, error) { return &Subscription{c: subs.c, name: subName}, nil } +// NextConfig returns the next subscription config. If there are no more subscriptions, +// iterator.Done will be returned. +// This call shares the underlying iterator with calls to `SubscriptionIterator.Next`. +// If you wish to use mix calls, create separate iterator instances for both. 
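NextConfig, declared above and implemented just below, returns the configuration carried in the list response itself, so it avoids the extra GetSubscription RPC that calling Next and then Config on each element would cost. A sketch of iterating configurations directly:

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/pubsub"
	"google.golang.org/api/iterator"
)

func listSubscriptionConfigs(ctx context.Context, client *pubsub.Client) error {
	it := client.Subscriptions(ctx)
	for {
		cfg, err := it.NextConfig()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		// Each element already carries the full server-side configuration.
		fmt.Println(cfg.ID(), cfg.Topic.ID(), cfg.AckDeadline)
	}
}
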
+func (subs *SubscriptionIterator) NextConfig() (*SubscriptionConfig, error) { + spb, err := subs.it.Next() + if err != nil { + return nil, err + } + cfg, err := protoToSubscriptionConfig(spb, subs.c) + if err != nil { + return nil, err + } + return &cfg, nil +} + // PushConfig contains configuration for subscriptions that operate in push mode. type PushConfig struct { // A URL locating the endpoint to which messages should be pushed. @@ -188,11 +211,106 @@ func (oidcToken *OIDCToken) toProto() *pb.PushConfig_OidcToken_ { } } +// BigQueryConfigState denotes the possible states for a BigQuery Subscription. +type BigQueryConfigState int + +const ( + // BigQueryConfigStateUnspecified is the default value. This value is unused. + BigQueryConfigStateUnspecified = iota + + // BigQueryConfigActive means the subscription can actively send messages to BigQuery. + BigQueryConfigActive + + // BigQueryConfigPermissionDenied means the subscription cannot write to the BigQuery table because of permission denied errors. + BigQueryConfigPermissionDenied + + // BigQueryConfigNotFound means the subscription cannot write to the BigQuery table because it does not exist. + BigQueryConfigNotFound + + // BigQueryConfigSchemaMismatch means the subscription cannot write to the BigQuery table due to a schema mismatch. + BigQueryConfigSchemaMismatch +) + +// BigQueryConfig configures the subscription to deliver to a BigQuery table. +type BigQueryConfig struct { + // The name of the table to which to write data, of the form + // {projectId}:{datasetId}.{tableId} + Table string + + // When true, use the topic's schema as the columns to write to in BigQuery, + // if it exists. + UseTopicSchema bool + + // When true, write the subscription name, message_id, publish_time, + // attributes, and ordering_key to additional columns in the table. The + // subscription name, message_id, and publish_time fields are put in their own + // columns while all other message properties (other than data) are written to + // a JSON object in the attributes column. + WriteMetadata bool + + // When true and use_topic_schema is true, any fields that are a part of the + // topic schema that are not part of the BigQuery table schema are dropped + // when writing to BigQuery. Otherwise, the schemas must be kept in sync and + // any messages with extra fields are not written and remain in the + // subscription's backlog. + DropUnknownFields bool + + // This is an output-only field that indicates whether or not the subscription can + // receive messages. This field is set only in responses from the server; + // it is ignored if it is set in any requests. + State BigQueryConfigState +} + +func (bc *BigQueryConfig) toProto() *pb.BigQueryConfig { + if bc == nil { + return nil + } + pbCfg := &pb.BigQueryConfig{ + Table: bc.Table, + UseTopicSchema: bc.UseTopicSchema, + WriteMetadata: bc.WriteMetadata, + DropUnknownFields: bc.DropUnknownFields, + State: pb.BigQueryConfig_State(bc.State), + } + return pbCfg +} + +// SubscriptionState denotes the possible states for a Subscription. +type SubscriptionState int + +const ( + // SubscriptionStateUnspecified is the default value. This value is unused. + SubscriptionStateUnspecified = iota + + // SubscriptionStateActive means the subscription can actively send messages to BigQuery. + SubscriptionStateActive + + // SubscriptionStateResourceError means the subscription receive messages because of an + // error with the resource to which it pushes messages. 
+ // See the more detailed error state in the corresponding configuration. + SubscriptionStateResourceError +) + // SubscriptionConfig describes the configuration of a subscription. type SubscriptionConfig struct { - Topic *Topic + // The fully qualified identifier for the subscription, in the format "projects//subscriptions/" + name string + + // The topic from which this subscription is receiving messages. + Topic *Topic + + // If push delivery is used with this subscription, this field is + // used to configure it. Either `PushConfig` or `BigQueryConfig` can be set, + // but not both. If both are empty, then the subscriber will pull and ack + // messages using API methods. PushConfig PushConfig + // If delivery to BigQuery is used with this subscription, this field is + // used to configure it. Either `PushConfig` or `BigQueryConfig` can be set, + // but not both. If both are empty, then the subscriber will pull and ack + // messages using API methods. + BigQueryConfig BigQueryConfig + // The default maximum time after a subscriber receives a message before // the subscriber should acknowledge the message. Note: messages which are // obtained via Subscription.Receive need not be acknowledged within this @@ -222,13 +340,92 @@ type SubscriptionConfig struct { // The set of labels for the subscription. Labels map[string]string + // EnableMessageOrdering enables message ordering on this subscription. + // This value is only used for subscription creation and update, and + // is not read locally in calls like Subscription.Receive(). + // + // If set to false, even if messages are published with ordering keys, + // messages will not be delivered in order. + // + // When calling Subscription.Receive(), the client will check this + // value with a call to Subscription.Config(), which requires the + // roles/viewer or roles/pubsub.viewer role on your service account. + // If that call fails, mesages with ordering keys will be delivered in order. + EnableMessageOrdering bool + // DeadLetterPolicy specifies the conditions for dead lettering messages in // a subscription. If not set, dead lettering is disabled. - // - // It is EXPERIMENTAL and a part of a closed alpha that may not be - // accessible to all users. This field is subject to change or removal - // without notice. DeadLetterPolicy *DeadLetterPolicy + + // Filter is an expression written in the Cloud Pub/Sub filter language. If + // non-empty, then only `PubsubMessage`s whose `attributes` field matches the + // filter are delivered on this subscription. If empty, then no messages are + // filtered out. Cannot be changed after the subscription is created. + Filter string + + // RetryPolicy specifies how Cloud Pub/Sub retries message delivery. + RetryPolicy *RetryPolicy + + // Detached indicates whether the subscription is detached from its topic. + // Detached subscriptions don't receive messages from their topic and don't + // retain any backlog. `Pull` and `StreamingPull` requests will return + // FAILED_PRECONDITION. If the subscription is a push subscription, pushes to + // the endpoint will not be made. + Detached bool + + // TopicMessageRetentionDuration indicates the minimum duration for which a message is + // retained after it is published to the subscription's topic. If this field is + // set, messages published to the subscription's topic in the last + // `TopicMessageRetentionDuration` are always available to subscribers. + // You can enable both topic and subscription retention for the same topic. 
+ // In this situation, the maximum of the retention durations takes effect. + // + // This is an output only field, meaning it will only appear in responses from the backend + // and will be ignored if sent in a request. + TopicMessageRetentionDuration time.Duration + + // EnableExactlyOnceDelivery configures Pub/Sub to provide the following guarantees + // for the delivery of a message with a given MessageID on this subscription: + // + // The message sent to a subscriber is guaranteed not to be resent + // before the message's acknowledgement deadline expires. + // An acknowledged message will not be resent to a subscriber. + // + // Note that subscribers may still receive multiple copies of a message + // when `enable_exactly_once_delivery` is true if the message was published + // multiple times by a publisher client. These copies are considered distinct + // by Pub/Sub and have distinct MessageID values. + // + // Lastly, to guarantee messages have been acked or nacked properly, you must + // call Message.AckWithResponse() or Message.NackWithResponse(). These return an + // AckResponse which will be ready if the message has been acked (or failed to be acked). + EnableExactlyOnceDelivery bool + + // State indicates whether or not the subscription can receive messages. + // This is an output-only field that indicates whether or not the subscription can + // receive messages. This field is set only in responses from the server; + // it is ignored if it is set in any requests. + State SubscriptionState +} + +// String returns the globally unique printable name of the subscription config. +// This method only works when the subscription config is returned from the server, +// such as when calling `client.Subscription` or `client.Subscriptions`. +// Otherwise, this will return an empty string. +func (s *SubscriptionConfig) String() string { + return s.name +} + +// ID returns the unique identifier of the subscription within its project. +// This method only works when the subscription config is returned from the server, +// such as when calling `client.Subscription` or `client.Subscriptions`. +// Otherwise, this will return an empty string. 
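EnableMessageOrdering above only controls the subscription side; the publisher must also opt in and attach ordering keys. A hedged sketch (Topic.EnableMessageOrdering is the upstream publisher-side switch and is not part of this excerpt; the topic ID and key are placeholders):

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/pubsub"
)

func publishOrdered(ctx context.Context, client *pubsub.Client, topicID string) error {
	t := client.Topic(topicID)
	// Required before publishing messages that carry an OrderingKey.
	t.EnableMessageOrdering = true
	defer t.Stop()

	for i := 0; i < 3; i++ {
		res := t.Publish(ctx, &pubsub.Message{
			Data:        []byte(fmt.Sprintf("event-%d", i)),
			OrderingKey: "customer-42", // same key => delivered in publish order
		})
		if _, err := res.Get(ctx); err != nil {
			return err
		}
	}
	return nil
}
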
+func (s *SubscriptionConfig) ID() string { + slash := strings.LastIndex(s.name, "/") + if slash == -1 { + return "" + } + return s.name[slash+1:] } func (cfg *SubscriptionConfig) toProto(name string) *pb.Subscription { @@ -236,57 +433,75 @@ func (cfg *SubscriptionConfig) toProto(name string) *pb.Subscription { if cfg.PushConfig.Endpoint != "" || len(cfg.PushConfig.Attributes) != 0 || cfg.PushConfig.AuthenticationMethod != nil { pbPushConfig = cfg.PushConfig.toProto() } + var pbBigQueryConfig *pb.BigQueryConfig + if cfg.BigQueryConfig.Table != "" { + pbBigQueryConfig = cfg.BigQueryConfig.toProto() + } var retentionDuration *durpb.Duration if cfg.RetentionDuration != 0 { - retentionDuration = ptypes.DurationProto(cfg.RetentionDuration) + retentionDuration = durpb.New(cfg.RetentionDuration) } var pbDeadLetter *pb.DeadLetterPolicy if cfg.DeadLetterPolicy != nil { pbDeadLetter = cfg.DeadLetterPolicy.toProto() } + var pbRetryPolicy *pb.RetryPolicy + if cfg.RetryPolicy != nil { + pbRetryPolicy = cfg.RetryPolicy.toProto() + } return &pb.Subscription{ - Name: name, - Topic: cfg.Topic.name, - PushConfig: pbPushConfig, - AckDeadlineSeconds: trunc32(int64(cfg.AckDeadline.Seconds())), - RetainAckedMessages: cfg.RetainAckedMessages, - MessageRetentionDuration: retentionDuration, - Labels: cfg.Labels, - ExpirationPolicy: expirationPolicyToProto(cfg.ExpirationPolicy), - DeadLetterPolicy: pbDeadLetter, + Name: name, + Topic: cfg.Topic.name, + PushConfig: pbPushConfig, + BigqueryConfig: pbBigQueryConfig, + AckDeadlineSeconds: trunc32(int64(cfg.AckDeadline.Seconds())), + RetainAckedMessages: cfg.RetainAckedMessages, + MessageRetentionDuration: retentionDuration, + Labels: cfg.Labels, + ExpirationPolicy: expirationPolicyToProto(cfg.ExpirationPolicy), + EnableMessageOrdering: cfg.EnableMessageOrdering, + DeadLetterPolicy: pbDeadLetter, + Filter: cfg.Filter, + RetryPolicy: pbRetryPolicy, + Detached: cfg.Detached, + EnableExactlyOnceDelivery: cfg.EnableExactlyOnceDelivery, } } func protoToSubscriptionConfig(pbSub *pb.Subscription, c *Client) (SubscriptionConfig, error) { rd := time.Hour * 24 * 7 - var err error if pbSub.MessageRetentionDuration != nil { - rd, err = ptypes.Duration(pbSub.MessageRetentionDuration) - if err != nil { - return SubscriptionConfig{}, err - } + rd = pbSub.MessageRetentionDuration.AsDuration() } var expirationPolicy time.Duration if ttl := pbSub.ExpirationPolicy.GetTtl(); ttl != nil { - expirationPolicy, err = ptypes.Duration(ttl) - if err != nil { - return SubscriptionConfig{}, err - } + expirationPolicy = ttl.AsDuration() } dlp := protoToDLP(pbSub.DeadLetterPolicy) + rp := protoToRetryPolicy(pbSub.RetryPolicy) subC := SubscriptionConfig{ - Topic: newTopic(c, pbSub.Topic), - AckDeadline: time.Second * time.Duration(pbSub.AckDeadlineSeconds), - RetainAckedMessages: pbSub.RetainAckedMessages, - RetentionDuration: rd, - Labels: pbSub.Labels, - ExpirationPolicy: expirationPolicy, - DeadLetterPolicy: dlp, - } - pc := protoToPushConfig(pbSub.PushConfig) - if pc != nil { + name: pbSub.Name, + Topic: newTopic(c, pbSub.Topic), + AckDeadline: time.Second * time.Duration(pbSub.AckDeadlineSeconds), + RetainAckedMessages: pbSub.RetainAckedMessages, + RetentionDuration: rd, + Labels: pbSub.Labels, + ExpirationPolicy: expirationPolicy, + EnableMessageOrdering: pbSub.EnableMessageOrdering, + DeadLetterPolicy: dlp, + Filter: pbSub.Filter, + RetryPolicy: rp, + Detached: pbSub.Detached, + TopicMessageRetentionDuration: pbSub.TopicMessageRetentionDuration.AsDuration(), + EnableExactlyOnceDelivery: 
pbSub.EnableExactlyOnceDelivery, + State: SubscriptionState(pbSub.State), + } + if pc := protoToPushConfig(pbSub.PushConfig); pc != nil { subC.PushConfig = *pc } + if bq := protoToBQConfig(pbSub.GetBigqueryConfig()); bq != nil { + subC.BigQueryConfig = *bq + } return subC, nil } @@ -309,11 +524,22 @@ func protoToPushConfig(pbPc *pb.PushConfig) *PushConfig { return pc } +func protoToBQConfig(pbBQ *pb.BigQueryConfig) *BigQueryConfig { + if pbBQ == nil { + return nil + } + bq := &BigQueryConfig{ + Table: pbBQ.GetTable(), + UseTopicSchema: pbBQ.GetUseTopicSchema(), + DropUnknownFields: pbBQ.GetDropUnknownFields(), + WriteMetadata: pbBQ.GetWriteMetadata(), + State: BigQueryConfigState(pbBQ.State), + } + return bq +} + // DeadLetterPolicy specifies the conditions for dead lettering messages in // a subscription. -// -// It is EXPERIMENTAL and a part of a closed alpha that may not be -// accessible to all users. type DeadLetterPolicy struct { DeadLetterTopic string MaxDeliveryAttempts int @@ -338,6 +564,80 @@ func protoToDLP(pbDLP *pb.DeadLetterPolicy) *DeadLetterPolicy { } } +// RetryPolicy specifies how Cloud Pub/Sub retries message delivery. +// +// Retry delay will be exponential based on provided minimum and maximum +// backoffs. https://en.wikipedia.org/wiki/Exponential_backoff. +// +// RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded +// events for a given message. +// +// Retry Policy is implemented on a best effort basis. At times, the delay +// between consecutive deliveries may not match the configuration. That is, +// delay can be more or less than configured backoff. +type RetryPolicy struct { + // MinimumBackoff is the minimum delay between consecutive deliveries of a + // given message. Value should be between 0 and 600 seconds. Defaults to 10 seconds. + MinimumBackoff optional.Duration + // MaximumBackoff is the maximum delay between consecutive deliveries of a + // given message. Value should be between 0 and 600 seconds. Defaults to 600 seconds. + MaximumBackoff optional.Duration +} + +func (rp *RetryPolicy) toProto() *pb.RetryPolicy { + if rp == nil { + return nil + } + // If RetryPolicy is the empty struct, take this as an instruction + // to remove RetryPolicy from the subscription. + if rp.MinimumBackoff == nil && rp.MaximumBackoff == nil { + return nil + } + + // Initialize minDur and maxDur to be negative, such that if the conversion from an + // optional fails, RetryPolicy won't be updated in the proto as it will remain nil. + var minDur time.Duration = -1 + var maxDur time.Duration = -1 + if rp.MinimumBackoff != nil { + minDur = optional.ToDuration(rp.MinimumBackoff) + } + if rp.MaximumBackoff != nil { + maxDur = optional.ToDuration(rp.MaximumBackoff) + } + + var minDurPB, maxDurPB *durpb.Duration + if minDur > 0 { + minDurPB = durpb.New(minDur) + } + if maxDur > 0 { + maxDurPB = durpb.New(maxDur) + } + + return &pb.RetryPolicy{ + MinimumBackoff: minDurPB, + MaximumBackoff: maxDurPB, + } +} + +func protoToRetryPolicy(rp *pb.RetryPolicy) *RetryPolicy { + if rp == nil { + return nil + } + var minBackoff, maxBackoff time.Duration + if rp.MinimumBackoff != nil { + minBackoff = rp.MinimumBackoff.AsDuration() + } + if rp.MaximumBackoff != nil { + maxBackoff = rp.MaximumBackoff.AsDuration() + } + + retryPolicy := &RetryPolicy{ + MinimumBackoff: minBackoff, + MaximumBackoff: maxBackoff, + } + return retryPolicy +} + // ReceiveSettings configure the Receive method. // A zero ReceiveSettings will result in values equivalent to DefaultReceiveSettings. 
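DeadLetterPolicy and RetryPolicy above are applied when the subscription is created (or later via Update). A sketch; the dead-letter topic must already exist, and the Pub/Sub service account needs publish access to it:

package example

import (
	"context"
	"time"

	"cloud.google.com/go/pubsub"
)

func createSubWithDeadLetter(ctx context.Context, client *pubsub.Client, subID string, src, dead *pubsub.Topic) (*pubsub.Subscription, error) {
	return client.CreateSubscription(ctx, subID, pubsub.SubscriptionConfig{
		Topic: src,
		DeadLetterPolicy: &pubsub.DeadLetterPolicy{
			// Fully qualified name, e.g. "projects/<project>/topics/<topic>".
			DeadLetterTopic:     dead.String(),
			MaxDeliveryAttempts: 5,
		},
		// Back off redeliveries instead of retrying immediately.
		RetryPolicy: &pubsub.RetryPolicy{
			MinimumBackoff: 10 * time.Second,
			MaximumBackoff: 5 * time.Minute,
		},
	})
}
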
type ReceiveSettings struct { @@ -356,10 +656,20 @@ type ReceiveSettings struct { // bounds the maximum amount of time before a message redelivery in the // event the subscriber fails to extend the deadline. // - // MaxExtensionPeriod configuration can be disabled by specifying a - // duration less than (or equal to) 0. + // MaxExtensionPeriod must be between 10s and 600s (inclusive). This configuration + // can be disabled by specifying a duration less than (or equal to) 0. MaxExtensionPeriod time.Duration + // MinExtensionPeriod is the the min duration for a single lease extension attempt. + // By default the 99th percentile of ack latency is used to determine lease extension + // periods but this value can be set to minimize the number of extraneous RPCs sent. + // + // MinExtensionPeriod must be between 10s and 600s (inclusive). This configuration + // can be disabled by specifying a duration less than (or equal to) 0. + // Defaults to off but set to 60 seconds if the subscription has exactly-once delivery enabled, + // which will be added in a future release. + MinExtensionPeriod time.Duration + // MaxOutstandingMessages is the maximum number of unprocessed messages // (unacknowledged but not yet expired). If MaxOutstandingMessages is 0, it // will be treated as if it were DefaultReceiveSettings.MaxOutstandingMessages. @@ -374,9 +684,17 @@ type ReceiveSettings struct { // for unprocessed messages. MaxOutstandingBytes int - // NumGoroutines is the number of goroutines Receive will spawn to pull - // messages concurrently. If NumGoroutines is less than 1, it will be treated - // as if it were DefaultReceiveSettings.NumGoroutines. + // UseLegacyFlowControl disables enforcing flow control settings at the Cloud + // PubSub server and the less accurate method of only enforcing flow control + // at the client side is used. + // The default is false. + UseLegacyFlowControl bool + + // NumGoroutines is the number of goroutines that each datastructure along + // the Receive path will spawn. Adjusting this value adjusts concurrency + // along the receive path. + // + // NumGoroutines defaults to DefaultReceiveSettings.NumGoroutines. // // NumGoroutines does not limit the number of messages that can be processed // concurrently. Even with one goroutine, many messages might be processed at @@ -385,12 +703,21 @@ type ReceiveSettings struct { // processed concurrently, set MaxOutstandingMessages. NumGoroutines int - // If Synchronous is true, then no more than MaxOutstandingMessages will be in - // memory at one time. (In contrast, when Synchronous is false, more than - // MaxOutstandingMessages may have been received from the service and in memory - // before being processed.) MaxOutstandingBytes still refers to the total bytes - // processed, rather than in memory. NumGoroutines is ignored. + // Synchronous switches the underlying receiving mechanism to unary Pull. + // When Synchronous is false, the more performant StreamingPull is used. + // StreamingPull also has the benefit of subscriber affinity when using + // ordered delivery. + // When Synchronous is true, NumGoroutines is set to 1 and only one Pull + // RPC will be made to poll messages at a time. // The default is false. + // + // Deprecated. + // Previously, users might use Synchronous mode since StreamingPull had a limitation + // where MaxOutstandingMessages was not always respected with large batches of + // small messages. 
With server side flow control, this is no longer an issue + // and we recommend switching to the default StreamingPull mode by setting + // Synchronous to false. + // Synchronous mode does not work with exactly once delivery. Synchronous bool } @@ -413,16 +740,14 @@ type ReceiveSettings struct { // idea of a duration that is short, but not so short that we perform excessive RPCs. const synchronousWaitTime = 100 * time.Millisecond -// This is a var so that tests can change it. -var minAckDeadline = 10 * time.Second - // DefaultReceiveSettings holds the default values for ReceiveSettings. var DefaultReceiveSettings = ReceiveSettings{ MaxExtension: 60 * time.Minute, MaxExtensionPeriod: 0, + MinExtensionPeriod: 0, MaxOutstandingMessages: 1000, MaxOutstandingBytes: 1e9, // 1G - NumGoroutines: 1, + NumGoroutines: 10, } // Delete deletes the subscription. @@ -457,9 +782,14 @@ func (s *Subscription) Config(ctx context.Context) (SubscriptionConfig, error) { // SubscriptionConfigToUpdate describes how to update a subscription. type SubscriptionConfigToUpdate struct { - // If non-nil, the push config is changed. + // If non-nil, the push config is changed. Cannot be set at the same time as BigQueryConfig. + // If currently in push mode, set this value to the zero value to revert to a Pull based subscription. PushConfig *PushConfig + // If non-nil, the bigquery config is changed. Cannot be set at the same time as PushConfig. + // If currently in bigquery mode, set this value to the zero value to revert to a Pull based subscription, + BigQueryConfig *BigQueryConfig + // If non-zero, the ack deadline is changed. AckDeadline time.Duration @@ -474,9 +804,6 @@ type SubscriptionConfigToUpdate struct { // If non-nil, DeadLetterPolicy is changed. To remove dead lettering from // a subscription, use the zero value for this struct. - // - // It is EXPERIMENTAL and a part of a closed alpha that may not be - // accessible to all users. DeadLetterPolicy *DeadLetterPolicy // If non-nil, the current set of labels is completely @@ -484,6 +811,14 @@ type SubscriptionConfigToUpdate struct { // This field has beta status. It is not subject to the stability guarantee // and may change. Labels map[string]string + + // If non-nil, RetryPolicy is changed. To remove an existing retry policy + // (to redeliver messages as soon as possible) use a pointer to the zero value + // for this struct. + RetryPolicy *RetryPolicy + + // If set, EnableExactlyOnce is changed. + EnableExactlyOnceDelivery optional.Bool } // Update changes an existing subscription according to the fields set in cfg. 
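The new update fields above (BigQueryConfig, RetryPolicy, EnableExactlyOnceDelivery) are plumbed through updateRequest in the next hunk; only the fields that are set end up in the FieldMask. A sketch of a partial update:

package example

import (
	"context"
	"time"

	"cloud.google.com/go/pubsub"
)

func tuneSubscription(ctx context.Context, client *pubsub.Client, subID string) (pubsub.SubscriptionConfig, error) {
	sub := client.Subscription(subID)
	return sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
		// Slow down redelivery of nacked or expired messages.
		RetryPolicy: &pubsub.RetryPolicy{
			MinimumBackoff: 5 * time.Second,
			MaximumBackoff: 2 * time.Minute,
		},
		// optional.Bool, so a plain bool literal is accepted.
		EnableExactlyOnceDelivery: true,
	})
}
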
@@ -493,7 +828,7 @@ type SubscriptionConfigToUpdate struct { func (s *Subscription) Update(ctx context.Context, cfg SubscriptionConfigToUpdate) (SubscriptionConfig, error) { req := s.updateRequest(&cfg) if err := cfg.validate(); err != nil { - return SubscriptionConfig{}, fmt.Errorf("pubsub: UpdateSubscription %v", err) + return SubscriptionConfig{}, fmt.Errorf("pubsub: UpdateSubscription %w", err) } if len(req.UpdateMask.Paths) == 0 { return SubscriptionConfig{}, errors.New("pubsub: UpdateSubscription call with nothing to update") @@ -512,6 +847,10 @@ func (s *Subscription) updateRequest(cfg *SubscriptionConfigToUpdate) *pb.Update psub.PushConfig = cfg.PushConfig.toProto() paths = append(paths, "push_config") } + if cfg.BigQueryConfig != nil { + psub.BigqueryConfig = cfg.BigQueryConfig.toProto() + paths = append(paths, "bigquery_config") + } if cfg.AckDeadline != 0 { psub.AckDeadlineSeconds = trunc32(int64(cfg.AckDeadline.Seconds())) paths = append(paths, "ack_deadline_seconds") @@ -521,7 +860,7 @@ func (s *Subscription) updateRequest(cfg *SubscriptionConfigToUpdate) *pb.Update paths = append(paths, "retain_acked_messages") } if cfg.RetentionDuration != 0 { - psub.MessageRetentionDuration = ptypes.DurationProto(cfg.RetentionDuration) + psub.MessageRetentionDuration = durpb.New(cfg.RetentionDuration) paths = append(paths, "message_retention_duration") } if cfg.ExpirationPolicy != nil { @@ -536,6 +875,14 @@ func (s *Subscription) updateRequest(cfg *SubscriptionConfigToUpdate) *pb.Update psub.Labels = cfg.Labels paths = append(paths, "labels") } + if cfg.RetryPolicy != nil { + psub.RetryPolicy = cfg.RetryPolicy.toProto() + paths = append(paths, "retry_policy") + } + if cfg.EnableExactlyOnceDelivery != nil { + psub.EnableExactlyOnceDelivery = optional.ToBool(cfg.EnableExactlyOnceDelivery) + paths = append(paths, "enable_exactly_once_delivery") + } return &pb.UpdateSubscriptionRequest{ Subscription: psub, UpdateMask: &fmpb.FieldMask{Paths: paths}, @@ -556,11 +903,11 @@ func (cfg *SubscriptionConfigToUpdate) validate() error { if cfg == nil || cfg.ExpirationPolicy == nil { return nil } - policy, min := optional.ToDuration(cfg.ExpirationPolicy), minExpirationPolicy - if policy == 0 || policy >= min { - return nil + expPolicy, min := optional.ToDuration(cfg.ExpirationPolicy), minExpirationPolicy + if expPolicy != 0 && expPolicy < min { + return fmt.Errorf("invalid expiration policy(%q) < minimum(%q)", expPolicy, min) } - return fmt.Errorf("invalid expiration policy(%q) < minimum(%q)", policy, min) + return nil } func expirationPolicyToProto(expirationPolicy optional.Duration) *pb.ExpirationPolicy { @@ -574,7 +921,7 @@ func expirationPolicyToProto(expirationPolicy optional.Duration) *pb.ExpirationP // https://godoc.org/google.golang.org/genproto/googleapis/pubsub/v1#ExpirationPolicy.Ttl // if ExpirationPolicy.Ttl is set to nil, the expirationPolicy is toggled to NEVER expire. if dur != 0 { - ttl = ptypes.DurationProto(dur) + ttl = durpb.New(dur) } return &pb.ExpirationPolicy{ Ttl: ttl, @@ -634,9 +981,9 @@ var errReceiveInProgress = errors.New("pubsub: Receive already in progress for t // // The standard way to terminate a Receive is to cancel its context: // -// cctx, cancel := context.WithCancel(ctx) -// err := sub.Receive(cctx, callback) -// // Call cancel from callback, or another goroutine. +// cctx, cancel := context.WithCancel(ctx) +// err := sub.Receive(cctx, callback) +// // Call cancel from callback, or another goroutine. 
// // If the service returns a non-retryable error, Receive returns that error after // all of the outstanding calls to f have returned. If ctx is done, Receive @@ -666,6 +1013,9 @@ func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Mes s.mu.Unlock() defer func() { s.mu.Lock(); s.receiveActive = false; s.mu.Unlock() }() + s.checkOrdering(ctx) + + // TODO(hongalex): move settings check to a helper function to make it more testable maxCount := s.ReceiveSettings.MaxOutstandingMessages if maxCount == 0 { maxCount = DefaultReceiveSettings.MaxOutstandingMessages @@ -681,6 +1031,15 @@ func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Mes // If MaxExtension is negative, disable automatic extension. maxExt = 0 } + maxExtPeriod := s.ReceiveSettings.MaxExtensionPeriod + if maxExtPeriod < 0 { + maxExtPeriod = DefaultReceiveSettings.MaxExtensionPeriod + } + minExtPeriod := s.ReceiveSettings.MinExtensionPeriod + if minExtPeriod < 0 { + minExtPeriod = DefaultReceiveSettings.MinExtensionPeriod + } + var numGoroutines int switch { case s.ReceiveSettings.Synchronous: @@ -692,110 +1051,194 @@ func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Mes } // TODO(jba): add tests that verify that ReceiveSettings are correctly processed. po := &pullOptions{ - maxExtension: maxExt, - maxPrefetch: trunc32(int64(maxCount)), - synchronous: s.ReceiveSettings.Synchronous, + maxExtension: maxExt, + maxExtensionPeriod: maxExtPeriod, + minExtensionPeriod: minExtPeriod, + maxPrefetch: trunc32(int64(maxCount)), + synchronous: s.ReceiveSettings.Synchronous, + maxOutstandingMessages: maxCount, + maxOutstandingBytes: maxBytes, + useLegacyFlowControl: s.ReceiveSettings.UseLegacyFlowControl, } - fc := newFlowController(maxCount, maxBytes) + fc := newSubscriptionFlowController(FlowControlSettings{ + MaxOutstandingMessages: maxCount, + MaxOutstandingBytes: maxBytes, + LimitExceededBehavior: FlowControlBlock, + }) + + sched := scheduler.NewReceiveScheduler(maxCount) // Wait for all goroutines started by Receive to return, so instead of an // obscure goroutine leak we have an obvious blocked call to Receive. group, gctx := errgroup.WithContext(ctx) - for i := 0; i < numGoroutines; i++ { - group.Go(func() error { - return s.receive(gctx, po, fc, f) - }) + + type closeablePair struct { + wg *sync.WaitGroup + iter *messageIterator } - return group.Wait() -} -func (s *Subscription) receive(ctx context.Context, po *pullOptions, fc *flowController, f func(context.Context, *Message)) error { - // Cancel a sub-context when we return, to kick the context-aware callbacks - // and the goroutine below. - ctx2, cancel := context.WithCancel(ctx) - // The iterator does not use the context passed to Receive. If it did, canceling - // that context would immediately stop the iterator without waiting for unacked - // messages. - iter := newMessageIterator(s.c.subc, s.name, &s.ReceiveSettings.MaxExtensionPeriod, po) - - // We cannot use errgroup from Receive here. Receive might already be calling group.Wait, - // and group.Wait cannot be called concurrently with group.Go. We give each receive() its - // own WaitGroup instead. - // Since wg.Add is only called from the main goroutine, wg.Wait is guaranteed - // to be called after all Adds. - var wg sync.WaitGroup - wg.Add(1) - go func() { - <-ctx2.Done() - // Call stop when Receive's context is done. - // Stop will block until all outstanding messages have been acknowledged - // or there was a fatal service error. 
- iter.stop() - wg.Done() - }() - defer wg.Wait() - - defer cancel() - for { - var maxToPull int32 // maximum number of messages to pull - if po.synchronous { - if po.maxPrefetch < 0 { - // If there is no limit on the number of messages to pull, use a reasonable default. - maxToPull = 1000 - } else { - // Limit the number of messages in memory to MaxOutstandingMessages - // (here, po.maxPrefetch). For each message currently in memory, we have - // called fc.acquire but not fc.release: this is fc.count(). The next - // call to Pull should fetch no more than the difference between these - // values. - maxToPull = po.maxPrefetch - int32(fc.count()) - if maxToPull <= 0 { - // Wait for some callbacks to finish. - if err := gax.Sleep(ctx, synchronousWaitTime); err != nil { + var pairs []closeablePair + + // Cancel a sub-context which, when we finish a single receiver, will kick + // off the context-aware callbacks and the goroutine below (which stops + // all receivers, iterators, and the scheduler). + ctx2, cancel2 := context.WithCancel(gctx) + defer cancel2() + + for i := 0; i < numGoroutines; i++ { + // The iterator does not use the context passed to Receive. If it did, + // canceling that context would immediately stop the iterator without + // waiting for unacked messages. + iter := newMessageIterator(s.c.subc, s.name, po) + + // We cannot use errgroup from Receive here. Receive might already be + // calling group.Wait, and group.Wait cannot be called concurrently with + // group.Go. We give each receive() its own WaitGroup instead. + // + // Since wg.Add is only called from the main goroutine, wg.Wait is + // guaranteed to be called after all Adds. + var wg sync.WaitGroup + wg.Add(1) + pairs = append(pairs, closeablePair{wg: &wg, iter: iter}) + + group.Go(func() error { + defer wg.Wait() + defer cancel2() + for { + var maxToPull int32 // maximum number of messages to pull + if po.synchronous { + if po.maxPrefetch < 0 { + // If there is no limit on the number of messages to + // pull, use a reasonable default. + maxToPull = 1000 + } else { + // Limit the number of messages in memory to MaxOutstandingMessages + // (here, po.maxPrefetch). For each message currently in memory, we have + // called fc.acquire but not fc.release: this is fc.count(). The next + // call to Pull should fetch no more than the difference between these + // values. + maxToPull = po.maxPrefetch - int32(fc.count()) + if maxToPull <= 0 { + // Wait for some callbacks to finish. + if err := gax.Sleep(ctx, synchronousWaitTime); err != nil { + // Return nil if the context is done, not err. + return nil + } + continue + } + } + } + // If the context is done, don't pull more messages. + select { + case <-ctx.Done(): + return nil + default: + } + msgs, err := iter.receive(maxToPull) + if err == io.EOF { + return nil + } + if err != nil { + return err + } + // If context is done and messages have been pulled, + // nack them. + select { + case <-ctx.Done(): + for _, m := range msgs { + m.Nack() + } + return nil + default: + } + for i, msg := range msgs { + msg := msg + // TODO(jba): call acquire closer to when the message is allocated. + if err := fc.acquire(ctx, len(msg.Data)); err != nil { + // TODO(jba): test that these "orphaned" messages are nacked immediately when ctx is done. + for _, m := range msgs[i:] { + m.Nack() + } // Return nil if the context is done, not err. 
return nil } - continue - } - } - } - msgs, err := iter.receive(maxToPull) - if err == io.EOF { - return nil - } - if err != nil { - return err - } - for i, msg := range msgs { - msg := msg - // TODO(jba): call acquire closer to when the message is allocated. - if err := fc.acquire(ctx, len(msg.Data)); err != nil { - // TODO(jba): test that these "orphaned" messages are nacked immediately when ctx is done. - for _, m := range msgs[i:] { - m.Nack() + iter.eoMu.RLock() + ackh, _ := msgAckHandler(msg, iter.enableExactlyOnceDelivery) + iter.eoMu.RUnlock() + old := ackh.doneFunc + msgLen := len(msg.Data) + ackh.doneFunc = func(ackID string, ack bool, r *ipubsub.AckResult, receiveTime time.Time) { + defer fc.release(ctx, msgLen) + old(ackID, ack, r, receiveTime) + } + wg.Add(1) + // Make sure the subscription has ordering enabled before adding to scheduler. + var key string + if s.enableOrdering { + key = msg.OrderingKey + } + // TODO(deklerk): Can we have a generic handler at the + // constructor level? + if err := sched.Add(key, msg, func(msg interface{}) { + defer wg.Done() + f(ctx2, msg.(*Message)) + }); err != nil { + wg.Done() + // If there are any errors with scheduling messages, + // nack them so they can be redelivered. + msg.Nack() + // Currently, only this error is returned by the receive scheduler. + if errors.Is(err, scheduler.ErrReceiveDraining) { + return nil + } + return err + } } - // Return nil if the context is done, not err. - return nil - } - old := msg.doneFunc - msgLen := len(msg.Data) - msg.doneFunc = func(ackID string, ack bool, receiveTime time.Time) { - defer fc.release(msgLen) - old(ackID, ack, receiveTime) } - wg.Add(1) - go func() { - defer wg.Done() - f(ctx2, msg) - }() + }) + } + + go func() { + <-ctx2.Done() + + // Wait for all iterators to stop. + for _, p := range pairs { + p.iter.stop() + p.wg.Done() } + + // This _must_ happen after every iterator has stopped, or some + // iterator will still have undelivered messages but the scheduler will + // already be shut down. + sched.Shutdown() + }() + + return group.Wait() +} + +// checkOrdering calls Config to check theEnableMessageOrdering field. +// If this call fails (e.g. because the service account doesn't have +// the roles/viewer or roles/pubsub.viewer role) we will assume +// EnableMessageOrdering to be true. +// See: https://github.com/googleapis/google-cloud-go/issues/3884 +func (s *Subscription) checkOrdering(ctx context.Context) { + cfg, err := s.Config(ctx) + if err != nil { + s.enableOrdering = true + } else { + s.enableOrdering = cfg.EnableMessageOrdering } } type pullOptions struct { - maxExtension time.Duration - maxPrefetch int32 + maxExtension time.Duration // the maximum time to extend a message's ack deadline in total + maxExtensionPeriod time.Duration // the maximum time to extend a message's ack deadline per modack rpc + minExtensionPeriod time.Duration // the minimum time to extend a message's lease duration per modack + maxPrefetch int32 // the max number of outstanding messages, used to calculate maxToPull // If true, use unary Pull instead of StreamingPull, and never pull more // than maxPrefetch messages. 
- synchronous bool + synchronous bool + maxOutstandingMessages int + maxOutstandingBytes int + useLegacyFlowControl bool } diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go index cf8fc5d3460e..c96b9ce9bce8 100644 --- a/vendor/cloud.google.com/go/pubsub/topic.go +++ b/vendor/cloud.google.com/go/pubsub/topic.go @@ -25,7 +25,10 @@ import ( "time" "cloud.google.com/go/iam" - "github.com/golang/protobuf/proto" + "cloud.google.com/go/internal/optional" + ipubsub "cloud.google.com/go/internal/pubsub" + vkit "cloud.google.com/go/pubsub/apiv1" + "cloud.google.com/go/pubsub/internal/scheduler" gax "github.com/googleapis/gax-go/v2" "go.opencensus.io/stats" "go.opencensus.io/tag" @@ -35,6 +38,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" ) const ( @@ -47,6 +52,13 @@ const ( MaxPublishRequestBytes = 1e7 ) +const ( + // TODO: math.MaxInt was added in Go 1.17. We should use that once 1.17 + // becomes the minimum supported version of Go. + intSize = 32 << (^uint(0) >> 63) + maxInt = 1<<(intSize-1) - 1 +) + // ErrOversizedMessage indicates that a message's size exceeds MaxPublishRequestBytes. var ErrOversizedMessage = bundler.ErrOversizedItem @@ -62,9 +74,14 @@ type Topic struct { // first call to Publish. The default is DefaultPublishSettings. PublishSettings PublishSettings - mu sync.RWMutex - stopped bool - bundler *bundler.Bundler + mu sync.RWMutex + stopped bool + scheduler *scheduler.PublishScheduler + + flowController + + // EnableMessageOrdering enables delivery of ordered keys. + EnableMessageOrdering bool } // PublishSettings control the bundling of published messages. @@ -80,7 +97,9 @@ type PublishSettings struct { // Publish a batch when its size in bytes reaches this value. ByteThreshold int - // The number of goroutines that invoke the Publish RPC concurrently. + // The number of goroutines used in each of the data structures that are + // involved along the the Publish path. Adjusting this value adjusts + // concurrency along the publish path. // // Defaults to a multiple of GOMAXPROCS. NumGoroutines int @@ -89,10 +108,15 @@ type PublishSettings struct { Timeout time.Duration // The maximum number of bytes that the Bundler will keep in memory before - // returning ErrOverflow. + // returning ErrOverflow. This is now superseded by FlowControlSettings.MaxOutstandingBytes. + // If MaxOutstandingBytes is set, that value will override BufferedByteLimit. // // Defaults to DefaultPublishSettings.BufferedByteLimit. + // Deprecated: Set `Topic.PublishSettings.FlowControlSettings.MaxOutstandingBytes` instead. BufferedByteLimit int + + // FlowControlSettings defines publisher flow control settings. + FlowControlSettings FlowControlSettings } // DefaultPublishSettings holds the default values for topics' PublishSettings. @@ -105,6 +129,11 @@ var DefaultPublishSettings = PublishSettings{ // chosen as a reasonable amount of messages in the worst case whilst still // capping the number to a low enough value to not OOM users. BufferedByteLimit: 10 * MaxPublishRequestBytes, + FlowControlSettings: FlowControlSettings{ + MaxOutstandingMessages: 1000, + MaxOutstandingBytes: -1, + LimitExceededBehavior: FlowControlIgnore, + }, } // CreateTopic creates a new topic. 
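The topic.go changes above introduce publisher flow control (PublishSettings.FlowControlSettings), ordered publishing (Topic.EnableMessageOrdering, Message.OrderingKey, Topic.ResumePublish) and Topic.Flush. A minimal, hypothetical sketch of how these fit together (not part of this patch; the project/topic IDs, ordering key, and limits are placeholders):

package main

import (
	"context"
	"log"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "example-project") // placeholder project ID
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer client.Close()

	t := client.Topic("example-topic") // placeholder topic ID
	t.EnableMessageOrdering = true     // required when Message.OrderingKey is set

	// Bound publisher memory with the new flow control settings; when the
	// limits are hit, Publish blocks instead of buffering without bound.
	t.PublishSettings.FlowControlSettings = pubsub.FlowControlSettings{
		MaxOutstandingMessages: 100,
		MaxOutstandingBytes:    10 * 1024 * 1024,
		LimitExceededBehavior:  pubsub.FlowControlBlock,
	}

	res := t.Publish(ctx, &pubsub.Message{
		Data:        []byte("payload"),
		OrderingKey: "example-key", // placeholder ordering key
	})
	if _, err := res.Get(ctx); err != nil {
		// Publishing for this key is paused after a failure; resume explicitly.
		t.ResumePublish("example-key")
	}

	t.Flush() // block until buffered messages have been sent
	t.Stop()
}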
@@ -136,12 +165,9 @@ func (c *Client) CreateTopic(ctx context.Context, topicID string) (*Topic, error // If the topic already exists, an error will be returned. func (c *Client) CreateTopicWithConfig(ctx context.Context, topicID string, tc *TopicConfig) (*Topic, error) { t := c.Topic(topicID) - _, err := c.pubc.CreateTopic(ctx, &pb.Topic{ - Name: t.name, - Labels: tc.Labels, - MessageStoragePolicy: messageStoragePolicyToProto(&tc.MessageStoragePolicy), - KmsKeyName: tc.KMSKeyName, - }) + topic := tc.toProto() + topic.Name = t.name + _, err := c.pubc.CreateTopic(ctx, topic) if err != nil { return nil, err } @@ -178,6 +204,9 @@ func newTopic(c *Client, name string) *Topic { // TopicConfig describes the configuration of a topic. type TopicConfig struct { + // The fully qualified identifier for the topic, in the format "projects//topics/" + name string + // The set of labels for the topic. Labels map[string]string @@ -188,6 +217,57 @@ type TopicConfig struct { // published to this topic, in the format // "projects/P/locations/L/keyRings/R/cryptoKeys/K". KMSKeyName string + + // Schema defines the schema settings upon topic creation. This cannot + // be modified after a topic has been created. + SchemaSettings *SchemaSettings + + // RetentionDuration configures the minimum duration to retain a message + // after it is published to the topic. If this field is set, messages published + // to the topic in the last `RetentionDuration` are always available to subscribers. + // For instance, it allows any attached subscription to [seek to a + // timestamp](https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) + // that is up to `RetentionDuration` in the past. If this field is + // not set, message retention is controlled by settings on individual + // subscriptions. Cannot be more than 7 days or less than 10 minutes. + // + // For more information, see https://cloud.google.com/pubsub/docs/replay-overview#topic_message_retention. + RetentionDuration optional.Duration +} + +// String returns the printable globally unique name for the topic config. +// This method only works when the topic config is returned from the server, +// such as when calling `client.Topic` or `client.Topics`. +// Otherwise, this will return an empty string. +func (t *TopicConfig) String() string { + return t.name +} + +// ID returns the unique identifier of the topic within its project. +// This method only works when the topic config is returned from the server, +// such as when calling `client.Topic` or `client.Topics`. +// Otherwise, this will return an empty string. +func (t *TopicConfig) ID() string { + slash := strings.LastIndex(t.name, "/") + if slash == -1 { + return "" + } + return t.name[slash+1:] +} + +func (tc *TopicConfig) toProto() *pb.Topic { + var retDur *durationpb.Duration + if tc.RetentionDuration != nil { + retDur = durationpb.New(optional.ToDuration(tc.RetentionDuration)) + } + pbt := &pb.Topic{ + Labels: tc.Labels, + MessageStoragePolicy: messageStoragePolicyToProto(&tc.MessageStoragePolicy), + KmsKeyName: tc.KMSKeyName, + SchemaSettings: schemaSettingsToProto(tc.SchemaSettings), + MessageRetentionDuration: retDur, + } + return pbt } // TopicConfigToUpdate describes how to update a topic. @@ -207,14 +287,43 @@ type TopicConfigToUpdate struct { // This field has beta status. It is not subject to the stability guarantee // and may change. MessageStoragePolicy *MessageStoragePolicy + + // If set to a positive duration between 10 minutes and 7 days, RetentionDuration is changed. 
+ // If set to a negative value, this clears RetentionDuration from the topic. + // If nil, the retention duration remains unchanged. + RetentionDuration optional.Duration } func protoToTopicConfig(pbt *pb.Topic) TopicConfig { - return TopicConfig{ + tc := TopicConfig{ + name: pbt.Name, Labels: pbt.Labels, MessageStoragePolicy: protoToMessageStoragePolicy(pbt.MessageStoragePolicy), KMSKeyName: pbt.KmsKeyName, + SchemaSettings: protoToSchemaSettings(pbt.SchemaSettings), + } + if pbt.GetMessageRetentionDuration() != nil { + tc.RetentionDuration = pbt.GetMessageRetentionDuration().AsDuration() } + return tc +} + +// DetachSubscriptionResult is the response for the DetachSubscription method. +// Reserved for future use. +type DetachSubscriptionResult struct{} + +// DetachSubscription detaches a subscription from its topic. All messages +// retained in the subscription are dropped. Subsequent `Pull` and `StreamingPull` +// requests will return FAILED_PRECONDITION. If the subscription is a push +// subscription, pushes to the endpoint will stop. +func (c *Client) DetachSubscription(ctx context.Context, sub string) (*DetachSubscriptionResult, error) { + _, err := c.pubc.DetachSubscription(ctx, &pb.DetachSubscriptionRequest{ + Subscription: sub, + }) + if err != nil { + return nil, err + } + return &DetachSubscriptionResult{}, nil } // MessageStoragePolicy constrains how messages published to the topic may be stored. It @@ -285,6 +394,15 @@ func (t *Topic) updateRequest(cfg TopicConfigToUpdate) *pb.UpdateTopicRequest { pt.MessageStoragePolicy = messageStoragePolicyToProto(cfg.MessageStoragePolicy) paths = append(paths, "message_storage_policy") } + if cfg.RetentionDuration != nil { + r := optional.ToDuration(cfg.RetentionDuration) + pt.MessageRetentionDuration = durationpb.New(r) + if r < 0 { + // Clear MessageRetentionDuration if sentinel value is read. + pt.MessageRetentionDuration = nil + } + paths = append(paths, "message_retention_duration") + } return &pb.UpdateTopicRequest{ Topic: pt, UpdateMask: &fmpb.FieldMask{Paths: paths}, @@ -295,7 +413,8 @@ func (t *Topic) updateRequest(cfg TopicConfigToUpdate) *pb.UpdateTopicRequest { func (c *Client) Topics(ctx context.Context) *TopicIterator { it := c.pubc.ListTopics(ctx, &pb.ListTopicsRequest{Project: c.fullyQualifiedProjectName()}) return &TopicIterator{ - c: c, + c: c, + it: it, next: func() (string, error) { topic, err := it.Next() if err != nil { @@ -309,6 +428,7 @@ func (c *Client) Topics(ctx context.Context) *TopicIterator { // TopicIterator is an iterator that returns a series of topics. type TopicIterator struct { c *Client + it *vkit.TopicIterator next func() (string, error) } @@ -321,6 +441,19 @@ func (tps *TopicIterator) Next() (*Topic, error) { return newTopic(tps.c, topicName), nil } +// NextConfig returns the next topic config. If there are no more topics, +// iterator.Done will be returned. +// This call shares the underlying iterator with calls to `TopicIterator.Next`. +// If you wish to use mix calls, create separate iterator instances for both. +func (t *TopicIterator) NextConfig() (*TopicConfig, error) { + tpb, err := t.it.Next() + if err != nil { + return nil, err + } + cfg := protoToTopicConfig(tpb) + return &cfg, nil +} + // ID returns the unique identifier of the topic within its project. 
func (t *Topic) ID() string { slash := strings.LastIndex(t.name, "/") @@ -376,6 +509,17 @@ func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator { var errTopicStopped = errors.New("pubsub: Stop has been called for this topic") +// A PublishResult holds the result from a call to Publish. +// +// Call Get to obtain the result of the Publish call. Example: +// +// // Get blocks until Publish completes or ctx is done. +// id, err := r.Get(ctx) +// if err != nil { +// // TODO: Handle error. +// } +type PublishResult = ipubsub.PublishResult + // Publish publishes msg to the topic asynchronously. Messages are batched and // sent according to the topic's PublishSettings. Publish never blocks. // @@ -386,34 +530,44 @@ var errTopicStopped = errors.New("pubsub: Stop has been called for this topic") // need to be stopped by calling t.Stop(). Once stopped, future calls to Publish // will immediately return a PublishResult with an error. func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult { - // Use a PublishRequest with only the Messages field to calculate the size - // of an individual message. This accurately calculates the size of the - // encoded proto message by accounting for the length of an individual - // PubSubMessage and Data/Attributes field. - // TODO(hongalex): if this turns out to take significant time, try to approximate it. - msg.size = proto.Size(&pb.PublishRequest{ - Messages: []*pb.PubsubMessage{ - { - Data: msg.Data, - Attributes: msg.Attributes, - }, - }, + ctx, err := tag.New(ctx, tag.Insert(keyStatus, "OK"), tag.Upsert(keyTopic, t.name)) + if err != nil { + log.Printf("pubsub: cannot create context with tag in Publish: %v", err) + } + + r := ipubsub.NewPublishResult() + if !t.EnableMessageOrdering && msg.OrderingKey != "" { + ipubsub.SetPublishResult(r, "", errors.New("Topic.EnableMessageOrdering=false, but an OrderingKey was set in Message. Please remove the OrderingKey or turn on Topic.EnableMessageOrdering")) + return r + } + + // Calculate the size of the encoded proto message by accounting + // for the length of an individual PubSubMessage and Data/Attributes field. + msgSize := proto.Size(&pb.PubsubMessage{ + Data: msg.Data, + Attributes: msg.Attributes, + OrderingKey: msg.OrderingKey, }) - r := &PublishResult{ready: make(chan struct{})} + t.initBundler() t.mu.RLock() defer t.mu.RUnlock() // TODO(aboulhosn) [from bcmills] consider changing the semantics of bundler to perform this logic so we don't have to do it here if t.stopped { - r.set("", errTopicStopped) + ipubsub.SetPublishResult(r, "", errTopicStopped) return r } - // TODO(jba) [from bcmills] consider using a shared channel per bundle - // (requires Bundler API changes; would reduce allocations) - err := t.bundler.Add(&bundledMessage{msg, r}, msg.size) + if err := t.flowController.acquire(ctx, msgSize); err != nil { + t.scheduler.Pause(msg.OrderingKey) + ipubsub.SetPublishResult(r, "", err) + return r + } + err = t.scheduler.Add(msg.OrderingKey, &bundledMessage{msg, r, msgSize}, msgSize) if err != nil { - r.set("", err) + fmt.Printf("got err: %v\n", err) + t.scheduler.Pause(msg.OrderingKey) + ipubsub.SetPublishResult(r, "", err) } return r } @@ -423,57 +577,32 @@ func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult { // failed to be sent. 
func (t *Topic) Stop() { t.mu.Lock() - noop := t.stopped || t.bundler == nil + noop := t.stopped || t.scheduler == nil t.stopped = true t.mu.Unlock() if noop { return } - t.bundler.Flush() -} - -// A PublishResult holds the result from a call to Publish. -type PublishResult struct { - ready chan struct{} - serverID string - err error + t.scheduler.FlushAndStop() } -// Ready returns a channel that is closed when the result is ready. -// When the Ready channel is closed, Get is guaranteed not to block. -func (r *PublishResult) Ready() <-chan struct{} { return r.ready } - -// Get returns the server-generated message ID and/or error result of a Publish call. -// Get blocks until the Publish call completes or the context is done. -func (r *PublishResult) Get(ctx context.Context) (serverID string, err error) { - // If the result is already ready, return it even if the context is done. - select { - case <-r.Ready(): - return r.serverID, r.err - default: - } - select { - case <-ctx.Done(): - return "", ctx.Err() - case <-r.Ready(): - return r.serverID, r.err +// Flush blocks until all remaining messages are sent. +func (t *Topic) Flush() { + if t.stopped || t.scheduler == nil { + return } -} - -func (r *PublishResult) set(sid string, err error) { - r.serverID = sid - r.err = err - close(r.ready) + t.scheduler.Flush() } type bundledMessage struct { - msg *Message - res *PublishResult + msg *Message + res *PublishResult + size int } func (t *Topic) initBundler() { t.mu.RLock() - noop := t.stopped || t.bundler != nil + noop := t.stopped || t.scheduler != nil t.mu.RUnlock() if noop { return @@ -481,12 +610,21 @@ func (t *Topic) initBundler() { t.mu.Lock() defer t.mu.Unlock() // Must re-check, since we released the lock. - if t.stopped || t.bundler != nil { + if t.stopped || t.scheduler != nil { return } timeout := t.PublishSettings.Timeout - t.bundler = bundler.NewBundler(&bundledMessage{}, func(items interface{}) { + + workers := t.PublishSettings.NumGoroutines + // Unless overridden, allow many goroutines per CPU to call the Publish RPC + // concurrently. The default value was determined via extensive load + // testing (see the loadtest subdirectory). + if t.PublishSettings.NumGoroutines == 0 { + workers = 25 * runtime.GOMAXPROCS(0) + } + + t.scheduler = scheduler.NewPublishScheduler(workers, func(bundle interface{}) { // TODO(jba): use a context detached from the one passed to NewClient. 
ctx := context.TODO() if timeout != 0 { @@ -494,30 +632,42 @@ func (t *Topic) initBundler() { ctx, cancel = context.WithTimeout(ctx, timeout) defer cancel() } - t.publishMessageBundle(ctx, items.([]*bundledMessage)) + t.publishMessageBundle(ctx, bundle.([]*bundledMessage)) }) - t.bundler.DelayThreshold = t.PublishSettings.DelayThreshold - t.bundler.BundleCountThreshold = t.PublishSettings.CountThreshold - if t.bundler.BundleCountThreshold > MaxPublishRequestCount { - t.bundler.BundleCountThreshold = MaxPublishRequestCount + t.scheduler.DelayThreshold = t.PublishSettings.DelayThreshold + t.scheduler.BundleCountThreshold = t.PublishSettings.CountThreshold + if t.scheduler.BundleCountThreshold > MaxPublishRequestCount { + t.scheduler.BundleCountThreshold = MaxPublishRequestCount } - t.bundler.BundleByteThreshold = t.PublishSettings.ByteThreshold + t.scheduler.BundleByteThreshold = t.PublishSettings.ByteThreshold + + fcs := DefaultPublishSettings.FlowControlSettings + fcs.LimitExceededBehavior = t.PublishSettings.FlowControlSettings.LimitExceededBehavior + if t.PublishSettings.FlowControlSettings.MaxOutstandingBytes > 0 { + b := t.PublishSettings.FlowControlSettings.MaxOutstandingBytes + fcs.MaxOutstandingBytes = b + + // If MaxOutstandingBytes is set, disable BufferedByteLimit by setting it to maxint. + // This is because there's no way to set "unlimited" for BufferedByteLimit, + // and simply setting it to MaxOutstandingBytes occasionally leads to issues where + // BufferedByteLimit is reached even though there are resources available. + t.PublishSettings.BufferedByteLimit = maxInt + } + if t.PublishSettings.FlowControlSettings.MaxOutstandingMessages > 0 { + fcs.MaxOutstandingMessages = t.PublishSettings.FlowControlSettings.MaxOutstandingMessages + } + + t.flowController = newTopicFlowController(fcs) bufferedByteLimit := DefaultPublishSettings.BufferedByteLimit if t.PublishSettings.BufferedByteLimit > 0 { bufferedByteLimit = t.PublishSettings.BufferedByteLimit } - t.bundler.BufferedByteLimit = bufferedByteLimit + t.scheduler.BufferedByteLimit = bufferedByteLimit - // Set the bundler's max size per payload, accounting for topic name's overhead. - t.bundler.BundleByteLimit = MaxPublishRequestBytes - calcFieldSizeString(t.name) - // Unless overridden, allow many goroutines per CPU to call the Publish RPC concurrently. - // The default value was determined via extensive load testing (see the loadtest subdirectory). - if t.PublishSettings.NumGoroutines > 0 { - t.bundler.HandlerLimit = t.PublishSettings.NumGoroutines - } else { - t.bundler.HandlerLimit = 25 * runtime.GOMAXPROCS(0) - } + // Calculate the max limit of a single bundle. 5 comes from the number of bytes + // needed to be reserved for encoding the PubsubMessage repeated field. 
+ t.scheduler.BundleByteLimit = MaxPublishRequestBytes - calcFieldSizeString(t.name) - 5 } func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) { @@ -526,20 +676,38 @@ func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) log.Printf("pubsub: cannot create context with tag in publishMessageBundle: %v", err) } pbMsgs := make([]*pb.PubsubMessage, len(bms)) + var orderingKey string for i, bm := range bms { + orderingKey = bm.msg.OrderingKey pbMsgs[i] = &pb.PubsubMessage{ - Data: bm.msg.Data, - Attributes: bm.msg.Attributes, + Data: bm.msg.Data, + Attributes: bm.msg.Attributes, + OrderingKey: bm.msg.OrderingKey, } bm.msg = nil // release bm.msg for GC } + var res *pb.PublishResponse start := time.Now() - res, err := t.c.pubc.Publish(ctx, &pb.PublishRequest{ - Topic: t.name, - Messages: pbMsgs, - }, gax.WithGRPCOptions(grpc.MaxCallSendMsgSize(maxSendRecvBytes))) + if orderingKey != "" && t.scheduler.IsPaused(orderingKey) { + err = fmt.Errorf("pubsub: Publishing for ordering key, %s, paused due to previous error. Call topic.ResumePublish(orderingKey) before resuming publishing", orderingKey) + } else { + // Apply custom publish retryer on top of user specified retryer and + // default retryer. + opts := t.c.pubc.CallOptions.Publish + var settings gax.CallSettings + for _, opt := range opts { + opt.Resolve(&settings) + } + r := &publishRetryer{defaultRetryer: settings.Retry()} + res, err = t.c.pubc.Publish(ctx, &pb.PublishRequest{ + Topic: t.name, + Messages: pbMsgs, + }, gax.WithGRPCOptions(grpc.MaxCallSendMsgSize(maxSendRecvBytes)), + gax.WithRetry(func() gax.Retryer { return r })) + } end := time.Now() if err != nil { + t.scheduler.Pause(orderingKey) // Update context with error tag for OpenCensus, // using same stats.Record() call as success case. ctx, _ = tag.New(ctx, tag.Upsert(keyStatus, "ERROR"), @@ -549,10 +717,26 @@ func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) PublishLatency.M(float64(end.Sub(start)/time.Millisecond)), PublishedMessages.M(int64(len(bms)))) for i, bm := range bms { + t.flowController.release(ctx, bm.size) if err != nil { - bm.res.set("", err) + ipubsub.SetPublishResult(bm.res, "", err) } else { - bm.res.set(res.MessageIds[i], nil) + ipubsub.SetPublishResult(bm.res, res.MessageIds[i], nil) } } } + +// ResumePublish resumes accepting messages for the provided ordering key. +// Publishing using an ordering key might be paused if an error is +// encountered while publishing, to prevent messages from being published +// out of order. +func (t *Topic) ResumePublish(orderingKey string) { + t.mu.RLock() + noop := t.scheduler == nil + t.mu.RUnlock() + if noop { + return + } + + t.scheduler.Resume(orderingKey) +} diff --git a/vendor/cloud.google.com/go/pubsub/trace.go b/vendor/cloud.google.com/go/pubsub/trace.go index 4036a350f981..cadc3eb6d50a 100644 --- a/vendor/cloud.google.com/go/pubsub/trace.go +++ b/vendor/cloud.google.com/go/pubsub/trace.go @@ -19,20 +19,11 @@ import ( "log" "sync" - "go.opencensus.io/plugin/ocgrpc" "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" - "google.golang.org/api/option" - "google.golang.org/grpc" ) -func openCensusOptions() []option.ClientOption { - return []option.ClientOption{ - option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})), - } -} - // The following keys are used to tag requests with a specific topic/subscription ID. 
var ( keyTopic = tag.MustNewKey("topic") @@ -93,6 +84,22 @@ var ( // StreamResponseCount is a measure of the number of responses received on a streaming-pull stream. // It is EXPERIMENTAL and subject to change or removal without notice. StreamResponseCount = stats.Int64(statsPrefix+"stream_response_count", "Number of gRPC StreamingPull response messages received", stats.UnitDimensionless) + + // OutstandingMessages is a measure of the number of outstanding messages held by the client before they are processed. + // It is EXPERIMENTAL and subject to change or removal without notice. + OutstandingMessages = stats.Int64(statsPrefix+"outstanding_messages", "Number of outstanding Pub/Sub messages", stats.UnitDimensionless) + + // OutstandingBytes is a measure of the number of bytes all outstanding messages held by the client take up. + // It is EXPERIMENTAL and subject to change or removal without notice. + OutstandingBytes = stats.Int64(statsPrefix+"outstanding_bytes", "Number of outstanding bytes", stats.UnitDimensionless) + + // PublisherOutstandingMessages is a measure of the number of published outstanding messages held by the client before they are processed. + // It is EXPERIMENTAL and subject to change or removal without notice. + PublisherOutstandingMessages = stats.Int64(statsPrefix+"publisher_outstanding_messages", "Number of outstanding publish messages", stats.UnitDimensionless) + + // PublisherOutstandingBytes is a measure of the number of bytes all outstanding publish messages held by the client take up. + // It is EXPERIMENTAL and subject to change or removal without notice. + PublisherOutstandingBytes = stats.Int64(statsPrefix+"publisher_outstanding_bytes", "Number of outstanding publish bytes", stats.UnitDimensionless) ) var ( @@ -139,11 +146,29 @@ var ( // StreamResponseCountView is a cumulative sum of StreamResponseCount. // It is EXPERIMENTAL and subject to change or removal without notice. StreamResponseCountView *view.View + + // OutstandingMessagesView is the last value of OutstandingMessages + // It is EXPERIMENTAL and subject to change or removal without notice. + OutstandingMessagesView *view.View + + // OutstandingBytesView is the last value of OutstandingBytes + // It is EXPERIMENTAL and subject to change or removal without notice. + OutstandingBytesView *view.View + + // PublisherOutstandingMessagesView is the last value of OutstandingMessages + // It is EXPERIMENTAL and subject to change or removal without notice. + PublisherOutstandingMessagesView *view.View + + // PublisherOutstandingBytesView is the last value of OutstandingBytes + // It is EXPERIMENTAL and subject to change or removal without notice. 
+ PublisherOutstandingBytesView *view.View ) func init() { PublishedMessagesView = createCountView(stats.Measure(PublishedMessages), keyTopic, keyStatus, keyError) PublishLatencyView = createDistView(PublishLatency, keyTopic, keyStatus, keyError) + PublisherOutstandingMessagesView = createLastValueView(PublisherOutstandingMessages, keyTopic) + PublisherOutstandingBytesView = createLastValueView(PublisherOutstandingBytes, keyTopic) PullCountView = createCountView(PullCount, keySubscription) AckCountView = createCountView(AckCount, keySubscription) NackCountView = createCountView(NackCount, keySubscription) @@ -153,10 +178,14 @@ func init() { StreamRetryCountView = createCountView(StreamRetryCount, keySubscription) StreamRequestCountView = createCountView(StreamRequestCount, keySubscription) StreamResponseCountView = createCountView(StreamResponseCount, keySubscription) + OutstandingMessagesView = createLastValueView(OutstandingMessages, keySubscription) + OutstandingBytesView = createLastValueView(OutstandingBytes, keySubscription) DefaultPublishViews = []*view.View{ PublishedMessagesView, PublishLatencyView, + PublisherOutstandingMessagesView, + PublisherOutstandingBytesView, } DefaultSubscribeViews = []*view.View{ @@ -169,6 +198,8 @@ func init() { StreamRetryCountView, StreamRequestCountView, StreamResponseCountView, + OutstandingMessagesView, + OutstandingBytesView, } } @@ -199,6 +230,16 @@ func createDistView(m stats.Measure, keys ...tag.Key) *view.View { } } +func createLastValueView(m stats.Measure, keys ...tag.Key) *view.View { + return &view.View{ + Name: m.Name(), + Description: m.Description(), + TagKeys: keys, + Measure: m, + Aggregation: view.LastValue(), + } +} + var logOnce sync.Once // withSubscriptionKey returns a new context modified with the subscriptionKey tag map. 
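The trace.go hunk above adds LastValue measures and views for outstanding messages and bytes on both the publish and subscribe paths. A hypothetical sketch of exporting them via the package-level default view slices (not part of this patch):

package main

import (
	"log"

	"cloud.google.com/go/pubsub"
	"go.opencensus.io/stats/view"
)

func main() {
	// DefaultPublishViews and DefaultSubscribeViews now include the
	// outstanding-message and outstanding-byte LastValue views declared above.
	if err := view.Register(pubsub.DefaultPublishViews...); err != nil {
		log.Fatalf("registering publish views: %v", err)
	}
	if err := view.Register(pubsub.DefaultSubscribeViews...); err != nil {
		log.Fatalf("registering subscribe views: %v", err)
	}
	// Hook up an OpenCensus exporter (Prometheus, Stackdriver, ...) to ship
	// the recorded data; omitted here.
}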
diff --git a/vendor/cloud.google.com/go/release-please-config-individual.json b/vendor/cloud.google.com/go/release-please-config-individual.json new file mode 100644 index 000000000000..b8766e0f2ea7 --- /dev/null +++ b/vendor/cloud.google.com/go/release-please-config-individual.json @@ -0,0 +1,45 @@ +{ + "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json", + "release-type": "go-yoshi", + "include-component-in-tag": true, + "separate-pull-requests": true, + "tag-separator": "/", + "packages": { + "bigquery": { + "component": "bigquery" + }, + "bigtable": { + "component": "bigtable" + }, + "datastore": { + "component": "datastore" + }, + "errorreporting": { + "component": "errorreporting" + }, + "firestore": { + "component": "firestore" + }, + "logging": { + "component": "logging" + }, + "profiler": { + "component": "profiler" + }, + "pubsub": { + "component": "pubsub" + }, + "pubsublite": { + "component": "pubsublite" + }, + "spanner": { + "component": "spanner" + }, + "storage": { + "component": "storage" + } + }, + "plugins": [ + "sentence-case" + ] +} \ No newline at end of file diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json new file mode 100644 index 000000000000..a0b54cb5de00 --- /dev/null +++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json @@ -0,0 +1,347 @@ +{ + "release-type": "go-yoshi", + "include-component-in-tag": true, + "tag-separator": "/", + "packages": { + "accessapproval": { + "component": "accessapproval" + }, + "accesscontextmanager": { + "component": "accesscontextmanager" + }, + "aiplatform": { + "component": "aiplatform" + }, + "analytics": { + "component": "analytics" + }, + "apigateway": { + "component": "apigateway" + }, + "apigeeconnect": { + "component": "apigeeconnect" + }, + "apigeeregistry": { + "component": "apigeeregistry" + }, + "apikeys": { + "component": "apikeys" + }, + "appengine": { + "component": "appengine" + }, + "area120": { + "component": "area120" + }, + "artifactregistry": { + "component": "artifactregistry" + }, + "asset": { + "component": "asset" + }, + "assuredworkloads": { + "component": "assuredworkloads" + }, + "automl": { + "component": "automl" + }, + "baremetalsolution": { + "component": "baremetalsolution" + }, + "batch": { + "component": "batch" + }, + "beyondcorp": { + "component": "beyondcorp" + }, + "billing": { + "component": "billing" + }, + "binaryauthorization": { + "component": "binaryauthorization" + }, + "certificatemanager": { + "component": "certificatemanager" + }, + "channel": { + "component": "channel" + }, + "cloudbuild": { + "component": "cloudbuild" + }, + "clouddms": { + "component": "clouddms" + }, + "cloudtasks": { + "component": "cloudtasks" + }, + "compute": { + "component": "compute" + }, + "compute/metadata": { + "component": "compute/metadata" + }, + "contactcenterinsights": { + "component": "contactcenterinsights" + }, + "container": { + "component": "container" + }, + "containeranalysis": { + "component": "containeranalysis" + }, + "datacatalog": { + "component": "datacatalog" + }, + "dataflow": { + "component": "dataflow" + }, + "dataform": { + "component": "dataform" + }, + "datafusion": { + "component": "datafusion" + }, + "datalabeling": { + "component": "datalabeling" + }, + "dataplex": { + "component": "dataplex" + }, + "dataproc": { + "component": "dataproc" + }, + "dataqna": { + "component": "dataqna" + }, + "datastream": 
{ + "component": "datastream" + }, + "deploy": { + "component": "deploy" + }, + "dialogflow": { + "component": "dialogflow" + }, + "dlp": { + "component": "dlp" + }, + "documentai": { + "component": "documentai" + }, + "domains": { + "component": "domains" + }, + "edgecontainer": { + "component": "edgecontainer" + }, + "essentialcontacts": { + "component": "essentialcontacts" + }, + "eventarc": { + "component": "eventarc" + }, + "filestore": { + "component": "filestore" + }, + "functions": { + "component": "functions" + }, + "gaming": { + "component": "gaming" + }, + "gkebackup": { + "component": "gkebackup" + }, + "gkeconnect": { + "component": "gkeconnect" + }, + "gkehub": { + "component": "gkehub" + }, + "gkemulticloud": { + "component": "gkemulticloud" + }, + "grafeas": { + "component": "grafeas" + }, + "gsuiteaddons": { + "component": "gsuiteaddons" + }, + "iam": { + "component": "iam" + }, + "iap": { + "component": "iap" + }, + "ids": { + "component": "ids" + }, + "iot": { + "component": "iot" + }, + "kms": { + "component": "kms" + }, + "language": { + "component": "language" + }, + "lifesciences": { + "component": "lifesciences" + }, + "longrunning": { + "component": "longrunning" + }, + "managedidentities": { + "component": "managedidentities" + }, + "maps": { + "component": "maps" + }, + "mediatranslation": { + "component": "mediatranslation" + }, + "memcache": { + "component": "memcache" + }, + "metastore": { + "component": "metastore" + }, + "monitoring": { + "component": "monitoring" + }, + "networkconnectivity": { + "component": "networkconnectivity" + }, + "networkmanagement": { + "component": "networkmanagement" + }, + "networksecurity": { + "component": "networksecurity" + }, + "notebooks": { + "component": "notebooks" + }, + "optimization": { + "component": "optimization" + }, + "orchestration": { + "component": "orchestration" + }, + "orgpolicy": { + "component": "orgpolicy" + }, + "osconfig": { + "component": "osconfig" + }, + "oslogin": { + "component": "oslogin" + }, + "phishingprotection": { + "component": "phishingprotection" + }, + "policytroubleshooter": { + "component": "policytroubleshooter" + }, + "privatecatalog": { + "component": "privatecatalog" + }, + "recaptchaenterprise/v2": { + "component": "recaptchaenterprise" + }, + "recommendationengine": { + "component": "recommendationengine" + }, + "recommender": { + "component": "recommender" + }, + "redis": { + "component": "redis" + }, + "resourcemanager": { + "component": "resourcemanager" + }, + "resourcesettings": { + "component": "resourcesettings" + }, + "retail": { + "component": "retail" + }, + "run": { + "component": "run" + }, + "scheduler": { + "component": "scheduler" + }, + "secretmanager": { + "component": "secretmanager" + }, + "security": { + "component": "security" + }, + "securitycenter": { + "component": "securitycenter" + }, + "servicecontrol": { + "component": "servicecontrol" + }, + "servicedirectory": { + "component": "servicedirectory" + }, + "servicemanagement": { + "component": "servicemanagement" + }, + "serviceusage": { + "component": "serviceusage" + }, + "shell": { + "component": "shell" + }, + "speech": { + "component": "speech" + }, + "storagetransfer": { + "component": "storagetransfer" + }, + "talent": { + "component": "talent" + }, + "texttospeech": { + "component": "texttospeech" + }, + "tpu": { + "component": "tpu" + }, + "trace": { + "component": "trace" + }, + "translate": { + "component": "translate" + }, + "video": { + "component": "video" + }, + "videointelligence": { + 
"component": "videointelligence" + }, + "vision/v2": { + "component": "vision" + }, + "vmmigration": { + "component": "vmmigration" + }, + "vpcaccess": { + "component": "vpcaccess" + }, + "webrisk": { + "component": "webrisk" + }, + "websecurityscanner": { + "component": "websecurityscanner" + }, + "workflows": { + "component": "workflows" + } + }, + "plugins": ["sentence-case"] +} diff --git a/vendor/cloud.google.com/go/release-please-config.json b/vendor/cloud.google.com/go/release-please-config.json new file mode 100644 index 000000000000..1400245b8a3b --- /dev/null +++ b/vendor/cloud.google.com/go/release-please-config.json @@ -0,0 +1,11 @@ +{ + "release-type": "go-yoshi", + "separate-pull-requests": true, + "include-component-in-tag": false, + "packages": { + ".": { + "component": "main" + } + }, + "plugins": ["sentence-case"] +} diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index f6d57be5085d..f12da250ef38 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,5 +1,252 @@ # Changes + +## [1.28.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.0...storage/v1.28.1) (2022-12-02) + + +### Bug Fixes + +* **storage:** downgrade some dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5)) + +## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.27.0...storage/v1.28.0) (2022-11-03) + + +### Features + +* **storage/internal:** Add routing annotations ([ce3f945](https://github.com/googleapis/google-cloud-go/commit/ce3f9458e511eca0910992763232abbcd64698f1)) +* **storage:** Add Autoclass support ([#6828](https://github.com/googleapis/google-cloud-go/issues/6828)) ([f7c7f41](https://github.com/googleapis/google-cloud-go/commit/f7c7f41e4d7fcffe05860e1114cb20f40c869da8)) + + +### Bug Fixes + +* **storage:** Fix read-write race in Writer.Write ([#6817](https://github.com/googleapis/google-cloud-go/issues/6817)) ([4766d3e](https://github.com/googleapis/google-cloud-go/commit/4766d3e1004119b93c6bd352024b5bf3404252eb)) +* **storage:** Fix request token passing for Copier.Run ([#6863](https://github.com/googleapis/google-cloud-go/issues/6863)) ([faaab06](https://github.com/googleapis/google-cloud-go/commit/faaab066d8e509dc440bcbc87391557ecee7dbf2)), refs [#6857](https://github.com/googleapis/google-cloud-go/issues/6857) + + +### Documentation + +* **storage:** Update broken links for SignURL and PostPolicy ([#6779](https://github.com/googleapis/google-cloud-go/issues/6779)) ([776138b](https://github.com/googleapis/google-cloud-go/commit/776138bc06a1e5fd45acbf8f9d36e9dc6ce31dd3)) + +## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22) + + +### Features + +* **storage:** Find GoogleAccessID when using impersonated creds ([#6591](https://github.com/googleapis/google-cloud-go/issues/6591)) ([a2d16a7](https://github.com/googleapis/google-cloud-go/commit/a2d16a7a778c85d13217fc67955ec5dac1da34e8)) + +## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29) + + +### Features + +* **storage:** export ShouldRetry ([#6370](https://github.com/googleapis/google-cloud-go/issues/6370)) ([0da9ab0](https://github.com/googleapis/google-cloud-go/commit/0da9ab0831540569dc04c0a23437b084b1564e15)), refs [#6362](https://github.com/googleapis/google-cloud-go/issues/6362) + + +### Bug 
Fixes + +* **storage:** allow to use age=0 in OLM conditions ([#6204](https://github.com/googleapis/google-cloud-go/issues/6204)) ([c85704f](https://github.com/googleapis/google-cloud-go/commit/c85704f4284626ce728cb48f3b130f2ce2a0165e)) + +## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11) + + +### Features + +* **storage/internal:** Add routing annotations ([8a8ba85](https://github.com/googleapis/google-cloud-go/commit/8a8ba85311f85701c97fd7c10f1d88b738ce423f)) +* **storage:** refactor to use transport-agnostic interface ([#6465](https://github.com/googleapis/google-cloud-go/issues/6465)) ([d03c3e1](https://github.com/googleapis/google-cloud-go/commit/d03c3e15a79fe9afa1232d9c8bd4c484a9bb927e)) + +## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.23.0...storage/v1.24.0) (2022-07-20) + + +### Features + +* **storage:** add Custom Placement Config Dual Region Support ([#6294](https://github.com/googleapis/google-cloud-go/issues/6294)) ([5a8c607](https://github.com/googleapis/google-cloud-go/commit/5a8c607e3a9a3265887e27cb13f8943f3e3fa23d)) + +## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.1...storage/v1.23.0) (2022-06-23) + + +### Features + +* **storage:** add support for OLM Prefix/Suffix ([#5929](https://github.com/googleapis/google-cloud-go/issues/5929)) ([ec21d10](https://github.com/googleapis/google-cloud-go/commit/ec21d10d6d1b01aa97a52560319775041707690d)) +* **storage:** support AbortIncompleteMultipartUpload LifecycleAction ([#5812](https://github.com/googleapis/google-cloud-go/issues/5812)) ([fdec929](https://github.com/googleapis/google-cloud-go/commit/fdec929b9da6e01dda0ab3c72544d44d6bd82bd4)), refs [#5795](https://github.com/googleapis/google-cloud-go/issues/5795) + + +### Bug Fixes + +* **storage:** allow for Age *int64 type and int64 type ([#6230](https://github.com/googleapis/google-cloud-go/issues/6230)) ([cc7acb8](https://github.com/googleapis/google-cloud-go/commit/cc7acb8bffb31828e9e96d4834a65f9728494473)) + +### [1.22.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.0...storage/v1.22.1) (2022-05-19) + + +### Bug Fixes + +* **storage:** bump genproto, remove deadcode ([#6059](https://github.com/googleapis/google-cloud-go/issues/6059)) ([bb10f9f](https://github.com/googleapis/google-cloud-go/commit/bb10f9faca57dc3b987e0fb601090887b3507f07)) +* **storage:** remove field that no longer exists ([#6061](https://github.com/googleapis/google-cloud-go/issues/6061)) ([ee150cf](https://github.com/googleapis/google-cloud-go/commit/ee150cfd194463ddfcb59898cfb0237e47777973)) + +## [1.22.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.21.0...storage/v1.22.0) (2022-03-31) + + +### Features + +* **storage:** allow specifying includeTrailingDelimiter ([#5617](https://github.com/googleapis/google-cloud-go/issues/5617)) ([a34503b](https://github.com/googleapis/google-cloud-go/commit/a34503bc0f0b95399285e8db66976b227e3b0072)) +* **storage:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) + + +### Bug Fixes + +* **storage:** respect STORAGE_EMULATOR_HOST in signedURL ([#5673](https://github.com/googleapis/google-cloud-go/issues/5673)) ([1c249ae](https://github.com/googleapis/google-cloud-go/commit/1c249ae5b4980cf53fa74635943ca8bf6a96a341)) + +## 
[1.21.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.20.0...storage/v1.21.0) (2022-02-17) + + +### Features + +* **storage:** add better version metadata to calls ([#5507](https://github.com/googleapis/google-cloud-go/issues/5507)) ([13fe0bc](https://github.com/googleapis/google-cloud-go/commit/13fe0bc0d8acbffd46b59ab69b25449f1cbd6a88)), refs [#2749](https://github.com/googleapis/google-cloud-go/issues/2749) +* **storage:** add Writer.ChunkRetryDeadline ([#5482](https://github.com/googleapis/google-cloud-go/issues/5482)) ([498a746](https://github.com/googleapis/google-cloud-go/commit/498a746769fa43958b92af8875b927879947128e)) + +## [1.20.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.19.0...storage/v1.20.0) (2022-02-04) + + +### Features + +* **storage/internal:** Update definition of RewriteObjectRequest to bring to parity with JSON API support ([#5447](https://www.github.com/googleapis/google-cloud-go/issues/5447)) ([7d175ef](https://www.github.com/googleapis/google-cloud-go/commit/7d175ef12b7b3e75585427f5dd2aab4a175e92d6)) + +## [1.19.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.2...storage/v1.19.0) (2022-01-25) + + +### Features + +* **storage:** add fully configurable and idempotency-aware retry strategy ([#5384](https://www.github.com/googleapis/google-cloud-go/issues/5384), [#5185](https://www.github.com/googleapis/google-cloud-go/issues/5185), [#5170](https://www.github.com/googleapis/google-cloud-go/issues/5170), [#5223](https://www.github.com/googleapis/google-cloud-go/issues/5223), [#5221](https://www.github.com/googleapis/google-cloud-go/issues/5221), [#5193](https://www.github.com/googleapis/google-cloud-go/issues/5193), [#5159](https://www.github.com/googleapis/google-cloud-go/issues/5159), [#5165](https://www.github.com/googleapis/google-cloud-go/issues/5165), [#5166](https://www.github.com/googleapis/google-cloud-go/issues/5166), [#5210](https://www.github.com/googleapis/google-cloud-go/issues/5210), [#5172](https://www.github.com/googleapis/google-cloud-go/issues/5172), [#5314](https://www.github.com/googleapis/google-cloud-go/issues/5314)) + * This release contains changes to fully align this library's retry strategy + with best practices as described in the + Cloud Storage [docs](https://cloud.google.com/storage/docs/retry-strategy). + * The library will now retry only idempotent operations by default. This means + that for certain operations, including object upload, compose, rewrite, + update, and delete, requests will not be retried by default unless + [idempotency conditions](https://cloud.google.com/storage/docs/retry-strategy#idempotency) + for the request have been met. + * The library now has methods to configure aspects of retry policy for + API calls, including which errors are retried, the timing of the + exponential backoff, and how idempotency is taken into account. + * If you wish to re-enable retries for a non-idempotent request, use the + [RetryAlways](https://pkg.go.dev/cloud.google.com/go/storage@main#RetryAlways) + policy. 
+ * For full details on how to configure retries, see the + [package docs](https://pkg.go.dev/cloud.google.com/go/storage@main#hdr-Retrying_failed_requests) + and the + [Cloud Storage docs](https://cloud.google.com/storage/docs/retry-strategy) +* **storage:** GenerateSignedPostPolicyV4 can use existing creds to authenticate ([#5105](https://www.github.com/googleapis/google-cloud-go/issues/5105)) ([46489f4](https://www.github.com/googleapis/google-cloud-go/commit/46489f4c8a634068a3e7cf2fd5e5ca11b555c0a8)) +* **storage:** post policy can be signed with a fn that takes raw bytes ([#5079](https://www.github.com/googleapis/google-cloud-go/issues/5079)) ([25d1278](https://www.github.com/googleapis/google-cloud-go/commit/25d1278cab539fbfdd8563ed6b297e30d3fe555c)) +* **storage:** add rpo (turbo replication) support ([#5003](https://www.github.com/googleapis/google-cloud-go/issues/5003)) ([3bd5995](https://www.github.com/googleapis/google-cloud-go/commit/3bd59958e0c06d2655b67fcb5410668db3c52af0)) + +### Bug Fixes + +* **storage:** fix nil check in gRPC Reader ([#5376](https://www.github.com/googleapis/google-cloud-go/issues/5376)) ([5e7d722](https://www.github.com/googleapis/google-cloud-go/commit/5e7d722d18a62b28ba98169b3bdbb49401377264)) + +### [1.18.2](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.1...storage/v1.18.2) (2021-10-18) + + +### Bug Fixes + +* **storage:** upgrade genproto ([#4993](https://www.github.com/googleapis/google-cloud-go/issues/4993)) ([5ca462d](https://www.github.com/googleapis/google-cloud-go/commit/5ca462d99fe851b7cddfd70108798e2fa959bdfd)), refs [#4991](https://www.github.com/googleapis/google-cloud-go/issues/4991) + +### [1.18.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.0...storage/v1.18.1) (2021-10-14) + + +### Bug Fixes + +* **storage:** don't assume auth from a client option ([#4982](https://www.github.com/googleapis/google-cloud-go/issues/4982)) ([e17334d](https://www.github.com/googleapis/google-cloud-go/commit/e17334d1fe7645d89d14ae7148313498b984dfbb)) + +## [1.18.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.17.0...storage/v1.18.0) (2021-10-11) + + +### Features + +* **storage:** returned wrapped error for timeouts ([#4802](https://www.github.com/googleapis/google-cloud-go/issues/4802)) ([0e102a3](https://www.github.com/googleapis/google-cloud-go/commit/0e102a385dc67a06f6b444b3a93e6998428529be)), refs [#4197](https://www.github.com/googleapis/google-cloud-go/issues/4197) +* **storage:** SignedUrl can use existing creds to authenticate ([#4604](https://www.github.com/googleapis/google-cloud-go/issues/4604)) ([b824c89](https://www.github.com/googleapis/google-cloud-go/commit/b824c897e6941270747b612f6d36a8d6ae081315)) + + +### Bug Fixes + +* **storage:** update PAP to use inherited instead of unspecified ([#4909](https://www.github.com/googleapis/google-cloud-go/issues/4909)) ([dac26b1](https://www.github.com/googleapis/google-cloud-go/commit/dac26b1af2f2972f12775341173bcc5f982438b8)) + +## [1.17.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.1...storage/v1.17.0) (2021-09-28) + + +### Features + +* **storage:** add projectNumber field to bucketAttrs. 
([#4805](https://www.github.com/googleapis/google-cloud-go/issues/4805)) ([07343af](https://www.github.com/googleapis/google-cloud-go/commit/07343afc15085b164cc41d202d13f9d46f5c0d02)) + + +### Bug Fixes + +* **storage:** align retry idempotency (part 1) ([#4715](https://www.github.com/googleapis/google-cloud-go/issues/4715)) ([ffa903e](https://www.github.com/googleapis/google-cloud-go/commit/ffa903eeec61aa3869e5220e2f09371127b5c393)) + +### [1.16.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.0...storage/v1.16.1) (2021-08-30) + + +### Bug Fixes + +* **storage/internal:** Update encryption_key fields to "bytes" type. fix: Improve date/times and field name clarity in lifecycle conditions. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) +* **storage:** accept emulator env var without scheme ([#4616](https://www.github.com/googleapis/google-cloud-go/issues/4616)) ([5f8cbb9](https://www.github.com/googleapis/google-cloud-go/commit/5f8cbb98070109e2a34409ac775ed63b94d37efd)) +* **storage:** preserve supplied endpoint's scheme ([#4609](https://www.github.com/googleapis/google-cloud-go/issues/4609)) ([ee2756f](https://www.github.com/googleapis/google-cloud-go/commit/ee2756fb0a335d591464a770c9fa4f8fe0ba2e01)) +* **storage:** remove unnecessary variable ([#4608](https://www.github.com/googleapis/google-cloud-go/issues/4608)) ([27fc784](https://www.github.com/googleapis/google-cloud-go/commit/27fc78456fb251652bdf5cdb493734a7e1e643e1)) +* **storage:** retry LockRetentionPolicy ([#4439](https://www.github.com/googleapis/google-cloud-go/issues/4439)) ([09879ea](https://www.github.com/googleapis/google-cloud-go/commit/09879ea80cb67f9bfd8fc9384b0fda335567cba9)), refs [#4437](https://www.github.com/googleapis/google-cloud-go/issues/4437) +* **storage:** revise Reader to send XML preconditions ([#4479](https://www.github.com/googleapis/google-cloud-go/issues/4479)) ([e36b29a](https://www.github.com/googleapis/google-cloud-go/commit/e36b29a3d43bce5c1c044f7daf6e1db00b0a49e0)), refs [#4470](https://www.github.com/googleapis/google-cloud-go/issues/4470) + +## [1.16.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.15.0...storage/v1.16.0) (2021-06-28) + + +### Features + +* **storage:** support PublicAccessPrevention ([#3608](https://www.github.com/googleapis/google-cloud-go/issues/3608)) ([99bc782](https://www.github.com/googleapis/google-cloud-go/commit/99bc782fb50a47602b45278384ef5d5b5da9263b)), refs [#3203](https://www.github.com/googleapis/google-cloud-go/issues/3203) + + +### Bug Fixes + +* **storage:** fix Writer.ChunkSize validation ([#4255](https://www.github.com/googleapis/google-cloud-go/issues/4255)) ([69c2e9d](https://www.github.com/googleapis/google-cloud-go/commit/69c2e9dc6303e1a004d3104a8178532fa738e742)), refs [#4167](https://www.github.com/googleapis/google-cloud-go/issues/4167) +* **storage:** try to reopen for failed Reads ([#4226](https://www.github.com/googleapis/google-cloud-go/issues/4226)) ([564102b](https://www.github.com/googleapis/google-cloud-go/commit/564102b335dbfb558bec8af883e5f898efb5dd10)), refs [#3040](https://www.github.com/googleapis/google-cloud-go/issues/3040) + +## [1.15.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.13.0...storage/v1.15.0) (2021-04-21) + + +### Features + +* **transport** Bump dependency on google.golang.org/api to pick up HTTP/2 + config updates (see 
[googleapis/google-api-go-client#882](https://github.com/googleapis/google-api-go-client/pull/882)). + +### Bug Fixes + +* **storage:** retry io.ErrUnexpectedEOF ([#3957](https://www.github.com/googleapis/google-cloud-go/issues/3957)) ([f6590cd](https://www.github.com/googleapis/google-cloud-go/commit/f6590cdc26c8479be5df48949fa59f879e0c24fc)) + + +## v1.14.0 + +- Updates to various dependencies. + +## [1.13.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.12.0...v1.13.0) (2021-02-03) + + +### Features + +* **storage:** add missing StorageClass in BucketAttrsToUpdate ([#3038](https://www.github.com/googleapis/google-cloud-go/issues/3038)) ([2fa1b72](https://www.github.com/googleapis/google-cloud-go/commit/2fa1b727f8a7b20aa62fe0990530744f6c109be0)) +* **storage:** add projection parameter for BucketHandle.Objects() ([#3549](https://www.github.com/googleapis/google-cloud-go/issues/3549)) ([9b9c3dc](https://www.github.com/googleapis/google-cloud-go/commit/9b9c3dce3ee10af5b6c4d070821bf47a861efd5b)) + + +### Bug Fixes + +* **storage:** fix endpoint selection logic ([#3172](https://www.github.com/googleapis/google-cloud-go/issues/3172)) ([99edf0d](https://www.github.com/googleapis/google-cloud-go/commit/99edf0d211a9e617f2586fbc83b6f9630da3c537)) + +## v1.12.0 +- V4 signed URL fixes: + - Fix encoding of spaces in query parameters. + - Add fields that were missing from PostPolicyV4 policy conditions. +- Fix Query to correctly list prefixes as well as objects when SetAttrSelection + is used. + +## v1.11.0 +- Add support for CustomTime and NoncurrentTime object lifecycle management + features. + ## v1.10.0 - Bump dependency on google.golang.org/api to capture changes to retry logic which will make retries on writes more resilient. diff --git a/vendor/cloud.google.com/go/storage/README.md b/vendor/cloud.google.com/go/storage/README.md index a2253c4bb5a9..b2f411210ca0 100644 --- a/vendor/cloud.google.com/go/storage/README.md +++ b/vendor/cloud.google.com/go/storage/README.md @@ -1,9 +1,9 @@ -## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) +## Cloud Storage [![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/storage.svg)](https://pkg.go.dev/cloud.google.com/go/storage) - [About Cloud Storage](https://cloud.google.com/storage/) - [API documentation](https://cloud.google.com/storage/docs) -- [Go client documentation](https://godoc.org/cloud.google.com/go/storage) -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage) +- [Go client documentation](https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage) ### Example Usage @@ -25,8 +25,8 @@ if err != nil { log.Fatal(err) } defer rc.Close() -body, err := ioutil.ReadAll(rc) +body, err := io.ReadAll(rc) if err != nil { log.Fatal(err) } -``` \ No newline at end of file +``` diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go index 7855d110ad45..e0ab60073c2f 100644 --- a/vendor/cloud.google.com/go/storage/acl.go +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -20,7 +20,7 @@ import ( "reflect" "cloud.google.com/go/internal/trace" - "google.golang.org/api/googleapi" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" raw "google.golang.org/api/storage/v1" ) @@ -66,12 +66,15 @@ type ProjectTeam struct { } // ACLHandle 
provides operations on an access control list for a Google Cloud Storage bucket or object. +// ACLHandle on an object operates on the latest generation of that object by default. +// Selecting a specific generation of an object is not currently supported by the client. type ACLHandle struct { c *Client bucket string object string isDefault bool userProject string // for requester-pays buckets + retry *retryConfig } // Delete permanently deletes the ACL entry for the given entity. @@ -117,114 +120,46 @@ func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) { } func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { - var acls *raw.ObjectAccessControls - var err error - err = runWithRetry(ctx, func() error { - req := a.c.raw.DefaultObjectAccessControls.List(a.bucket) - a.configureCall(ctx, req) - acls, err = req.Do() - return err - }) - if err != nil { - return nil, err - } - return toObjectACLRules(acls.Items), nil + opts := makeStorageOpts(true, a.retry, a.userProject) + return a.c.tc.ListDefaultObjectACLs(ctx, a.bucket, opts...) } func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { - return runWithRetry(ctx, func() error { - req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)) - a.configureCall(ctx, req) - return req.Do() - }) + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.DeleteDefaultObjectACL(ctx, a.bucket, entity, opts...) } func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { - var acls *raw.BucketAccessControls - var err error - err = runWithRetry(ctx, func() error { - req := a.c.raw.BucketAccessControls.List(a.bucket) - a.configureCall(ctx, req) - acls, err = req.Do() - return err - }) - if err != nil { - return nil, err - } - return toBucketACLRules(acls.Items), nil + opts := makeStorageOpts(true, a.retry, a.userProject) + return a.c.tc.ListBucketACLs(ctx, a.bucket, opts...) } func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { - acl := &raw.BucketAccessControl{ - Bucket: a.bucket, - Entity: string(entity), - Role: string(role), - } - err := runWithRetry(ctx, func() error { - req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl) - a.configureCall(ctx, req) - _, err := req.Do() - return err - }) - if err != nil { - return err - } - return nil + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.UpdateBucketACL(ctx, a.bucket, entity, role, opts...) } func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { - return runWithRetry(ctx, func() error { - req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)) - a.configureCall(ctx, req) - return req.Do() - }) + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.DeleteBucketACL(ctx, a.bucket, entity, opts...) } func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { - var acls *raw.ObjectAccessControls - var err error - err = runWithRetry(ctx, func() error { - req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object) - a.configureCall(ctx, req) - acls, err = req.Do() - return err - }) - if err != nil { - return nil, err - } - return toObjectACLRules(acls.Items), nil + opts := makeStorageOpts(true, a.retry, a.userProject) + return a.c.tc.ListObjectACLs(ctx, a.bucket, a.object, opts...) 
} func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error { - type setRequest interface { - Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) - Header() http.Header - } - - acl := &raw.ObjectAccessControl{ - Bucket: a.bucket, - Entity: string(entity), - Role: string(role), - } - var req setRequest + opts := makeStorageOpts(false, a.retry, a.userProject) if isBucketDefault { - req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl) - } else { - req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl) + return a.c.tc.UpdateDefaultObjectACL(ctx, a.bucket, entity, role, opts...) } - a.configureCall(ctx, req) - return runWithRetry(ctx, func() error { - _, err := req.Do() - return err - }) + return a.c.tc.UpdateObjectACL(ctx, a.bucket, a.object, entity, role, opts...) } func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { - return runWithRetry(ctx, func() error { - req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)) - a.configureCall(ctx, req) - return req.Do() - }) + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...) } func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) { @@ -244,6 +179,14 @@ func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule { return rs } +func toObjectACLRulesFromProto(items []*storagepb.ObjectAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toObjectACLRuleFromProto(item)) + } + return rs +} + func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule { var rs []ACLRule for _, item := range items { @@ -252,6 +195,14 @@ func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule { return rs } +func toBucketACLRulesFromProto(items []*storagepb.BucketAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toBucketACLRuleFromProto(item)) + } + return rs +} + func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule { return ACLRule{ Entity: ACLEntity(a.Entity), @@ -263,6 +214,17 @@ func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule { } } +func toObjectACLRuleFromProto(a *storagepb.ObjectAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.GetEntity()), + EntityID: a.GetEntityId(), + Role: ACLRole(a.GetRole()), + Domain: a.GetDomain(), + Email: a.GetEmail(), + ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()), + } +} + func toBucketACLRule(a *raw.BucketAccessControl) ACLRule { return ACLRule{ Entity: ACLEntity(a.Entity), @@ -274,6 +236,17 @@ func toBucketACLRule(a *raw.BucketAccessControl) ACLRule { } } +func toBucketACLRuleFromProto(a *storagepb.BucketAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.GetEntity()), + EntityID: a.GetEntityId(), + Role: ACLRole(a.GetRole()), + Domain: a.GetDomain(), + Email: a.GetEmail(), + ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()), + } +} + func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl { if len(rules) == 0 { return nil @@ -285,6 +258,17 @@ func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl { return r } +func toProtoObjectACL(rules []ACLRule) []*storagepb.ObjectAccessControl { + if len(rules) == 0 { + return nil + } + r := make([]*storagepb.ObjectAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toProtoObjectAccessControl("")) // 
bucket name unnecessary + } + return r +} + func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl { if len(rules) == 0 { return nil @@ -296,6 +280,17 @@ func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl { return r } +func toProtoBucketACL(rules []ACLRule) []*storagepb.BucketAccessControl { + if len(rules) == 0 { + return nil + } + r := make([]*storagepb.BucketAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toProtoBucketAccessControl()) + } + return r +} + func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl { return &raw.BucketAccessControl{ Bucket: bucket, @@ -314,6 +309,22 @@ func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessContro } } +func (r ACLRule) toProtoObjectAccessControl(bucket string) *storagepb.ObjectAccessControl { + return &storagepb.ObjectAccessControl{ + Entity: string(r.Entity), + Role: string(r.Role), + // The other fields are not settable. + } +} + +func (r ACLRule) toProtoBucketAccessControl() *storagepb.BucketAccessControl { + return &storagepb.BucketAccessControl{ + Entity: string(r.Entity), + Role: string(r.Role), + // The other fields are not settable. + } +} + func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam { if p == nil { return nil @@ -324,6 +335,16 @@ func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam { } } +func toProjectTeamFromProto(p *storagepb.ProjectTeam) *ProjectTeam { + if p == nil { + return nil + } + return &ProjectTeam{ + ProjectNumber: p.GetProjectNumber(), + Team: p.GetTeam(), + } +} + func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam { if p == nil { return nil diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 478482645fab..28a73b8d9953 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -16,16 +16,25 @@ package storage import ( "context" + "encoding/base64" + "encoding/json" + "errors" "fmt" - "net/http" "reflect" + "strings" "time" + "cloud.google.com/go/compute/metadata" "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" "google.golang.org/api/googleapi" + "google.golang.org/api/iamcredentials/v1" "google.golang.org/api/iterator" + "google.golang.org/api/option" raw "google.golang.org/api/storage/v1" + dpb "google.golang.org/genproto/googleapis/type/date" + "google.golang.org/protobuf/proto" ) // BucketHandle provides operations on a Google Cloud Storage bucket. @@ -37,6 +46,7 @@ type BucketHandle struct { defaultObjectACL ACLHandle conds *BucketConditions userProject string // project for Requester Pays buckets + retry *retryConfig } // Bucket returns a BucketHandle, which provides operations on the named bucket. @@ -45,20 +55,25 @@ type BucketHandle struct { // The supplied name must contain only lowercase letters, numbers, dashes, // underscores, and dots. 
The full specification for valid bucket names can be // found at: -// https://cloud.google.com/storage/docs/bucket-naming +// +// https://cloud.google.com/storage/docs/bucket-naming func (c *Client) Bucket(name string) *BucketHandle { + retry := c.retry.clone() return &BucketHandle{ c: c, name: name, acl: ACLHandle{ c: c, bucket: name, + retry: retry, }, defaultObjectACL: ACLHandle{ c: c, bucket: name, isDefault: true, + retry: retry, }, + retry: retry, } } @@ -68,27 +83,11 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") defer func() { trace.EndSpan(ctx, err) }() - var bkt *raw.Bucket - if attrs != nil { - bkt = attrs.toRawBucket() - } else { - bkt = &raw.Bucket{} - } - bkt.Name = b.name - // If there is lifecycle information but no location, explicitly set - // the location. This is a GCS quirk/bug. - if bkt.Location == "" && bkt.Lifecycle != nil { - bkt.Location = "US" - } - req := b.c.raw.Buckets.Insert(projectID, bkt) - setClientHeader(req.Header()) - if attrs != nil && attrs.PredefinedACL != "" { - req.PredefinedAcl(attrs.PredefinedACL) - } - if attrs != nil && attrs.PredefinedDefaultObjectACL != "" { - req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) + o := makeStorageOpts(true, b.retry, b.userProject) + if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, o...); err != nil { + return err } - return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err }) + return nil } // Delete deletes the Bucket. @@ -96,23 +95,8 @@ func (b *BucketHandle) Delete(ctx context.Context) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete") defer func() { trace.EndSpan(ctx, err) }() - req, err := b.newDeleteCall() - if err != nil { - return err - } - return runWithRetry(ctx, func() error { return req.Context(ctx).Do() }) -} - -func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) { - req := b.c.raw.Buckets.Delete(b.name) - setClientHeader(req.Header()) - if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil { - return nil, err - } - if b.userProject != "" { - req.UserProject(b.userProject) - } - return req, nil + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...) } // ACL returns an ACLHandle, which provides access to the bucket's access control list. @@ -130,12 +114,15 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle { } // Object returns an ObjectHandle, which provides operations on the named object. -// This call does not perform any network operations. +// This call does not perform any network operations such as fetching the object or verifying its existence. +// Use methods on ObjectHandle to perform network operations. // // name must consist entirely of valid UTF-8-encoded runes. 
The full specification // for valid object names can be found at: -// https://cloud.google.com/storage/docs/bucket-naming +// +// https://cloud.google.com/storage/docs/naming-objects func (b *BucketHandle) Object(name string) *ObjectHandle { + retry := b.retry.clone() return &ObjectHandle{ c: b.c, bucket: b.name, @@ -145,9 +132,11 @@ func (b *BucketHandle) Object(name string) *ObjectHandle { bucket: b.name, object: name, userProject: b.userProject, + retry: retry, }, gen: -1, userProject: b.userProject, + retry: retry, } } @@ -156,34 +145,8 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs") defer func() { trace.EndSpan(ctx, err) }() - req, err := b.newGetCall() - if err != nil { - return nil, err - } - var resp *raw.Bucket - err = runWithRetry(ctx, func() error { - resp, err = req.Context(ctx).Do() - return err - }) - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrBucketNotExist - } - if err != nil { - return nil, err - } - return newBucket(resp) -} - -func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) { - req := b.c.raw.Buckets.Get(b.name).Projection("full") - setClientHeader(req.Header()) - if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil { - return nil, err - } - if b.userProject != "" { - req.UserProject(b.userProject) - } - return req, nil + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.GetBucket(ctx, b.name, b.conds, o...) } // Update updates a bucket's attributes. @@ -191,35 +154,165 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) ( ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") defer func() { trace.EndSpan(ctx, err) }() - req, err := b.newPatchCall(&uattrs) - if err != nil { - return nil, err + isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0 + o := makeStorageOpts(isIdempotent, b.retry, b.userProject) + return b.c.tc.UpdateBucket(ctx, b.name, &uattrs, b.conds, o...) +} + +// SignedURL returns a URL for the specified object. Signed URLs allow anyone +// access to a restricted resource for a limited time without needing a Google +// account or signing in. +// For more information about signed URLs, see "[Overview of access control]." +// +// This method requires the Method and Expires fields in the specified +// SignedURLOptions to be non-nil. You may need to set the GoogleAccessID and +// PrivateKey fields in some cases. Read more on the [automatic detection of credentials] +// for this method. +// +// [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication +// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing +func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) { + if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) { + return SignedURL(b.name, object, opts) } - if uattrs.PredefinedACL != "" { - req.PredefinedAcl(uattrs.PredefinedACL) + // Make a copy of opts so we don't modify the pointer parameter. 
+ newopts := opts.clone() + + if newopts.GoogleAccessID == "" { + id, err := b.detectDefaultGoogleAccessID() + if err != nil { + return "", err + } + newopts.GoogleAccessID = id + } + if newopts.SignBytes == nil && len(newopts.PrivateKey) == 0 { + if b.c.creds != nil && len(b.c.creds.JSON) > 0 { + var sa struct { + PrivateKey string `json:"private_key"` + } + err := json.Unmarshal(b.c.creds.JSON, &sa) + if err == nil && sa.PrivateKey != "" { + newopts.PrivateKey = []byte(sa.PrivateKey) + } + } + + // Don't error out if we can't unmarshal the private key from the client, + // fallback to the default sign function for the service account. + if len(newopts.PrivateKey) == 0 { + newopts.SignBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID) + } } - if uattrs.PredefinedDefaultObjectACL != "" { - req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL) + return SignedURL(b.name, object, newopts) +} + +// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. +// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. +// +// This method requires the Expires field in the specified PostPolicyV4Options +// to be non-nil. You may need to set the GoogleAccessID and PrivateKey fields +// in some cases. Read more on the [automatic detection of credentials] for this method. +// +// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing +func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { + if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) { + return GenerateSignedPostPolicyV4(b.name, object, opts) } - // TODO(jba): retry iff metagen is set? - rb, err := req.Context(ctx).Do() - if err != nil { - return nil, err + // Make a copy of opts so we don't modify the pointer parameter. + newopts := opts.clone() + + if newopts.GoogleAccessID == "" { + id, err := b.detectDefaultGoogleAccessID() + if err != nil { + return nil, err + } + newopts.GoogleAccessID = id + } + if newopts.SignBytes == nil && newopts.SignRawBytes == nil && len(newopts.PrivateKey) == 0 { + if b.c.creds != nil && len(b.c.creds.JSON) > 0 { + var sa struct { + PrivateKey string `json:"private_key"` + } + err := json.Unmarshal(b.c.creds.JSON, &sa) + if err == nil && sa.PrivateKey != "" { + newopts.PrivateKey = []byte(sa.PrivateKey) + } + } + + // Don't error out if we can't unmarshal the private key from the client, + // fallback to the default sign function for the service account. 
+ if len(newopts.PrivateKey) == 0 { + newopts.SignRawBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID) + } } - return newBucket(rb) + return GenerateSignedPostPolicyV4(b.name, object, newopts) } -func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) { - rb := uattrs.toRawBucket() - req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full") - setClientHeader(req.Header()) - if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil { - return nil, err +func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) { + returnErr := errors.New("no credentials found on client and not on GCE (Google Compute Engine)") + + if b.c.creds != nil && len(b.c.creds.JSON) > 0 { + var sa struct { + ClientEmail string `json:"client_email"` + SAImpersonationURL string `json:"service_account_impersonation_url"` + CredType string `json:"type"` + } + + err := json.Unmarshal(b.c.creds.JSON, &sa) + if err != nil { + returnErr = err + } else if sa.CredType == "impersonated_service_account" { + start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":") + + if end <= start { + returnErr = errors.New("error parsing impersonated service account credentials") + } else { + return sa.SAImpersonationURL[start+1 : end], nil + } + } else if sa.CredType == "service_account" && sa.ClientEmail != "" { + return sa.ClientEmail, nil + } else { + returnErr = errors.New("unable to parse credentials; only service_account and impersonated_service_account credentials are supported") + } } - if b.userProject != "" { - req.UserProject(b.userProject) + + // Don't error out if we can't unmarshal, fallback to GCE check. + if metadata.OnGCE() { + email, err := metadata.Email("default") + if err == nil && email != "" { + return email, nil + } else if err != nil { + returnErr = err + } else { + returnErr = errors.New("empty email from GCE metadata service") + } + + } + return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4])", returnErr) +} + +func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) { + return func(in []byte) ([]byte, error) { + ctx := context.Background() + + // It's ok to recreate this service per call since we pass in the http client, + // circumventing the cost of recreating the auth/transport layer + svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(b.c.hc)) + if err != nil { + return nil, fmt.Errorf("unable to create iamcredentials client: %w", err) + } + + resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{ + Payload: base64.StdEncoding.EncodeToString(in), + }).Do() + if err != nil { + return nil, fmt.Errorf("unable to sign bytes: %w", err) + } + out, err := base64.StdEncoding.DecodeString(resp.SignedBlob) + if err != nil { + return nil, fmt.Errorf("unable to base64 decode response: %w", err) + } + return out, nil } - return req, nil } // BucketAttrs represents the metadata for a Google Cloud Storage bucket. @@ -244,6 +337,13 @@ type BucketAttrs struct { // for more information. 
UniformBucketLevelAccess UniformBucketLevelAccess + // PublicAccessPrevention is the setting for the bucket's + // PublicAccessPrevention policy, which can be used to prevent public access + // of data in the bucket. See + // https://cloud.google.com/storage/docs/public-access-prevention for more + // information. + PublicAccessPrevention PublicAccessPrevention + // DefaultObjectACL is the list of access controls to // apply to new objects when no object ACL is provided. DefaultObjectACL []ACLRule @@ -267,8 +367,13 @@ type BucketAttrs struct { PredefinedDefaultObjectACL string // Location is the location of the bucket. It defaults to "US". + // If specifying a dual-region, CustomPlacementConfig should be set in conjunction. Location string + // The bucket's custom placement configuration that holds a list of + // regional locations for custom dual regions. + CustomPlacementConfig *CustomPlacementConfig + // MetaGeneration is the metadata generation of the bucket. // This field is read-only. MetaGeneration int64 @@ -329,6 +434,21 @@ type BucketAttrs struct { // Typical values are "multi-region", "region" and "dual-region". // This field is read-only. LocationType string + + // The project number of the project the bucket belongs to. + // This field is read-only. + ProjectNumber uint64 + + // RPO configures the Recovery Point Objective (RPO) policy of the bucket. + // Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket. + // See https://cloud.google.com/storage/docs/managing-turbo-replication for + // more information. + RPO RPO + + // Autoclass holds the bucket's autoclass configuration. If enabled, + // allows for the automatic selection of the best storage class + // based on object access patterns. + Autoclass *Autoclass } // BucketPolicyOnly is an alias for UniformBucketLevelAccess. @@ -353,6 +473,47 @@ type UniformBucketLevelAccess struct { LockedTime time.Time } +// PublicAccessPrevention configures the Public Access Prevention feature, which +// can be used to disallow public access to any data in a bucket. See +// https://cloud.google.com/storage/docs/public-access-prevention for more +// information. +type PublicAccessPrevention int + +const ( + // PublicAccessPreventionUnknown is a zero value, used only if this field is + // not set in a call to GCS. + PublicAccessPreventionUnknown PublicAccessPrevention = iota + + // PublicAccessPreventionUnspecified corresponds to a value of "unspecified". + // Deprecated: use PublicAccessPreventionInherited + PublicAccessPreventionUnspecified + + // PublicAccessPreventionEnforced corresponds to a value of "enforced". This + // enforces Public Access Prevention on the bucket. + PublicAccessPreventionEnforced + + // PublicAccessPreventionInherited corresponds to a value of "inherited" + // and is the default for buckets. + PublicAccessPreventionInherited + + publicAccessPreventionUnknown string = "" + // TODO: remove unspecified when change is fully completed + publicAccessPreventionUnspecified = "unspecified" + publicAccessPreventionEnforced = "enforced" + publicAccessPreventionInherited = "inherited" +) + +func (p PublicAccessPrevention) String() string { + switch p { + case PublicAccessPreventionInherited, PublicAccessPreventionUnspecified: + return publicAccessPreventionInherited + case PublicAccessPreventionEnforced: + return publicAccessPreventionEnforced + default: + return publicAccessPreventionUnknown + } +} + // Lifecycle is the lifecycle configuration for objects in the bucket. 
type Lifecycle struct { Rules []LifecycleRule @@ -389,7 +550,8 @@ type RetentionPolicy struct { } const ( - // RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule. + // RFC3339 timestamp with only the date segment, used for CreatedBefore, + // CustomTimeBefore, and NoncurrentTimeBefore in LifecycleRule. rfc3339Date = "2006-01-02" // DeleteAction is a lifecycle action that deletes a live and/or archived @@ -399,6 +561,13 @@ const ( // SetStorageClassAction changes the storage class of live and/or archived // objects. SetStorageClassAction = "SetStorageClass" + + // AbortIncompleteMPUAction is a lifecycle action that aborts an incomplete + // multipart upload when the multipart upload meets the conditions specified + // in the lifecycle rule. The AgeInDays condition is the only allowed + // condition for this action. AgeInDays is measured from the time the + // multipart upload was created. + AbortIncompleteMPUAction = "AbortIncompleteMultipartUpload" ) // LifecycleRule is a lifecycle configuration rule. @@ -419,9 +588,8 @@ type LifecycleRule struct { type LifecycleAction struct { // Type is the type of action to take on matching objects. // - // Acceptable values are "Delete" to delete matching objects and - // "SetStorageClass" to set the storage class defined in StorageClass on - // matching objects. + // Acceptable values are storage.DeleteAction, storage.SetStorageClassAction, + // and storage.AbortIncompleteMPUAction. Type string // StorageClass is the storage class to set on matching objects if the Action @@ -446,7 +614,12 @@ const ( // // All configured conditions must be met for the associated action to be taken. type LifecycleCondition struct { + // AllObjects is used to select all objects in a bucket by + // setting AgeInDays to 0. + AllObjects bool + // AgeInDays is the age of the object in days. + // If you want to set AgeInDays to `0` use AllObjects set to `true`. AgeInDays int64 // CreatedBefore is the time the object was created. @@ -455,20 +628,53 @@ type LifecycleCondition struct { // the specified date in UTC. CreatedBefore time.Time + // CustomTimeBefore is the CustomTime metadata field of the object. This + // condition is satisfied when an object's CustomTime timestamp is before + // midnight of the specified date in UTC. + // + // This condition can only be satisfied if CustomTime has been set. + CustomTimeBefore time.Time + + // DaysSinceCustomTime is the days elapsed since the CustomTime date of the + // object. This condition can only be satisfied if CustomTime has been set. + // Note: Using `0` as the value will be ignored by the library and not sent to the API. + DaysSinceCustomTime int64 + + // DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp + // of the object. This condition is relevant only for versioned objects. + // Note: Using `0` as the value will be ignored by the library and not sent to the API. + DaysSinceNoncurrentTime int64 + // Liveness specifies the object's liveness. Relevant only for versioned objects Liveness Liveness + // MatchesPrefix is the condition matching an object if any of the + // matches_prefix strings are an exact prefix of the object's name. + MatchesPrefix []string + // MatchesStorageClasses is the condition matching the object's storage // class. // // Values include "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". 
MatchesStorageClasses []string + // MatchesSuffix is the condition matching an object if any of the + // matches_suffix strings are an exact suffix of the object's name. + MatchesSuffix []string + + // NoncurrentTimeBefore is the noncurrent timestamp of the object. This + // condition is satisfied when an object's noncurrent timestamp is before + // midnight of the specified date in UTC. + // + // This condition is relevant only for versioned objects. + NoncurrentTimeBefore time.Time + // NumNewerVersions is the condition matching objects with a number of newer versions. // // If the value is N, this condition is satisfied when there are at least N // versions (including the live version) newer than this version of the // object. + // Note: Using `0` as the value will be ignored by the library and not sent to the API. NumNewerVersions int64 } @@ -500,6 +706,29 @@ type BucketWebsite struct { NotFoundPage string } +// CustomPlacementConfig holds the bucket's custom placement +// configuration for Custom Dual Regions. See +// https://cloud.google.com/storage/docs/locations#location-dr for more information. +type CustomPlacementConfig struct { + // The list of regional locations in which data is placed. + // Custom Dual Regions require exactly 2 regional locations. + DataLocations []string +} + +// Autoclass holds the bucket's autoclass configuration. If enabled, +// allows for the automatic selection of the best storage class +// based on object access patterns. See +// https://cloud.google.com/storage/docs/using-autoclass for more information. +type Autoclass struct { + // Enabled specifies whether the autoclass feature is enabled + // on the bucket. + Enabled bool + // ToggleTime is the time from which Autoclass was last toggled. + // If Autoclass is enabled when the bucket is created, the ToggleTime + // is set to the bucket creation time. This field is read-only. 
+ ToggleTime time.Time +} + func newBucket(b *raw.Bucket) (*BucketAttrs, error) { if b == nil { return nil, nil @@ -528,11 +757,49 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) { Website: toBucketWebsite(b.Website), BucketPolicyOnly: toBucketPolicyOnly(b.IamConfiguration), UniformBucketLevelAccess: toUniformBucketLevelAccess(b.IamConfiguration), + PublicAccessPrevention: toPublicAccessPrevention(b.IamConfiguration), Etag: b.Etag, LocationType: b.LocationType, + ProjectNumber: b.ProjectNumber, + RPO: toRPO(b), + CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig), + Autoclass: toAutoclassFromRaw(b.Autoclass), }, nil } +func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs { + if b == nil { + return nil + } + return &BucketAttrs{ + Name: parseBucketName(b.GetName()), + Location: b.GetLocation(), + MetaGeneration: b.GetMetageneration(), + DefaultEventBasedHold: b.GetDefaultEventBasedHold(), + StorageClass: b.GetStorageClass(), + Created: b.GetCreateTime().AsTime(), + VersioningEnabled: b.GetVersioning().GetEnabled(), + ACL: toBucketACLRulesFromProto(b.GetAcl()), + DefaultObjectACL: toObjectACLRulesFromProto(b.GetDefaultObjectAcl()), + Labels: b.GetLabels(), + RequesterPays: b.GetBilling().GetRequesterPays(), + Lifecycle: toLifecycleFromProto(b.GetLifecycle()), + RetentionPolicy: toRetentionPolicyFromProto(b.GetRetentionPolicy()), + CORS: toCORSFromProto(b.GetCors()), + Encryption: toBucketEncryptionFromProto(b.GetEncryption()), + Logging: toBucketLoggingFromProto(b.GetLogging()), + Website: toBucketWebsiteFromProto(b.GetWebsite()), + BucketPolicyOnly: toBucketPolicyOnlyFromProto(b.GetIamConfig()), + UniformBucketLevelAccess: toUniformBucketLevelAccessFromProto(b.GetIamConfig()), + PublicAccessPrevention: toPublicAccessPreventionFromProto(b.GetIamConfig()), + LocationType: b.GetLocationType(), + RPO: toRPOFromProto(b), + CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()), + ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 the project resource name is ID based + Autoclass: toAutoclassFromProto(b.GetAutoclass()), + } +} + // toRawBucket copies the editable attribute from b to the raw library's Bucket type. func (b *BucketAttrs) toRawBucket() *raw.Bucket { // Copy label map. 
@@ -555,29 +822,179 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket { bb = &raw.BucketBilling{RequesterPays: true} } var bktIAM *raw.BucketIamConfiguration - if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { - bktIAM = &raw.BucketIamConfiguration{ - UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM = &raw.BucketIamConfiguration{} + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { + bktIAM.UniformBucketLevelAccess = &raw.BucketIamConfigurationUniformBucketLevelAccess{ Enabled: true, - }, + } + } + if b.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String() } } return &raw.Bucket{ - Name: b.Name, - Location: b.Location, - StorageClass: b.StorageClass, - Acl: toRawBucketACL(b.ACL), - DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL), - Versioning: v, - Labels: labels, - Billing: bb, - Lifecycle: toRawLifecycle(b.Lifecycle), - RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(), - Cors: toRawCORS(b.CORS), - Encryption: b.Encryption.toRawBucketEncryption(), - Logging: b.Logging.toRawBucketLogging(), - Website: b.Website.toRawBucketWebsite(), - IamConfiguration: bktIAM, + Name: b.Name, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: toRawBucketACL(b.ACL), + DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL), + Versioning: v, + Labels: labels, + Billing: bb, + Lifecycle: toRawLifecycle(b.Lifecycle), + RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(), + Cors: toRawCORS(b.CORS), + Encryption: b.Encryption.toRawBucketEncryption(), + Logging: b.Logging.toRawBucketLogging(), + Website: b.Website.toRawBucketWebsite(), + IamConfiguration: bktIAM, + Rpo: b.RPO.String(), + CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(), + Autoclass: b.Autoclass.toRawAutoclass(), + } +} + +func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket { + if b == nil { + return &storagepb.Bucket{} + } + + // Copy label map. + var labels map[string]string + if len(b.Labels) > 0 { + labels = make(map[string]string, len(b.Labels)) + for k, v := range b.Labels { + labels[k] = v + } + } + + // Ignore VersioningEnabled if it is false. This is OK because + // we only call this method when creating a bucket, and by default + // new buckets have versioning off. 
+ var v *storagepb.Bucket_Versioning + if b.VersioningEnabled { + v = &storagepb.Bucket_Versioning{Enabled: true} + } + var bb *storagepb.Bucket_Billing + if b.RequesterPays { + bb = &storagepb.Bucket_Billing{RequesterPays: true} + } + var bktIAM *storagepb.Bucket_IamConfig + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM = &storagepb.Bucket_IamConfig{} + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { + bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ + Enabled: true, + } + } + if b.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String() + } + } + + return &storagepb.Bucket{ + Name: b.Name, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: toProtoBucketACL(b.ACL), + DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL), + Versioning: v, + Labels: labels, + Billing: bb, + Lifecycle: toProtoLifecycle(b.Lifecycle), + RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(), + Cors: toProtoCORS(b.CORS), + Encryption: b.Encryption.toProtoBucketEncryption(), + Logging: b.Logging.toProtoBucketLogging(), + Website: b.Website.toProtoBucketWebsite(), + IamConfig: bktIAM, + Rpo: b.RPO.String(), + CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(), + Autoclass: b.Autoclass.toProtoAutoclass(), + } +} + +func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { + if ua == nil { + return &storagepb.Bucket{} + } + + // TODO(cathyo): Handle labels. Pending b/230510191. + + var v *storagepb.Bucket_Versioning + if ua.VersioningEnabled != nil { + v = &storagepb.Bucket_Versioning{Enabled: optional.ToBool(ua.VersioningEnabled)} + } + var bb *storagepb.Bucket_Billing + if ua.RequesterPays != nil { + bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)} + } + + var bktIAM *storagepb.Bucket_IamConfig + if ua.UniformBucketLevelAccess != nil || ua.BucketPolicyOnly != nil || ua.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM = &storagepb.Bucket_IamConfig{} + + if ua.BucketPolicyOnly != nil { + bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ + Enabled: optional.ToBool(ua.BucketPolicyOnly.Enabled), + } + } + + if ua.UniformBucketLevelAccess != nil { + // UniformBucketLevelAccess takes precedence over BucketPolicyOnly, + // so Enabled will be overriden here if both are set + bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ + Enabled: optional.ToBool(ua.UniformBucketLevelAccess.Enabled), + } + } + + if ua.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String() + } + } + + var defaultHold bool + if ua.DefaultEventBasedHold != nil { + defaultHold = optional.ToBool(ua.DefaultEventBasedHold) + } + var lifecycle Lifecycle + if ua.Lifecycle != nil { + lifecycle = *ua.Lifecycle + } + var bktACL []*storagepb.BucketAccessControl + if ua.acl != nil { + bktACL = toProtoBucketACL(ua.acl) + } + if ua.PredefinedACL != "" { + // Clear ACL or the call will fail. + bktACL = nil + } + var bktDefaultObjectACL []*storagepb.ObjectAccessControl + if ua.defaultObjectACL != nil { + bktDefaultObjectACL = toProtoObjectACL(ua.defaultObjectACL) + } + if ua.PredefinedDefaultObjectACL != "" { + // Clear ACLs or the call will fail. 
+ bktDefaultObjectACL = nil + } + + return &storagepb.Bucket{ + StorageClass: ua.StorageClass, + Acl: bktACL, + DefaultObjectAcl: bktDefaultObjectACL, + DefaultEventBasedHold: defaultHold, + Versioning: v, + Billing: bb, + Lifecycle: toProtoLifecycle(lifecycle), + RetentionPolicy: ua.RetentionPolicy.toProtoRetentionPolicy(), + Cors: toProtoCORS(ua.CORS), + Encryption: ua.Encryption.toProtoBucketEncryption(), + Logging: ua.Logging.toProtoBucketLogging(), + Website: ua.Website.toProtoBucketWebsite(), + IamConfig: bktIAM, + Rpo: ua.RPO.String(), + Autoclass: ua.Autoclass.toProtoAutoclass(), } } @@ -638,6 +1055,21 @@ type BucketAttrsToUpdate struct { // for more information. UniformBucketLevelAccess *UniformBucketLevelAccess + // PublicAccessPrevention is the setting for the bucket's + // PublicAccessPrevention policy, which can be used to prevent public access + // of data in the bucket. See + // https://cloud.google.com/storage/docs/public-access-prevention for more + // information. + PublicAccessPrevention PublicAccessPrevention + + // StorageClass is the default storage class of the bucket. This defines + // how objects in the bucket are stored and determines the SLA + // and the cost of storage. Typical values are "STANDARD", "NEARLINE", + // "COLDLINE" and "ARCHIVE". Defaults to "STANDARD". + // See https://cloud.google.com/storage/docs/storage-classes for all + // valid values. + StorageClass string + // If set, updates the retention policy of the bucket. Using // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy. // @@ -672,6 +1104,27 @@ type BucketAttrsToUpdate struct { // See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch. PredefinedDefaultObjectACL string + // RPO configures the Recovery Point Objective (RPO) policy of the bucket. + // Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket. + // See https://cloud.google.com/storage/docs/managing-turbo-replication for + // more information. + RPO RPO + + // If set, updates the autoclass configuration of the bucket. + // See https://cloud.google.com/storage/docs/using-autoclass for more information. + Autoclass *Autoclass + + // acl is the list of access control rules on the bucket. + // It is unexported and only used internally by the gRPC client. + // Library users should use ACLHandle methods directly. + acl []ACLRule + + // defaultObjectACL is the list of access controls to + // apply to new objects when no object ACL is provided. + // It is unexported and only used internally by the gRPC client. + // Library users should use ACLHandle methods directly. + defaultObjectACL []ACLRule + setLabels map[string]string deleteLabels map[string]bool } @@ -740,6 +1193,12 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { }, } } + if ua.PublicAccessPrevention != PublicAccessPreventionUnknown { + if rb.IamConfiguration == nil { + rb.IamConfiguration = &raw.BucketIamConfiguration{} + } + rb.IamConfiguration.PublicAccessPrevention = ua.PublicAccessPrevention.String() + } if ua.Encryption != nil { if ua.Encryption.DefaultKMSKeyName == "" { rb.NullFields = append(rb.NullFields, "Encryption") @@ -768,6 +1227,12 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { rb.Website = ua.Website.toRawBucketWebsite() } } + if ua.Autoclass != nil { + rb.Autoclass = &raw.BucketAutoclass{ + Enabled: ua.Autoclass.Enabled, + ForceSendFields: []string{"Enabled"}, + } + } if ua.PredefinedACL != "" { // Clear ACL or the call will fail. 
rb.Acl = nil @@ -778,6 +1243,10 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { rb.DefaultObjectAcl = nil rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl") } + + rb.StorageClass = ua.StorageClass + rb.Rpo = ua.RPO.String() + if ua.setLabels != nil || ua.deleteLabels != nil { rb.Labels = map[string]string{} for k, v := range ua.setLabels { @@ -852,13 +1321,8 @@ func (b *BucketHandle) UserProject(projectID string) *BucketHandle { // most customers. It might be changed in backwards-incompatible ways and is not // subject to any SLA or deprecation policy. func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error { - var metageneration int64 - if b.conds != nil { - metageneration = b.conds.MetagenerationMatch - } - req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration) - _, err := req.Context(ctx).Do() - return err + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...) } // applyBucketConds modifies the provided call using the conditions in conds. @@ -884,6 +1348,32 @@ func applyBucketConds(method string, conds *BucketConditions, call interface{}) return nil } +// applyBucketConds modifies the provided request message using the conditions +// in conds. msg is a protobuf Message that has fields if_metageneration_match +// and if_metageneration_not_match. +func applyBucketCondsProto(method string, conds *BucketConditions, msg proto.Message) error { + rmsg := msg.ProtoReflect() + + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + + switch { + case conds.MetagenerationMatch != 0: + if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy { if rp == nil { return nil @@ -893,8 +1383,23 @@ func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy { } } -func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) { +func (rp *RetentionPolicy) toProtoRetentionPolicy() *storagepb.Bucket_RetentionPolicy { if rp == nil { + return nil + } + // RetentionPeriod must be greater than 0, so if it is 0, the user left it + // unset, and so we should not send it in the request i.e. nil is sent. 
+ var period *int64 + if rp.RetentionPeriod != 0 { + period = proto.Int64(int64(rp.RetentionPeriod / time.Second)) + } + return &storagepb.Bucket_RetentionPolicy{ + RetentionPeriod: period, + } +} + +func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) { + if rp == nil || rp.EffectiveTime == "" { return nil, nil } t, err := time.Parse(time.RFC3339, rp.EffectiveTime) @@ -908,6 +1413,17 @@ func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) }, nil } +func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *RetentionPolicy { + if rp == nil || rp.GetEffectiveTime().AsTime().Unix() == 0 { + return nil + } + return &RetentionPolicy{ + RetentionPeriod: time.Duration(rp.GetRetentionPeriod()) * time.Second, + EffectiveTime: rp.GetEffectiveTime().AsTime(), + IsLocked: rp.GetIsLocked(), + } +} + func toRawCORS(c []CORS) []*raw.BucketCors { var out []*raw.BucketCors for _, v := range c { @@ -921,6 +1437,19 @@ func toRawCORS(c []CORS) []*raw.BucketCors { return out } +func toProtoCORS(c []CORS) []*storagepb.Bucket_Cors { + var out []*storagepb.Bucket_Cors + for _, v := range c { + out = append(out, &storagepb.Bucket_Cors{ + MaxAgeSeconds: int32(v.MaxAge / time.Second), + Method: v.Methods, + Origin: v.Origins, + ResponseHeader: v.ResponseHeaders, + }) + } + return out +} + func toCORS(rc []*raw.BucketCors) []CORS { var out []CORS for _, v := range rc { @@ -934,6 +1463,19 @@ func toCORS(rc []*raw.BucketCors) []CORS { return out } +func toCORSFromProto(rc []*storagepb.Bucket_Cors) []CORS { + var out []CORS + for _, v := range rc { + out = append(out, CORS{ + MaxAge: time.Duration(v.GetMaxAgeSeconds()) * time.Second, + Methods: v.GetMethod(), + Origins: v.GetOrigin(), + ResponseHeaders: v.GetResponseHeader(), + }) + } + return out +} + func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { var rl raw.BucketLifecycle if len(l.Rules) == 0 { @@ -946,12 +1488,25 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { StorageClass: r.Action.StorageClass, }, Condition: &raw.BucketLifecycleRuleCondition{ - Age: r.Condition.AgeInDays, - MatchesStorageClass: r.Condition.MatchesStorageClasses, - NumNewerVersions: r.Condition.NumNewerVersions, + DaysSinceCustomTime: r.Condition.DaysSinceCustomTime, + DaysSinceNoncurrentTime: r.Condition.DaysSinceNoncurrentTime, + MatchesPrefix: r.Condition.MatchesPrefix, + MatchesStorageClass: r.Condition.MatchesStorageClasses, + MatchesSuffix: r.Condition.MatchesSuffix, + NumNewerVersions: r.Condition.NumNewerVersions, }, } + // AllObjects takes precedent when both AllObjects and AgeInDays are set + // Rationale: If you've opted into using AllObjects, it makes sense that you + // understand the implications of how this option works with AgeInDays. 
+ if r.Condition.AllObjects { + rr.Condition.Age = googleapi.Int64(0) + rr.Condition.ForceSendFields = []string{"Age"} + } else if r.Condition.AgeInDays > 0 { + rr.Condition.Age = googleapi.Int64(r.Condition.AgeInDays) + } + switch r.Condition.Liveness { case LiveAndArchived: rr.Condition.IsLive = nil @@ -964,6 +1519,64 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { if !r.Condition.CreatedBefore.IsZero() { rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date) } + if !r.Condition.CustomTimeBefore.IsZero() { + rr.Condition.CustomTimeBefore = r.Condition.CustomTimeBefore.Format(rfc3339Date) + } + if !r.Condition.NoncurrentTimeBefore.IsZero() { + rr.Condition.NoncurrentTimeBefore = r.Condition.NoncurrentTimeBefore.Format(rfc3339Date) + } + rl.Rule = append(rl.Rule, rr) + } + return &rl +} + +func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle { + var rl storagepb.Bucket_Lifecycle + + for _, r := range l.Rules { + rr := &storagepb.Bucket_Lifecycle_Rule{ + Action: &storagepb.Bucket_Lifecycle_Rule_Action{ + Type: r.Action.Type, + StorageClass: r.Action.StorageClass, + }, + Condition: &storagepb.Bucket_Lifecycle_Rule_Condition{ + // Note: The Apiary types use int64 (even though the Discovery + // doc states "format: int32"), so the client types used int64, + // but the proto uses int32 so we have a potentially lossy + // conversion. + AgeDays: proto.Int32(int32(r.Condition.AgeInDays)), + DaysSinceCustomTime: proto.Int32(int32(r.Condition.DaysSinceCustomTime)), + DaysSinceNoncurrentTime: proto.Int32(int32(r.Condition.DaysSinceNoncurrentTime)), + MatchesPrefix: r.Condition.MatchesPrefix, + MatchesStorageClass: r.Condition.MatchesStorageClasses, + MatchesSuffix: r.Condition.MatchesSuffix, + NumNewerVersions: proto.Int32(int32(r.Condition.NumNewerVersions)), + }, + } + + // TODO(#6205): This may not be needed for gRPC + if r.Condition.AllObjects { + rr.Condition.AgeDays = proto.Int32(0) + } + + switch r.Condition.Liveness { + case LiveAndArchived: + rr.Condition.IsLive = nil + case Live: + rr.Condition.IsLive = proto.Bool(true) + case Archived: + rr.Condition.IsLive = proto.Bool(false) + } + + if !r.Condition.CreatedBefore.IsZero() { + rr.Condition.CreatedBefore = timeToProtoDate(r.Condition.CreatedBefore) + } + if !r.Condition.CustomTimeBefore.IsZero() { + rr.Condition.CustomTimeBefore = timeToProtoDate(r.Condition.CustomTimeBefore) + } + if !r.Condition.NoncurrentTimeBefore.IsZero() { + rr.Condition.NoncurrentTimeBefore = timeToProtoDate(r.Condition.NoncurrentTimeBefore) + } rl.Rule = append(rl.Rule, rr) } return &rl @@ -981,11 +1594,20 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { StorageClass: rr.Action.StorageClass, }, Condition: LifecycleCondition{ - AgeInDays: rr.Condition.Age, - MatchesStorageClasses: rr.Condition.MatchesStorageClass, - NumNewerVersions: rr.Condition.NumNewerVersions, + DaysSinceCustomTime: rr.Condition.DaysSinceCustomTime, + DaysSinceNoncurrentTime: rr.Condition.DaysSinceNoncurrentTime, + MatchesPrefix: rr.Condition.MatchesPrefix, + MatchesStorageClasses: rr.Condition.MatchesStorageClass, + MatchesSuffix: rr.Condition.MatchesSuffix, + NumNewerVersions: rr.Condition.NumNewerVersions, }, } + if rr.Condition.Age != nil { + r.Condition.AgeInDays = *rr.Condition.Age + if *rr.Condition.Age == 0 { + r.Condition.AllObjects = true + } + } if rr.Condition.IsLive == nil { r.Condition.Liveness = LiveAndArchived @@ -998,6 +1620,61 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { if rr.Condition.CreatedBefore != "" { 
r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore) } + if rr.Condition.CustomTimeBefore != "" { + r.Condition.CustomTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.CustomTimeBefore) + } + if rr.Condition.NoncurrentTimeBefore != "" { + r.Condition.NoncurrentTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.NoncurrentTimeBefore) + } + l.Rules = append(l.Rules, r) + } + return l +} + +func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle { + var l Lifecycle + if rl == nil { + return l + } + for _, rr := range rl.GetRule() { + r := LifecycleRule{ + Action: LifecycleAction{ + Type: rr.GetAction().GetType(), + StorageClass: rr.GetAction().GetStorageClass(), + }, + Condition: LifecycleCondition{ + AgeInDays: int64(rr.GetCondition().GetAgeDays()), + DaysSinceCustomTime: int64(rr.GetCondition().GetDaysSinceCustomTime()), + DaysSinceNoncurrentTime: int64(rr.GetCondition().GetDaysSinceNoncurrentTime()), + MatchesPrefix: rr.GetCondition().GetMatchesPrefix(), + MatchesStorageClasses: rr.GetCondition().GetMatchesStorageClass(), + MatchesSuffix: rr.GetCondition().GetMatchesSuffix(), + NumNewerVersions: int64(rr.GetCondition().GetNumNewerVersions()), + }, + } + + // TODO(#6205): This may not be needed for gRPC + if rr.GetCondition().GetAgeDays() == 0 { + r.Condition.AllObjects = true + } + + if rr.GetCondition().IsLive == nil { + r.Condition.Liveness = LiveAndArchived + } else if rr.GetCondition().GetIsLive() { + r.Condition.Liveness = Live + } else { + r.Condition.Liveness = Archived + } + + if rr.GetCondition().GetCreatedBefore() != nil { + r.Condition.CreatedBefore = protoDateToUTCTime(rr.GetCondition().GetCreatedBefore()) + } + if rr.GetCondition().GetCustomTimeBefore() != nil { + r.Condition.CustomTimeBefore = protoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore()) + } + if rr.GetCondition().GetNoncurrentTimeBefore() != nil { + r.Condition.NoncurrentTimeBefore = protoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore()) + } l.Rules = append(l.Rules, r) } return l @@ -1012,6 +1689,15 @@ func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption { } } +func (e *BucketEncryption) toProtoBucketEncryption() *storagepb.Bucket_Encryption { + if e == nil { + return nil + } + return &storagepb.Bucket_Encryption{ + DefaultKmsKey: e.DefaultKMSKeyName, + } +} + func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption { if e == nil { return nil @@ -1019,6 +1705,13 @@ func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption { return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName} } +func toBucketEncryptionFromProto(e *storagepb.Bucket_Encryption) *BucketEncryption { + if e == nil { + return nil + } + return &BucketEncryption{DefaultKMSKeyName: e.GetDefaultKmsKey()} +} + func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging { if b == nil { return nil @@ -1029,6 +1722,16 @@ func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging { } } +func (b *BucketLogging) toProtoBucketLogging() *storagepb.Bucket_Logging { + if b == nil { + return nil + } + return &storagepb.Bucket_Logging{ + LogBucket: bucketResourceName(globalProjectAlias, b.LogBucket), + LogObjectPrefix: b.LogObjectPrefix, + } +} + func toBucketLogging(b *raw.BucketLogging) *BucketLogging { if b == nil { return nil @@ -1039,6 +1742,17 @@ func toBucketLogging(b *raw.BucketLogging) *BucketLogging { } } +func toBucketLoggingFromProto(b *storagepb.Bucket_Logging) *BucketLogging { + if b == nil { + return nil + } + lb := 
parseBucketName(b.GetLogBucket()) + return &BucketLogging{ + LogBucket: lb, + LogObjectPrefix: b.GetLogObjectPrefix(), + } +} + func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite { if w == nil { return nil @@ -1049,6 +1763,16 @@ func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite { } } +func (w *BucketWebsite) toProtoBucketWebsite() *storagepb.Bucket_Website { + if w == nil { + return nil + } + return &storagepb.Bucket_Website{ + MainPageSuffix: w.MainPageSuffix, + NotFoundPage: w.NotFoundPage, + } +} + func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite { if w == nil { return nil @@ -1059,6 +1783,16 @@ func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite { } } +func toBucketWebsiteFromProto(w *storagepb.Bucket_Website) *BucketWebsite { + if w == nil { + return nil + } + return &BucketWebsite{ + MainPageSuffix: w.GetMainPageSuffix(), + NotFoundPage: w.GetNotFoundPage(), + } +} + func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly { if b == nil || b.BucketPolicyOnly == nil || !b.BucketPolicyOnly.Enabled { return BucketPolicyOnly{} @@ -1075,6 +1809,16 @@ func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly { } } +func toBucketPolicyOnlyFromProto(b *storagepb.Bucket_IamConfig) BucketPolicyOnly { + if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() { + return BucketPolicyOnly{} + } + return BucketPolicyOnly{ + Enabled: true, + LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(), + } +} + func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLevelAccess { if b == nil || b.UniformBucketLevelAccess == nil || !b.UniformBucketLevelAccess.Enabled { return UniformBucketLevelAccess{} @@ -1091,23 +1835,186 @@ func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLeve } } -// Objects returns an iterator over the objects in the bucket that match the Query q. -// If q is nil, no filtering is done. 
+func toUniformBucketLevelAccessFromProto(b *storagepb.Bucket_IamConfig) UniformBucketLevelAccess { + if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() { + return UniformBucketLevelAccess{} + } + return UniformBucketLevelAccess{ + Enabled: true, + LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(), + } +} + +func toPublicAccessPrevention(b *raw.BucketIamConfiguration) PublicAccessPrevention { + if b == nil { + return PublicAccessPreventionUnknown + } + switch b.PublicAccessPrevention { + case publicAccessPreventionInherited, publicAccessPreventionUnspecified: + return PublicAccessPreventionInherited + case publicAccessPreventionEnforced: + return PublicAccessPreventionEnforced + default: + return PublicAccessPreventionUnknown + } +} + +func toPublicAccessPreventionFromProto(b *storagepb.Bucket_IamConfig) PublicAccessPrevention { + if b == nil { + return PublicAccessPreventionUnknown + } + switch b.GetPublicAccessPrevention() { + case publicAccessPreventionInherited, publicAccessPreventionUnspecified: + return PublicAccessPreventionInherited + case publicAccessPreventionEnforced: + return PublicAccessPreventionEnforced + default: + return PublicAccessPreventionUnknown + } +} + +func toRPO(b *raw.Bucket) RPO { + if b == nil { + return RPOUnknown + } + switch b.Rpo { + case rpoDefault: + return RPODefault + case rpoAsyncTurbo: + return RPOAsyncTurbo + default: + return RPOUnknown + } +} + +func toRPOFromProto(b *storagepb.Bucket) RPO { + if b == nil { + return RPOUnknown + } + switch b.GetRpo() { + case rpoDefault: + return RPODefault + case rpoAsyncTurbo: + return RPOAsyncTurbo + default: + return RPOUnknown + } +} + +func customPlacementFromRaw(c *raw.BucketCustomPlacementConfig) *CustomPlacementConfig { + if c == nil { + return nil + } + return &CustomPlacementConfig{DataLocations: c.DataLocations} +} + +func (c *CustomPlacementConfig) toRawCustomPlacement() *raw.BucketCustomPlacementConfig { + if c == nil { + return nil + } + return &raw.BucketCustomPlacementConfig{ + DataLocations: c.DataLocations, + } +} + +func (c *CustomPlacementConfig) toProtoCustomPlacement() *storagepb.Bucket_CustomPlacementConfig { + if c == nil { + return nil + } + return &storagepb.Bucket_CustomPlacementConfig{ + DataLocations: c.DataLocations, + } +} + +func customPlacementFromProto(c *storagepb.Bucket_CustomPlacementConfig) *CustomPlacementConfig { + if c == nil { + return nil + } + return &CustomPlacementConfig{DataLocations: c.GetDataLocations()} +} + +func (a *Autoclass) toRawAutoclass() *raw.BucketAutoclass { + if a == nil { + return nil + } + // Excluding read only field ToggleTime. + return &raw.BucketAutoclass{ + Enabled: a.Enabled, + } +} + +func (a *Autoclass) toProtoAutoclass() *storagepb.Bucket_Autoclass { + if a == nil { + return nil + } + // Excluding read only field ToggleTime. + return &storagepb.Bucket_Autoclass{ + Enabled: a.Enabled, + } +} + +func toAutoclassFromRaw(a *raw.BucketAutoclass) *Autoclass { + if a == nil || a.ToggleTime == "" { + return nil + } + // Return Autoclass.ToggleTime only if parsed with a valid value. 
+ t, err := time.Parse(time.RFC3339, a.ToggleTime) + if err != nil { + return &Autoclass{ + Enabled: a.Enabled, + } + } + return &Autoclass{ + Enabled: a.Enabled, + ToggleTime: t, + } +} + +func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { + if a == nil || a.GetToggleTime().AsTime().Unix() == 0 { + return nil + } + return &Autoclass{ + Enabled: a.GetEnabled(), + ToggleTime: a.GetToggleTime().AsTime(), + } +} + +// Objects returns an iterator over the objects in the bucket that match the +// Query q. If q is nil, no filtering is done. Objects will be iterated over +// lexicographically by name. // // Note: The returned iterator is not safe for concurrent operations without explicit synchronization. func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { - it := &ObjectIterator{ - ctx: ctx, - bucket: b, + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.ListObjects(ctx, b.name, q, o...) +} + +// Retryer returns a bucket handle that is configured with custom retry +// behavior as specified by the options that are passed to it. All operations +// on the new handle will use the customized retry configuration. +// Retry options set on a object handle will take precedence over options set on +// the bucket handle. +// These retry options will merge with the client's retry configuration (if set) +// for the returned handle. Options passed into this method will take precedence +// over retry options on the client. Note that you must explicitly pass in each +// option you want to override. +func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle { + b2 := *b + var retry *retryConfig + if b.retry != nil { + // merge the options with the existing retry + retry = b.retry + } else { + retry = &retryConfig{} } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - it.fetch, - func() int { return len(it.items) }, - func() interface{} { b := it.items; it.items = nil; return b }) - if q != nil { - it.query = *q + for _, opt := range opts { + opt.apply(retry) } - return it + b2.retry = retry + b2.acl.retry = retry + b2.defaultObjectACL.retry = retry + return &b2 } // An ObjectIterator is an iterator over ObjectAttrs. @@ -1115,7 +2022,6 @@ func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { // Note: This iterator is not safe for concurrent operations without explicit synchronization. type ObjectIterator struct { ctx context.Context - bucket *BucketHandle query Query pageInfo *iterator.PageInfo nextFunc func() error @@ -1131,6 +2037,13 @@ func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // there are no more results. Once Next returns iterator.Done, all subsequent // calls will return iterator.Done. // +// In addition, if Next returns an error other than iterator.Done, all +// subsequent calls will return the same error. To continue iteration, a new +// `ObjectIterator` must be created. Since objects are ordered lexicographically +// by name, `Query.StartOffset` can be used to create a new iterator which will +// start at the desired place. See +// https://pkg.go.dev/cloud.google.com/go/storage?tab=doc#hdr-Listing_objects. +// // If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will // have a non-empty Prefix field, and a zero value for all other fields. These // represent prefixes. 
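As a usage sketch of the iterator-backed Objects listing and the new BucketHandle.Retryer described above (not part of the vendored patch; the bucket name, prefix, and backoff value are illustrative, and the snippet assumes an existing client plus the cloud.google.com/go/storage, github.com/googleapis/gax-go/v2, google.golang.org/api/iterator, log, and time imports):

	bkt := client.Bucket("my-bucket").Retryer(
		// Custom backoff for calls made through this bucket handle.
		storage.WithBackoff(gax.Backoff{Initial: 2 * time.Second}),
		// Keep the default idempotency-based retry policy explicit.
		storage.WithPolicy(storage.RetryIdempotent),
	)
	it := bkt.Objects(ctx, &storage.Query{Prefix: "logs/"})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Name)
	}

Per the Retryer doc comment above, retry options set on an object handle would take precedence over the bucket-level options shown here, which in turn override any retry configuration set on the client.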
@@ -1145,44 +2058,6 @@ func (it *ObjectIterator) Next() (*ObjectAttrs, error) { return item, nil } -func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { - req := it.bucket.c.raw.Objects.List(it.bucket.name) - setClientHeader(req.Header()) - req.Projection("full") - req.Delimiter(it.query.Delimiter) - req.Prefix(it.query.Prefix) - req.Versions(it.query.Versions) - if len(it.query.fieldSelection) > 0 { - req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection)) - } - req.PageToken(pageToken) - if it.bucket.userProject != "" { - req.UserProject(it.bucket.userProject) - } - if pageSize > 0 { - req.MaxResults(int64(pageSize)) - } - var resp *raw.Objects - var err error - err = runWithRetry(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() - return err - }) - if err != nil { - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - err = ErrBucketNotExist - } - return "", err - } - for _, item := range resp.Items { - it.items = append(it.items, newObject(item)) - } - for _, prefix := range resp.Prefixes { - it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) - } - return resp.NextPageToken, nil -} - // Buckets returns an iterator over the buckets in the project. You may // optionally set the iterator's Prefix field to restrict the list to buckets // whose names begin with the prefix. By default, all buckets in the project @@ -1190,17 +2065,8 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) // // Note: The returned iterator is not safe for concurrent operations without explicit synchronization. func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { - it := &BucketIterator{ - ctx: ctx, - client: c, - projectID: projectID, - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - it.fetch, - func() int { return len(it.buckets) }, - func() interface{} { b := it.buckets; it.buckets = nil; return b }) - - return it + o := makeStorageOpts(true, c.retry, "") + return c.tc.ListBuckets(ctx, projectID, o...) } // A BucketIterator is an iterator over BucketAttrs. @@ -1211,7 +2077,6 @@ type BucketIterator struct { Prefix string ctx context.Context - client *Client projectID string buckets []*BucketAttrs pageInfo *iterator.PageInfo @@ -1237,29 +2102,63 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) { // Note: This method is not safe for concurrent operations without explicit synchronization. func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } -func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) { - req := it.client.raw.Buckets.List(it.projectID) - setClientHeader(req.Header()) - req.Projection("full") - req.Prefix(it.Prefix) - req.PageToken(pageToken) - if pageSize > 0 { - req.MaxResults(int64(pageSize)) - } - var resp *raw.Buckets - err = runWithRetry(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() - return err - }) - if err != nil { - return "", err +// RPO (Recovery Point Objective) configures the turbo replication feature. See +// https://cloud.google.com/storage/docs/managing-turbo-replication for more information. +type RPO int + +const ( + // RPOUnknown is a zero value. It may be returned from bucket.Attrs() if RPO + // is not present in the bucket metadata, that is, the bucket is not dual-region. + // This value is also used if the RPO field is not set in a call to GCS. + RPOUnknown RPO = iota + + // RPODefault represents default replication. 
It is used to reset RPO on an + // existing bucket that has this field set to RPOAsyncTurbo. Otherwise it + // is equivalent to RPOUnknown, and is always ignored. This value is valid + // for dual- or multi-region buckets. + RPODefault + + // RPOAsyncTurbo represents turbo replication and is used to enable Turbo + // Replication on a bucket. This value is only valid for dual-region buckets. + RPOAsyncTurbo + + rpoUnknown string = "" + rpoDefault = "DEFAULT" + rpoAsyncTurbo = "ASYNC_TURBO" +) + +func (rpo RPO) String() string { + switch rpo { + case RPODefault: + return rpoDefault + case RPOAsyncTurbo: + return rpoAsyncTurbo + default: + return rpoUnknown } - for _, item := range resp.Items { - b, err := newBucket(item) - if err != nil { - return "", err - } - it.buckets = append(it.buckets, b) +} + +// protoDateToUTCTime returns a new Time based on the google.type.Date, in UTC. +// +// Hours, minutes, seconds, and nanoseconds are set to 0. +func protoDateToUTCTime(d *dpb.Date) time.Time { + return protoDateToTime(d, time.UTC) +} + +// protoDateToTime returns a new Time based on the google.type.Date and provided +// *time.Location. +// +// Hours, minutes, seconds, and nanoseconds are set to 0. +func protoDateToTime(d *dpb.Date, l *time.Location) time.Time { + return time.Date(int(d.GetYear()), time.Month(d.GetMonth()), int(d.GetDay()), 0, 0, 0, 0, l) +} + +// timeToProtoDate returns a new google.type.Date based on the provided time.Time. +// The location is ignored, as is anything more precise than the day. +func timeToProtoDate(t time.Time) *dpb.Date { + return &dpb.Date{ + Year: int32(t.Year()), + Month: int32(t.Month()), + Day: int32(t.Day()), } - return resp.NextPageToken, nil } diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go new file mode 100644 index 000000000000..d579a2b1ee78 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/client.go @@ -0,0 +1,333 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "io" + "time" + + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/option" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +// TODO(noahdietz): Move existing factory methods to this file. + +// storageClient is an internal-only interface designed to separate the +// transport-specific logic of making Storage API calls from the logic of the +// client library. +// +// Implementation requirements beyond implementing the interface include: +// * factory method(s) must accept a `userProject string` param +// * `settings` must be retained per instance +// * `storageOption`s must be resolved in the order they are received +// * all API errors must be wrapped in the gax-go APIError type +// * any unimplemented interface methods must return a StorageUnimplementedErr +// +// TODO(noahdietz): This interface is currently not used in the production code +// paths +type storageClient interface { + + // Top-level methods. 
+ + GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) + CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) + ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator + Close() error + + // Bucket methods. + + DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error + GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) + UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) + LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error + ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator + + // Object metadata methods. + + DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error + GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) + UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) + + // Default Object ACL methods. + + DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error + ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) + UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error + + // Bucket ACL methods. + + DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error + ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) + UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error + + // Object ACL methods. + + DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error + ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) + UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error + + // Media operations. + + ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) + RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) + + NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (*Reader, error) + OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) + + // IAM methods. + + GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) + SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error + TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) + + // HMAC Key methods. 
+ + GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) + ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator + UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) + CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) + DeleteHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) error + + // Notification methods. + ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error) + CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error) + DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error +} + +// settings contains transport-agnostic configuration for API calls made via +// the storageClient inteface. All implementations must utilize settings +// and respect those that are applicable. +type settings struct { + // retry is the complete retry configuration to use when evaluating if an + // API call should be retried. + retry *retryConfig + + // gax is a set of gax.CallOption to be conveyed to gax.Invoke. + // Note: Not all storageClient interfaces will must use gax.Invoke. + gax []gax.CallOption + + // idempotent indicates if the call is idempotent or not when considering + // if the call should be retired or not. + idempotent bool + + // clientOption is a set of option.ClientOption to be used during client + // transport initialization. See https://pkg.go.dev/google.golang.org/api/option + // for a list of supported options. + clientOption []option.ClientOption + + // userProject is the user project that should be billed for the request. + userProject string +} + +func initSettings(opts ...storageOption) *settings { + s := &settings{} + resolveOptions(s, opts...) + return s +} + +func resolveOptions(s *settings, opts ...storageOption) { + for _, o := range opts { + o.Apply(s) + } +} + +// callSettings is a helper for resolving storage options against the settings +// in the context of an individual call. This is to ensure that client-level +// default settings are not mutated by two different calls getting options. +// +// Example: s := callSettings(c.settings, opts...) +func callSettings(defaults *settings, opts ...storageOption) *settings { + if defaults == nil { + return nil + } + // This does not make a deep copy of the pointer/slice fields, but all + // options replace the settings fields rather than modify their values in + // place. + cs := *defaults + resolveOptions(&cs, opts...) + return &cs +} + +// makeStorageOpts is a helper for generating a set of storageOption based on +// idempotency, retryConfig, and userProject. All top-level client operations +// will generally have to pass these options through the interface. +func makeStorageOpts(isIdempotent bool, retry *retryConfig, userProject string) []storageOption { + opts := []storageOption{idempotent(isIdempotent)} + if retry != nil { + opts = append(opts, withRetryConfig(retry)) + } + if userProject != "" { + opts = append(opts, withUserProject(userProject)) + } + return opts +} + +// storageOption is the transport-agnostic call option for the storageClient +// interface. 
+type storageOption interface { + Apply(s *settings) +} + +func withGAXOptions(opts ...gax.CallOption) storageOption { + return &gaxOption{opts} +} + +type gaxOption struct { + opts []gax.CallOption +} + +func (o *gaxOption) Apply(s *settings) { s.gax = o.opts } + +func withRetryConfig(rc *retryConfig) storageOption { + return &retryOption{rc} +} + +type retryOption struct { + rc *retryConfig +} + +func (o *retryOption) Apply(s *settings) { s.retry = o.rc } + +func idempotent(i bool) storageOption { + return &idempotentOption{i} +} + +type idempotentOption struct { + idempotency bool +} + +func (o *idempotentOption) Apply(s *settings) { s.idempotent = o.idempotency } + +func withClientOptions(opts ...option.ClientOption) storageOption { + return &clientOption{opts: opts} +} + +type clientOption struct { + opts []option.ClientOption +} + +func (o *clientOption) Apply(s *settings) { s.clientOption = o.opts } + +func withUserProject(project string) storageOption { + return &userProjectOption{project} +} + +type userProjectOption struct { + project string +} + +func (o *userProjectOption) Apply(s *settings) { s.userProject = o.project } + +type openWriterParams struct { + // Writer configuration + + // ctx is the context used by the writer routine to make all network calls + // and to manage the writer routine - see `Writer.ctx`. + // Required. + ctx context.Context + // chunkSize - see `Writer.ChunkSize`. + // Optional. + chunkSize int + // chunkRetryDeadline - see `Writer.ChunkRetryDeadline`. + // Optional. + chunkRetryDeadline time.Duration + + // Object/request properties + + // bucket - see `Writer.o.bucket`. + // Required. + bucket string + // attrs - see `Writer.ObjectAttrs`. + // Required. + attrs *ObjectAttrs + // conds - see `Writer.o.conds`. + // Optional. + conds *Conditions + // encryptionKey - see `Writer.o.encryptionKey` + // Optional. + encryptionKey []byte + // sendCRC32C - see `Writer.SendCRC32C`. + // Optional. + sendCRC32C bool + + // Writer callbacks + + // donec - see `Writer.donec`. + // Required. + donec chan struct{} + // setError callback for reporting errors - see `Writer.error`. + // Required. + setError func(error) + // progress callback for reporting upload progress - see `Writer.progress`. + // Required. + progress func(int64) + // setObj callback for reporting the resulting object - see `Writer.obj`. + // Required. + setObj func(*ObjectAttrs) +} + +type newRangeReaderParams struct { + bucket string + conds *Conditions + encryptionKey []byte + gen int64 + length int64 + object string + offset int64 + readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently. +} + +type composeObjectRequest struct { + dstBucket string + dstObject destinationObject + srcs []sourceObject + predefinedACL string + sendCRC32C bool +} + +type sourceObject struct { + name string + bucket string + gen int64 + conds *Conditions + encryptionKey []byte +} + +type destinationObject struct { + name string + bucket string + conds *Conditions + attrs *ObjectAttrs // attrs to set on the destination object. 
+ encryptionKey []byte + keyName string +} + +type rewriteObjectRequest struct { + srcObject sourceObject + dstObject destinationObject + predefinedACL string + token string + maxBytesRewrittenPerCall int64 +} + +type rewriteObjectResponse struct { + resource *ObjectAttrs + done bool + written int64 + size int64 + token string +} diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go index 61983df5adae..a0b9a2683c7e 100644 --- a/vendor/cloud.google.com/go/storage/copy.go +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -20,7 +20,6 @@ import ( "fmt" "cloud.google.com/go/internal/trace" - raw "google.golang.org/api/storage/v1" ) // CopierFrom creates a Copier that can copy src to dst. @@ -70,6 +69,15 @@ type Copier struct { DestinationKMSKeyName string dst, src *ObjectHandle + + // The maximum number of bytes that will be rewritten per rewrite request. + // Most callers shouldn't need to specify this parameter - it is primarily + // in place to support testing. If specified the value must be an integral + // multiple of 1 MiB (1048576). Also, this only applies to requests where + // the source and destination span locations and/or storage classes. Finally, + // this value must not change across rewrite calls else you'll get an error + // that the `rewriteToken` is invalid. + maxBytesRewrittenPerCall int64 } // Run performs the copy. @@ -86,66 +94,59 @@ func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil { return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key") } + if c.dst.gen != defaultGen { + return nil, fmt.Errorf("storage: generation cannot be specified on copy destination, got %v", c.dst.gen) + } // Convert destination attributes to raw form, omitting the bucket. // If the bucket is included but name or content-type aren't, the service // returns a 400 with "Required" as the only message. Omitting the bucket // does not cause any problems. - rawObject := c.ObjectAttrs.toRawObject("") + req := &rewriteObjectRequest{ + srcObject: sourceObject{ + name: c.src.object, + bucket: c.src.bucket, + gen: c.src.gen, + conds: c.src.conds, + encryptionKey: c.src.encryptionKey, + }, + dstObject: destinationObject{ + name: c.dst.object, + bucket: c.dst.bucket, + conds: c.dst.conds, + attrs: &c.ObjectAttrs, + encryptionKey: c.dst.encryptionKey, + keyName: c.DestinationKMSKeyName, + }, + predefinedACL: c.PredefinedACL, + token: c.RewriteToken, + maxBytesRewrittenPerCall: c.maxBytesRewrittenPerCall, + } + + isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) + var userProject string + if c.dst.userProject != "" { + userProject = c.dst.userProject + } else if c.src.userProject != "" { + userProject = c.src.userProject + } + opts := makeStorageOpts(isIdempotent, c.dst.retry, userProject) + for { - res, err := c.callRewrite(ctx, rawObject) + res, err := c.dst.c.tc.RewriteObject(ctx, req, opts...) if err != nil { return nil, err } + c.RewriteToken = res.token + req.token = res.token if c.ProgressFunc != nil { - c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize)) + c.ProgressFunc(uint64(res.written), uint64(res.size)) } - if res.Done { // Finished successfully. - return newObject(res.Resource), nil + if res.done { // Finished successfully. 
+ return res.resource, nil } } } -func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) { - call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj) - - call.Context(ctx).Projection("full") - if c.RewriteToken != "" { - call.RewriteToken(c.RewriteToken) - } - if c.DestinationKMSKeyName != "" { - call.DestinationKmsKeyName(c.DestinationKMSKeyName) - } - if c.PredefinedACL != "" { - call.DestinationPredefinedAcl(c.PredefinedACL) - } - if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { - return nil, err - } - if c.dst.userProject != "" { - call.UserProject(c.dst.userProject) - } else if c.src.userProject != "" { - call.UserProject(c.src.userProject) - } - if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil { - return nil, err - } - if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { - return nil, err - } - if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil { - return nil, err - } - var res *raw.RewriteResponse - var err error - setClientHeader(call.Header()) - err = runWithRetry(ctx, func() error { res, err = call.Do(); return err }) - if err != nil { - return nil, err - } - c.RewriteToken = res.RewriteToken - return res, nil -} - // ComposerFrom creates a Composer that can compose srcs into dst. // You can immediately call Run on the returned Composer, or you can // configure it first. @@ -185,17 +186,13 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { if err := c.dst.validate(); err != nil { return nil, err } + if c.dst.gen != defaultGen { + return nil, fmt.Errorf("storage: generation cannot be specified on compose destination, got %v", c.dst.gen) + } if len(c.srcs) == 0 { return nil, errors.New("storage: at least one source object must be specified") } - req := &raw.ComposeRequest{} - // Compose requires a non-empty Destination, so we always set it, - // even if the caller-provided ObjectAttrs is the zero value. 
- req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) - if c.SendCRC32C { - req.Destination.Crc32c = encodeUint32(c.ObjectAttrs.CRC32C) - } for _, src := range c.srcs { if err := src.validate(); err != nil { return nil, err @@ -206,33 +203,31 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { if src.encryptionKey != nil { return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) } - srcObj := &raw.ComposeRequestSourceObjects{ - Name: src.object, - } - if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { - return nil, err - } - req.SourceObjects = append(req.SourceObjects, srcObj) } - call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx) - if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil { - return nil, err - } - if c.dst.userProject != "" { - call.UserProject(c.dst.userProject) + req := &composeObjectRequest{ + dstBucket: c.dst.bucket, + predefinedACL: c.PredefinedACL, + sendCRC32C: c.SendCRC32C, } - if c.PredefinedACL != "" { - call.DestinationPredefinedAcl(c.PredefinedACL) - } - if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { - return nil, err + req.dstObject = destinationObject{ + name: c.dst.object, + bucket: c.dst.bucket, + conds: c.dst.conds, + attrs: &c.ObjectAttrs, + encryptionKey: c.dst.encryptionKey, } - var obj *raw.Object - setClientHeader(call.Header()) - err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) - if err != nil { - return nil, err + for _, src := range c.srcs { + s := sourceObject{ + name: src.object, + bucket: src.bucket, + gen: src.gen, + conds: src.conds, + } + req.srcs = append(req.srcs, s) } - return newObject(obj), nil + + isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) + opts := makeStorageOpts(isIdempotent, c.dst.retry, c.dst.userProject) + return c.dst.c.tc.ComposeObject(ctx, req, opts...) } diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 614ea11a5904..8bf3098431e7 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -19,156 +19,186 @@ Google Cloud Storage stores data in named objects, which are grouped into bucket More information about Google Cloud Storage is available at https://cloud.google.com/storage/docs. -See https://godoc.org/cloud.google.com/go for authentication, timeouts, +See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts, connection pooling and similar aspects of this package. -All of the methods of this package use exponential backoff to retry calls that fail -with certain errors, as described in -https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues -indefinitely unless the controlling context is canceled or the client is closed. See -context.WithTimeout and context.WithCancel. +# Creating a Client +To start working with this package, create a [Client]: -Creating a Client - -To start working with this package, create a client: - - ctx := context.Background() - client, err := storage.NewClient(ctx) - if err != nil { - // TODO: Handle error. - } + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } -The client will use your default application credentials. +The client will use your default application credentials. 
Clients should be +reused instead of created as needed. The methods of [Client] are safe for +concurrent use by multiple goroutines. If you only wish to access public data, you can create an unauthenticated client with - client, err := storage.NewClient(ctx, option.WithoutAuthentication()) + client, err := storage.NewClient(ctx, option.WithoutAuthentication()) + +To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST +environment variable to the address at which your emulator is running. This will +send requests to that address instead of to Cloud Storage. You can then create +and use a client as usual: + + // Set STORAGE_EMULATOR_HOST environment variable. + err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000") + if err != nil { + // TODO: Handle error. + } + + // Create client as usual. + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + // This request is now directed to http://localhost:9000/storage/v1/b + // instead of https://storage.googleapis.com/storage/v1/b + if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil { + // TODO: Handle error. + } + +Please note that there is no official emulator for Cloud Storage. -Buckets +# Buckets A Google Cloud Storage bucket is a collection of objects. To work with a bucket, make a bucket handle: - bkt := client.Bucket(bucketName) + bkt := client.Bucket(bucketName) A handle is a reference to a bucket. You can have a handle even if the bucket doesn't exist yet. To create a bucket in Google Cloud Storage, -call Create on the handle: +call [BucketHandle.Create]: - if err := bkt.Create(ctx, projectID, nil); err != nil { - // TODO: Handle error. - } + if err := bkt.Create(ctx, projectID, nil); err != nil { + // TODO: Handle error. + } Note that although buckets are associated with projects, bucket names are global across all projects. Each bucket has associated metadata, represented in this package by -BucketAttrs. The third argument to BucketHandle.Create allows you to set -the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use -Attrs: +[BucketAttrs]. The third argument to [BucketHandle.Create] allows you to set +the initial [BucketAttrs] of a bucket. To retrieve a bucket's attributes, use +[BucketHandle.Attrs]: - attrs, err := bkt.Attrs(ctx) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", - attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) + attrs, err := bkt.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", + attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) -Objects +# Objects An object holds arbitrary data as a sequence of bytes, like a file. You refer to objects using a handle, just as with buckets, but unlike buckets you don't explicitly create an object. Instead, the first time you write -to an object it will be created. You can use the standard Go io.Reader -and io.Writer interfaces to read and write object data: - - obj := bkt.Object("data") - // Write something to obj. - // w implements io.Writer. - w := obj.NewWriter(ctx) - // Write some text to obj. This will either create the object or overwrite whatever is there already. - if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { - // TODO: Handle error. - } - // Close, just like writing a file. - if err := w.Close(); err != nil { - // TODO: Handle error. 
- } - - // Read it back. - r, err := obj.NewReader(ctx) - if err != nil { - // TODO: Handle error. - } - defer r.Close() - if _, err := io.Copy(os.Stdout, r); err != nil { - // TODO: Handle error. - } - // Prints "This object contains text." - -Objects also have attributes, which you can fetch with Attrs: - - objAttrs, err := obj.Attrs(ctx) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("object %s has size %d and can be read using %s\n", - objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) - -Listing objects - -Listing objects in a bucket is done with the Bucket.Objects method: - - query := &storage.Query{Prefix: ""} - - var names []string - it := bkt.Objects(ctx, query) - for { - attrs, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - log.Fatal(err) - } - names = append(names, attrs.Name) - } +to an object it will be created. You can use the standard Go [io.Reader] +and [io.Writer] interfaces to read and write object data: + + obj := bkt.Object("data") + // Write something to obj. + // w implements io.Writer. + w := obj.NewWriter(ctx) + // Write some text to obj. This will either create the object or overwrite whatever is there already. + if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { + // TODO: Handle error. + } + // Close, just like writing a file. + if err := w.Close(); err != nil { + // TODO: Handle error. + } + + // Read it back. + r, err := obj.NewReader(ctx) + if err != nil { + // TODO: Handle error. + } + defer r.Close() + if _, err := io.Copy(os.Stdout, r); err != nil { + // TODO: Handle error. + } + // Prints "This object contains text." + +Objects also have attributes, which you can fetch with [ObjectHandle.Attrs]: + + objAttrs, err := obj.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("object %s has size %d and can be read using %s\n", + objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) + +# Listing objects + +Listing objects in a bucket is done with the [BucketHandle.Objects] method: + + query := &storage.Query{Prefix: ""} + + var names []string + it := bkt.Objects(ctx, query) + for { + attrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + log.Fatal(err) + } + names = append(names, attrs.Name) + } + +Objects are listed lexicographically by name. To filter objects +lexicographically, [Query.StartOffset] and/or [Query.EndOffset] can be used: + + query := &storage.Query{ + Prefix: "", + StartOffset: "bar/", // Only list objects lexicographically >= "bar/" + EndOffset: "foo/", // Only list objects lexicographically < "foo/" + } + + // ... as before If only a subset of object attributes is needed when listing, specifying this -subset using Query.SetAttrSelection may speed up the listing process: +subset using [Query.SetAttrSelection] may speed up the listing process: - query := &storage.Query{Prefix: ""} - query.SetAttrSelection([]string{"Name"}) + query := &storage.Query{Prefix: ""} + query.SetAttrSelection([]string{"Name"}) - // ... as before + // ... as before -ACLs +# ACLs Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of ACLRules, each of which specifies the role of a user, group or project. ACLs are suitable for fine-grained control, but you may prefer using IAM to control -access at the project level (see -https://cloud.google.com/storage/docs/access-control/iam). +access at the project level (see [Cloud Storage IAM docs]. 
-To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method: +To list the ACLs of a bucket or object, obtain an [ACLHandle] and call [ACLHandle.List]: - acls, err := obj.ACL().List(ctx) - if err != nil { - // TODO: Handle error. - } - for _, rule := range acls { - fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) - } + acls, err := obj.ACL().List(ctx) + if err != nil { + // TODO: Handle error. + } + for _, rule := range acls { + fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) + } You can also set and delete ACLs. -Conditions +# Conditions Every object has a generation and a metageneration. The generation changes whenever the content changes, and the metageneration changes whenever the -metadata changes. Conditions let you check these values before an operation; +metadata changes. [Conditions] let you check these values before an operation; the operation only executes if the conditions match. You can use conditions to prevent race conditions in read-modify-write operations. @@ -176,43 +206,123 @@ For example, say you've read an object's metadata into objAttrs. Now you want to write to that object, but only if its contents haven't changed since you read it. Here is how to express that: - w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) - // Proceed with writing as above. + w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) + // Proceed with writing as above. -Signed URLs +# Signed URLs You can obtain a URL that lets anyone read or write an object for a limited time. -You don't need to create a client to do this. See the documentation of -SignedURL for details. +Signing a URL requires credentials authorized to sign a URL. To use the same +authentication that was used when instantiating the Storage client, use +[BucketHandle.SignedURL]. - url, err := storage.SignedURL(bucketName, "shared-object", opts) - if err != nil { - // TODO: Handle error. - } - fmt.Println(url) + url, err := client.Bucket(bucketName).SignedURL(objectName, opts) + if err != nil { + // TODO: Handle error. + } + fmt.Println(url) -Post Policy V4 Signed Request +You can also sign a URL without creating a client. See the documentation of +[SignedURL] for details. + + url, err := storage.SignedURL(bucketName, "shared-object", opts) + if err != nil { + // TODO: Handle error. + } + fmt.Println(url) + +# Post Policy V4 Signed Request A type of signed request that allows uploads through HTML forms directly to Cloud Storage with temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised by a user. -For more information, please see https://cloud.google.com/storage/docs/xml-api/post-object as well -as the documentation of GenerateSignedPostPolicyV4. - - pv4, err := storage.GenerateSignedPostPolicyV4(bucketName, objectName, opts) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields) - -Errors - -Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error). -These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example: +For more information, please see the [XML POST Object docs] as well +as the documentation of [BucketHandle.GenerateSignedPostPolicyV4]. 
- if e, ok := err.(*googleapi.Error); ok { + pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields) + +# Credential requirements for signing + +If the GoogleAccessID and PrivateKey option fields are not provided, they will +be automatically detected by [BucketHandle.SignedURL] and +[BucketHandle.GenerateSignedPostPolicyV4] if any of the following are true: + - you are authenticated to the Storage Client with a service account's + downloaded private key, either directly in code or by setting the + GOOGLE_APPLICATION_CREDENTIALS environment variable (see [Other Environments]), + - your application is running on Google Compute Engine (GCE), or + - you are logged into [gcloud using application default credentials] + with [impersonation enabled]. + +Detecting GoogleAccessID may not be possible if you are authenticated using a +token source or using [option.WithHTTPClient]. In this case, you can provide a +service account email for GoogleAccessID and the client will attempt to sign +the URL or Post Policy using that service account. + +To generate the signature, you must have: + - iam.serviceAccounts.signBlob permissions on the GoogleAccessID service + account, and + - the [IAM Service Account Credentials API] enabled (unless authenticating + with a downloaded private key). + +# Errors + +Errors returned by this client are often of the type [googleapi.Error]. +These errors can be introspected for more information by using [errors.As] +with the richer [googleapi.Error] type. For example: + + var e *googleapi.Error + if ok := errors.As(err, &e); ok { if e.Code == 409 { ... } } + +# Retrying failed requests + +Methods in this package may retry calls that fail with transient errors. +Retrying continues indefinitely unless the controlling context is canceled, the +client is closed, or a non-transient error is received. To stop retries from +continuing, use context timeouts or cancellation. + +The retry strategy in this library follows best practices for Cloud Storage. By +default, operations are retried only if they are idempotent, and exponential +backoff with jitter is employed. In addition, errors are only retried if they +are defined as transient by the service. See the [Cloud Storage retry docs] +for more information. + +Users can configure non-default retry behavior for a single library call (using +[BucketHandle.Retryer] and [ObjectHandle.Retryer]) or for all calls made by a +client (using [Client.SetRetry]). For example: + + o := client.Bucket(bucket).Object(object).Retryer( + // Use WithBackoff to change the timing of the exponential backoff. + storage.WithBackoff(gax.Backoff{ + Initial: 2 * time.Second, + }), + // Use WithPolicy to configure the idempotency policy. RetryAlways will + // retry the operation even if it is non-idempotent. + storage.WithPolicy(storage.RetryAlways), + ) + + // Use a context timeout to set an overall deadline on the call, including all + // potential retries. + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + // Delete an object using the specified strategy and timeout. + if err := o.Delete(ctx); err != nil { + // Handle err. 
+ } + +[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam +[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object +[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy +[Other Environments]: https://cloud.google.com/storage/docs/authentication#libauth +[gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login +[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account +[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview */ package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/emulator_test.sh b/vendor/cloud.google.com/go/storage/emulator_test.sh new file mode 100644 index 000000000000..7bad7cf391cc --- /dev/null +++ b/vendor/cloud.google.com/go/storage/emulator_test.sh @@ -0,0 +1,92 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.. + +# Fail on any error +set -eo pipefail + +# Display commands being run +set -x + +# Only run on Go 1.17+ +min_minor_ver=17 + +v=`go version | { read _ _ v _; echo ${v#go}; }` +comps=(${v//./ }) +minor_ver=${comps[1]} + +if [ "$minor_ver" -lt "$min_minor_ver" ]; then + echo minor version $minor_ver, skipping + exit 0 +fi + +export STORAGE_EMULATOR_HOST="http://localhost:9000" +export STORAGE_EMULATOR_HOST_GRPC="localhost:8888" + +DEFAULT_IMAGE_NAME='gcr.io/cloud-devrel-public-resources/storage-testbench' +DEFAULT_IMAGE_TAG='latest' +DOCKER_IMAGE=${DEFAULT_IMAGE_NAME}:${DEFAULT_IMAGE_TAG} +CONTAINER_NAME=storage_testbench + +# Note: --net=host makes the container bind directly to the Docker host’s network, +# with no network isolation. If we were to use port-mapping instead, reset connection errors +# would be captured differently and cause unexpected test behaviour. +# The host networking driver works only on Linux hosts. +# See more about using host networking: https://docs.docker.com/network/host/ +DOCKER_NETWORK="--net=host" +# Note: We do not expect the RetryConformanceTest suite to pass on darwin due to +# differences in the network errors emitted by the system. 
+if [ `go env GOOS` == 'darwin' ]; then + DOCKER_NETWORK="-p 9000:9000 -p 8888:8888" +fi + +# Get the docker image for the testbench +docker pull $DOCKER_IMAGE + +# Start the testbench + +docker run --name $CONTAINER_NAME --rm -d $DOCKER_NETWORK $DOCKER_IMAGE +echo "Running the Cloud Storage testbench: $STORAGE_EMULATOR_HOST" +sleep 1 + +# Stop the testbench & cleanup environment variables +function cleanup() { + echo "Cleanup testbench" + docker stop $CONTAINER_NAME + unset STORAGE_EMULATOR_HOST; + unset STORAGE_EMULATOR_HOST_GRPC; +} +trap cleanup EXIT + +# Check that the server is running - retry several times to allow for start-up time +response=$(curl -w "%{http_code}\n" $STORAGE_EMULATOR_HOST --retry-connrefused --retry 5 -o /dev/null) + +if [[ $response != 200 ]] +then + echo "Testbench server did not start correctly" + exit 1 +fi + +# Start the gRPC server on port 8888. +echo "Starting the gRPC server on port 8888" +response=$(curl -w "%{http_code}\n" --retry 5 --retry-max-time 40 -o /dev/null "$STORAGE_EMULATOR_HOST/start_grpc?port=8888") + +if [[ $response != 200 ]] +then + echo "Testbench gRPC server did not start correctly" + exit 1 +fi + +# Run tests +go test -v -timeout 10m ./ -run="^Test(RetryConformance|.*Emulated)$" -short 2>&1 | tee -a sponge_log.log diff --git a/vendor/cloud.google.com/go/storage/go110.go b/vendor/cloud.google.com/go/storage/go110.go deleted file mode 100644 index c1273d59ade6..000000000000 --- a/vendor/cloud.google.com/go/storage/go110.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.10 - -package storage - -import ( - "net/url" - "strings" - - "google.golang.org/api/googleapi" -) - -func shouldRetry(err error) bool { - switch e := err.(type) { - case *googleapi.Error: - // Retry on 429 and 5xx, according to - // https://cloud.google.com/storage/docs/exponential-backoff. - return e.Code == 429 || (e.Code >= 500 && e.Code < 600) - case *url.Error: - // Retry socket-level errors ECONNREFUSED and ENETUNREACH (from syscall). - // Unfortunately the error type is unexported, so we resort to string - // matching. - retriable := []string{"connection refused", "connection reset"} - for _, s := range retriable { - if strings.Contains(e.Error(), s) { - return true - } - } - return false - case interface{ Temporary() bool }: - return e.Temporary() - default: - return false - } -} diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go new file mode 100644 index 000000000000..4a44cee8b67c --- /dev/null +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -0,0 +1,1751 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "encoding/base64" + "fmt" + "io" + "net/url" + "os" + + "cloud.google.com/go/internal/trace" + gapic "cloud.google.com/go/storage/internal/apiv2" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + iampb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // defaultConnPoolSize is the default number of connections + // to initialize in the GAPIC gRPC connection pool. A larger + // connection pool may be necessary for jobs that require + // high throughput and/or leverage many concurrent streams. + // + // This is only used for the gRPC client. + defaultConnPoolSize = 4 + + // maxPerMessageWriteSize is the maximum amount of content that can be sent + // per WriteObjectRequest message. A buffer reaching this amount will + // precipitate a flush of the buffer. It is only used by the gRPC Writer + // implementation. + maxPerMessageWriteSize int = int(storagepb.ServiceConstants_MAX_WRITE_CHUNK_BYTES) + + // globalProjectAlias is the project ID alias used for global buckets. + // + // This is only used for the gRPC API. + globalProjectAlias = "_" + + // msgEntityNotSupported indicates ACL entites using project ID are not currently supported. + // + // This is only used for the gRPC API. + msgEntityNotSupported = "The gRPC API currently does not support ACL entities using project ID, use project numbers instead" +) + +// defaultGRPCOptions returns a set of the default client options +// for gRPC client initialization. +func defaultGRPCOptions() []option.ClientOption { + defaults := []option.ClientOption{ + option.WithGRPCConnectionPool(defaultConnPoolSize), + } + + // Set emulator options for gRPC if an emulator was specified. Note that in a + // hybrid client, STORAGE_EMULATOR_HOST will set the host to use for HTTP and + // STORAGE_EMULATOR_HOST_GRPC will set the host to use for gRPC (when using a + // local emulator, HTTP and gRPC must use different ports, so this is + // necessary). + // + // TODO: When the newHybridClient is not longer used, remove + // STORAGE_EMULATOR_HOST_GRPC and use STORAGE_EMULATOR_HOST for both the + // HTTP and gRPC based clients. + if host := os.Getenv("STORAGE_EMULATOR_HOST_GRPC"); host != "" { + // Strip the scheme from the emulator host. WithEndpoint does not take a + // scheme for gRPC. + host = stripScheme(host) + + defaults = append(defaults, + option.WithEndpoint(host), + option.WithGRPCDialOption(grpc.WithInsecure()), + option.WithoutAuthentication(), + ) + } else { + // Only enable DirectPath when the emulator is not being targeted. 
+ defaults = append(defaults, internaloption.EnableDirectPath(true)) + } + + return defaults +} + +// grpcStorageClient is the gRPC API implementation of the transport-agnostic +// storageClient interface. +type grpcStorageClient struct { + raw *gapic.Client + settings *settings +} + +// newGRPCStorageClient initializes a new storageClient that uses the gRPC +// Storage API. +func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { + s := initSettings(opts...) + s.clientOption = append(defaultGRPCOptions(), s.clientOption...) + + g, err := gapic.NewClient(ctx, s.clientOption...) + if err != nil { + return nil, err + } + + return &grpcStorageClient{ + raw: g, + settings: s, + }, nil +} + +func (c *grpcStorageClient) Close() error { + return c.raw.Close() +} + +// Top-level methods. + +func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetServiceAccountRequest{ + Project: toProjectResource(project), + } + var resp *storagepb.ServiceAccount + err := run(ctx, func() error { + var err error + resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return "", err + } + return resp.EmailAddress, err +} + +func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) + b := attrs.toProtoBucket() + b.Name = bucket + // If there is lifecycle information but no location, explicitly set + // the location. This is a GCS quirk/bug. + if b.GetLocation() == "" && b.GetLifecycle() != nil { + b.Location = "US" + } + + req := &storagepb.CreateBucketRequest{ + Parent: toProjectResource(project), + Bucket: b, + BucketId: b.GetName(), + } + if attrs != nil { + req.PredefinedAcl = attrs.PredefinedACL + req.PredefinedDefaultObjectAcl = attrs.PredefinedDefaultObjectACL + } + + var battrs *BucketAttrs + err := run(ctx, func() error { + res, err := c.raw.CreateBucket(ctx, req, s.gax...) + + battrs = newBucketFromProto(res) + + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + + return battrs, err +} + +func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator { + s := callSettings(c.settings, opts...) + it := &BucketIterator{ + ctx: ctx, + projectID: project, + } + + var gitr *gapic.BucketIterator + fetch := func(pageSize int, pageToken string) (token string, err error) { + // Initialize GAPIC-based iterator when pageToken is empty, which + // indicates that this fetch call is attempting to get the first page. + // + // Note: Initializing the GAPIC-based iterator lazily is necessary to + // capture the BucketIterator.Prefix set by the user *after* the + // BucketIterator is returned to them from the veneer. + if pageToken == "" { + req := &storagepb.ListBucketsRequest{ + Parent: toProjectResource(it.projectID), + Prefix: it.Prefix, + } + gitr = c.raw.ListBuckets(it.ctx, req, s.gax...) 
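+ // Caller-side sketch (illustrative; project ID and prefix are placeholders):
+ // the lazy initialization above is what lets a Prefix set *after* the
+ // iterator is returned still take effect, e.g.
+ //
+ //	it := client.Buckets(ctx, "my-project")
+ //	it.Prefix = "staging-"
+ //	for {
+ //		battrs, err := it.Next()
+ //		if err == iterator.Done {
+ //			break
+ //		}
+ //		if err != nil {
+ //			// TODO: Handle error.
+ //		}
+ //		_ = battrs
+ //	}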
+ } + + var buckets []*storagepb.Bucket + var next string + err = run(it.ctx, func() error { + buckets, next, err = gitr.InternalFetch(pageSize, pageToken) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return "", err + } + + for _, bkt := range buckets { + b := newBucketFromProto(bkt) + it.buckets = append(it.buckets, b) + } + + return next, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + + return it +} + +// Bucket methods. + +func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteBucketRequest{ + Name: bucketResourceName(globalProjectAlias, bucket), + } + if err := applyBucketCondsProto("grpcStorageClient.DeleteBucket", conds, req); err != nil { + return err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + return run(ctx, func() error { + return c.raw.DeleteBucket(ctx, req, s.gax...) + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) +} + +func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetBucketRequest{ + Name: bucketResourceName(globalProjectAlias, bucket), + ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, + } + if err := applyBucketCondsProto("grpcStorageClient.GetBucket", conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + var battrs *BucketAttrs + err := run(ctx, func() error { + res, err := c.raw.GetBucket(ctx, req, s.gax...) + + battrs = newBucketFromProto(res) + + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return nil, ErrBucketNotExist + } + + return battrs, err +} +func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) 
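+ // Caller-side sketch (illustrative; the bucket name is a placeholder): the
+ // FieldMask assembled below mirrors whichever fields the user set on
+ // BucketAttrsToUpdate, so an update such as
+ //
+ //	uattrs := storage.BucketAttrsToUpdate{VersioningEnabled: true}
+ //	battrs, err := client.Bucket("my-bucket").Update(ctx, uattrs)
+ //
+ // produces a mask containing only "versioning".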
+ b := uattrs.toProtoBucket() + b.Name = bucketResourceName(globalProjectAlias, bucket) + req := &storagepb.UpdateBucketRequest{ + Bucket: b, + PredefinedAcl: uattrs.PredefinedACL, + PredefinedDefaultObjectAcl: uattrs.PredefinedDefaultObjectACL, + } + if err := applyBucketCondsProto("grpcStorageClient.UpdateBucket", conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + var paths []string + fieldMask := &fieldmaskpb.FieldMask{ + Paths: paths, + } + if uattrs.CORS != nil { + fieldMask.Paths = append(fieldMask.Paths, "cors") + } + if uattrs.DefaultEventBasedHold != nil { + fieldMask.Paths = append(fieldMask.Paths, "default_event_based_hold") + } + if uattrs.RetentionPolicy != nil { + fieldMask.Paths = append(fieldMask.Paths, "retention_policy") + } + if uattrs.VersioningEnabled != nil { + fieldMask.Paths = append(fieldMask.Paths, "versioning") + } + if uattrs.RequesterPays != nil { + fieldMask.Paths = append(fieldMask.Paths, "billing") + } + if uattrs.BucketPolicyOnly != nil || uattrs.UniformBucketLevelAccess != nil || uattrs.PublicAccessPrevention != PublicAccessPreventionUnknown { + fieldMask.Paths = append(fieldMask.Paths, "iam_config") + } + if uattrs.Encryption != nil { + fieldMask.Paths = append(fieldMask.Paths, "encryption") + } + if uattrs.Lifecycle != nil { + fieldMask.Paths = append(fieldMask.Paths, "lifecycle") + } + if uattrs.Logging != nil { + fieldMask.Paths = append(fieldMask.Paths, "logging") + } + if uattrs.Website != nil { + fieldMask.Paths = append(fieldMask.Paths, "website") + } + if uattrs.PredefinedACL != "" { + // In cases where PredefinedACL is set, Acl is cleared. + fieldMask.Paths = append(fieldMask.Paths, "acl") + } + if uattrs.PredefinedDefaultObjectACL != "" { + // In cases where PredefinedDefaultObjectACL is set, DefaultObjectAcl is cleared. + fieldMask.Paths = append(fieldMask.Paths, "default_object_acl") + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + if uattrs.acl != nil { + // In cases where acl is set by UpdateBucketACL method. + fieldMask.Paths = append(fieldMask.Paths, "acl") + } + if uattrs.defaultObjectACL != nil { + // In cases where defaultObjectACL is set by UpdateBucketACL method. + fieldMask.Paths = append(fieldMask.Paths, "default_object_acl") + } + if uattrs.StorageClass != "" { + fieldMask.Paths = append(fieldMask.Paths, "storage_class") + } + if uattrs.RPO != RPOUnknown { + fieldMask.Paths = append(fieldMask.Paths, "rpo") + } + if uattrs.Autoclass != nil { + fieldMask.Paths = append(fieldMask.Paths, "autoclass") + } + // TODO(cathyo): Handle labels. Pending b/230510191. + req.UpdateMask = fieldMask + + var battrs *BucketAttrs + err := run(ctx, func() error { + res, err := c.raw.UpdateBucket(ctx, req, s.gax...) + battrs = newBucketFromProto(res) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + + return battrs, err +} +func (c *grpcStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := &storagepb.LockBucketRetentionPolicyRequest{ + Bucket: bucketResourceName(globalProjectAlias, bucket), + } + if err := applyBucketCondsProto("grpcStorageClient.LockBucketRetentionPolicy", conds, req); err != nil { + return err + } + + return run(ctx, func() error { + _, err := c.raw.LockBucketRetentionPolicy(ctx, req, s.gax...) 
+ return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + +} +func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { + s := callSettings(c.settings, opts...) + it := &ObjectIterator{ + ctx: ctx, + } + if q != nil { + it.query = *q + } + req := &storagepb.ListObjectsRequest{ + Parent: bucketResourceName(globalProjectAlias, bucket), + Prefix: it.query.Prefix, + Delimiter: it.query.Delimiter, + Versions: it.query.Versions, + LexicographicStart: it.query.StartOffset, + LexicographicEnd: it.query.EndOffset, + IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter, + ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + gitr := c.raw.ListObjects(it.ctx, req, s.gax...) + fetch := func(pageSize int, pageToken string) (token string, err error) { + var objects []*storagepb.Object + err = run(it.ctx, func() error { + objects, token, err = gitr.InternalFetch(pageSize, pageToken) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound { + err = ErrBucketNotExist + } + return "", err + } + + for _, obj := range objects { + b := newObjectFromProto(obj) + it.items = append(it.items, b) + } + + // Response is always non-nil after a successful request. + res := gitr.Response.(*storagepb.ListObjectsResponse) + for _, prefix := range res.GetPrefixes() { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + + return token, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + + return it +} + +// Object metadata methods. + +func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteObjectRequest{ + Bucket: bucketResourceName(globalProjectAlias, bucket), + Object: object, + } + if err := applyCondsProto("grpcStorageClient.DeleteObject", gen, conds, req); err != nil { + return err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + err := run(ctx, func() error { + return c.raw.DeleteObject(ctx, req, s.gax...) + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return ErrObjectNotExist + } + return err +} + +func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetObjectRequest{ + Bucket: bucketResourceName(globalProjectAlias, bucket), + Object: object, + // ProjectionFull by default. + ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, + } + if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + if encryptionKey != nil { + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey) + } + + var attrs *ObjectAttrs + err := run(ctx, func() error { + res, err := c.raw.GetObject(ctx, req, s.gax...) 
+ attrs = newObjectFromProto(res) + + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return nil, ErrObjectNotExist + } + + return attrs, err +} + +func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, bucket), object) + req := &storagepb.UpdateObjectRequest{ + Object: o, + PredefinedAcl: uattrs.PredefinedACL, + } + if err := applyCondsProto("grpcStorageClient.UpdateObject", gen, conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + if encryptionKey != nil { + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey) + } + + fieldMask := &fieldmaskpb.FieldMask{Paths: nil} + if uattrs.EventBasedHold != nil { + fieldMask.Paths = append(fieldMask.Paths, "event_based_hold") + } + if uattrs.TemporaryHold != nil { + fieldMask.Paths = append(fieldMask.Paths, "temporary_hold") + } + if uattrs.ContentType != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_type") + } + if uattrs.ContentLanguage != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_language") + } + if uattrs.ContentEncoding != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_encoding") + } + if uattrs.ContentDisposition != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_disposition") + } + if uattrs.CacheControl != nil { + fieldMask.Paths = append(fieldMask.Paths, "cache_control") + } + if !uattrs.CustomTime.IsZero() { + fieldMask.Paths = append(fieldMask.Paths, "custom_time") + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + if uattrs.ACL != nil || len(uattrs.PredefinedACL) > 0 { + fieldMask.Paths = append(fieldMask.Paths, "acl") + } + // TODO(cathyo): Handle metadata. Pending b/230510191. + + req.UpdateMask = fieldMask + + var attrs *ObjectAttrs + err := run(ctx, func() error { + res, err := c.raw.UpdateObject(ctx, req, s.gax...) + attrs = newObjectFromProto(res) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if e, ok := status.FromError(err); ok && e.Code() == codes.NotFound { + return nil, ErrObjectNotExist + } + + return attrs, err +} + +// Default Object ACL methods. + +func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve BucketAttrs. + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return err + } + // Delete the entity and copy other remaining ACL entities. + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + // Return error if entity is not found or a project ID is used. + invalidEntity := true + var acl []ACLRule + for _, a := range attrs.DefaultObjectACL { + if a.Entity != entity { + acl = append(acl, a) + } + if a.Entity == entity { + invalidEntity = false + } + } + if invalidEntity { + return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. 
%v", entity, bucket, attrs.DefaultObjectACL, msgEntityNotSupported) + } + uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl} + // Call UpdateBucket with a MetagenerationMatch precondition set. + if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err + } + return nil +} + +func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return nil, err + } + return attrs.DefaultObjectACL, nil +} + +func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve BucketAttrs. + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return err + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + var acl []ACLRule + aclRule := ACLRule{Entity: entity, Role: role} + acl = append(attrs.DefaultObjectACL, aclRule) + uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl} + // Call UpdateBucket with a MetagenerationMatch precondition set. + if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err + } + return nil +} + +// Bucket ACL methods. + +func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve BucketAttrs. + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return err + } + // Delete the entity and copy other remaining ACL entities. + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + // Return error if entity is not found or a project ID is used. + invalidEntity := true + var acl []ACLRule + for _, a := range attrs.ACL { + if a.Entity != entity { + acl = append(acl, a) + } + if a.Entity == entity { + invalidEntity = false + } + } + if invalidEntity { + return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported) + } + uattrs := &BucketAttrsToUpdate{acl: acl} + // Call UpdateBucket with a MetagenerationMatch precondition set. + if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err + } + return nil +} + +func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return nil, err + } + return attrs.ACL, nil +} + +func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve BucketAttrs. + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return err + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. 
+ var acl []ACLRule + aclRule := ACLRule{Entity: entity, Role: role} + acl = append(attrs.ACL, aclRule) + uattrs := &BucketAttrsToUpdate{acl: acl} + // Call UpdateBucket with a MetagenerationMatch precondition set. + if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err + } + return nil +} + +// Object ACL methods. + +func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve ObjectAttrs. + attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + if err != nil { + return err + } + // Delete the entity and copy other remaining ACL entities. + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + // Return error if entity is not found or a project ID is used. + invalidEntity := true + var acl []ACLRule + for _, a := range attrs.ACL { + if a.Entity != entity { + acl = append(acl, a) + } + if a.Entity == entity { + invalidEntity = false + } + } + if invalidEntity { + return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported) + } + uattrs := &ObjectAttrsToUpdate{ACL: acl} + // Call UpdateObject with the specified metageneration. + if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil { + return err + } + return nil +} + +// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. +// Selecting a specific generation of this object is not currently supported by the client. +func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { + o, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + if err != nil { + return nil, err + } + return o.ACL, nil +} + +func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve ObjectAttrs. + attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + if err != nil { + return err + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + var acl []ACLRule + aclRule := ACLRule{Entity: entity, Role: role} + acl = append(attrs.ACL, aclRule) + uattrs := &ObjectAttrsToUpdate{ACL: acl} + // Call UpdateObject with the specified metageneration. + if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil { + return err + } + return nil +} + +// Media operations. + +func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) 
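+ // Caller-side sketch (illustrative; bucket and object names are placeholders):
+ // this transport method is reached through the veneer's Composer, e.g.
+ //
+ //	dst := client.Bucket("my-bucket").Object("combined")
+ //	srcs := []*storage.ObjectHandle{
+ //		client.Bucket("my-bucket").Object("part-1"),
+ //		client.Bucket("my-bucket").Object("part-2"),
+ //	}
+ //	attrs, err := dst.ComposerFrom(srcs...).Run(ctx)
+ //	if err != nil {
+ //		// TODO: Handle error.
+ //	}
+ //	_ = attrs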
+ if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + dstObjPb := req.dstObject.attrs.toProtoObject(req.dstBucket) + dstObjPb.Name = req.dstObject.name + if err := applyCondsProto("ComposeObject destination", defaultGen, req.dstObject.conds, dstObjPb); err != nil { + return nil, err + } + if req.sendCRC32C { + dstObjPb.Checksums.Crc32C = &req.dstObject.attrs.CRC32C + } + + srcs := []*storagepb.ComposeObjectRequest_SourceObject{} + for _, src := range req.srcs { + srcObjPb := &storagepb.ComposeObjectRequest_SourceObject{Name: src.name} + if err := applyCondsProto("ComposeObject source", src.gen, src.conds, srcObjPb); err != nil { + return nil, err + } + srcs = append(srcs, srcObjPb) + } + + rawReq := &storagepb.ComposeObjectRequest{ + Destination: dstObjPb, + SourceObjects: srcs, + } + if req.predefinedACL != "" { + rawReq.DestinationPredefinedAcl = req.predefinedACL + } + if req.dstObject.encryptionKey != nil { + rawReq.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey) + } + + var obj *storagepb.Object + var err error + if err := run(ctx, func() error { + obj, err = c.raw.ComposeObject(ctx, rawReq, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { + return nil, err + } + + return newObjectFromProto(obj), nil +} +func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) { + s := callSettings(c.settings, opts...) + obj := req.dstObject.attrs.toProtoObject("") + call := &storagepb.RewriteObjectRequest{ + SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket), + SourceObject: req.srcObject.name, + RewriteToken: req.token, + DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket), + DestinationName: req.dstObject.name, + Destination: obj, + DestinationKmsKey: req.dstObject.keyName, + DestinationPredefinedAcl: req.predefinedACL, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(req.dstObject.encryptionKey), + } + + // The userProject, whether source or destination project, is decided by the code calling the interface. 
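+ // Caller-side sketch (illustrative; names are placeholders): for a
+ // requester-pays source bucket the billing project is typically supplied via
+ // the bucket handle before the copy, e.g.
+ //
+ //	src := client.Bucket("src-bucket").UserProject("my-billing-project").Object("o")
+ //	dst := client.Bucket("dst-bucket").Object("o")
+ //	attrs, err := dst.CopierFrom(src).Run(ctx)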
+ if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + if err := applyCondsProto("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { + return nil, err + } + if err := applySourceCondsProto(req.srcObject.gen, req.srcObject.conds, call); err != nil { + return nil, err + } + + if len(req.dstObject.encryptionKey) > 0 { + call.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey) + } + if len(req.srcObject.encryptionKey) > 0 { + srcParams := toProtoCommonObjectRequestParams(req.srcObject.encryptionKey) + call.CopySourceEncryptionAlgorithm = srcParams.GetEncryptionAlgorithm() + call.CopySourceEncryptionKeyBytes = srcParams.GetEncryptionKeyBytes() + call.CopySourceEncryptionKeySha256Bytes = srcParams.GetEncryptionKeySha256Bytes() + } + + call.MaxBytesRewrittenPerCall = req.maxBytesRewrittenPerCall + + var res *storagepb.RewriteResponse + var err error + + retryCall := func() error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err } + + if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { + return nil, err + } + + r := &rewriteObjectResponse{ + done: res.GetDone(), + written: res.GetTotalBytesRewritten(), + size: res.GetObjectSize(), + token: res.GetRewriteToken(), + resource: newObjectFromProto(res.GetResource()), + } + + return r, nil +} + +func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + // A negative length means "read to the end of the object", but the + // read_limit field it corresponds to uses zero to mean the same thing. Thus + // we coerce the length to 0 to read to the end of the object. + if params.length < 0 { + params.length = 0 + } + + b := bucketResourceName(globalProjectAlias, params.bucket) + req := &storagepb.ReadObjectRequest{ + Bucket: b, + Object: params.object, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey), + } + // The default is a negative value, which means latest. + if params.gen >= 0 { + req.Generation = params.gen + } + + // Define a function that initiates a Read with offset and length, assuming + // we have already read seen bytes. + reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) { + // If the context has already expired, return immediately without making + // we call. + if err := ctx.Err(); err != nil { + return nil, nil, err + } + + cc, cancel := context.WithCancel(ctx) + + start := params.offset + seen + // Only set a ReadLimit if length is greater than zero, because zero + // means read it all. + if params.length > 0 { + req.ReadLimit = params.length - seen + } + req.ReadOffset = start + + if err := applyCondsProto("gRPCReader.reopen", params.gen, params.conds, req); err != nil { + cancel() + return nil, nil, err + } + + var stream storagepb.Storage_ReadObjectClient + var msg *storagepb.ReadObjectResponse + var err error + + err = run(cc, func() error { + stream, err = c.raw.ReadObject(cc, req, s.gax...) + if err != nil { + return err + } + + msg, err = stream.Recv() + // These types of errors show up on the Recv call, rather than the + // initialization of the stream via ReadObject above. 
+ if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return ErrObjectNotExist + } + + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + // Close the stream context we just created to ensure we don't leak + // resources. + cancel() + return nil, nil, err + } + + return &readStreamResponse{stream, msg}, cancel, nil + } + + res, cancel, err := reopen(0) + if err != nil { + return nil, err + } + + // The first message was Recv'd on stream open, use it to populate the + // object metadata. + msg := res.response + obj := msg.GetMetadata() + // This is the size of the entire object, even if only a range was requested. + size := obj.GetSize() + + r = &Reader{ + Attrs: ReaderObjectAttrs{ + Size: size, + ContentType: obj.GetContentType(), + ContentEncoding: obj.GetContentEncoding(), + CacheControl: obj.GetCacheControl(), + LastModified: obj.GetUpdateTime().AsTime(), + Metageneration: obj.GetMetageneration(), + Generation: obj.GetGeneration(), + }, + reader: &gRPCReader{ + stream: res.stream, + reopen: reopen, + cancel: cancel, + size: size, + // Store the content from the first Recv in the + // client buffer for reading later. + leftovers: msg.GetChecksummedData().GetContent(), + settings: s, + }, + } + + cr := msg.GetContentRange() + if cr != nil { + r.Attrs.StartOffset = cr.GetStart() + r.remain = cr.GetEnd() - cr.GetStart() + 1 + } else { + r.remain = size + } + + // Only support checksums when reading an entire object, not a range. + if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length == 0 { + r.wantCRC = checksums.GetCrc32C() + r.checkCRC = true + } + + return r, nil +} + +func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { + s := callSettings(c.settings, opts...) + + var offset int64 + errorf := params.setError + progress := params.progress + setObj := params.setObj + + pr, pw := io.Pipe() + gw := newGRPCWriter(c, params, pr) + gw.settings = s + if s.userProject != "" { + gw.ctx = setUserProjectMetadata(gw.ctx, s.userProject) + } + + // This function reads the data sent to the pipe and sends sets of messages + // on the gRPC client-stream as the buffer is filled. + go func() { + defer close(params.donec) + + // Loop until there is an error or the Object has been finalized. + for { + // Note: This blocks until either the buffer is full or EOF is read. + recvd, doneReading, err := gw.read() + if err != nil { + err = checkCanceled(err) + errorf(err) + pr.CloseWithError(err) + return + } + + // The chunk buffer is full, but there is no end in sight. This + // means that a resumable upload will need to be used to send + // multiple chunks, until we are done reading data. Start a + // resumable upload if it has not already been started. + // Otherwise, all data will be sent over a single gRPC stream. + if !doneReading && gw.upid == "" { + err = gw.startResumableUpload() + if err != nil { + err = checkCanceled(err) + errorf(err) + pr.CloseWithError(err) + return + } + } + + o, off, finalized, err := gw.uploadBuffer(recvd, offset, doneReading) + if err != nil { + err = checkCanceled(err) + errorf(err) + pr.CloseWithError(err) + return + } + // At this point, the current buffer has been uploaded. Capture the + // committed offset here in case the upload was not finalized and + // another chunk is to be uploaded. 
+ offset = off + progress(offset) + + // When we are done reading data and the chunk has been finalized, + // we are done. + if doneReading && finalized { + // Build Object from server's response. + setObj(newObjectFromProto(o)) + return + } + } + }() + + return pw, nil +} + +// IAM methods. + +func (c *grpcStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) { + // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. + s := callSettings(c.settings, opts...) + req := &iampb.GetIamPolicyRequest{ + Resource: bucketResourceName(globalProjectAlias, resource), + Options: &iampb.GetPolicyOptions{ + RequestedPolicyVersion: version, + }, + } + var rp *iampb.Policy + err := run(ctx, func() error { + var err error + rp, err = c.raw.GetIamPolicy(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + + return rp, err +} + +func (c *grpcStorageClient) SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error { + // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. + s := callSettings(c.settings, opts...) + + req := &iampb.SetIamPolicyRequest{ + Resource: bucketResourceName(globalProjectAlias, resource), + Policy: policy, + } + + return run(ctx, func() error { + _, err := c.raw.SetIamPolicy(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) +} + +func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { + // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. + s := callSettings(c.settings, opts...) + req := &iampb.TestIamPermissionsRequest{ + Resource: bucketResourceName(globalProjectAlias, resource), + Permissions: permissions, + } + var res *iampb.TestIamPermissionsResponse + err := run(ctx, func() error { + var err error + res, err = c.raw.TestIamPermissions(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// HMAC Key methods. + +func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetHmacKeyRequest{ + AccessId: accessID, + Project: toProjectResource(project), + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + var metadata *storagepb.HmacKeyMetadata + err := run(ctx, func() error { + var err error + metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + return toHMACKeyFromProto(metadata), nil +} + +func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { + s := callSettings(c.settings, opts...) + req := &storagepb.ListHmacKeysRequest{ + Project: toProjectResource(project), + ServiceAccountEmail: serviceAccountEmail, + ShowDeletedKeys: showDeletedKeys, + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + it := &HMACKeysIterator{ + ctx: ctx, + projectID: project, + retry: s.retry, + } + gitr := c.raw.ListHmacKeys(it.ctx, req, s.gax...) 
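+ // Caller-side sketch (illustrative; the project ID is a placeholder): the
+ // iterator assembled here is consumed with the usual Next/iterator.Done
+ // pattern, e.g.
+ //
+ //	it := client.ListHMACKeys(ctx, "my-project")
+ //	key, err := it.Next() // iterator.Done signals the end of the list.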
+ fetch := func(pageSize int, pageToken string) (token string, err error) { + var hmacKeys []*storagepb.HmacKeyMetadata + err = run(it.ctx, func() error { + hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return "", err + } + for _, hkmd := range hmacKeys { + hk := toHMACKeyFromProto(hkmd) + it.hmacKeys = append(it.hmacKeys, hk) + } + + return token, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.hmacKeys) - it.index }, + func() interface{} { + prev := it.hmacKeys + it.hmacKeys = it.hmacKeys[:0] + it.index = 0 + return prev + }) + return it +} + +func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + hk := &storagepb.HmacKeyMetadata{ + AccessId: accessID, + Project: toProjectResource(project), + ServiceAccountEmail: serviceAccountEmail, + State: string(attrs.State), + Etag: attrs.Etag, + } + var paths []string + fieldMask := &fieldmaskpb.FieldMask{ + Paths: paths, + } + if attrs.State != "" { + fieldMask.Paths = append(fieldMask.Paths, "state") + } + req := &storagepb.UpdateHmacKeyRequest{ + HmacKey: hk, + UpdateMask: fieldMask, + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + var metadata *storagepb.HmacKeyMetadata + err := run(ctx, func() error { + var err error + metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + return toHMACKeyFromProto(metadata), nil +} + +func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.CreateHmacKeyRequest{ + Project: toProjectResource(project), + ServiceAccountEmail: serviceAccountEmail, + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + var res *storagepb.CreateHmacKeyResponse + err := run(ctx, func() error { + var err error + res, err = c.raw.CreateHmacKey(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + key := toHMACKeyFromProto(res.Metadata) + key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes) + + return key, nil +} + +func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteHmacKeyRequest{ + AccessId: accessID, + Project: toProjectResource(project), + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + return run(ctx, func() error { + return c.raw.DeleteHmacKey(ctx, req, s.gax...) + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) +} + +// Notification methods. + +func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.ListNotifications") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) 
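+ // Caller-side sketch (illustrative; the bucket name is a placeholder): this
+ // is reached through the bucket handle, e.g.
+ //
+ //	notifications, err := client.Bucket("my-bucket").Notifications(ctx)
+ //	if err != nil {
+ //		// TODO: Handle error.
+ //	}
+ //	_ = notifications // map keyed by notification ID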
+ if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + req := &storagepb.ListNotificationsRequest{ + Parent: bucketResourceName(globalProjectAlias, bucket), + } + var notifications []*storagepb.Notification + err = run(ctx, func() error { + gitr := c.raw.ListNotifications(ctx, req, s.gax...) + for { + // PageSize is not set and fallbacks to the API default pageSize of 100. + items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken()) + if err != nil { + return err + } + notifications = append(notifications, items...) + // If there are no more results, nextPageToken is empty and err is nil. + if nextPageToken == "" { + return err + } + req.PageToken = nextPageToken + } + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + + return notificationsToMapFromProto(notifications), nil +} + +func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.CreateNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + req := &storagepb.CreateNotificationRequest{ + Parent: bucketResourceName(globalProjectAlias, bucket), + Notification: toProtoNotification(n), + } + var pbn *storagepb.Notification + err = run(ctx, func() error { + var err error + pbn, err = c.raw.CreateNotification(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + return toNotificationFromProto(pbn), err +} + +func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteNotificationRequest{Name: id} + return run(ctx, func() error { + return c.raw.DeleteNotification(ctx, req, s.gax...) + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) +} + +// setUserProjectMetadata appends a project ID to the outgoing Context metadata +// via the x-goog-user-project system parameter defined at +// https://cloud.google.com/apis/docs/system-parameters. This is only for +// billing purposes, and is generally optional, except for requester-pays +// buckets. +func setUserProjectMetadata(ctx context.Context, project string) context.Context { + return metadata.AppendToOutgoingContext(ctx, "x-goog-user-project", project) +} + +type readStreamResponse struct { + stream storagepb.Storage_ReadObjectClient + response *storagepb.ReadObjectResponse +} + +type gRPCReader struct { + seen, size int64 + stream storagepb.Storage_ReadObjectClient + reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) + leftovers []byte + cancel context.CancelFunc + settings *settings +} + +// Read reads bytes into the user's buffer from an open gRPC stream. +func (r *gRPCReader) Read(p []byte) (int, error) { + // No stream to read from, either never initiliazed or Close was called. + // Note: There is a potential concurrency issue if multiple routines are + // using the same reader. One encounters an error and the stream is closed + // and then reopened while the other routine attempts to read from it. 
+ if r.stream == nil { + return 0, fmt.Errorf("reader has been closed") + } + + // The entire object has been read by this reader, return EOF. + if r.size != 0 && r.size == r.seen { + return 0, io.EOF + } + + var n int + // Read leftovers and return what was available to conform to the Reader + // interface: https://pkg.go.dev/io#Reader. + if len(r.leftovers) > 0 { + n = copy(p, r.leftovers) + r.seen += int64(n) + r.leftovers = r.leftovers[n:] + return n, nil + } + + // Attempt to Recv the next message on the stream. + msg, err := r.recv() + if err != nil { + return 0, err + } + + // TODO: Determine if we need to capture incremental CRC32C for this + // chunk. The Object CRC32C checksum is captured when directed to read + // the entire Object. If directed to read a range, we may need to + // calculate the range's checksum for verification if the checksum is + // present in the response here. + // TODO: Figure out if we need to support decompressive transcoding + // https://cloud.google.com/storage/docs/transcoding. + content := msg.GetChecksummedData().GetContent() + n = copy(p[n:], content) + leftover := len(content) - n + if leftover > 0 { + // Wasn't able to copy all of the data in the message, store for + // future Read calls. + r.leftovers = content[n:] + } + r.seen += int64(n) + + return n, nil +} + +// Close cancels the read stream's context in order for it to be closed and +// collected. +func (r *gRPCReader) Close() error { + if r.cancel != nil { + r.cancel() + } + r.stream = nil + return nil +} + +// recv attempts to Recv the next message on the stream. In the event +// that a retryable error is encountered, the stream will be closed, reopened, +// and Recv again. This will attempt to Recv until one of the following is true: +// +// * Recv is successful +// * A non-retryable error is encountered +// * The Reader's context is canceled +// +// The last error received is the one that is returned, which could be from +// an attempt to reopen the stream. +func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) { + msg, err := r.stream.Recv() + var shouldRetry = ShouldRetry + if r.settings.retry != nil && r.settings.retry.shouldRetry != nil { + shouldRetry = r.settings.retry.shouldRetry + } + if err != nil && shouldRetry(err) { + // This will "close" the existing stream and immediately attempt to + // reopen the stream, but will backoff if further attempts are necessary. + // Reopening the stream Recvs the first message, so if retrying is + // successful, the next logical chunk will be returned. + msg, err = r.reopenStream() + } + + return msg, err +} + +// reopenStream "closes" the existing stream and attempts to reopen a stream and +// sets the Reader's stream and cancelStream properties in the process. +func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) { + // Close existing stream and initialize new stream with updated offset. + r.Close() + + res, cancel, err := r.reopen(r.seen) + if err != nil { + return nil, err + } + r.stream = res.stream + r.cancel = cancel + return res.response, nil +} + +func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter { + size := params.chunkSize + if params.chunkSize == 0 { + // TODO: Should we actually use the minimum of 256 KB here when the user + // indicates they want minimal memory usage? We cannot do a zero-copy, + // bufferless upload like HTTP/JSON can. 
+ // TODO: We need to determine if we can avoid starting a + // resumable upload when the user *plans* to send more than bufSize but + // with a bufferless upload. + size = maxPerMessageWriteSize + } + + return &gRPCWriter{ + buf: make([]byte, size), + c: c, + ctx: params.ctx, + reader: r, + bucket: params.bucket, + attrs: params.attrs, + conds: params.conds, + encryptionKey: params.encryptionKey, + sendCRC32C: params.sendCRC32C, + } +} + +// gRPCWriter is a wrapper around the the gRPC client-stream API that manages +// sending chunks of data provided by the user over the stream. +type gRPCWriter struct { + c *grpcStorageClient + buf []byte + reader io.Reader + + ctx context.Context + + bucket string + attrs *ObjectAttrs + conds *Conditions + encryptionKey []byte + settings *settings + + sendCRC32C bool + + // The gRPC client-stream used for sending buffers. + stream storagepb.Storage_WriteObjectClient + + // The Resumable Upload ID started by a gRPC-based Writer. + upid string +} + +// startResumableUpload initializes a Resumable Upload with gRPC and sets the +// upload ID on the Writer. +func (w *gRPCWriter) startResumableUpload() error { + spec, err := w.writeObjectSpec() + if err != nil { + return err + } + return run(w.ctx, func() error { + upres, err := w.c.raw.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{ + WriteObjectSpec: spec, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey), + }) + w.upid = upres.GetUploadId() + return err + }, w.settings.retry, w.settings.idempotent, setRetryHeaderGRPC(w.ctx)) +} + +// queryProgress is a helper that queries the status of the resumable upload +// associated with the given upload ID. +func (w *gRPCWriter) queryProgress() (int64, error) { + var persistedSize int64 + err := run(w.ctx, func() error { + q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{ + UploadId: w.upid, + }) + persistedSize = q.GetPersistedSize() + return err + }, w.settings.retry, true, setRetryHeaderGRPC(w.ctx)) + + // q.GetCommittedSize() will return 0 if q is nil. + return persistedSize, err +} + +// uploadBuffer opens a Write stream and uploads the buffer at the given offset (if +// uploading a chunk for a resumable uploadBuffer), and will mark the write as +// finished if we are done receiving data from the user. The resulting write +// offset after uploading the buffer is returned, as well as a boolean +// indicating if the Object has been finalized. If it has been finalized, the +// final Object will be returned as well. Finalizing the upload is primarily +// important for Resumable Uploads. A simple or multi-part upload will always +// be finalized once the entire buffer has been written. +func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*storagepb.Object, int64, bool, error) { + var err error + var finishWrite bool + var sent, limit int = 0, maxPerMessageWriteSize + var shouldRetry = ShouldRetry + if w.settings.retry != nil && w.settings.retry.shouldRetry != nil { + shouldRetry = w.settings.retry.shouldRetry + } + offset := start + toWrite := w.buf[:recvd] + for { + first := sent == 0 + // This indicates that this is the last message and the remaining + // data fits in one message. + belowLimit := recvd-sent <= limit + if belowLimit { + limit = recvd - sent + } + if belowLimit && doneReading { + finishWrite = true + } + + // Prepare chunk section for upload. 
+ data := toWrite[sent : sent+limit] + req := &storagepb.WriteObjectRequest{ + Data: &storagepb.WriteObjectRequest_ChecksummedData{ + ChecksummedData: &storagepb.ChecksummedData{ + Content: data, + }, + }, + WriteOffset: offset, + FinishWrite: finishWrite, + } + + // Open a new stream and set the first_message field on the request. + // The first message on the WriteObject stream must either be the + // Object or the Resumable Upload ID. + if first { + ctx := gapic.InsertMetadata(w.ctx, metadata.Pairs("x-goog-request-params", "bucket="+url.QueryEscape(w.bucket))) + w.stream, err = w.c.raw.WriteObject(ctx) + if err != nil { + return nil, 0, false, err + } + + if w.upid != "" { + req.FirstMessage = &storagepb.WriteObjectRequest_UploadId{UploadId: w.upid} + } else { + spec, err := w.writeObjectSpec() + if err != nil { + return nil, 0, false, err + } + req.FirstMessage = &storagepb.WriteObjectRequest_WriteObjectSpec{ + WriteObjectSpec: spec, + } + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(w.encryptionKey) + } + + // TODO: Currently the checksums are only sent on the first message + // of the stream, but in the future, we must also support sending it + // on the *last* message of the stream (instead of the first). + if w.sendCRC32C { + req.ObjectChecksums = &storagepb.ObjectChecksums{ + Crc32C: proto.Uint32(w.attrs.CRC32C), + } + } + if len(w.attrs.MD5) != 0 { + if cs := req.GetObjectChecksums(); cs == nil { + req.ObjectChecksums = &storagepb.ObjectChecksums{ + Md5Hash: w.attrs.MD5, + } + } else { + cs.Md5Hash = w.attrs.MD5 + } + } + } + + err = w.stream.Send(req) + if err == io.EOF { + // err was io.EOF. The client-side of a stream only gets an EOF on Send + // when the backend closes the stream and wants to return an error + // status. Closing the stream receives the status as an error. + _, err = w.stream.CloseAndRecv() + + // Retriable errors mean we should start over and attempt to + // resend the entire buffer via a new stream. + // If not retriable, falling through will return the error received + // from closing the stream. + if shouldRetry(err) { + sent = 0 + finishWrite = false + // TODO: Add test case for failure modes of querying progress. + offset, err = w.determineOffset(start) + if err == nil { + continue + } + } + } + if err != nil { + return nil, 0, false, err + } + + // Update the immediate stream's sent total and the upload offset with + // the data sent. + sent += len(data) + offset += int64(len(data)) + + // Not done sending data, do not attempt to commit it yet, loop around + // and send more data. + if recvd-sent > 0 { + continue + } + + // Done sending data. Close the stream to "commit" the data sent. + resp, finalized, err := w.commit() + // Retriable errors mean we should start over and attempt to + // resend the entire buffer via a new stream. + // If not retriable, falling through will return the error received + // from closing the stream. + if shouldRetry(err) { + sent = 0 + finishWrite = false + offset, err = w.determineOffset(start) + if err == nil { + continue + } + } + if err != nil { + return nil, 0, false, err + } + + return resp.GetResource(), offset, finalized, nil + } +} + +// determineOffset either returns the offset given to it in the case of a simple +// upload, or queries the write status in the case a resumable upload is being +// used. +func (w *gRPCWriter) determineOffset(offset int64) (int64, error) { + // For a Resumable Upload, we must start from however much data + // was committed. 
+ if w.upid != "" { + committed, err := w.queryProgress() + if err != nil { + return 0, err + } + offset = committed + } + return offset, nil +} + +// commit closes the stream to commit the data sent and potentially receive +// the finalized object if finished uploading. If the last request sent +// indicated that writing was finished, the Object will be finalized and +// returned. If not, then the Object will be nil, and the boolean returned will +// be false. +func (w *gRPCWriter) commit() (*storagepb.WriteObjectResponse, bool, error) { + finalized := true + resp, err := w.stream.CloseAndRecv() + if err == io.EOF { + // Closing a stream for a resumable upload finish_write = false results + // in an EOF which can be ignored, as we aren't done uploading yet. + finalized = false + err = nil + } + // Drop the stream reference as it has been closed. + w.stream = nil + + return resp, finalized, err +} + +// writeObjectSpec constructs a WriteObjectSpec proto using the Writer's +// ObjectAttrs and applies its Conditions. This is only used for gRPC. +func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) { + // To avoid modifying the ObjectAttrs embeded in the calling writer, deref + // the ObjectAttrs pointer to make a copy, then assign the desired name to + // the attribute. + attrs := *w.attrs + + spec := &storagepb.WriteObjectSpec{ + Resource: attrs.toProtoObject(w.bucket), + } + // WriteObject doesn't support the generation condition, so use default. + if err := applyCondsProto("WriteObject", defaultGen, w.conds, spec); err != nil { + return nil, err + } + return spec, nil +} + +// read copies the data in the reader to the given buffer and reports how much +// data was read into the buffer and if there is no more data to read (EOF). +// Furthermore, if the attrs.ContentType is unset, the first bytes of content +// will be sniffed for a matching content type. +func (w *gRPCWriter) read() (int, bool, error) { + if w.attrs.ContentType == "" { + w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader) + } + // Set n to -1 to start the Read loop. + var n, recvd int = -1, 0 + var err error + for err == nil && n != 0 { + // The routine blocks here until data is received. + n, err = w.reader.Read(w.buf[recvd:]) + recvd += n + } + var done bool + if err == io.EOF { + done = true + err = nil + } + return recvd, done, err +} + +func checkCanceled(err error) error { + if status.Code(err) == codes.Canceled { + return context.Canceled + } + + return err +} diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go index 7d8185f37b81..422a7c2335bd 100644 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -20,6 +20,7 @@ import ( "fmt" "time" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" "google.golang.org/api/iterator" raw "google.golang.org/api/storage/v1" ) @@ -89,8 +90,8 @@ type HMACKey struct { type HMACKeyHandle struct { projectID string accessID string - - raw *raw.ProjectsHmacKeysService + retry *retryConfig + tc storageClient } // HMACKeyHandle creates a handle that will be used for HMACKey operations. 
@@ -100,7 +101,8 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { return &HMACKeyHandle{ projectID: projectID, accessID: accessID, - raw: raw.NewProjectsHmacKeysService(c.raw), + retry: c.retry, + tc: c.tc, } } @@ -112,32 +114,15 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { // // This method is EXPERIMENTAL and subject to change or removal without notice. func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) { - call := hkh.raw.Get(hkh.projectID, hkh.accessID) - desc := new(hmacKeyDesc) for _, opt := range opts { opt.withHMACKeyDesc(desc) } - if desc.userProjectID != "" { - call = call.UserProject(desc.userProjectID) - } - setClientHeader(call.Header()) - - var metadata *raw.HmacKeyMetadata - var err error - err = runWithRetry(ctx, func() error { - metadata, err = call.Context(ctx).Do() - return err - }) - if err != nil { - return nil, err - } + o := makeStorageOpts(true, hkh.retry, desc.userProjectID) + hk, err := hkh.tc.GetHMACKey(ctx, hkh.projectID, hkh.accessID, o...) - hkPb := &raw.HmacKey{ - Metadata: metadata, - } - return pbHmacKeyToHMACKey(hkPb, false) + return hk, err } // Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage. @@ -146,49 +131,59 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC // // This method is EXPERIMENTAL and subject to change or removal without notice. func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error { - delCall := hkh.raw.Delete(hkh.projectID, hkh.accessID) desc := new(hmacKeyDesc) for _, opt := range opts { opt.withHMACKeyDesc(desc) } - if desc.userProjectID != "" { - delCall = delCall.UserProject(desc.userProjectID) - } - setClientHeader(delCall.Header()) - return runWithRetry(ctx, func() error { - return delCall.Context(ctx).Do() - }) + o := makeStorageOpts(true, hkh.retry, desc.userProjectID) + return hkh.tc.DeleteHMACKey(ctx, hkh.projectID, hkh.accessID, o...) 
} -func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) { - pbmd := pb.Metadata - if pbmd == nil { +func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) { + hkmd := hk.Metadata + if hkmd == nil { return nil, errors.New("field Metadata cannot be nil") } - createdTime, err := time.Parse(time.RFC3339, pbmd.TimeCreated) + createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated) if err != nil { - return nil, fmt.Errorf("field CreatedTime: %v", err) + return nil, fmt.Errorf("field CreatedTime: %w", err) } - updatedTime, err := time.Parse(time.RFC3339, pbmd.Updated) + updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated) if err != nil && !updatedTimeCanBeNil { - return nil, fmt.Errorf("field UpdatedTime: %v", err) + return nil, fmt.Errorf("field UpdatedTime: %w", err) } - hmk := &HMACKey{ - AccessID: pbmd.AccessId, - Secret: pb.Secret, - Etag: pbmd.Etag, - ID: pbmd.Id, - State: HMACState(pbmd.State), - ProjectID: pbmd.ProjectId, + hmKey := &HMACKey{ + AccessID: hkmd.AccessId, + Secret: hk.Secret, + Etag: hkmd.Etag, + ID: hkmd.Id, + State: HMACState(hkmd.State), + ProjectID: hkmd.ProjectId, CreatedTime: createdTime, UpdatedTime: updatedTime, - ServiceAccountEmail: pbmd.ServiceAccountEmail, + ServiceAccountEmail: hkmd.ServiceAccountEmail, } - return hmk, nil + return hmKey, nil +} + +func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey { + if pbmd == nil { + return nil + } + + return &HMACKey{ + AccessID: pbmd.GetAccessId(), + ID: pbmd.GetId(), + State: HMACState(pbmd.GetState()), + ProjectID: pbmd.GetProject(), + CreatedTime: convertProtoTime(pbmd.GetCreateTime()), + UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()), + ServiceAccountEmail: pbmd.GetServiceAccountEmail(), + } } // CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey. @@ -202,29 +197,14 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma return nil, errors.New("storage: expecting a non-blank service account email") } - svc := raw.NewProjectsHmacKeysService(c.raw) - call := svc.Create(projectID, serviceAccountEmail) desc := new(hmacKeyDesc) for _, opt := range opts { opt.withHMACKeyDesc(desc) } - if desc.userProjectID != "" { - call = call.UserProject(desc.userProjectID) - } - - setClientHeader(call.Header()) - - var hkPb *raw.HmacKey - var err error - err = runWithRetry(ctx, func() error { - hkPb, err = call.Context(ctx).Do() - return err - }) - if err != nil { - return nil, err - } - return pbHmacKeyToHMACKey(hkPb, true) + o := makeStorageOpts(false, c.retry, desc.userProjectID) + hk, err := c.tc.CreateHMACKey(ctx, projectID, serviceAccountEmail, o...) + return hk, err } // HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated. 
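For reference, a hedged sketch (project ID and service-account email are placeholders) of the public call path that ends in the rewritten CreateHMACKey above.

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
)

// newHMACKey creates an HMAC key for a service account. The secret is only
// returned on creation, so capture it here if it is needed later.
func newHMACKey(ctx context.Context, client *storage.Client) (*storage.HMACKey, error) {
	key, err := client.CreateHMACKey(ctx, "my-project", "sa@my-project.iam.gserviceaccount.com")
	if err != nil {
		return nil, err
	}
	fmt.Println(key.AccessID, key.CreatedTime.Format("2006-01-02"))
	return key, nil
}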
@@ -246,34 +226,15 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opt return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive) } - call := h.raw.Update(h.projectID, h.accessID, &raw.HmacKeyMetadata{ - Etag: au.Etag, - State: string(au.State), - }) - desc := new(hmacKeyDesc) for _, opt := range opts { opt.withHMACKeyDesc(desc) } - if desc.userProjectID != "" { - call = call.UserProject(desc.userProjectID) - } - setClientHeader(call.Header()) - var metadata *raw.HmacKeyMetadata - var err error - err = runWithRetry(ctx, func() error { - metadata, err = call.Context(ctx).Do() - return err - }) - - if err != nil { - return nil, err - } - hkPb := &raw.HmacKey{ - Metadata: metadata, - } - return pbHmacKeyToHMACKey(hkPb, false) + isIdempotent := len(au.Etag) > 0 + o := makeStorageOpts(isIdempotent, h.retry, desc.userProjectID) + hk, err := h.tc.UpdateHMACKey(ctx, h.projectID, desc.forServiceAccountEmail, h.accessID, &au, o...) + return hk, err } // An HMACKeysIterator is an iterator over HMACKeys. @@ -290,6 +251,7 @@ type HMACKeysIterator struct { nextFunc func() error index int desc hmacKeyDesc + retry *retryConfig } // ListHMACKeys returns an iterator for listing HMACKeys. @@ -298,26 +260,13 @@ type HMACKeysIterator struct { // // This method is EXPERIMENTAL and subject to change or removal without notice. func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator { - it := &HMACKeysIterator{ - ctx: ctx, - raw: raw.NewProjectsHmacKeysService(c.raw), - projectID: projectID, - } - + desc := new(hmacKeyDesc) for _, opt := range opts { - opt.withHMACKeyDesc(&it.desc) + opt.withHMACKeyDesc(desc) } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - it.fetch, - func() int { return len(it.hmacKeys) - it.index }, - func() interface{} { - prev := it.hmacKeys - it.hmacKeys = it.hmacKeys[:0] - it.index = 0 - return prev - }) - return it + o := makeStorageOpts(true, c.retry, desc.userProjectID) + return c.tc.ListHMACKeys(ctx, projectID, desc.forServiceAccountEmail, desc.showDeletedKeys, o...) } // Next returns the next result. Its second return value is iterator.Done if @@ -346,6 +295,8 @@ func (it *HMACKeysIterator) Next() (*HMACKey, error) { func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) { + // TODO: Remove fetch method upon integration. This method is internalized into + // httpStorageClient.ListHMACKeys() as it is the only caller. 
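A small sketch (identifiers are placeholders, not part of the patch) of the caller-side consequence of the isIdempotent change in Update above: passing the key's current Etag makes the request conditional, which is what allows it to be retried safely.

package example

import (
	"context"

	"cloud.google.com/go/storage"
)

// deactivate flips an HMAC key to Inactive. A non-empty Etag is what the
// rewritten Update treats as idempotent and therefore eligible for retries.
func deactivate(ctx context.Context, hkh *storage.HMACKeyHandle, etag string) (*storage.HMACKey, error) {
	return hkh.Update(ctx, storage.HMACKeyAttrsToUpdate{
		State: storage.Inactive,
		Etag:  etag,
	})
}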
call := it.raw.List(it.projectID) setClientHeader(call.Header()) if pageToken != "" { @@ -366,19 +317,19 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, ctx := it.ctx var resp *raw.HmacKeysMetadata - err = runWithRetry(it.ctx, func() error { + err = run(it.ctx, func() error { resp, err = call.Context(ctx).Do() return err - }) + }, it.retry, true, setRetryHeaderHTTP(call)) if err != nil { return "", err } for _, metadata := range resp.Items { - hkPb := &raw.HmacKey{ + hk := &raw.HmacKey{ Metadata: metadata, } - hkey, err := pbHmacKeyToHMACKey(hkPb, true) + hkey, err := toHMACKeyFromRaw(hk, true) if err != nil { return "", err } diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go new file mode 100644 index 000000000000..fae96043a92a --- /dev/null +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -0,0 +1,1351 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "reflect" + "strconv" + "strings" + "time" + + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/trace" + "golang.org/x/oauth2/google" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + raw "google.golang.org/api/storage/v1" + "google.golang.org/api/transport" + htransport "google.golang.org/api/transport/http" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +// httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic +// storageClient interface. +// +// This is an experimental API and not intended for public use. +type httpStorageClient struct { + creds *google.Credentials + hc *http.Client + readHost string + raw *raw.Service + scheme string + settings *settings +} + +// newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON +// Storage API. +// +// This is an experimental API and not intended for public use. +func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { + s := initSettings(opts...) + o := s.clientOption + + var creds *google.Credentials + // In general, it is recommended to use raw.NewService instead of htransport.NewClient + // since raw.NewService configures the correct default endpoints when initializing the + // internal http client. However, in our case, "NewRangeReader" in reader.go needs to + // access the http client directly to make requests, so we create the client manually + // here so it can be re-used by both reader.go and raw.NewService. This means we need to + // manually configure the default endpoint options on the http client. Furthermore, we + // need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints. 
+ if host := os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { + // Prepend default options to avoid overriding options passed by the user. + o = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, o...) + + o = append(o, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/")) + o = append(o, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/")) + + // Don't error out here. The user may have passed in their own HTTP + // client which does not auth with ADC or other common conventions. + c, err := transport.Creds(ctx, o...) + if err == nil { + creds = c + o = append(o, internaloption.WithCredentials(creds)) + } + } else { + var hostURL *url.URL + + if strings.Contains(host, "://") { + h, err := url.Parse(host) + if err != nil { + return nil, err + } + hostURL = h + } else { + // Add scheme for user if not supplied in STORAGE_EMULATOR_HOST + // URL is only parsed correctly if it has a scheme, so we build it ourselves + hostURL = &url.URL{Scheme: "http", Host: host} + } + + hostURL.Path = "storage/v1/" + endpoint := hostURL.String() + + // Append the emulator host as default endpoint for the user + o = append([]option.ClientOption{option.WithoutAuthentication()}, o...) + + o = append(o, internaloption.WithDefaultEndpoint(endpoint)) + o = append(o, internaloption.WithDefaultMTLSEndpoint(endpoint)) + } + s.clientOption = o + + // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. + hc, ep, err := htransport.NewClient(ctx, s.clientOption...) + if err != nil { + return nil, fmt.Errorf("dialing: %w", err) + } + // RawService should be created with the chosen endpoint to take account of user override. + rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc)) + if err != nil { + return nil, fmt.Errorf("storage client: %w", err) + } + // Update readHost and scheme with the chosen endpoint. + u, err := url.Parse(ep) + if err != nil { + return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) + } + + return &httpStorageClient{ + creds: creds, + hc: hc, + readHost: u.Host, + raw: rawService, + scheme: u.Scheme, + settings: s, + }, nil +} + +func (c *httpStorageClient) Close() error { + c.hc.CloseIdleConnections() + return nil +} + +// Top-level methods. + +func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.ServiceAccount.Get(project) + var res *raw.ServiceAccount + err := run(ctx, func() error { + var err error + res, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + if err != nil { + return "", err + } + return res.EmailAddress, nil +} + +func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) + var bkt *raw.Bucket + if attrs != nil { + bkt = attrs.toRawBucket() + } else { + bkt = &raw.Bucket{} + } + bkt.Name = bucket + // If there is lifecycle information but no location, explicitly set + // the location. This is a GCS quirk/bug. 
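A standalone sketch of the STORAGE_EMULATOR_HOST normalization performed in newHTTPStorageClient above (the emulatorEndpoint helper name is hypothetical): hosts supplied without a scheme get "http" prepended, since net/url only parses them reliably when a scheme is present.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func emulatorEndpoint(host string) (string, error) {
	var u *url.URL
	if strings.Contains(host, "://") {
		parsed, err := url.Parse(host)
		if err != nil {
			return "", err
		}
		u = parsed
	} else {
		// Without a scheme, url.Parse treats the host as a path, so build
		// the URL by hand and default to plain http for the emulator.
		u = &url.URL{Scheme: "http", Host: host}
	}
	u.Path = "storage/v1/"
	return u.String(), nil
}

func main() {
	ep, _ := emulatorEndpoint("localhost:9000")
	fmt.Println(ep) // http://localhost:9000/storage/v1/
}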
+ if bkt.Location == "" && bkt.Lifecycle != nil { + bkt.Location = "US" + } + req := c.raw.Buckets.Insert(project, bkt) + setClientHeader(req.Header()) + if attrs != nil && attrs.PredefinedACL != "" { + req.PredefinedAcl(attrs.PredefinedACL) + } + if attrs != nil && attrs.PredefinedDefaultObjectACL != "" { + req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) + } + var battrs *BucketAttrs + err := run(ctx, func() error { + b, err := req.Context(ctx).Do() + if err != nil { + return err + } + battrs, err = newBucket(b) + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return battrs, err +} + +func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator { + s := callSettings(c.settings, opts...) + it := &BucketIterator{ + ctx: ctx, + projectID: project, + } + + fetch := func(pageSize int, pageToken string) (token string, err error) { + req := c.raw.Buckets.List(it.projectID) + setClientHeader(req.Header()) + req.Projection("full") + req.Prefix(it.Prefix) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Buckets + err = run(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + if err != nil { + return "", err + } + for _, item := range resp.Items { + b, err := newBucket(item) + if err != nil { + return "", err + } + it.buckets = append(it.buckets, b) + } + return resp.NextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + + return it +} + +// Bucket methods. + +func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.Buckets.Delete(bucket) + setClientHeader(req.Header()) + if err := applyBucketConds("httpStorageClient.DeleteBucket", conds, req); err != nil { + return err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + + return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) + req := c.raw.Buckets.Get(bucket).Projection("full") + setClientHeader(req.Header()) + err := applyBucketConds("httpStorageClient.GetBucket", conds, req) + if err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + + var resp *raw.Bucket + err = run(ctx, func() error { + resp, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return nil, ErrBucketNotExist + } + if err != nil { + return nil, err + } + return newBucket(resp) +} +func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) 
+ rb := uattrs.toRawBucket() + req := c.raw.Buckets.Patch(bucket, rb).Projection("full") + setClientHeader(req.Header()) + err := applyBucketConds("httpStorageClient.UpdateBucket", conds, req) + if err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + if uattrs != nil && uattrs.PredefinedACL != "" { + req.PredefinedAcl(uattrs.PredefinedACL) + } + if uattrs != nil && uattrs.PredefinedDefaultObjectACL != "" { + req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL) + } + + var rawBucket *raw.Bucket + err = run(ctx, func() error { + rawBucket, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + if err != nil { + return nil, err + } + return newBucket(rawBucket) +} + +func (c *httpStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + + var metageneration int64 + if conds != nil { + metageneration = conds.MetagenerationMatch + } + req := c.raw.Buckets.LockRetentionPolicy(bucket, metageneration) + + return run(ctx, func() error { + _, err := req.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} +func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { + s := callSettings(c.settings, opts...) + it := &ObjectIterator{ + ctx: ctx, + } + if q != nil { + it.query = *q + } + fetch := func(pageSize int, pageToken string) (string, error) { + req := c.raw.Objects.List(bucket) + setClientHeader(req.Header()) + projection := it.query.Projection + if projection == ProjectionDefault { + projection = ProjectionFull + } + req.Projection(projection.String()) + req.Delimiter(it.query.Delimiter) + req.Prefix(it.query.Prefix) + req.StartOffset(it.query.StartOffset) + req.EndOffset(it.query.EndOffset) + req.Versions(it.query.Versions) + req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter) + if selection := it.query.toFieldSelection(); selection != "" { + req.Fields("nextPageToken", googleapi.Field(selection)) + } + req.PageToken(pageToken) + if s.userProject != "" { + req.UserProject(s.userProject) + } + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Objects + var err error + err = run(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + if err != nil { + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + err = ErrBucketNotExist + } + return "", err + } + for _, item := range resp.Items { + it.items = append(it.items, newObject(item)) + } + for _, prefix := range resp.Prefixes { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + return resp.NextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + + return it +} + +// Object metadata methods. + +func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) 
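An illustrative sketch (bucket name and prefix are placeholders) of driving the ListObjects implementation above through the public iterator API; entries that carry only a Prefix are the synthetic "directories" appended from resp.Prefixes.

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func listPrefix(ctx context.Context, client *storage.Client) error {
	it := client.Bucket("my-bucket").Objects(ctx, &storage.Query{
		Prefix:    "logs/2023/",
		Delimiter: "/",
	})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		if attrs.Prefix != "" {
			fmt.Println("prefix:", attrs.Prefix) // synthetic directory entry
			continue
		}
		fmt.Println("object:", attrs.Name)
	}
}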
+ req := c.raw.Objects.Delete(bucket, object).Context(ctx) + if err := applyConds("Delete", gen, conds, req); err != nil { + return err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + err := run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + return err +} + +func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + req := c.raw.Objects.Get(bucket, object).Projection("full").Context(ctx) + if err := applyConds("Attrs", gen, conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + if err := setEncryptionHeaders(req.Header(), encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + err = run(ctx, func() error { + obj, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + + var attrs ObjectAttrs + // Lists of fields to send, and set to null, in the JSON. + var forceSendFields, nullFields []string + if uattrs.ContentType != nil { + attrs.ContentType = optional.ToString(uattrs.ContentType) + // For ContentType, sending the empty string is a no-op. + // Instead we send a null. + if attrs.ContentType == "" { + nullFields = append(nullFields, "ContentType") + } else { + forceSendFields = append(forceSendFields, "ContentType") + } + } + if uattrs.ContentLanguage != nil { + attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + // For ContentLanguage it's an error to send the empty string. + // Instead we send a null. 
+ if attrs.ContentLanguage == "" { + nullFields = append(nullFields, "ContentLanguage") + } else { + forceSendFields = append(forceSendFields, "ContentLanguage") + } + } + if uattrs.ContentEncoding != nil { + attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + forceSendFields = append(forceSendFields, "ContentEncoding") + } + if uattrs.ContentDisposition != nil { + attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + forceSendFields = append(forceSendFields, "ContentDisposition") + } + if uattrs.CacheControl != nil { + attrs.CacheControl = optional.ToString(uattrs.CacheControl) + forceSendFields = append(forceSendFields, "CacheControl") + } + if uattrs.EventBasedHold != nil { + attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold) + forceSendFields = append(forceSendFields, "EventBasedHold") + } + if uattrs.TemporaryHold != nil { + attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) + forceSendFields = append(forceSendFields, "TemporaryHold") + } + if !uattrs.CustomTime.IsZero() { + attrs.CustomTime = uattrs.CustomTime + forceSendFields = append(forceSendFields, "CustomTime") + } + if uattrs.Metadata != nil { + attrs.Metadata = uattrs.Metadata + if len(attrs.Metadata) == 0 { + // Sending the empty map is a no-op. We send null instead. + nullFields = append(nullFields, "Metadata") + } else { + forceSendFields = append(forceSendFields, "Metadata") + } + } + if uattrs.ACL != nil { + attrs.ACL = uattrs.ACL + // It's an error to attempt to delete the ACL, so + // we don't append to nullFields here. + forceSendFields = append(forceSendFields, "Acl") + } + rawObj := attrs.toRawObject(bucket) + rawObj.ForceSendFields = forceSendFields + rawObj.NullFields = nullFields + call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full").Context(ctx) + if err := applyConds("Update", gen, conds, call); err != nil { + return nil, err + } + if s.userProject != "" { + call.UserProject(s.userProject) + } + if uattrs.PredefinedACL != "" { + call.PredefinedAcl(uattrs.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + err = run(ctx, func() error { obj, err = call.Do(); return err }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + var e *googleapi.Error + if errors.As(err, &e) && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// Default Object ACL methods. + +func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.DefaultObjectAccessControls.Delete(bucket, string(entity)) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { + s := callSettings(c.settings, opts...) 
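A hedged sketch (the object handle is assumed to exist) of how the forceSendFields / nullFields split above surfaces in the public API: a nil field is omitted from the PATCH body entirely, while a field set to its empty value is serialized as an explicit JSON null and cleared on the server.

package example

import (
	"context"

	"cloud.google.com/go/storage"
)

func retag(ctx context.Context, obj *storage.ObjectHandle) (*storage.ObjectAttrs, error) {
	return obj.Update(ctx, storage.ObjectAttrsToUpdate{
		ContentType:     "text/plain", // non-empty -> forceSendFields
		ContentLanguage: "",           // empty -> nullFields, cleared server-side
	})
}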
+ var acls *raw.ObjectAccessControls + var err error + req := c.raw.DefaultObjectAccessControls.List(bucket) + configureACLCall(ctx, s.userProject, req) + err = run(ctx, func() error { + acls, err = req.Do() + return err + }, s.retry, true, setRetryHeaderHTTP(req)) + if err != nil { + return nil, err + } + return toObjectACLRules(acls.Items), nil +} +func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + acl := &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + var req setRequest + var err error + req = c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func() error { + _, err = req.Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +// Bucket ACL methods. + +func (c *httpStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.BucketAccessControls.Delete(bucket, string(entity)) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { + s := callSettings(c.settings, opts...) + var acls *raw.BucketAccessControls + var err error + req := c.raw.BucketAccessControls.List(bucket) + configureACLCall(ctx, s.userProject, req) + err = run(ctx, func() error { + acls, err = req.Do() + return err + }, s.retry, true, setRetryHeaderHTTP(req)) + if err != nil { + return nil, err + } + return toBucketACLRules(acls.Items), nil +} + +func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + acl := &raw.BucketAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + req := c.raw.BucketAccessControls.Update(bucket, string(entity), acl) + configureACLCall(ctx, s.userProject, req) + var err error + return run(ctx, func() error { + _, err = req.Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +// configureACLCall sets the context, user project and headers on the apiary library call. +// This will panic if the call does not have the correct methods. +func configureACLCall(ctx context.Context, userProject string, call interface{ Header() http.Header }) { + vc := reflect.ValueOf(call) + vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) + if userProject != "" { + vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(userProject)}) + } + setClientHeader(call.Header()) +} + +// Object ACL methods. + +func (c *httpStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.ObjectAccessControls.Delete(bucket, object, string(entity)) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +// ListObjectACLs retrieves object ACL entries. 
By default, it operates on the latest generation of this object. +// Selecting a specific generation of this object is not currently supported by the client. +func (c *httpStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { + s := callSettings(c.settings, opts...) + var acls *raw.ObjectAccessControls + var err error + req := c.raw.ObjectAccessControls.List(bucket, object) + configureACLCall(ctx, s.userProject, req) + err = run(ctx, func() error { + acls, err = req.Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + if err != nil { + return nil, err + } + return toObjectACLRules(acls.Items), nil +} + +func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + + acl := &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + var req setRequest + var err error + req = c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func() error { + _, err = req.Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) +} + +// Media operations. + +func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + rawReq := &raw.ComposeRequest{} + // Compose requires a non-empty Destination, so we always set it, + // even if the caller-provided ObjectAttrs is the zero value. + rawReq.Destination = req.dstObject.attrs.toRawObject(req.dstBucket) + if req.sendCRC32C { + rawReq.Destination.Crc32c = encodeUint32(req.dstObject.attrs.CRC32C) + } + for _, src := range req.srcs { + srcObj := &raw.ComposeRequestSourceObjects{ + Name: src.name, + } + if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { + return nil, err + } + rawReq.SourceObjects = append(rawReq.SourceObjects, srcObj) + } + + call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq).Context(ctx) + if err := applyConds("ComposeFrom destination", defaultGen, req.dstObject.conds, call); err != nil { + return nil, err + } + if s.userProject != "" { + call.UserProject(s.userProject) + } + if req.predefinedACL != "" { + call.DestinationPredefinedAcl(req.predefinedACL) + } + if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + + var err error + retryCall := func() error { obj, err = call.Do(); return err } + + if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + return newObject(obj), nil +} +func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) { + s := callSettings(c.settings, opts...) 
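An illustrative sketch (bucket and object names are placeholders) of the public composer API that feeds the ComposeObject implementation above.

package example

import (
	"context"

	"cloud.google.com/go/storage"
)

// composeTwo concatenates two source objects into a destination object in the
// same bucket, which is the request shape ComposeObject turns into a raw
// ComposeRequest.
func composeTwo(ctx context.Context, client *storage.Client) (*storage.ObjectAttrs, error) {
	bkt := client.Bucket("my-bucket")
	dst := bkt.Object("combined.log")
	return dst.ComposerFrom(bkt.Object("part-1.log"), bkt.Object("part-2.log")).Run(ctx)
}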
+ rawObject := req.dstObject.attrs.toRawObject("") + call := c.raw.Objects.Rewrite(req.srcObject.bucket, req.srcObject.name, req.dstObject.bucket, req.dstObject.name, rawObject) + + call.Context(ctx).Projection("full") + if req.token != "" { + call.RewriteToken(req.token) + } + if req.dstObject.keyName != "" { + call.DestinationKmsKeyName(req.dstObject.keyName) + } + if req.predefinedACL != "" { + call.DestinationPredefinedAcl(req.predefinedACL) + } + if err := applyConds("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { + return nil, err + } + if err := applySourceConds(req.srcObject.gen, req.srcObject.conds, call); err != nil { + return nil, err + } + if s.userProject != "" { + call.UserProject(s.userProject) + } + // Set destination encryption headers. + if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil { + return nil, err + } + // Set source encryption headers. + if err := setEncryptionHeaders(call.Header(), req.srcObject.encryptionKey, true); err != nil { + return nil, err + } + + if req.maxBytesRewrittenPerCall != 0 { + call.MaxBytesRewrittenPerCall(req.maxBytesRewrittenPerCall) + } + + var res *raw.RewriteResponse + var err error + setClientHeader(call.Header()) + + retryCall := func() error { res, err = call.Do(); return err } + + if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + + r := &rewriteObjectResponse{ + done: res.Done, + written: res.TotalBytesRewritten, + size: res.ObjectSize, + token: res.RewriteToken, + resource: newObject(res.Resource), + } + + return r, nil +} + +func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.NewRangeReader") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + + u := &url.URL{ + Scheme: c.scheme, + Host: c.readHost, + Path: fmt.Sprintf("/%s/%s", params.bucket, params.object), + } + verb := "GET" + if params.length == 0 { + verb = "HEAD" + } + req, err := http.NewRequest(verb, u.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if s.userProject != "" { + req.Header.Set("X-Goog-User-Project", s.userProject) + } + if params.readCompressed { + req.Header.Set("Accept-Encoding", "gzip") + } + if err := setEncryptionHeaders(req.Header, params.encryptionKey, false); err != nil { + return nil, err + } + + // Define a function that initiates a Read with offset and length, assuming we + // have already read seen bytes. + reopen := func(seen int64) (*http.Response, error) { + // If the context has already expired, return immediately without making a + // call. + if err := ctx.Err(); err != nil { + return nil, err + } + start := params.offset + seen + if params.length < 0 && start < 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d", start)) + } else if params.length < 0 && start > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) + } else if params.length > 0 { + // The end character isn't affected by how many bytes we've seen. + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, params.offset+params.length-1)) + } + // We wait to assign conditions here because the generation number can change in between reopen() runs. 
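A standalone sketch of the Range header arithmetic used by reopen above: the start byte advances by however many bytes have already been consumed (seen), while a positive length pins the end byte independently of seen.

package main

import "fmt"

func rangeHeader(offset, length, seen int64) string {
	start := offset + seen
	switch {
	case length < 0 && start < 0:
		return fmt.Sprintf("bytes=%d", start) // suffix range: the last -start bytes
	case length < 0 && start > 0:
		return fmt.Sprintf("bytes=%d-", start) // open-ended range
	case length > 0:
		return fmt.Sprintf("bytes=%d-%d", start, offset+length-1)
	}
	return "" // no Range header: whole object or a metadata-only request
}

func main() {
	fmt.Println(rangeHeader(100, 50, 10)) // bytes=110-149
	fmt.Println(rangeHeader(0, -1, 4096)) // bytes=4096-
}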
+ if err := setConditionsHeaders(req.Header, params.conds); err != nil { + return nil, err + } + // If an object generation is specified, include generation as query string parameters. + if params.gen >= 0 { + req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) + } + + var res *http.Response + err = run(ctx, func() error { + res, err = c.hc.Do(req) + if err != nil { + return err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + + partialContentNotSatisfied := + !decompressiveTranscoding(res) && + start > 0 && params.length != 0 && + res.StatusCode != http.StatusPartialContent + + if partialContentNotSatisfied { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + + // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves + // back the whole file regardless of the range count passed in as per: + // https://cloud.google.com/storage/docs/transcoding#range, + // thus we have to manually move the body forward by seen bytes. + if decompressiveTranscoding(res) && seen > 0 { + _, _ = io.CopyN(ioutil.Discard, res.Body, seen) + } + + // If a generation hasn't been specified, and this is the first response we get, let's record the + // generation. In future requests we'll use this generation as a precondition to avoid data races. + if params.gen < 0 && res.Header.Get("X-Goog-Generation") != "" { + gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) + if err != nil { + return err + } + params.gen = gen64 + } + return nil + }, s.retry, s.idempotent, setRetryHeaderHTTP(nil)) + if err != nil { + return nil, err + } + return res, nil + } + + res, err := reopen(0) + if err != nil { + return nil, err + } + var ( + size int64 // total size of object, even if a range was requested. + checkCRC bool + crc uint32 + startOffset int64 // non-zero if range request. + ) + if res.StatusCode == http.StatusPartialContent { + cr := strings.TrimSpace(res.Header.Get("Content-Range")) + if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + // Content range is formatted -/. We take + // the total size. + size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + + dashIndex := strings.Index(cr, "-") + if dashIndex >= 0 { + startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err) + } + } + } else { + size = res.ContentLength + // Check the CRC iff all of the following hold: + // - We asked for content (length != 0). + // - We got all the content (status != PartialContent). + // - The server sent a CRC header. + // - The Go http stack did not uncompress the file. + // - We were not served compressed data that was uncompressed on download. + // The problem with the last two cases is that the CRC will not match -- GCS + // computes it on the compressed contents, but we compute it on the + // uncompressed contents. 
+ if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) { + crc, checkCRC = parseCRC32c(res) + } + } + + remain := res.ContentLength + body := res.Body + if params.length == 0 { + remain = 0 + body.Close() + body = emptyBody + } + var metaGen int64 + if res.Header.Get("X-Goog-Metageneration") != "" { + metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) + if err != nil { + return nil, err + } + } + + var lm time.Time + if res.Header.Get("Last-Modified") != "" { + lm, err = http.ParseTime(res.Header.Get("Last-Modified")) + if err != nil { + return nil, err + } + } + + attrs := ReaderObjectAttrs{ + Size: size, + ContentType: res.Header.Get("Content-Type"), + ContentEncoding: res.Header.Get("Content-Encoding"), + CacheControl: res.Header.Get("Cache-Control"), + LastModified: lm, + StartOffset: startOffset, + Generation: params.gen, + Metageneration: metaGen, + } + return &Reader{ + Attrs: attrs, + size: size, + remain: remain, + wantCRC: crc, + checkCRC: checkCRC, + reader: &httpReader{ + reopen: reopen, + body: body, + }, + }, nil +} + +func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { + s := callSettings(c.settings, opts...) + errorf := params.setError + setObj := params.setObj + progress := params.progress + attrs := params.attrs + + mediaOpts := []googleapi.MediaOption{ + googleapi.ChunkSize(params.chunkSize), + } + if c := attrs.ContentType; c != "" { + mediaOpts = append(mediaOpts, googleapi.ContentType(c)) + } + if params.chunkRetryDeadline != 0 { + mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(params.chunkRetryDeadline)) + } + + pr, pw := io.Pipe() + + go func() { + defer close(params.donec) + + rawObj := attrs.toRawObject(params.bucket) + if params.sendCRC32C { + rawObj.Crc32c = encodeUint32(attrs.CRC32C) + } + if attrs.MD5 != nil { + rawObj.Md5Hash = base64.StdEncoding.EncodeToString(attrs.MD5) + } + call := c.raw.Objects.Insert(params.bucket, rawObj). + Media(pr, mediaOpts...). + Projection("full"). + Context(params.ctx). + Name(params.attrs.Name) + call.ProgressUpdater(func(n, _ int64) { progress(n) }) + + if attrs.KMSKeyName != "" { + call.KmsKeyName(attrs.KMSKeyName) + } + if attrs.PredefinedACL != "" { + call.PredefinedAcl(attrs.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), params.encryptionKey, false); err != nil { + errorf(err) + pr.CloseWithError(err) + return + } + var resp *raw.Object + err := applyConds("NewWriter", defaultGen, params.conds, call) + if err == nil { + if s.userProject != "" { + call.UserProject(s.userProject) + } + // TODO(tritone): Remove this code when Uploads begin to support + // retry attempt header injection with "client header" injection. + setClientHeader(call.Header()) + + // The internals that perform call.Do automatically retry both the initial + // call to set up the upload as well as calls to upload individual chunks + // for a resumable upload (as long as the chunk size is non-zero). Hence + // there is no need to add retries here. + + // Retry only when the operation is idempotent or the retry policy is RetryAlways. 
+ var useRetry bool + if (s.retry == nil || s.retry.policy == RetryIdempotent) && s.idempotent { + useRetry = true + } else if s.retry != nil && s.retry.policy == RetryAlways { + useRetry = true + } + if useRetry { + if s.retry != nil { + call.WithRetry(s.retry.backoff, s.retry.shouldRetry) + } else { + call.WithRetry(nil, nil) + } + } + resp, err = call.Do() + } + if err != nil { + errorf(err) + pr.CloseWithError(err) + return + } + setObj(newObject(resp)) + }() + + return pw, nil +} + +// IAM methods. + +func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(version)) + setClientHeader(call.Header()) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var rp *raw.Policy + err := run(ctx, func() error { + var err error + rp, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + if err != nil { + return nil, err + } + return iamFromStoragePolicy(rp), nil +} + +func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + + rp := iamToStoragePolicy(policy) + call := c.raw.Buckets.SetIamPolicy(resource, rp) + setClientHeader(call.Header()) + if s.userProject != "" { + call.UserProject(s.userProject) + } + + return run(ctx, func() error { + _, err := call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) +} + +func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Buckets.TestIamPermissions(resource, permissions) + setClientHeader(call.Header()) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var res *raw.TestIamPermissionsResponse + err := run(ctx, func() error { + var err error + res, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// HMAC Key methods. + +func (c *httpStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Get(project, accessID) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + + var metadata *raw.HmacKeyMetadata + var err error + if err := run(ctx, func() error { + metadata, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + hk := &raw.HmacKey{ + Metadata: metadata, + } + return toHMACKeyFromRaw(hk, false) +} + +func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { + s := callSettings(c.settings, opts...) 
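For context, a hedged sketch (bucket and object names are placeholders) of how a caller opts an upload into the RetryAlways branch of the useRetry decision above; a plain Writer upload without preconditions is not idempotent, so it is not retried under the default policy.

package example

import (
	"context"
	"io"

	"cloud.google.com/go/storage"
)

func upload(ctx context.Context, client *storage.Client, src io.Reader) error {
	// Without preconditions the write is not idempotent, so it is only
	// retried when the handle's policy is RetryAlways.
	obj := client.Bucket("my-bucket").Object("data.bin").
		Retryer(storage.WithPolicy(storage.RetryAlways))
	w := obj.NewWriter(ctx)
	if _, err := io.Copy(w, src); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}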
+ it := &HMACKeysIterator{ + ctx: ctx, + raw: c.raw.Projects.HmacKeys, + projectID: project, + retry: s.retry, + } + fetch := func(pageSize int, pageToken string) (token string, err error) { + call := c.raw.Projects.HmacKeys.List(project) + setClientHeader(call.Header()) + if pageToken != "" { + call = call.PageToken(pageToken) + } + if pageSize > 0 { + call = call.MaxResults(int64(pageSize)) + } + if showDeletedKeys { + call = call.ShowDeletedKeys(true) + } + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + if serviceAccountEmail != "" { + call = call.ServiceAccountEmail(serviceAccountEmail) + } + + var resp *raw.HmacKeysMetadata + err = run(it.ctx, func() error { + resp, err = call.Context(it.ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + if err != nil { + return "", err + } + + for _, metadata := range resp.Items { + hk := &raw.HmacKey{ + Metadata: metadata, + } + hkey, err := toHMACKeyFromRaw(hk, true) + if err != nil { + return "", err + } + it.hmacKeys = append(it.hmacKeys, hkey) + } + return resp.NextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.hmacKeys) - it.index }, + func() interface{} { + prev := it.hmacKeys + it.hmacKeys = it.hmacKeys[:0] + it.index = 0 + return prev + }) + return it +} + +func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Update(project, accessID, &raw.HmacKeyMetadata{ + Etag: attrs.Etag, + State: string(attrs.State), + }) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + + var metadata *raw.HmacKeyMetadata + var err error + if err := run(ctx, func() error { + metadata, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + hk := &raw.HmacKey{ + Metadata: metadata, + } + return toHMACKeyFromRaw(hk, false) +} + +func (c *httpStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Create(project, serviceAccountEmail) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + + var hk *raw.HmacKey + if err := run(ctx, func() error { + h, err := call.Context(ctx).Do() + hk = h + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + return toHMACKeyFromRaw(hk, true) +} + +func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Delete(project, accessID) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + return run(ctx, func() error { + return call.Context(ctx).Do() + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) +} + +// Notification methods. + +// ListNotifications returns all the Notifications configured for this bucket, as a map indexed by notification ID. +// +// Note: This API does not support pagination. However, entity limits cap the number of notifications on a single bucket, +// so all results will be returned in the first response. See https://cloud.google.com/storage/quotas#buckets. 
+func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.ListNotifications") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + call := c.raw.Notifications.List(bucket) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var res *raw.Notifications + err = run(ctx, func() error { + res, err = call.Context(ctx).Do() + return err + }, s.retry, true, setRetryHeaderHTTP(call)) + if err != nil { + return nil, err + } + return notificationsToMap(res.Items), nil +} + +func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.CreateNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + call := c.raw.Notifications.Insert(bucket, toRawNotification(n)) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var rn *raw.Notification + err = run(ctx, func() error { + rn, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + if err != nil { + return nil, err + } + return toNotification(rn), nil +} + +func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + call := c.raw.Notifications.Delete(bucket, id) + if s.userProject != "" { + call.UserProject(s.userProject) + } + return run(ctx, func() error { + return call.Context(ctx).Do() + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) +} + +type httpReader struct { + body io.ReadCloser + seen int64 + reopen func(seen int64) (*http.Response, error) +} + +func (r *httpReader) Read(p []byte) (int, error) { + n := 0 + for len(p[n:]) > 0 { + m, err := r.body.Read(p[n:]) + n += m + r.seen += int64(m) + if err == nil || err == io.EOF { + return n, err + } + // Read failed (likely due to connection issues), but we will try to reopen + // the pipe and continue. Send a ranged read request that takes into account + // the number of bytes we've already seen. + res, err := r.reopen(r.seen) + if err != nil { + // reopen already retries + return n, err + } + r.body.Close() + r.body = res.Body + } + return n, nil +} + +func (r *httpReader) Close() error { + return r.body.Close() +} diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go index 5caefb059d59..408661718fcb 100644 --- a/vendor/cloud.google.com/go/storage/iam.go +++ b/vendor/cloud.google.com/go/storage/iam.go @@ -27,15 +27,17 @@ import ( // IAM provides access to IAM access control for the bucket. func (b *BucketHandle) IAM() *iam.Handle { return iam.InternalNewHandleClient(&iamClient{ - raw: b.c.raw, userProject: b.userProject, + retry: b.retry, + client: b.c, }, b.name) } // iamClient implements the iam.client interface. 
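An illustrative sketch (bucket, project, and topic IDs are placeholders) of the public notification surface behind the List/Create/DeleteNotification methods above; everything comes back in a single map because, as noted, the API is not paginated.

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
)

func configureNotifications(ctx context.Context, client *storage.Client) error {
	bkt := client.Bucket("my-bucket")
	_, err := bkt.AddNotification(ctx, &storage.Notification{
		TopicProjectID: "my-project",
		TopicID:        "uploads",
		PayloadFormat:  storage.JSONPayload,
		EventTypes:     []string{storage.ObjectFinalizeEvent},
	})
	if err != nil {
		return err
	}
	all, err := bkt.Notifications(ctx) // map keyed by notification ID
	if err != nil {
		return err
	}
	for id, n := range all {
		fmt.Println(id, n.TopicID) // candidates for bkt.DeleteNotification(ctx, id)
	}
	return nil
}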
type iamClient struct { - raw *raw.Service userProject string + retry *retryConfig + client *Client } func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) { @@ -46,56 +48,25 @@ func (c *iamClient) GetWithVersion(ctx context.Context, resource string, request ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") defer func() { trace.EndSpan(ctx, err) }() - call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(requestedPolicyVersion)) - setClientHeader(call.Header()) - if c.userProject != "" { - call.UserProject(c.userProject) - } - var rp *raw.Policy - err = runWithRetry(ctx, func() error { - rp, err = call.Context(ctx).Do() - return err - }) - if err != nil { - return nil, err - } - return iamFromStoragePolicy(rp), nil + o := makeStorageOpts(true, c.retry, c.userProject) + return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...) } func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set") defer func() { trace.EndSpan(ctx, err) }() - rp := iamToStoragePolicy(p) - call := c.raw.Buckets.SetIamPolicy(resource, rp) - setClientHeader(call.Header()) - if c.userProject != "" { - call.UserProject(c.userProject) - } - return runWithRetry(ctx, func() error { - _, err := call.Context(ctx).Do() - return err - }) + isIdempotent := len(p.Etag) > 0 + o := makeStorageOpts(isIdempotent, c.retry, c.userProject) + return c.client.tc.SetIamPolicy(ctx, resource, p, o...) } func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test") defer func() { trace.EndSpan(ctx, err) }() - call := c.raw.Buckets.TestIamPermissions(resource, perms) - setClientHeader(call.Header()) - if c.userProject != "" { - call.UserProject(c.userProject) - } - var res *raw.TestIamPermissionsResponse - err = runWithRetry(ctx, func() error { - res, err = call.Context(ctx).Do() - return err - }) - if err != nil { - return nil, err - } - return res.Permissions, nil + o := makeStorageOpts(true, c.retry, c.userProject) + return c.client.tc.TestIamPermissions(ctx, resource, perms, o...) } func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go new file mode 100644 index 000000000000..56e97548126b --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -0,0 +1,174 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +// Package storage is an auto-generated package for the +// Cloud Storage API. +// +// Lets you store and retrieve potentially-large, immutable data objects. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. 
+// +// # Example usage +// +// To get started with this package, create a client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := storage.NewClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// The client will use your default application credentials. Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +// The returned client must be Closed when it is done being used. +// +// # Using the Client +// +// The following is an example of making an API call with the newly created client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := storage.NewClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &storagepb.DeleteBucketRequest{ +// // TODO: Fill request struct fields. +// // See https://pkg.go.dev/cloud.google.com/go/storage/internal/apiv2/stubs#DeleteBucketRequest. +// } +// err = c.DeleteBucket(ctx, req) +// if err != nil { +// // TODO: Handle error. +// } +// +// # Use of Context +// +// The ctx passed to NewClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +// +// For information about setting deadlines, reusing contexts, and more +// please visit https://pkg.go.dev/cloud.google.com/go. +package storage // import "cloud.google.com/go/storage/internal/apiv2" + +import ( + "context" + "os" + "runtime" + "strconv" + "strings" + "unicode" + + "google.golang.org/api/option" + "google.golang.org/grpc/metadata" +) + +// For more information on implementing a client constructor hook, see +// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. +type clientHookParams struct{} +type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) + +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +func checkDisableDeadlines() (bool, error) { + raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") + if !ok { + return false, nil + } + + b, err := strconv.ParseBool(raw) + return b, err +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
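+// These scopes are wired into the dial options via
+// internaloption.WithDefaultScopes (see defaultGRPCClientOptions in
+// storage_client.go) and can be narrowed when constructing the client, for
+// example (illustrative only):
+//
+//	c, err := storage.NewClient(ctx,
+//	    option.WithScopes("https://www.googleapis.com/auth/devstorage.read_only"))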
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + } +} + +// versionGo returns the Go runtime version. The returned string +// has no whitespace, suitable for reporting in header. +func versionGo() string { + const develPrefix = "devel +" + + s := runtime.Version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json new file mode 100644 index 000000000000..01103fa93bd1 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json @@ -0,0 +1,168 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", + "language": "go", + "protoPackage": "google.storage.v2", + "libraryPackage": "cloud.google.com/go/storage/internal/apiv2", + "services": { + "Storage": { + "clients": { + "grpc": { + "libraryClient": "Client", + "rpcs": { + "CancelResumableWrite": { + "methods": [ + "CancelResumableWrite" + ] + }, + "ComposeObject": { + "methods": [ + "ComposeObject" + ] + }, + "CreateBucket": { + "methods": [ + "CreateBucket" + ] + }, + "CreateHmacKey": { + "methods": [ + "CreateHmacKey" + ] + }, + "CreateNotification": { + "methods": [ + "CreateNotification" + ] + }, + "DeleteBucket": { + "methods": [ + "DeleteBucket" + ] + }, + "DeleteHmacKey": { + "methods": [ + "DeleteHmacKey" + ] + }, + "DeleteNotification": { + "methods": [ + "DeleteNotification" + ] + }, + "DeleteObject": { + "methods": [ + "DeleteObject" + ] + }, + "GetBucket": { + "methods": [ + "GetBucket" + ] + }, + "GetHmacKey": { + "methods": [ + "GetHmacKey" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetNotification": { + "methods": [ + "GetNotification" + ] + }, + "GetObject": { + "methods": [ + "GetObject" + ] + }, + "GetServiceAccount": { + "methods": [ + "GetServiceAccount" + ] + }, + "ListBuckets": { + "methods": [ + "ListBuckets" + ] + }, + "ListHmacKeys": { + "methods": [ + "ListHmacKeys" + ] + }, + "ListNotifications": { + "methods": [ + "ListNotifications" + ] + }, + "ListObjects": { + "methods": [ + "ListObjects" + ] + }, + "LockBucketRetentionPolicy": { + "methods": [ + "LockBucketRetentionPolicy" + ] + }, + "QueryWriteStatus": { + "methods": [ + "QueryWriteStatus" + ] + }, + "ReadObject": { + "methods": [ + "ReadObject" + ] + }, + "RewriteObject": { + "methods": [ + "RewriteObject" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "StartResumableWrite": { + "methods": [ + "StartResumableWrite" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateBucket": { + 
"methods": [ + "UpdateBucket" + ] + }, + "UpdateHmacKey": { + "methods": [ + "UpdateHmacKey" + ] + }, + "UpdateObject": { + "methods": [ + "UpdateObject" + ] + }, + "WriteObject": { + "methods": [ + "WriteObject" + ] + } + } + } + } + } + } +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go b/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go new file mode 100644 index 000000000000..6ff86c4fb49f --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go @@ -0,0 +1,26 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +// InsertMetadata inserts the given gRPC metadata into the outgoing context. +func InsertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + return insertMetadata(ctx, mds...) +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go new file mode 100644 index 000000000000..6e904ee5ce54 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -0,0 +1,1605 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package storage + +import ( + "context" + "fmt" + "math" + "net/url" + "regexp" + "strings" + + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + iampb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" +) + +var newClientHook clientHook + +// CallOptions contains the retry settings for each method of Client. 
+type CallOptions struct { + DeleteBucket []gax.CallOption + GetBucket []gax.CallOption + CreateBucket []gax.CallOption + ListBuckets []gax.CallOption + LockBucketRetentionPolicy []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption + UpdateBucket []gax.CallOption + DeleteNotification []gax.CallOption + GetNotification []gax.CallOption + CreateNotification []gax.CallOption + ListNotifications []gax.CallOption + ComposeObject []gax.CallOption + DeleteObject []gax.CallOption + CancelResumableWrite []gax.CallOption + GetObject []gax.CallOption + ReadObject []gax.CallOption + UpdateObject []gax.CallOption + WriteObject []gax.CallOption + ListObjects []gax.CallOption + RewriteObject []gax.CallOption + StartResumableWrite []gax.CallOption + QueryWriteStatus []gax.CallOption + GetServiceAccount []gax.CallOption + CreateHmacKey []gax.CallOption + DeleteHmacKey []gax.CallOption + GetHmacKey []gax.CallOption + ListHmacKeys []gax.CallOption + UpdateHmacKey []gax.CallOption +} + +func defaultGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("storage.googleapis.com:443"), + internaloption.WithDefaultMTLSEndpoint("storage.mtls.googleapis.com:443"), + internaloption.WithDefaultAudience("https://storage.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultCallOptions() *CallOptions { + return &CallOptions{ + DeleteBucket: []gax.CallOption{}, + GetBucket: []gax.CallOption{}, + CreateBucket: []gax.CallOption{}, + ListBuckets: []gax.CallOption{}, + LockBucketRetentionPolicy: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + UpdateBucket: []gax.CallOption{}, + DeleteNotification: []gax.CallOption{}, + GetNotification: []gax.CallOption{}, + CreateNotification: []gax.CallOption{}, + ListNotifications: []gax.CallOption{}, + ComposeObject: []gax.CallOption{}, + DeleteObject: []gax.CallOption{}, + CancelResumableWrite: []gax.CallOption{}, + GetObject: []gax.CallOption{}, + ReadObject: []gax.CallOption{}, + UpdateObject: []gax.CallOption{}, + WriteObject: []gax.CallOption{}, + ListObjects: []gax.CallOption{}, + RewriteObject: []gax.CallOption{}, + StartResumableWrite: []gax.CallOption{}, + QueryWriteStatus: []gax.CallOption{}, + GetServiceAccount: []gax.CallOption{}, + CreateHmacKey: []gax.CallOption{}, + DeleteHmacKey: []gax.CallOption{}, + GetHmacKey: []gax.CallOption{}, + ListHmacKeys: []gax.CallOption{}, + UpdateHmacKey: []gax.CallOption{}, + } +} + +// internalClient is an interface that defines the methods available from Cloud Storage API. 
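+// The exported Client wraps an internalClient (only the gRPC transport is
+// generated here) and draws per-method retry settings from CallOptions, which
+// default to empty above. Individual calls can still be tuned with gax
+// options; an illustrative sketch (names from gax-go/v2 and grpc/codes):
+//
+//	retry := gax.WithRetry(func() gax.Retryer {
+//	    return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{Initial: time.Second})
+//	})
+//	bkt, err := c.GetBucket(ctx, req, retry)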
+type internalClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + DeleteBucket(context.Context, *storagepb.DeleteBucketRequest, ...gax.CallOption) error + GetBucket(context.Context, *storagepb.GetBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) + CreateBucket(context.Context, *storagepb.CreateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) + ListBuckets(context.Context, *storagepb.ListBucketsRequest, ...gax.CallOption) *BucketIterator + LockBucketRetentionPolicy(context.Context, *storagepb.LockBucketRetentionPolicyRequest, ...gax.CallOption) (*storagepb.Bucket, error) + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) + UpdateBucket(context.Context, *storagepb.UpdateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) + DeleteNotification(context.Context, *storagepb.DeleteNotificationRequest, ...gax.CallOption) error + GetNotification(context.Context, *storagepb.GetNotificationRequest, ...gax.CallOption) (*storagepb.Notification, error) + CreateNotification(context.Context, *storagepb.CreateNotificationRequest, ...gax.CallOption) (*storagepb.Notification, error) + ListNotifications(context.Context, *storagepb.ListNotificationsRequest, ...gax.CallOption) *NotificationIterator + ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error) + DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error + CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) + GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error) + ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) + UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error) + WriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) + ListObjects(context.Context, *storagepb.ListObjectsRequest, ...gax.CallOption) *ObjectIterator + RewriteObject(context.Context, *storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error) + StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) + QueryWriteStatus(context.Context, *storagepb.QueryWriteStatusRequest, ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) + GetServiceAccount(context.Context, *storagepb.GetServiceAccountRequest, ...gax.CallOption) (*storagepb.ServiceAccount, error) + CreateHmacKey(context.Context, *storagepb.CreateHmacKeyRequest, ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) + DeleteHmacKey(context.Context, *storagepb.DeleteHmacKeyRequest, ...gax.CallOption) error + GetHmacKey(context.Context, *storagepb.GetHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) + ListHmacKeys(context.Context, *storagepb.ListHmacKeysRequest, ...gax.CallOption) *HmacKeyMetadataIterator + UpdateHmacKey(context.Context, *storagepb.UpdateHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) +} + +// Client is a 
client for interacting with Cloud Storage API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// API Overview and Naming SyntaxThe Cloud Storage gRPC API allows applications to read and write data through +// the abstractions of buckets and objects. For a description of these +// abstractions please see https://cloud.google.com/storage/docs (at https://cloud.google.com/storage/docs). +// +// Resources are named as follows: +// +// Projects are referred to as they are defined by the Resource Manager API, +// using strings like projects/123456 or projects/my-string-id. +// +// Buckets are named using string names of the form: +// projects/{project}/buckets/{bucket} +// For globally unique buckets, _ may be substituted for the project. +// +// Objects are uniquely identified by their name along with the name of the +// bucket they belong to, as separate strings in this API. For example: +// +// ReadObjectRequest { +// bucket: ‘projects/_/buckets/my-bucket’ +// object: ‘my-object’ +// } +// Note that object names can contain / characters, which are treated as +// any other character (no special directory semantics). +type Client struct { + // The internal transport-dependent client. + internalClient internalClient + + // The call options for this service. + CallOptions *CallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *Client) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// DeleteBucket permanently deletes an empty bucket. +func (c *Client) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteBucket(ctx, req, opts...) +} + +// GetBucket returns metadata for the specified bucket. +func (c *Client) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + return c.internalClient.GetBucket(ctx, req, opts...) +} + +// CreateBucket creates a new bucket. +func (c *Client) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + return c.internalClient.CreateBucket(ctx, req, opts...) +} + +// ListBuckets retrieves a list of buckets for a given project. +func (c *Client) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator { + return c.internalClient.ListBuckets(ctx, req, opts...) +} + +// LockBucketRetentionPolicy locks retention policy on a bucket. +func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + return c.internalClient.LockBucketRetentionPolicy(ctx, req, opts...) +} + +// GetIamPolicy gets the IAM policy for a specified bucket or object. 
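+// For illustration (assuming the iampb request type used in this file; the
+// resource is the bucket name in the projects/_/buckets/{bucket} form used
+// throughout this API):
+//
+//	policy, err := c.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{
+//	    Resource: "projects/_/buckets/my-bucket",
+//	})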
+func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy updates an IAM policy for the specified bucket or object. +func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) +} + +// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if +// any, are held by the caller. +func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// UpdateBucket updates a bucket. Equivalent to JSON API’s storage.buckets.patch method. +func (c *Client) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + return c.internalClient.UpdateBucket(ctx, req, opts...) +} + +// DeleteNotification permanently deletes a notification subscription. +func (c *Client) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteNotification(ctx, req, opts...) +} + +// GetNotification view a notification config. +func (c *Client) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { + return c.internalClient.GetNotification(ctx, req, opts...) +} + +// CreateNotification creates a notification subscription for a given bucket. +// These notifications, when triggered, publish messages to the specified +// Pub/Sub topics. +// See https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications). +func (c *Client) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { + return c.internalClient.CreateNotification(ctx, req, opts...) +} + +// ListNotifications retrieves a list of notification subscriptions for a given bucket. +func (c *Client) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator { + return c.internalClient.ListNotifications(ctx, req, opts...) +} + +// ComposeObject concatenates a list of existing objects into a new object in the same +// bucket. +func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + return c.internalClient.ComposeObject(ctx, req, opts...) +} + +// DeleteObject deletes an object and its metadata. Deletions are permanent if versioning +// is not enabled for the bucket, or if the generation parameter is used. +func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteObject(ctx, req, opts...) +} + +// CancelResumableWrite cancels an in-progress resumable upload. +func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { + return c.internalClient.CancelResumableWrite(ctx, req, opts...) +} + +// GetObject retrieves an object’s metadata. 
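+// For illustration, a minimal request could look like this (assuming the v2
+// storagepb field names Bucket and Object):
+//
+//	obj, err := c.GetObject(ctx, &storagepb.GetObjectRequest{
+//	    Bucket: "projects/_/buckets/my-bucket",
+//	    Object: "my-object",
+//	})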
+func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + return c.internalClient.GetObject(ctx, req, opts...) +} + +// ReadObject reads an object’s data. +func (c *Client) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) { + return c.internalClient.ReadObject(ctx, req, opts...) +} + +// UpdateObject updates an object’s metadata. +// Equivalent to JSON API’s storage.objects.patch. +func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + return c.internalClient.UpdateObject(ctx, req, opts...) +} + +// WriteObject stores a new object and metadata. +// +// An object can be written either in a single message stream or in a +// resumable sequence of message streams. To write using a single stream, +// the client should include in the first message of the stream an +// WriteObjectSpec describing the destination bucket, object, and any +// preconditions. Additionally, the final message must set ‘finish_write’ to +// true, or else it is an error. +// +// For a resumable write, the client should instead call +// StartResumableWrite(), populating a WriteObjectSpec into that request. +// They should then attach the returned upload_id to the first message of +// each following call to WriteObject. If the stream is closed before +// finishing the upload (either explicitly by the client or due to a network +// error or an error response from the server), the client should do as +// follows: +// +// Check the result Status of the stream, to determine if writing can be +// resumed on this stream or must be restarted from scratch (by calling +// StartResumableWrite()). The resumable errors are DEADLINE_EXCEEDED, +// INTERNAL, and UNAVAILABLE. For each case, the client should use binary +// exponential backoff before retrying. Additionally, writes can be +// resumed after RESOURCE_EXHAUSTED errors, but only after taking +// appropriate measures, which may include reducing aggregate send rate +// across clients and/or requesting a quota increase for your project. +// +// If the call to WriteObject returns ABORTED, that indicates +// concurrent attempts to update the resumable write, caused either by +// multiple racing clients or by a single client where the previous +// request was timed out on the client side but nonetheless reached the +// server. In this case the client should take steps to prevent further +// concurrent writes (e.g., increase the timeouts, stop using more than +// one process to perform the upload, etc.), and then should follow the +// steps below for resuming the upload. +// +// For resumable errors, the client should call QueryWriteStatus() and +// then continue writing from the returned persisted_size. This may be +// less than the amount of data the client previously sent. Note also that +// it is acceptable to send data starting at an offset earlier than the +// returned persisted_size; in this case, the service will skip data at +// offsets that were already persisted (without checking that it matches +// the previously written data), and write only the data starting from the +// persisted offset. This behavior can make client-side handling simpler +// in some cases. +// +// The service will not view the object as complete until the client has +// sent a WriteObjectRequest with finish_write set to true. 
Sending any +// requests on a stream after sending a request with finish_write set to +// true will cause an error. The client should check the response it +// receives to determine how much data the service was able to commit and +// whether the service views the object as complete. +// +// Attempting to resume an already finalized object will result in an OK +// status, with a WriteObjectResponse containing the finalized object’s +// metadata. +func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { + return c.internalClient.WriteObject(ctx, opts...) +} + +// ListObjects retrieves a list of objects matching the criteria. +func (c *Client) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { + return c.internalClient.ListObjects(ctx, req, opts...) +} + +// RewriteObject rewrites a source object to a destination object. Optionally overrides +// metadata. +func (c *Client) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) { + return c.internalClient.RewriteObject(ctx, req, opts...) +} + +// StartResumableWrite starts a resumable write. How long the write operation remains valid, and +// what happens when the write operation becomes invalid, are +// service-dependent. +func (c *Client) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) { + return c.internalClient.StartResumableWrite(ctx, req, opts...) +} + +// QueryWriteStatus determines the persisted_size for an object that is being written, which +// can then be used as the write_offset for the next Write() call. +// +// If the object does not exist (i.e., the object has been deleted, or the +// first Write() has not yet reached the service), this method returns the +// error NOT_FOUND. +// +// The client may call QueryWriteStatus() at any time to determine how +// much data has been processed for this object. This is useful if the +// client is buffering data and needs to know which data can be safely +// evicted. For any sequence of QueryWriteStatus() calls for a given +// object name, the sequence of returned persisted_size values will be +// non-decreasing. +func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) { + return c.internalClient.QueryWriteStatus(ctx, req, opts...) +} + +// GetServiceAccount retrieves the name of a project’s Google Cloud Storage service account. +func (c *Client) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) { + return c.internalClient.GetServiceAccount(ctx, req, opts...) +} + +// CreateHmacKey creates a new HMAC key for the given service account. +func (c *Client) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) { + return c.internalClient.CreateHmacKey(ctx, req, opts...) +} + +// DeleteHmacKey deletes a given HMAC key. Key must be in an INACTIVE state. +func (c *Client) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteHmacKey(ctx, req, opts...) +} + +// GetHmacKey gets an existing HMAC key metadata for the given id. 
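+// For illustration (assuming the v2 storagepb field names AccessId and
+// Project; the project uses the projects/{project} resource form):
+//
+//	key, err := c.GetHmacKey(ctx, &storagepb.GetHmacKeyRequest{
+//	    AccessId: "GOOG1EXAMPLEACCESSID",
+//	    Project:  "projects/my-project",
+//	})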
+func (c *Client) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { + return c.internalClient.GetHmacKey(ctx, req, opts...) +} + +// ListHmacKeys lists HMAC keys under a given project with the additional filters provided. +func (c *Client) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { + return c.internalClient.ListHmacKeys(ctx, req, opts...) +} + +// UpdateHmacKey updates a given HMAC key state between ACTIVE and INACTIVE. +func (c *Client) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { + return c.internalClient.UpdateHmacKey(ctx, req, opts...) +} + +// gRPCClient is a client for interacting with Cloud Storage API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type gRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE + disableDeadlines bool + + // Points back to the CallOptions field of the containing Client + CallOptions **CallOptions + + // The gRPC API client. + client storagepb.StorageClient + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new storage client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// API Overview and Naming SyntaxThe Cloud Storage gRPC API allows applications to read and write data through +// the abstractions of buckets and objects. For a description of these +// abstractions please see https://cloud.google.com/storage/docs (at https://cloud.google.com/storage/docs). +// +// Resources are named as follows: +// +// Projects are referred to as they are defined by the Resource Manager API, +// using strings like projects/123456 or projects/my-string-id. +// +// Buckets are named using string names of the form: +// projects/{project}/buckets/{bucket} +// For globally unique buckets, _ may be substituted for the project. +// +// Objects are uniquely identified by their name along with the name of the +// bucket they belong to, as separate strings in this API. For example: +// +// ReadObjectRequest { +// bucket: ‘projects/_/buckets/my-bucket’ +// object: ‘my-object’ +// } +// Note that object names can contain / characters, which are treated as +// any other character (no special directory semantics). +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + clientOpts := defaultGRPCClientOptions() + if newClientHook != nil { + hookOpts, err := newClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + disableDeadlines, err := checkDisableDeadlines() + if err != nil { + return nil, err + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) 
+ if err != nil {
+ return nil, err
+ }
+ client := Client{CallOptions: defaultCallOptions()}
+
+ c := &gRPCClient{
+ connPool: connPool,
+ disableDeadlines: disableDeadlines,
+ client: storagepb.NewStorageClient(connPool),
+ CallOptions: &client.CallOptions,
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *gRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *gRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *gRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.client.DeleteBucket(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append((*c.CallOptions).GetBucket[0:len((*c.CallOptions).GetBucket):len((*c.CallOptions).GetBucket)], opts...)
+ var resp *storagepb.Bucket
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.GetBucket(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CreateBucket[0:len((*c.CallOptions).CreateBucket):len((*c.CallOptions).CreateBucket)], opts...) + var resp *storagepb.Bucket + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateBucket(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListBuckets[0:len((*c.CallOptions).ListBuckets):len((*c.CallOptions).ListBuckets)], opts...) + it := &BucketIterator{} + req = proto.Clone(req).(*storagepb.ListBucketsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Bucket, string, error) { + resp := &storagepb.ListBucketsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListBuckets(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetBuckets(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append((*c.CallOptions).LockBucketRetentionPolicy[0:len((*c.CallOptions).LockBucketRetentionPolicy):len((*c.CallOptions).LockBucketRetentionPolicy)], opts...)
+ var resp *storagepb.Bucket
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.LockBucketRetentionPolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
+ }
+ if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
+ var resp *iampb.Policy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.GetIamPolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket().GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append((*c.CallOptions).UpdateBucket[0:len((*c.CallOptions).UpdateBucket):len((*c.CallOptions).UpdateBucket)], opts...)
+ var resp *storagepb.Bucket
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.UpdateBucket(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append((*c.CallOptions).DeleteNotification[0:len((*c.CallOptions).DeleteNotification):len((*c.CallOptions).DeleteNotification)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.client.DeleteNotification(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append((*c.CallOptions).GetNotification[0:len((*c.CallOptions).GetNotification):len((*c.CallOptions).GetNotification)], opts...)
+ var resp *storagepb.Notification + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetNotification(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CreateNotification[0:len((*c.CallOptions).CreateNotification):len((*c.CallOptions).CreateNotification)], opts...) + var resp *storagepb.Notification + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateNotification(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListNotifications[0:len((*c.CallOptions).ListNotifications):len((*c.CallOptions).ListNotifications)], opts...) + it := &NotificationIterator{} + req = proto.Clone(req).(*storagepb.ListNotificationsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Notification, string, error) { + resp := &storagepb.ListNotificationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListNotifications(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetNotifications(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetDestination().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ComposeObject[0:len((*c.CallOptions).ComposeObject):len((*c.CallOptions).ComposeObject)], opts...) + var resp *storagepb.Object + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ComposeObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteObject(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ return err
+}
+
+func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append((*c.CallOptions).CancelResumableWrite[0:len((*c.CallOptions).CancelResumableWrite):len((*c.CallOptions).CancelResumableWrite)], opts...)
+ var resp *storagepb.CancelResumableWriteResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.CancelResumableWrite(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append((*c.CallOptions).GetObject[0:len((*c.CallOptions).GetObject):len((*c.CallOptions).GetObject)], opts...)
+ var resp *storagepb.Object
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.GetObject(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ var resp storagepb.Storage_ReadObjectClient
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.ReadObject(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetObject().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append((*c.CallOptions).UpdateObject[0:len((*c.CallOptions).UpdateObject):len((*c.CallOptions).UpdateObject)], opts...)
+	var resp *storagepb.Object
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.UpdateObject(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	var resp storagepb.Storage_WriteObjectClient
+	opts = append((*c.CallOptions).WriteObject[0:len((*c.CallOptions).WriteObject):len((*c.CallOptions).WriteObject)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.WriteObject(ctx, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append((*c.CallOptions).ListObjects[0:len((*c.CallOptions).ListObjects):len((*c.CallOptions).ListObjects)], opts...)
+	it := &ObjectIterator{}
+	req = proto.Clone(req).(*storagepb.ListObjectsRequest)
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Object, string, error) {
+		resp := &storagepb.ListObjectsResponse{}
+		if pageToken != "" {
+			req.PageToken = pageToken
+		}
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else if pageSize != 0 {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListObjects(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+
+		it.Response = resp
+		return resp.GetObjects(), resp.GetNextPageToken(), nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
+
+	return it
+}
+
+func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetSourceBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])) > 0 {
+		routingHeadersMap["source_bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])
+	}
+	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetDestinationBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append((*c.CallOptions).RewriteObject[0:len((*c.CallOptions).RewriteObject):len((*c.CallOptions).RewriteObject)], opts...)
+	var resp *storagepb.RewriteResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.RewriteObject(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetWriteObjectSpec().GetResource().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append((*c.CallOptions).StartResumableWrite[0:len((*c.CallOptions).StartResumableWrite):len((*c.CallOptions).StartResumableWrite)], opts...)
+	var resp *storagepb.StartResumableWriteResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.StartResumableWrite(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append((*c.CallOptions).QueryWriteStatus[0:len((*c.CallOptions).QueryWriteStatus):len((*c.CallOptions).QueryWriteStatus)], opts...)
+	var resp *storagepb.QueryWriteStatusResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.QueryWriteStatus(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
+		routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...)
+	var resp *storagepb.ServiceAccount
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.GetServiceAccount(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
+		routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...)
+ var resp *storagepb.CreateHmacKeyResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateHmacKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteHmacKey(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...) + var resp *storagepb.HmacKeyMetadata + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetHmacKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...) 
+	it := &HmacKeyMetadataIterator{}
+	req = proto.Clone(req).(*storagepb.ListHmacKeysRequest)
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.HmacKeyMetadata, string, error) {
+		resp := &storagepb.ListHmacKeysResponse{}
+		if pageToken != "" {
+			req.PageToken = pageToken
+		}
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else if pageSize != 0 {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListHmacKeys(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+
+		it.Response = resp
+		return resp.GetHmacKeys(), resp.GetNextPageToken(), nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
+
+	return it
+}
+
+func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetHmacKey().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])) > 0 {
+		routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	md := metadata.Pairs("x-goog-request-params", routingHeaders)
+
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...)
+	var resp *storagepb.HmacKeyMetadata
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.UpdateHmacKey(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// BucketIterator manages a stream of *storagepb.Bucket.
+type BucketIterator struct {
+	items    []*storagepb.Bucket
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// Response is the raw response for the current page.
+	// It must be cast to the RPC response type.
+	// Calling Next() or InternalFetch() updates this value.
+	Response interface{}
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Bucket, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *BucketIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results.
Once Next returns Done, all subsequent calls will return Done. +func (it *BucketIterator) Next() (*storagepb.Bucket, error) { + var item *storagepb.Bucket + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *BucketIterator) bufLen() int { + return len(it.items) +} + +func (it *BucketIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata. +type HmacKeyMetadataIterator struct { + items []*storagepb.HmacKeyMetadata + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) { + var item *storagepb.HmacKeyMetadata + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *HmacKeyMetadataIterator) bufLen() int { + return len(it.items) +} + +func (it *HmacKeyMetadataIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationIterator manages a stream of *storagepb.Notification. +type NotificationIterator struct { + items []*storagepb.Notification + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Notification, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *NotificationIterator) Next() (*storagepb.Notification, error) { + var item *storagepb.Notification + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ObjectIterator manages a stream of *storagepb.Object. +type ObjectIterator struct { + items []*storagepb.Object + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Object, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ObjectIterator) Next() (*storagepb.Object, error) { + var item *storagepb.Object + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ObjectIterator) bufLen() int { + return len(it.items) +} + +func (it *ObjectIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go new file mode 100644 index 000000000000..c36634b1a148 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go @@ -0,0 +1,10652 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
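[Editor's note, not part of the vendored file: the iterator types in the hunk above (BucketIterator, HmacKeyMetadataIterator, NotificationIterator, ObjectIterator) follow the standard google.golang.org/api/iterator contract, where Next is called repeatedly until it returns iterator.Done. A minimal sketch of draining the ObjectIterator returned by this package's ListObjects; the helper name listAllObjects is invented, and in practice this internal apiv2 package is reached through the public cloud.google.com/go/storage wrapper rather than imported directly.]

    // listAllObjects is a hypothetical helper that drains an ObjectIterator,
    // accumulating every object across all pages. It assumes the context,
    // storagepb, and iterator imports already present in this file.
    func listAllObjects(ctx context.Context, c *Client, parent string) ([]*storagepb.Object, error) {
    	it := c.ListObjects(ctx, &storagepb.ListObjectsRequest{Parent: parent})
    	var objects []*storagepb.Object
    	for {
    		obj, err := it.Next()
    		if err == iterator.Done {
    			return objects, nil // all pages consumed
    		}
    		if err != nil {
    			return nil, err
    		}
    		objects = append(objects, obj)
    	}
    }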
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.9 +// source: google/storage/v2/storage.proto + +package storage + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + v1 "google.golang.org/genproto/googleapis/iam/v1" + date "google.golang.org/genproto/googleapis/type/date" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A collection of constant values meaningful to the Storage API. +type ServiceConstants_Values int32 + +const ( + // Unused. Proto3 requires first enum to be 0. + ServiceConstants_VALUES_UNSPECIFIED ServiceConstants_Values = 0 + // The maximum size chunk that can will be returned in a single + // ReadRequest. + // 2 MiB. + ServiceConstants_MAX_READ_CHUNK_BYTES ServiceConstants_Values = 2097152 + // The maximum size chunk that can be sent in a single WriteObjectRequest. + // 2 MiB. + ServiceConstants_MAX_WRITE_CHUNK_BYTES ServiceConstants_Values = 2097152 + // The maximum size of an object in MB - whether written in a single stream + // or composed from multiple other objects. + // 5 TiB. + ServiceConstants_MAX_OBJECT_SIZE_MB ServiceConstants_Values = 5242880 + // The maximum length field name that can be sent in a single + // custom metadata field. + // 1 KiB. + ServiceConstants_MAX_CUSTOM_METADATA_FIELD_NAME_BYTES ServiceConstants_Values = 1024 + // The maximum length field value that can be sent in a single + // custom_metadata field. + // 4 KiB. + ServiceConstants_MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES ServiceConstants_Values = 4096 + // The maximum total bytes that can be populated into all field names and + // values of the custom_metadata for one object. + // 8 KiB. + ServiceConstants_MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES ServiceConstants_Values = 8192 + // The maximum total bytes that can be populated into all bucket metadata + // fields. + // 20 KiB. + ServiceConstants_MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES ServiceConstants_Values = 20480 + // The maximum number of NotificationConfigs that can be registered + // for a given bucket. + ServiceConstants_MAX_NOTIFICATION_CONFIGS_PER_BUCKET ServiceConstants_Values = 100 + // The maximum number of LifecycleRules that can be registered for a given + // bucket. + ServiceConstants_MAX_LIFECYCLE_RULES_PER_BUCKET ServiceConstants_Values = 100 + // The maximum number of custom attributes per NotificationConfigs. + ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTES ServiceConstants_Values = 5 + // The maximum length of a custom attribute key included in + // NotificationConfig. + ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH ServiceConstants_Values = 256 + // The maximum length of a custom attribute value included in a + // NotificationConfig. 
+ ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH ServiceConstants_Values = 1024 + // The maximum number of key/value entries per bucket label. + ServiceConstants_MAX_LABELS_ENTRIES_COUNT ServiceConstants_Values = 64 + // The maximum character length of the key or value in a bucket + // label map. + ServiceConstants_MAX_LABELS_KEY_VALUE_LENGTH ServiceConstants_Values = 63 + // The maximum byte size of the key or value in a bucket label + // map. + ServiceConstants_MAX_LABELS_KEY_VALUE_BYTES ServiceConstants_Values = 128 + // The maximum number of object IDs that can be included in a + // DeleteObjectsRequest. + ServiceConstants_MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST ServiceConstants_Values = 1000 + // The maximum number of days for which a token returned by the + // GetListObjectsSplitPoints RPC is valid. + ServiceConstants_SPLIT_TOKEN_MAX_VALID_DAYS ServiceConstants_Values = 14 +) + +// Enum value maps for ServiceConstants_Values. +var ( + ServiceConstants_Values_name = map[int32]string{ + 0: "VALUES_UNSPECIFIED", + 2097152: "MAX_READ_CHUNK_BYTES", + // Duplicate value: 2097152: "MAX_WRITE_CHUNK_BYTES", + 5242880: "MAX_OBJECT_SIZE_MB", + 1024: "MAX_CUSTOM_METADATA_FIELD_NAME_BYTES", + 4096: "MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES", + 8192: "MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES", + 20480: "MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES", + 100: "MAX_NOTIFICATION_CONFIGS_PER_BUCKET", + // Duplicate value: 100: "MAX_LIFECYCLE_RULES_PER_BUCKET", + 5: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTES", + 256: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH", + // Duplicate value: 1024: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH", + 64: "MAX_LABELS_ENTRIES_COUNT", + 63: "MAX_LABELS_KEY_VALUE_LENGTH", + 128: "MAX_LABELS_KEY_VALUE_BYTES", + 1000: "MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST", + 14: "SPLIT_TOKEN_MAX_VALID_DAYS", + } + ServiceConstants_Values_value = map[string]int32{ + "VALUES_UNSPECIFIED": 0, + "MAX_READ_CHUNK_BYTES": 2097152, + "MAX_WRITE_CHUNK_BYTES": 2097152, + "MAX_OBJECT_SIZE_MB": 5242880, + "MAX_CUSTOM_METADATA_FIELD_NAME_BYTES": 1024, + "MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES": 4096, + "MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES": 8192, + "MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES": 20480, + "MAX_NOTIFICATION_CONFIGS_PER_BUCKET": 100, + "MAX_LIFECYCLE_RULES_PER_BUCKET": 100, + "MAX_NOTIFICATION_CUSTOM_ATTRIBUTES": 5, + "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH": 256, + "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH": 1024, + "MAX_LABELS_ENTRIES_COUNT": 64, + "MAX_LABELS_KEY_VALUE_LENGTH": 63, + "MAX_LABELS_KEY_VALUE_BYTES": 128, + "MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST": 1000, + "SPLIT_TOKEN_MAX_VALID_DAYS": 14, + } +) + +func (x ServiceConstants_Values) Enum() *ServiceConstants_Values { + p := new(ServiceConstants_Values) + *p = x + return p +} + +func (x ServiceConstants_Values) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServiceConstants_Values) Descriptor() protoreflect.EnumDescriptor { + return file_google_storage_v2_storage_proto_enumTypes[0].Descriptor() +} + +func (ServiceConstants_Values) Type() protoreflect.EnumType { + return &file_google_storage_v2_storage_proto_enumTypes[0] +} + +func (x ServiceConstants_Values) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServiceConstants_Values.Descriptor instead. 
+func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39, 0} +} + +// Request message for DeleteBucket. +type DeleteBucketRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of a bucket to delete. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If set, only deletes the bucket if its metageneration matches this value. + IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // If set, only deletes the bucket if its metageneration does not match this + // value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` +} + +func (x *DeleteBucketRequest) Reset() { + *x = DeleteBucketRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteBucketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteBucketRequest) ProtoMessage() {} + +func (x *DeleteBucketRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteBucketRequest.ProtoReflect.Descriptor instead. +func (*DeleteBucketRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{0} +} + +func (x *DeleteBucketRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DeleteBucketRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *DeleteBucketRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +// Request message for GetBucket. +type GetBucketRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of a bucket. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If set, and if the bucket's current metageneration does not match the + // specified value, the request will return an error. + IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // If set, and if the bucket's current metageneration matches the specified + // value, the request will return an error. + IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // Mask specifying which fields to read. + // A "*" field may be used to indicate all fields. + // If no mask is specified, will default to all fields. 
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` +} + +func (x *GetBucketRequest) Reset() { + *x = GetBucketRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBucketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBucketRequest) ProtoMessage() {} + +func (x *GetBucketRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBucketRequest.ProtoReflect.Descriptor instead. +func (*GetBucketRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{1} +} + +func (x *GetBucketRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *GetBucketRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *GetBucketRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *GetBucketRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +// Request message for CreateBucket. +type CreateBucketRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The project to which this bucket will belong. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Properties of the new bucket being inserted. + // The project and name of the bucket are specified in the parent and + // bucket_id fields, respectively. Populating those fields in `bucket` will + // result in an error. + Bucket *Bucket `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. The ID to use for this bucket, which will become the final component of + // the bucket's resource name. For example, the value `foo` might result in + // a bucket with the name `projects/123456/buckets/foo`. + BucketId string `protobuf:"bytes,3,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` + // Apply a predefined set of access controls to this bucket. + // Valid values are "authenticatedRead", "private", "projectPrivate", + // "publicRead", or "publicReadWrite". + PredefinedAcl string `protobuf:"bytes,6,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` + // Apply a predefined set of default object access controls to this bucket. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". 
+ PredefinedDefaultObjectAcl string `protobuf:"bytes,7,opt,name=predefined_default_object_acl,json=predefinedDefaultObjectAcl,proto3" json:"predefined_default_object_acl,omitempty"` +} + +func (x *CreateBucketRequest) Reset() { + *x = CreateBucketRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateBucketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateBucketRequest) ProtoMessage() {} + +func (x *CreateBucketRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateBucketRequest.ProtoReflect.Descriptor instead. +func (*CreateBucketRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{2} +} + +func (x *CreateBucketRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateBucketRequest) GetBucket() *Bucket { + if x != nil { + return x.Bucket + } + return nil +} + +func (x *CreateBucketRequest) GetBucketId() string { + if x != nil { + return x.BucketId + } + return "" +} + +func (x *CreateBucketRequest) GetPredefinedAcl() string { + if x != nil { + return x.PredefinedAcl + } + return "" +} + +func (x *CreateBucketRequest) GetPredefinedDefaultObjectAcl() string { + if x != nil { + return x.PredefinedDefaultObjectAcl + } + return "" +} + +// Request message for ListBuckets. +type ListBucketsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The project whose buckets we are listing. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Maximum number of buckets to return in a single response. The service will + // use this parameter or 1,000 items, whichever is smaller. If "acl" is + // present in the read_mask, the service will use this parameter of 200 items, + // whichever is smaller. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A previously-returned page token representing part of the larger set of + // results to view. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // Filter results to buckets whose names begin with this prefix. + Prefix string `protobuf:"bytes,4,opt,name=prefix,proto3" json:"prefix,omitempty"` + // Mask specifying which fields to read from each result. + // If no mask is specified, will default to all fields except items.owner, + // items.acl, and items.default_object_acl. + // * may be used to mean "all fields". 
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` +} + +func (x *ListBucketsRequest) Reset() { + *x = ListBucketsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListBucketsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBucketsRequest) ProtoMessage() {} + +func (x *ListBucketsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBucketsRequest.ProtoReflect.Descriptor instead. +func (*ListBucketsRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{3} +} + +func (x *ListBucketsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListBucketsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListBucketsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListBucketsRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *ListBucketsRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +// The result of a call to Buckets.ListBuckets +type ListBucketsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of items. + Buckets []*Bucket `protobuf:"bytes,1,rep,name=buckets,proto3" json:"buckets,omitempty"` + // The continuation token, used to page through large result sets. Provide + // this value in a subsequent request to return the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListBucketsResponse) Reset() { + *x = ListBucketsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListBucketsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBucketsResponse) ProtoMessage() {} + +func (x *ListBucketsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBucketsResponse.ProtoReflect.Descriptor instead. +func (*ListBucketsResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{4} +} + +func (x *ListBucketsResponse) GetBuckets() []*Bucket { + if x != nil { + return x.Buckets + } + return nil +} + +func (x *ListBucketsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request message for LockBucketRetentionPolicyRequest. 
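[Editor's note, not part of the vendored file: the read_mask fields documented in the request messages above are ordinary google.protobuf.FieldMask values that limit which bucket fields the server populates; omitting the mask returns all fields except items.owner, items.acl, and items.default_object_acl. A small illustrative sketch, assuming the storagepb and fieldmaskpb imports from this file and a hypothetical project name.]

    // Illustrative only: list buckets but ask the server to populate just the
    // name and labels fields of each result.
    req := &storagepb.ListBucketsRequest{
    	Parent:   "projects/my-project", // hypothetical project resource name
    	PageSize: 100,
    	ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"name", "labels"}},
    }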
+type LockBucketRetentionPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of a bucket. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. Makes the operation conditional on whether bucket's current metageneration + // matches the given value. Must be positive. + IfMetagenerationMatch int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3" json:"if_metageneration_match,omitempty"` +} + +func (x *LockBucketRetentionPolicyRequest) Reset() { + *x = LockBucketRetentionPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LockBucketRetentionPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LockBucketRetentionPolicyRequest) ProtoMessage() {} + +func (x *LockBucketRetentionPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LockBucketRetentionPolicyRequest.ProtoReflect.Descriptor instead. +func (*LockBucketRetentionPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{5} +} + +func (x *LockBucketRetentionPolicyRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *LockBucketRetentionPolicyRequest) GetIfMetagenerationMatch() int64 { + if x != nil { + return x.IfMetagenerationMatch + } + return 0 +} + +// Request for UpdateBucket method. +type UpdateBucketRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The bucket to update. + // The bucket's `name` field will be used to identify the bucket. + Bucket *Bucket `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // If set, will only modify the bucket if its metageneration matches this + // value. + IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // If set, will only modify the bucket if its metageneration does not match + // this value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // Apply a predefined set of access controls to this bucket. + // Valid values are "authenticatedRead", "private", "projectPrivate", + // "publicRead", or "publicReadWrite". + PredefinedAcl string `protobuf:"bytes,8,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` + // Apply a predefined set of default object access controls to this bucket. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + PredefinedDefaultObjectAcl string `protobuf:"bytes,9,opt,name=predefined_default_object_acl,json=predefinedDefaultObjectAcl,proto3" json:"predefined_default_object_acl,omitempty"` + // Required. List of fields to be updated. 
+ // + // To specify ALL fields, equivalent to the JSON API's "update" function, + // specify a single field with the value `*`. Note: not recommended. If a new + // field is introduced at a later time, an older client updating with the `*` + // may accidentally reset the new field's value. + // + // Not specifying any fields is an error. + // Not specifying a field while setting that field to a non-default value is + // an error. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateBucketRequest) Reset() { + *x = UpdateBucketRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateBucketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateBucketRequest) ProtoMessage() {} + +func (x *UpdateBucketRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateBucketRequest.ProtoReflect.Descriptor instead. +func (*UpdateBucketRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{6} +} + +func (x *UpdateBucketRequest) GetBucket() *Bucket { + if x != nil { + return x.Bucket + } + return nil +} + +func (x *UpdateBucketRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *UpdateBucketRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *UpdateBucketRequest) GetPredefinedAcl() string { + if x != nil { + return x.PredefinedAcl + } + return "" +} + +func (x *UpdateBucketRequest) GetPredefinedDefaultObjectAcl() string { + if x != nil { + return x.PredefinedDefaultObjectAcl + } + return "" +} + +func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request message for DeleteNotification. +type DeleteNotificationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The parent bucket of the notification. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteNotificationRequest) Reset() { + *x = DeleteNotificationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteNotificationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteNotificationRequest) ProtoMessage() {} + +func (x *DeleteNotificationRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteNotificationRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteNotificationRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7} +} + +func (x *DeleteNotificationRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for GetNotification. +type GetNotificationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The parent bucket of the notification. + // Format: + // `projects/{project}/buckets/{bucket}/notificationConfigs/{notification}` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetNotificationRequest) Reset() { + *x = GetNotificationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationRequest) ProtoMessage() {} + +func (x *GetNotificationRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationRequest.ProtoReflect.Descriptor instead. +func (*GetNotificationRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8} +} + +func (x *GetNotificationRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for CreateNotification. +type CreateNotificationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The bucket to which this notification belongs. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. Properties of the notification to be inserted. + Notification *Notification `protobuf:"bytes,2,opt,name=notification,proto3" json:"notification,omitempty"` +} + +func (x *CreateNotificationRequest) Reset() { + *x = CreateNotificationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateNotificationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateNotificationRequest) ProtoMessage() {} + +func (x *CreateNotificationRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateNotificationRequest.ProtoReflect.Descriptor instead. +func (*CreateNotificationRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9} +} + +func (x *CreateNotificationRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateNotificationRequest) GetNotification() *Notification { + if x != nil { + return x.Notification + } + return nil +} + +// Request message for ListNotifications. 
+type ListNotificationsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of a Google Cloud Storage bucket. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // The maximum number of notifications to return. The service may return fewer + // than this value. + // The default value is 100. Specifying a value above 100 will result in a + // page_size of 100. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A page token, received from a previous `ListNotifications` call. + // Provide this to retrieve the subsequent page. + // + // When paginating, all other parameters provided to `ListNotifications` must + // match the call that provided the page token. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListNotificationsRequest) Reset() { + *x = ListNotificationsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationsRequest) ProtoMessage() {} + +func (x *ListNotificationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationsRequest.ProtoReflect.Descriptor instead. +func (*ListNotificationsRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10} +} + +func (x *ListNotificationsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListNotificationsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListNotificationsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The result of a call to Notifications.ListNotifications +type ListNotificationsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of items. + Notifications []*Notification `protobuf:"bytes,1,rep,name=notifications,proto3" json:"notifications,omitempty"` + // A token, which can be sent as `page_token` to retrieve the next page. + // If this field is omitted, there are no subsequent pages. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListNotificationsResponse) Reset() { + *x = ListNotificationsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationsResponse) ProtoMessage() {} + +func (x *ListNotificationsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationsResponse.ProtoReflect.Descriptor instead. +func (*ListNotificationsResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11} +} + +func (x *ListNotificationsResponse) GetNotifications() []*Notification { + if x != nil { + return x.Notifications + } + return nil +} + +func (x *ListNotificationsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request message for ComposeObject. +type ComposeObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Properties of the resulting object. + Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` + // The list of source objects that will be concatenated into a single object. + SourceObjects []*ComposeObjectRequest_SourceObject `protobuf:"bytes,2,rep,name=source_objects,json=sourceObjects,proto3" json:"source_objects,omitempty"` + // Apply a predefined set of access controls to the destination object. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + DestinationPredefinedAcl string `protobuf:"bytes,9,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Resource name of the Cloud KMS key, of the form + // `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`, + // that will be used to encrypt the object. Overrides the object + // metadata's `kms_key_name` value, if any. + KmsKey string `protobuf:"bytes,6,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` + // A set of parameters common to Storage API requests concerning an object. 
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,7,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *ComposeObjectRequest) Reset() { + *x = ComposeObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComposeObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComposeObjectRequest) ProtoMessage() {} + +func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComposeObjectRequest.ProtoReflect.Descriptor instead. +func (*ComposeObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12} +} + +func (x *ComposeObjectRequest) GetDestination() *Object { + if x != nil { + return x.Destination + } + return nil +} + +func (x *ComposeObjectRequest) GetSourceObjects() []*ComposeObjectRequest_SourceObject { + if x != nil { + return x.SourceObjects + } + return nil +} + +func (x *ComposeObjectRequest) GetDestinationPredefinedAcl() string { + if x != nil { + return x.DestinationPredefinedAcl + } + return "" +} + +func (x *ComposeObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *ComposeObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *ComposeObjectRequest) GetKmsKey() string { + if x != nil { + return x.KmsKey + } + return "" +} + +func (x *ComposeObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +// Message for deleting an object. +// `bucket` and `object` **must** be set. +type DeleteObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which the object resides. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. The name of the object to delete (when not using a resumable write). + Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + // If present, permanently deletes a specific revision of this object (as + // opposed to the latest version, the default). + Generation int64 `protobuf:"varint,4,opt,name=generation,proto3" json:"generation,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,5,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. 
+ IfGenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *DeleteObjectRequest) Reset() { + *x = DeleteObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteObjectRequest) ProtoMessage() {} + +func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead. +func (*DeleteObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13} +} + +func (x *DeleteObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *DeleteObjectRequest) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *DeleteObjectRequest) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *DeleteObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *DeleteObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *DeleteObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *DeleteObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *DeleteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +// Message for canceling an in-progress resumable upload. +// `upload_id` **must** be set. +type CancelResumableWriteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The upload_id of the resumable upload to cancel. This should be copied + // from the `upload_id` field of `StartResumableWriteResponse`. 
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` +} + +func (x *CancelResumableWriteRequest) Reset() { + *x = CancelResumableWriteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CancelResumableWriteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelResumableWriteRequest) ProtoMessage() {} + +func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead. +func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} +} + +func (x *CancelResumableWriteRequest) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +// Empty response message for canceling an in-progress resumable upload, will be +// extended as needed. +type CancelResumableWriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CancelResumableWriteResponse) Reset() { + *x = CancelResumableWriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CancelResumableWriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelResumableWriteResponse) ProtoMessage() {} + +func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead. +func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} +} + +// Request message for ReadObject. +type ReadObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the bucket containing the object to read. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. The name of the object to read. + Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + // If present, selects a specific revision of this object (as opposed + // to the latest version, the default). + Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // The offset for the first byte to return in the read, relative to the start + // of the object. + // + // A negative `read_offset` value will be interpreted as the number of bytes + // back from the end of the object to be returned. 
For example, if an object's + // length is 15 bytes, a ReadObjectRequest with `read_offset` = -5 and + // `read_limit` = 3 would return bytes 10 through 12 of the object. Requesting + // a negative offset with magnitude larger than the size of the object will + // return the entire object. + ReadOffset int64 `protobuf:"varint,4,opt,name=read_offset,json=readOffset,proto3" json:"read_offset,omitempty"` + // The maximum number of `data` bytes the server is allowed to return in the + // sum of all `Object` messages. A `read_limit` of zero indicates that there + // is no limit, and a negative `read_limit` will cause an error. + // + // If the stream returns fewer bytes than allowed by the `read_limit` and no + // error occurred, the stream includes all data from the `read_offset` to the + // end of the resource. + ReadLimit int64 `protobuf:"varint,5,opt,name=read_limit,json=readLimit,proto3" json:"read_limit,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,6,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` + // Mask specifying which fields to read. + // The checksummed_data field and its children will always be present. + // If no mask is specified, will default to all fields except metadata.owner + // and metadata.acl. + // * may be used to mean "all fields". 
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` +} + +func (x *ReadObjectRequest) Reset() { + *x = ReadObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadObjectRequest) ProtoMessage() {} + +func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead. +func (*ReadObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} +} + +func (x *ReadObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *ReadObjectRequest) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *ReadObjectRequest) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *ReadObjectRequest) GetReadOffset() int64 { + if x != nil { + return x.ReadOffset + } + return 0 +} + +func (x *ReadObjectRequest) GetReadLimit() int64 { + if x != nil { + return x.ReadLimit + } + return 0 +} + +func (x *ReadObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *ReadObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *ReadObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *ReadObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *ReadObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +func (x *ReadObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +// Request message for GetObject. +type GetObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which the object resides. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. Name of the object. + Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + // If present, selects a specific revision of this object (as opposed to the + // latest version, the default). + Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. 
+ IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` + // Mask specifying which fields to read. + // If no mask is specified, will default to all fields except metadata.acl and + // metadata.owner. + // * may be used to mean "all fields". + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` +} + +func (x *GetObjectRequest) Reset() { + *x = GetObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetObjectRequest) ProtoMessage() {} + +func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead. 
+func (*GetObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} +} + +func (x *GetObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *GetObjectRequest) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *GetObjectRequest) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *GetObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *GetObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *GetObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *GetObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *GetObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +// Response message for ReadObject. +type ReadObjectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A portion of the data for the object. The service **may** leave `data` + // empty for any given `ReadResponse`. This enables the service to inform the + // client that the request is still live while it is running an operation to + // generate more data. + ChecksummedData *ChecksummedData `protobuf:"bytes,1,opt,name=checksummed_data,json=checksummedData,proto3" json:"checksummed_data,omitempty"` + // The checksums of the complete object. The client should compute one of + // these checksums over the downloaded object and compare it against the value + // provided here. + ObjectChecksums *ObjectChecksums `protobuf:"bytes,2,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` + // If read_offset and or read_limit was specified on the + // ReadObjectRequest, ContentRange will be populated on the first + // ReadObjectResponse message of the read stream. + ContentRange *ContentRange `protobuf:"bytes,3,opt,name=content_range,json=contentRange,proto3" json:"content_range,omitempty"` + // Metadata of the object whose media is being returned. + // Only populated in the first response in the stream. 
+ Metadata *Object `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *ReadObjectResponse) Reset() { + *x = ReadObjectResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadObjectResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadObjectResponse) ProtoMessage() {} + +func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead. +func (*ReadObjectResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} +} + +func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData { + if x != nil { + return x.ChecksummedData + } + return nil +} + +func (x *ReadObjectResponse) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +func (x *ReadObjectResponse) GetContentRange() *ContentRange { + if x != nil { + return x.ContentRange + } + return nil +} + +func (x *ReadObjectResponse) GetMetadata() *Object { + if x != nil { + return x.Metadata + } + return nil +} + +// Describes an attempt to insert an object, possibly over multiple requests. +type WriteObjectSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Destination object, including its name and its metadata. + Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // Apply a predefined set of access controls to this object. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + PredefinedAcl string `protobuf:"bytes,7,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` + // Makes the operation conditional on whether the object's current + // generation matches the given value. Setting to 0 makes the operation + // succeed only if there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,3,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live + // generation does not match the given value. If no live object exists, the + // precondition fails. Setting to 0 makes the operation succeed only if + // there is a live version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,4,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. 
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // The expected final object size being uploaded. + // If this value is set, closing the stream after writing fewer or more than + // `object_size` bytes will result in an OUT_OF_RANGE error. + // + // This situation is considered a client error, and if such an error occurs + // you must start the upload over from scratch, this time sending the correct + // number of bytes. + // + // The `object_size` value is ignored for one-shot (non-resumable) writes. + ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"` +} + +func (x *WriteObjectSpec) Reset() { + *x = WriteObjectSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteObjectSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteObjectSpec) ProtoMessage() {} + +func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead. +func (*WriteObjectSpec) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} +} + +func (x *WriteObjectSpec) GetResource() *Object { + if x != nil { + return x.Resource + } + return nil +} + +func (x *WriteObjectSpec) GetPredefinedAcl() string { + if x != nil { + return x.PredefinedAcl + } + return "" +} + +func (x *WriteObjectSpec) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *WriteObjectSpec) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *WriteObjectSpec) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *WriteObjectSpec) GetObjectSize() int64 { + if x != nil && x.ObjectSize != nil { + return *x.ObjectSize + } + return 0 +} + +// Request message for WriteObject. +type WriteObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The first message of each stream should set one of the following. + // + // Types that are assignable to FirstMessage: + // + // *WriteObjectRequest_UploadId + // *WriteObjectRequest_WriteObjectSpec + FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` + // Required. The offset from the beginning of the object at which the data should be + // written. + // + // In the first `WriteObjectRequest` of a `WriteObject()` action, it + // indicates the initial offset for the `Write()` call. The value **must** be + // equal to the `persisted_size` that a call to `QueryWriteStatus()` would + // return (0 if this is the first write to the object). 
+	//
+	// On subsequent calls, this value **must** be no larger than the sum of the
+	// first `write_offset` and the sizes of all `data` chunks sent previously on
+	// this stream.
+	//
+	// An incorrect value will cause an error.
+	WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
+	// A portion of the data for the object.
+	//
+	// Types that are assignable to Data:
+	//
+	//	*WriteObjectRequest_ChecksummedData
+	Data isWriteObjectRequest_Data `protobuf_oneof:"data"`
+	// Checksums for the complete object. If the checksums computed by the service
+	// don't match the specified checksums the call will fail. May only be
+	// provided in the first or last request (either with first_message, or
+	// finish_write set).
+	ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+	// If `true`, this indicates that the write is complete. Sending any
+	// `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+	// will cause an error.
+	// For a non-resumable write (where the upload_id was not set in the first
+	// message), it is an error not to set this field in the final message of the
+	// stream.
+	FinishWrite bool `protobuf:"varint,7,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
+	// A set of parameters common to Storage API requests concerning an object.
+	CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+}
+
+func (x *WriteObjectRequest) Reset() {
+	*x = WriteObjectRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_storage_v2_storage_proto_msgTypes[20]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *WriteObjectRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WriteObjectRequest) ProtoMessage() {}
+
+func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_google_storage_v2_storage_proto_msgTypes[20]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead.
+func (*WriteObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} +} + +func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage { + if m != nil { + return m.FirstMessage + } + return nil +} + +func (x *WriteObjectRequest) GetUploadId() string { + if x, ok := x.GetFirstMessage().(*WriteObjectRequest_UploadId); ok { + return x.UploadId + } + return "" +} + +func (x *WriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec { + if x, ok := x.GetFirstMessage().(*WriteObjectRequest_WriteObjectSpec); ok { + return x.WriteObjectSpec + } + return nil +} + +func (x *WriteObjectRequest) GetWriteOffset() int64 { + if x != nil { + return x.WriteOffset + } + return 0 +} + +func (m *WriteObjectRequest) GetData() isWriteObjectRequest_Data { + if m != nil { + return m.Data + } + return nil +} + +func (x *WriteObjectRequest) GetChecksummedData() *ChecksummedData { + if x, ok := x.GetData().(*WriteObjectRequest_ChecksummedData); ok { + return x.ChecksummedData + } + return nil +} + +func (x *WriteObjectRequest) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +func (x *WriteObjectRequest) GetFinishWrite() bool { + if x != nil { + return x.FinishWrite + } + return false +} + +func (x *WriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +type isWriteObjectRequest_FirstMessage interface { + isWriteObjectRequest_FirstMessage() +} + +type WriteObjectRequest_UploadId struct { + // For resumable uploads. This should be the `upload_id` returned from a + // call to `StartResumableWriteResponse`. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"` +} + +type WriteObjectRequest_WriteObjectSpec struct { + // For non-resumable uploads. Describes the overall upload, including the + // destination bucket and object name, preconditions, etc. + WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"` +} + +func (*WriteObjectRequest_UploadId) isWriteObjectRequest_FirstMessage() {} + +func (*WriteObjectRequest_WriteObjectSpec) isWriteObjectRequest_FirstMessage() {} + +type isWriteObjectRequest_Data interface { + isWriteObjectRequest_Data() +} + +type WriteObjectRequest_ChecksummedData struct { + // The data to insert. If a crc32c checksum is provided that doesn't match + // the checksum computed by the service, the request will fail. + ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"` +} + +func (*WriteObjectRequest_ChecksummedData) isWriteObjectRequest_Data() {} + +// Response message for WriteObject. +type WriteObjectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response will set one of the following. 
+ // + // Types that are assignable to WriteStatus: + // + // *WriteObjectResponse_PersistedSize + // *WriteObjectResponse_Resource + WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` +} + +func (x *WriteObjectResponse) Reset() { + *x = WriteObjectResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteObjectResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteObjectResponse) ProtoMessage() {} + +func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead. +func (*WriteObjectResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} +} + +func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus { + if m != nil { + return m.WriteStatus + } + return nil +} + +func (x *WriteObjectResponse) GetPersistedSize() int64 { + if x, ok := x.GetWriteStatus().(*WriteObjectResponse_PersistedSize); ok { + return x.PersistedSize + } + return 0 +} + +func (x *WriteObjectResponse) GetResource() *Object { + if x, ok := x.GetWriteStatus().(*WriteObjectResponse_Resource); ok { + return x.Resource + } + return nil +} + +type isWriteObjectResponse_WriteStatus interface { + isWriteObjectResponse_WriteStatus() +} + +type WriteObjectResponse_PersistedSize struct { + // The total number of bytes that have been processed for the given object + // from all `WriteObject` calls. Only set if the upload has not finalized. + PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` +} + +type WriteObjectResponse_Resource struct { + // A resource containing the metadata for the uploaded object. Only set if + // the upload has finalized. + Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` +} + +func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {} + +func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {} + +// Request message for ListObjects. +type ListObjectsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which to look for objects. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Maximum number of `items` plus `prefixes` to return + // in a single page of responses. As duplicate `prefixes` are + // omitted, fewer total results may be returned than requested. The service + // will use this parameter or 1,000 items, whichever is smaller. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A previously-returned page token representing part of the larger set of + // results to view. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // If set, returns results in a directory-like mode. `items` will contain + // only objects whose names, aside from the `prefix`, do not + // contain `delimiter`. 
Objects whose names, aside from the + // `prefix`, contain `delimiter` will have their name, + // truncated after the `delimiter`, returned in + // `prefixes`. Duplicate `prefixes` are omitted. + Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"` + // If true, objects that end in exactly one instance of `delimiter` + // will have their metadata included in `items` in addition to + // `prefixes`. + IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"` + // Filter results to objects whose names begin with this prefix. + Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"` + // If `true`, lists all versions of an object as distinct results. + // For more information, see + // [Object + // Versioning](https://cloud.google.com/storage/docs/object-versioning). + Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"` + // Mask specifying which fields to read from each result. + // If no mask is specified, will default to all fields except items.acl and + // items.owner. + // * may be used to mean "all fields". + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + // Filter results to objects whose names are lexicographically equal to or + // after lexicographic_start. If lexicographic_end is also set, the objects + // listed have names between lexicographic_start (inclusive) and + // lexicographic_end (exclusive). + LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"` + // Filter results to objects whose names are lexicographically before + // lexicographic_end. If lexicographic_start is also set, the objects listed + // have names between lexicographic_start (inclusive) and lexicographic_end + // (exclusive). + LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"` +} + +func (x *ListObjectsRequest) Reset() { + *x = ListObjectsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListObjectsRequest) ProtoMessage() {} + +func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. 
+func (*ListObjectsRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} +} + +func (x *ListObjectsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListObjectsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListObjectsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListObjectsRequest) GetDelimiter() string { + if x != nil { + return x.Delimiter + } + return "" +} + +func (x *ListObjectsRequest) GetIncludeTrailingDelimiter() bool { + if x != nil { + return x.IncludeTrailingDelimiter + } + return false +} + +func (x *ListObjectsRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *ListObjectsRequest) GetVersions() bool { + if x != nil { + return x.Versions + } + return false +} + +func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +func (x *ListObjectsRequest) GetLexicographicStart() string { + if x != nil { + return x.LexicographicStart + } + return "" +} + +func (x *ListObjectsRequest) GetLexicographicEnd() string { + if x != nil { + return x.LexicographicEnd + } + return "" +} + +// Request object for `QueryWriteStatus`. +type QueryWriteStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the resume token for the object whose write status is being + // requested. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,2,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *QueryWriteStatusRequest) Reset() { + *x = QueryWriteStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryWriteStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryWriteStatusRequest) ProtoMessage() {} + +func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead. +func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} +} + +func (x *QueryWriteStatusRequest) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +func (x *QueryWriteStatusRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +// Response object for `QueryWriteStatus`. +type QueryWriteStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response will set one of the following. 
+ // + // Types that are assignable to WriteStatus: + // + // *QueryWriteStatusResponse_PersistedSize + // *QueryWriteStatusResponse_Resource + WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"` +} + +func (x *QueryWriteStatusResponse) Reset() { + *x = QueryWriteStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryWriteStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryWriteStatusResponse) ProtoMessage() {} + +func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead. +func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} +} + +func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus { + if m != nil { + return m.WriteStatus + } + return nil +} + +func (x *QueryWriteStatusResponse) GetPersistedSize() int64 { + if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_PersistedSize); ok { + return x.PersistedSize + } + return 0 +} + +func (x *QueryWriteStatusResponse) GetResource() *Object { + if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_Resource); ok { + return x.Resource + } + return nil +} + +type isQueryWriteStatusResponse_WriteStatus interface { + isQueryWriteStatusResponse_WriteStatus() +} + +type QueryWriteStatusResponse_PersistedSize struct { + // The total number of bytes that have been processed for the given object + // from all `WriteObject` calls. This is the correct value for the + // 'write_offset' field to use when resuming the `WriteObject` operation. + // Only set if the upload has not finalized. + PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` +} + +type QueryWriteStatusResponse_Resource struct { + // A resource containing the metadata for the uploaded object. Only set if + // the upload has finalized. + Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` +} + +func (*QueryWriteStatusResponse_PersistedSize) isQueryWriteStatusResponse_WriteStatus() {} + +func (*QueryWriteStatusResponse_Resource) isQueryWriteStatusResponse_WriteStatus() {} + +// Request message for RewriteObject. +// If the source object is encrypted using a Customer-Supplied Encryption Key +// the key information must be provided in the copy_source_encryption_algorithm, +// copy_source_encryption_key_bytes, and copy_source_encryption_key_sha256_bytes +// fields. If the destination object should be encrypted the keying information +// should be provided in the encryption_algorithm, encryption_key_bytes, and +// encryption_key_sha256_bytes fields of the +// common_object_request_params.customer_encryption field. +type RewriteObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Immutable. The name of the destination object. + // See the + // [Naming Guidelines](https://cloud.google.com/storage/docs/naming-objects). 
+ // Example: `test.txt` + // The `name` field by itself does not uniquely identify a Cloud Storage + // object. A Cloud Storage object is uniquely identified by the tuple of + // (bucket, object, generation). + DestinationName string `protobuf:"bytes,24,opt,name=destination_name,json=destinationName,proto3" json:"destination_name,omitempty"` + // Required. Immutable. The name of the bucket containing the destination object. + DestinationBucket string `protobuf:"bytes,25,opt,name=destination_bucket,json=destinationBucket,proto3" json:"destination_bucket,omitempty"` + // The name of the Cloud KMS key that will be used to encrypt the destination + // object. The Cloud KMS key must be located in same location as the object. + // If the parameter is not specified, the request uses the destination + // bucket's default encryption key, if any, or else the Google-managed + // encryption key. + DestinationKmsKey string `protobuf:"bytes,27,opt,name=destination_kms_key,json=destinationKmsKey,proto3" json:"destination_kms_key,omitempty"` + // Properties of the destination, post-rewrite object. + // The `name`, `bucket` and `kms_key` fields must not be populated (these + // values are specified in the `destination_name`, `destination_bucket`, and + // `destination_kms_key` fields). + // If `destination` is present it will be used to construct the destination + // object's metadata; otherwise the destination object's metadata will be + // copied from the source object. + Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` + // Required. Name of the bucket in which to find the source object. + SourceBucket string `protobuf:"bytes,2,opt,name=source_bucket,json=sourceBucket,proto3" json:"source_bucket,omitempty"` + // Required. Name of the source object. + SourceObject string `protobuf:"bytes,3,opt,name=source_object,json=sourceObject,proto3" json:"source_object,omitempty"` + // If present, selects a specific revision of the source object (as opposed to + // the latest version, the default). + SourceGeneration int64 `protobuf:"varint,4,opt,name=source_generation,json=sourceGeneration,proto3" json:"source_generation,omitempty"` + // Include this field (from the previous rewrite response) on each rewrite + // request after the first one, until the rewrite response 'done' flag is + // true. Calls that provide a rewriteToken can omit all other request fields, + // but if included those fields must match the values provided in the first + // rewrite request. + RewriteToken string `protobuf:"bytes,5,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"` + // Apply a predefined set of access controls to the destination object. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + DestinationPredefinedAcl string `protobuf:"bytes,28,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,7,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. 
If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the destination object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the destination object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,10,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // Makes the operation conditional on whether the source object's live + // generation matches the given value. + IfSourceGenerationMatch *int64 `protobuf:"varint,11,opt,name=if_source_generation_match,json=ifSourceGenerationMatch,proto3,oneof" json:"if_source_generation_match,omitempty"` + // Makes the operation conditional on whether the source object's live + // generation does not match the given value. + IfSourceGenerationNotMatch *int64 `protobuf:"varint,12,opt,name=if_source_generation_not_match,json=ifSourceGenerationNotMatch,proto3,oneof" json:"if_source_generation_not_match,omitempty"` + // Makes the operation conditional on whether the source object's current + // metageneration matches the given value. + IfSourceMetagenerationMatch *int64 `protobuf:"varint,13,opt,name=if_source_metageneration_match,json=ifSourceMetagenerationMatch,proto3,oneof" json:"if_source_metageneration_match,omitempty"` + // Makes the operation conditional on whether the source object's current + // metageneration does not match the given value. + IfSourceMetagenerationNotMatch *int64 `protobuf:"varint,14,opt,name=if_source_metageneration_not_match,json=ifSourceMetagenerationNotMatch,proto3,oneof" json:"if_source_metageneration_not_match,omitempty"` + // The maximum number of bytes that will be rewritten per rewrite request. + // Most callers + // shouldn't need to specify this parameter - it is primarily in place to + // support testing. If specified the value must be an integral multiple of + // 1 MiB (1048576). Also, this only applies to requests where the source and + // destination span locations and/or storage classes. Finally, this value must + // not change across rewrite calls else you'll get an error that the + // `rewriteToken` is invalid. + MaxBytesRewrittenPerCall int64 `protobuf:"varint,15,opt,name=max_bytes_rewritten_per_call,json=maxBytesRewrittenPerCall,proto3" json:"max_bytes_rewritten_per_call,omitempty"` + // The algorithm used to encrypt the source object, if any. Used if the source + // object was encrypted with a Customer-Supplied Encryption Key. + CopySourceEncryptionAlgorithm string `protobuf:"bytes,16,opt,name=copy_source_encryption_algorithm,json=copySourceEncryptionAlgorithm,proto3" json:"copy_source_encryption_algorithm,omitempty"` + // The raw bytes (not base64-encoded) AES-256 encryption key used to encrypt + // the source object, if it was encrypted with a Customer-Supplied Encryption + // Key. 
+ CopySourceEncryptionKeyBytes []byte `protobuf:"bytes,21,opt,name=copy_source_encryption_key_bytes,json=copySourceEncryptionKeyBytes,proto3" json:"copy_source_encryption_key_bytes,omitempty"` + // The raw bytes (not base64-encoded) SHA256 hash of the encryption key used + // to encrypt the source object, if it was encrypted with a Customer-Supplied + // Encryption Key. + CopySourceEncryptionKeySha256Bytes []byte `protobuf:"bytes,22,opt,name=copy_source_encryption_key_sha256_bytes,json=copySourceEncryptionKeySha256Bytes,proto3" json:"copy_source_encryption_key_sha256_bytes,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,19,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *RewriteObjectRequest) Reset() { + *x = RewriteObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RewriteObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RewriteObjectRequest) ProtoMessage() {} + +func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead. +func (*RewriteObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} +} + +func (x *RewriteObjectRequest) GetDestinationName() string { + if x != nil { + return x.DestinationName + } + return "" +} + +func (x *RewriteObjectRequest) GetDestinationBucket() string { + if x != nil { + return x.DestinationBucket + } + return "" +} + +func (x *RewriteObjectRequest) GetDestinationKmsKey() string { + if x != nil { + return x.DestinationKmsKey + } + return "" +} + +func (x *RewriteObjectRequest) GetDestination() *Object { + if x != nil { + return x.Destination + } + return nil +} + +func (x *RewriteObjectRequest) GetSourceBucket() string { + if x != nil { + return x.SourceBucket + } + return "" +} + +func (x *RewriteObjectRequest) GetSourceObject() string { + if x != nil { + return x.SourceObject + } + return "" +} + +func (x *RewriteObjectRequest) GetSourceGeneration() int64 { + if x != nil { + return x.SourceGeneration + } + return 0 +} + +func (x *RewriteObjectRequest) GetRewriteToken() string { + if x != nil { + return x.RewriteToken + } + return "" +} + +func (x *RewriteObjectRequest) GetDestinationPredefinedAcl() string { + if x != nil { + return x.DestinationPredefinedAcl + } + return "" +} + +func (x *RewriteObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && 
x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfSourceGenerationMatch() int64 { + if x != nil && x.IfSourceGenerationMatch != nil { + return *x.IfSourceGenerationMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfSourceGenerationNotMatch() int64 { + if x != nil && x.IfSourceGenerationNotMatch != nil { + return *x.IfSourceGenerationNotMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfSourceMetagenerationMatch() int64 { + if x != nil && x.IfSourceMetagenerationMatch != nil { + return *x.IfSourceMetagenerationMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfSourceMetagenerationNotMatch() int64 { + if x != nil && x.IfSourceMetagenerationNotMatch != nil { + return *x.IfSourceMetagenerationNotMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetMaxBytesRewrittenPerCall() int64 { + if x != nil { + return x.MaxBytesRewrittenPerCall + } + return 0 +} + +func (x *RewriteObjectRequest) GetCopySourceEncryptionAlgorithm() string { + if x != nil { + return x.CopySourceEncryptionAlgorithm + } + return "" +} + +func (x *RewriteObjectRequest) GetCopySourceEncryptionKeyBytes() []byte { + if x != nil { + return x.CopySourceEncryptionKeyBytes + } + return nil +} + +func (x *RewriteObjectRequest) GetCopySourceEncryptionKeySha256Bytes() []byte { + if x != nil { + return x.CopySourceEncryptionKeySha256Bytes + } + return nil +} + +func (x *RewriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +// A rewrite response. +type RewriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The total bytes written so far, which can be used to provide a waiting user + // with a progress indicator. This property is always present in the response. + TotalBytesRewritten int64 `protobuf:"varint,1,opt,name=total_bytes_rewritten,json=totalBytesRewritten,proto3" json:"total_bytes_rewritten,omitempty"` + // The total size of the object being copied in bytes. This property is always + // present in the response. + ObjectSize int64 `protobuf:"varint,2,opt,name=object_size,json=objectSize,proto3" json:"object_size,omitempty"` + // `true` if the copy is finished; otherwise, `false` if + // the copy is in progress. This property is always present in the response. + Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"` + // A token to use in subsequent requests to continue copying data. This token + // is present in the response only when there is more data to copy. + RewriteToken string `protobuf:"bytes,4,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"` + // A resource containing the metadata for the copied-to object. This property + // is present in the response only when copying completes. 
+ Resource *Object `protobuf:"bytes,5,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *RewriteResponse) Reset() { + *x = RewriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RewriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RewriteResponse) ProtoMessage() {} + +func (x *RewriteResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead. +func (*RewriteResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} +} + +func (x *RewriteResponse) GetTotalBytesRewritten() int64 { + if x != nil { + return x.TotalBytesRewritten + } + return 0 +} + +func (x *RewriteResponse) GetObjectSize() int64 { + if x != nil { + return x.ObjectSize + } + return 0 +} + +func (x *RewriteResponse) GetDone() bool { + if x != nil { + return x.Done + } + return false +} + +func (x *RewriteResponse) GetRewriteToken() string { + if x != nil { + return x.RewriteToken + } + return "" +} + +func (x *RewriteResponse) GetResource() *Object { + if x != nil { + return x.Resource + } + return nil +} + +// Request message StartResumableWrite. +type StartResumableWriteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The destination bucket, object, and metadata, as well as any preconditions. + WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,1,opt,name=write_object_spec,json=writeObjectSpec,proto3" json:"write_object_spec,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,3,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *StartResumableWriteRequest) Reset() { + *x = StartResumableWriteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartResumableWriteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartResumableWriteRequest) ProtoMessage() {} + +func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead. 
+func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} +} + +func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec { + if x != nil { + return x.WriteObjectSpec + } + return nil +} + +func (x *StartResumableWriteRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +// Response object for `StartResumableWrite`. +type StartResumableWriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The upload_id of the newly started resumable write operation. This + // value should be copied into the `WriteObjectRequest.upload_id` field. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` +} + +func (x *StartResumableWriteResponse) Reset() { + *x = StartResumableWriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartResumableWriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartResumableWriteResponse) ProtoMessage() {} + +func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead. +func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} +} + +func (x *StartResumableWriteResponse) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +// Request message for UpdateObject. +type UpdateObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The object to update. + // The object's bucket and name fields are used to identify the object to + // update. If present, the object's generation field selects a specific + // revision of this object whose metadata should be updated. Otherwise, + // assumes the live version of the object. + Object *Object `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,2,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. 
+ IfMetagenerationMatch *int64 `protobuf:"varint,4,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // Apply a predefined set of access controls to this object. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + PredefinedAcl string `protobuf:"bytes,10,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` + // Required. List of fields to be updated. + // + // To specify ALL fields, equivalent to the JSON API's "update" function, + // specify a single field with the value `*`. Note: not recommended. If a new + // field is introduced at a later time, an older client updating with the `*` + // may accidentally reset the new field's value. + // + // Not specifying any fields is an error. + // Not specifying a field while setting that field to a non-default value is + // an error. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *UpdateObjectRequest) Reset() { + *x = UpdateObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateObjectRequest) ProtoMessage() {} + +func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} +} + +func (x *UpdateObjectRequest) GetObject() *Object { + if x != nil { + return x.Object + } + return nil +} + +func (x *UpdateObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *UpdateObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *UpdateObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *UpdateObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *UpdateObjectRequest) GetPredefinedAcl() string { + if x != nil { + return x.PredefinedAcl + } + return "" +} + +func (x *UpdateObjectRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +func (x *UpdateObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +// Request message for GetServiceAccount. +type GetServiceAccountRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Project ID, in the format of "projects/". + // can be the project ID or project number. + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` +} + +func (x *GetServiceAccountRequest) Reset() { + *x = GetServiceAccountRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetServiceAccountRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetServiceAccountRequest) ProtoMessage() {} + +func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead. +func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} +} + +func (x *GetServiceAccountRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +// Request message for CreateHmacKey. +type CreateHmacKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The project that the HMAC-owning service account lives in, in the format of + // "projects/". + // can be the project ID or project number. + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` + // Required. The service account to create the HMAC for. 
+ ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` +} + +func (x *CreateHmacKeyRequest) Reset() { + *x = CreateHmacKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateHmacKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateHmacKeyRequest) ProtoMessage() {} + +func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead. +func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} +} + +func (x *CreateHmacKeyRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *CreateHmacKeyRequest) GetServiceAccountEmail() string { + if x != nil { + return x.ServiceAccountEmail + } + return "" +} + +// Create hmac response. The only time the secret for an HMAC will be returned. +type CreateHmacKeyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Key metadata. + Metadata *HmacKeyMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // HMAC key secret material. + // In raw bytes format (not base64-encoded). + SecretKeyBytes []byte `protobuf:"bytes,3,opt,name=secret_key_bytes,json=secretKeyBytes,proto3" json:"secret_key_bytes,omitempty"` +} + +func (x *CreateHmacKeyResponse) Reset() { + *x = CreateHmacKeyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateHmacKeyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateHmacKeyResponse) ProtoMessage() {} + +func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead. +func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} +} + +func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *CreateHmacKeyResponse) GetSecretKeyBytes() []byte { + if x != nil { + return x.SecretKeyBytes + } + return nil +} + +// Request object to delete a given HMAC key. +type DeleteHmacKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The identifying key for the HMAC to delete. + AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` + // Required. The project that owns the HMAC key, in the format of + // "projects/". 
+ // can be the project ID or project number. + Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` +} + +func (x *DeleteHmacKeyRequest) Reset() { + *x = DeleteHmacKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteHmacKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteHmacKeyRequest) ProtoMessage() {} + +func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead. +func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} +} + +func (x *DeleteHmacKeyRequest) GetAccessId() string { + if x != nil { + return x.AccessId + } + return "" +} + +func (x *DeleteHmacKeyRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +// Request object to get metadata on a given HMAC key. +type GetHmacKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The identifying key for the HMAC to delete. + AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` + // Required. The project the HMAC key lies in, in the format of + // "projects/". + // can be the project ID or project number. + Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` +} + +func (x *GetHmacKeyRequest) Reset() { + *x = GetHmacKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetHmacKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetHmacKeyRequest) ProtoMessage() {} + +func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead. +func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} +} + +func (x *GetHmacKeyRequest) GetAccessId() string { + if x != nil { + return x.AccessId + } + return "" +} + +func (x *GetHmacKeyRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +// Request to fetch a list of HMAC keys under a given project. +type ListHmacKeysRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The project to list HMAC keys for, in the format of + // "projects/". + // can be the project ID or project number. + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` + // The maximum number of keys to return. 
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A previously returned token from ListHmacKeysResponse to get the next page. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // If set, filters to only return HMAC keys for specified service account. + ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` + // If set, return deleted keys that have not yet been wiped out. + ShowDeletedKeys bool `protobuf:"varint,5,opt,name=show_deleted_keys,json=showDeletedKeys,proto3" json:"show_deleted_keys,omitempty"` +} + +func (x *ListHmacKeysRequest) Reset() { + *x = ListHmacKeysRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListHmacKeysRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListHmacKeysRequest) ProtoMessage() {} + +func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead. +func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} +} + +func (x *ListHmacKeysRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *ListHmacKeysRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListHmacKeysRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListHmacKeysRequest) GetServiceAccountEmail() string { + if x != nil { + return x.ServiceAccountEmail + } + return "" +} + +func (x *ListHmacKeysRequest) GetShowDeletedKeys() bool { + if x != nil { + return x.ShowDeletedKeys + } + return false +} + +// Hmac key list response with next page information. +type ListHmacKeysResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of items. + HmacKeys []*HmacKeyMetadata `protobuf:"bytes,1,rep,name=hmac_keys,json=hmacKeys,proto3" json:"hmac_keys,omitempty"` + // The continuation token, used to page through large result sets. Provide + // this value in a subsequent request to return the next page of results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListHmacKeysResponse) Reset() { + *x = ListHmacKeysResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListHmacKeysResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListHmacKeysResponse) ProtoMessage() {} + +func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead. +func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} +} + +func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata { + if x != nil { + return x.HmacKeys + } + return nil +} + +func (x *ListHmacKeysResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request object to update an HMAC key state. +// HmacKeyMetadata.state is required and the only writable field in +// UpdateHmacKey operation. Specifying fields other than state will result in an +// error. +type UpdateHmacKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The HMAC key to update. + // If present, the hmac_key's `id` field will be used to identify the key. + // Otherwise, the hmac_key's access_id and project fields will be used to + // identify the key. + HmacKey *HmacKeyMetadata `protobuf:"bytes,1,opt,name=hmac_key,json=hmacKey,proto3" json:"hmac_key,omitempty"` + // Update mask for hmac_key. + // Not specifying any fields will mean only the `state` field is updated to + // the value specified in `hmac_key`. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateHmacKeyRequest) Reset() { + *x = UpdateHmacKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateHmacKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateHmacKeyRequest) ProtoMessage() {} + +func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead. +func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} +} + +func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { + if x != nil { + return x.HmacKey + } + return nil +} + +func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Parameters that can be passed to any object request. 
+type CommonObjectRequestParams struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Encryption algorithm used with the Customer-Supplied Encryption Keys + // feature. + EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` + // Encryption key used with the Customer-Supplied Encryption Keys feature. + // In raw bytes format (not base64-encoded). + EncryptionKeyBytes []byte `protobuf:"bytes,4,opt,name=encryption_key_bytes,json=encryptionKeyBytes,proto3" json:"encryption_key_bytes,omitempty"` + // SHA256 hash of encryption key used with the Customer-Supplied Encryption + // Keys feature. + EncryptionKeySha256Bytes []byte `protobuf:"bytes,5,opt,name=encryption_key_sha256_bytes,json=encryptionKeySha256Bytes,proto3" json:"encryption_key_sha256_bytes,omitempty"` +} + +func (x *CommonObjectRequestParams) Reset() { + *x = CommonObjectRequestParams{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonObjectRequestParams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonObjectRequestParams) ProtoMessage() {} + +func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. +func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} +} + +func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { + if x != nil { + return x.EncryptionAlgorithm + } + return "" +} + +func (x *CommonObjectRequestParams) GetEncryptionKeyBytes() []byte { + if x != nil { + return x.EncryptionKeyBytes + } + return nil +} + +func (x *CommonObjectRequestParams) GetEncryptionKeySha256Bytes() []byte { + if x != nil { + return x.EncryptionKeySha256Bytes + } + return nil +} + +// Shared constants. +type ServiceConstants struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ServiceConstants) Reset() { + *x = ServiceConstants{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceConstants) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceConstants) ProtoMessage() {} + +func (x *ServiceConstants) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. +func (*ServiceConstants) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} +} + +// A bucket. 
+type Bucket struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Immutable. The name of the bucket. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The user-chosen part of the bucket name. The `{bucket}` portion of the + // `name` field. For globally unique buckets, this is equal to the "bucket + // name" of other Cloud Storage APIs. Example: "pub". + BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` + // The etag of the bucket. + // If included in the metadata of an UpdateBucketRequest, the operation will + // only be performed if the etag matches that of the bucket. + Etag string `protobuf:"bytes,29,opt,name=etag,proto3" json:"etag,omitempty"` + // Immutable. The project which owns this bucket, in the format of + // "projects/". + // can be the project ID or project number. + Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` + // Output only. The metadata generation of this bucket. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` + // Immutable. The location of the bucket. Object data for objects in the bucket resides + // in physical storage within this region. Defaults to `US`. See the + // [https://developers.google.com/storage/docs/concepts-techniques#specifyinglocations"][developer's + // guide] for the authoritative list. Attempting to update this field after + // the bucket is created will result in an error. + Location string `protobuf:"bytes,5,opt,name=location,proto3" json:"location,omitempty"` + // Output only. The location type of the bucket (region, dual-region, multi-region, etc). + LocationType string `protobuf:"bytes,6,opt,name=location_type,json=locationType,proto3" json:"location_type,omitempty"` + // The bucket's default storage class, used whenever no storageClass is + // specified for a newly-created object. This defines how objects in the + // bucket are stored and determines the SLA and the cost of storage. + // If this value is not specified when the bucket is created, it will default + // to `STANDARD`. For more information, see + // https://developers.google.com/storage/docs/storage-classes. + StorageClass string `protobuf:"bytes,7,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + // The recovery point objective for cross-region replication of the bucket. + // Applicable only for dual- and multi-region buckets. "DEFAULT" uses default + // replication. "ASYNC_TURBO" enables turbo replication, valid for dual-region + // buckets only. If rpo is not specified when the bucket is created, it + // defaults to "DEFAULT". For more information, see + // https://cloud.google.com/storage/docs/turbo-replication. + Rpo string `protobuf:"bytes,27,opt,name=rpo,proto3" json:"rpo,omitempty"` + // Access controls on the bucket. + // If iamConfig.uniformBucketLevelAccess is enabled on this bucket, + // requests to set, read, or modify acl is an error. + Acl []*BucketAccessControl `protobuf:"bytes,8,rep,name=acl,proto3" json:"acl,omitempty"` + // Default access controls to apply to new objects when no ACL is provided. + // If iamConfig.uniformBucketLevelAccess is enabled on this bucket, + // requests to set, read, or modify acl is an error. 
+ DefaultObjectAcl []*ObjectAccessControl `protobuf:"bytes,9,rep,name=default_object_acl,json=defaultObjectAcl,proto3" json:"default_object_acl,omitempty"` + // The bucket's lifecycle config. See + // [https://developers.google.com/storage/docs/lifecycle]Lifecycle Management] + // for more information. + Lifecycle *Bucket_Lifecycle `protobuf:"bytes,10,opt,name=lifecycle,proto3" json:"lifecycle,omitempty"` + // Output only. The creation time of the bucket. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] + // (CORS) config. + Cors []*Bucket_Cors `protobuf:"bytes,12,rep,name=cors,proto3" json:"cors,omitempty"` + // Output only. The modification time of the bucket. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + // The default value for event-based hold on newly created objects in this + // bucket. Event-based hold is a way to retain objects indefinitely until an + // event occurs, signified by the + // hold's release. After being released, such objects will be subject to + // bucket-level retention (if any). One sample use case of this flag is for + // banks to hold loan documents for at least 3 years after loan is paid in + // full. Here, bucket-level retention is 3 years and the event is loan being + // paid in full. In this example, these objects will be held intact for any + // number of years until the event has occurred (event-based hold on the + // object is released) and then 3 more years after that. That means retention + // duration of the objects begins from the moment event-based hold + // transitioned from true to false. Objects under event-based hold cannot be + // deleted, overwritten or archived until the hold is removed. + DefaultEventBasedHold bool `protobuf:"varint,14,opt,name=default_event_based_hold,json=defaultEventBasedHold,proto3" json:"default_event_based_hold,omitempty"` + // User-provided labels, in key/value pairs. + Labels map[string]string `protobuf:"bytes,15,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The bucket's website config, controlling how the service behaves + // when accessing bucket contents as a web site. See the + // [https://cloud.google.com/storage/docs/static-website][Static Website + // Examples] for more information. + Website *Bucket_Website `protobuf:"bytes,16,opt,name=website,proto3" json:"website,omitempty"` + // The bucket's versioning config. + Versioning *Bucket_Versioning `protobuf:"bytes,17,opt,name=versioning,proto3" json:"versioning,omitempty"` + // The bucket's logging config, which defines the destination bucket + // and name prefix (if any) for the current bucket's logs. + Logging *Bucket_Logging `protobuf:"bytes,18,opt,name=logging,proto3" json:"logging,omitempty"` + // Output only. The owner of the bucket. This is always the project team's owner group. + Owner *Owner `protobuf:"bytes,19,opt,name=owner,proto3" json:"owner,omitempty"` + // Encryption config for a bucket. 
+ Encryption *Bucket_Encryption `protobuf:"bytes,20,opt,name=encryption,proto3" json:"encryption,omitempty"` + // The bucket's billing config. + Billing *Bucket_Billing `protobuf:"bytes,21,opt,name=billing,proto3" json:"billing,omitempty"` + // The bucket's retention policy. The retention policy enforces a minimum + // retention time for all objects contained in the bucket, based on their + // creation time. Any attempt to overwrite or delete objects younger than the + // retention period will result in a PERMISSION_DENIED error. An unlocked + // retention policy can be modified or removed from the bucket via a + // storage.buckets.update operation. A locked retention policy cannot be + // removed or shortened in duration for the lifetime of the bucket. + // Attempting to remove or decrease period of a locked retention policy will + // result in a PERMISSION_DENIED error. + RetentionPolicy *Bucket_RetentionPolicy `protobuf:"bytes,22,opt,name=retention_policy,json=retentionPolicy,proto3" json:"retention_policy,omitempty"` + // The bucket's IAM config. + IamConfig *Bucket_IamConfig `protobuf:"bytes,23,opt,name=iam_config,json=iamConfig,proto3" json:"iam_config,omitempty"` + // Reserved for future use. + SatisfiesPzs bool `protobuf:"varint,25,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"` + // Configuration that, if present, specifies the data placement for a + // [https://cloud.google.com/storage/docs/use-dual-regions][Dual Region]. + CustomPlacementConfig *Bucket_CustomPlacementConfig `protobuf:"bytes,26,opt,name=custom_placement_config,json=customPlacementConfig,proto3" json:"custom_placement_config,omitempty"` + // The bucket's Autoclass configuration. If there is no configuration, the + // Autoclass feature will be disabled and have no effect on the bucket. + Autoclass *Bucket_Autoclass `protobuf:"bytes,28,opt,name=autoclass,proto3" json:"autoclass,omitempty"` +} + +func (x *Bucket) Reset() { + *x = Bucket{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket) ProtoMessage() {} + +func (x *Bucket) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. 
+func (*Bucket) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} +} + +func (x *Bucket) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Bucket) GetBucketId() string { + if x != nil { + return x.BucketId + } + return "" +} + +func (x *Bucket) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *Bucket) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *Bucket) GetMetageneration() int64 { + if x != nil { + return x.Metageneration + } + return 0 +} + +func (x *Bucket) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Bucket) GetLocationType() string { + if x != nil { + return x.LocationType + } + return "" +} + +func (x *Bucket) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *Bucket) GetRpo() string { + if x != nil { + return x.Rpo + } + return "" +} + +func (x *Bucket) GetAcl() []*BucketAccessControl { + if x != nil { + return x.Acl + } + return nil +} + +func (x *Bucket) GetDefaultObjectAcl() []*ObjectAccessControl { + if x != nil { + return x.DefaultObjectAcl + } + return nil +} + +func (x *Bucket) GetLifecycle() *Bucket_Lifecycle { + if x != nil { + return x.Lifecycle + } + return nil +} + +func (x *Bucket) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *Bucket) GetCors() []*Bucket_Cors { + if x != nil { + return x.Cors + } + return nil +} + +func (x *Bucket) GetUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.UpdateTime + } + return nil +} + +func (x *Bucket) GetDefaultEventBasedHold() bool { + if x != nil { + return x.DefaultEventBasedHold + } + return false +} + +func (x *Bucket) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Bucket) GetWebsite() *Bucket_Website { + if x != nil { + return x.Website + } + return nil +} + +func (x *Bucket) GetVersioning() *Bucket_Versioning { + if x != nil { + return x.Versioning + } + return nil +} + +func (x *Bucket) GetLogging() *Bucket_Logging { + if x != nil { + return x.Logging + } + return nil +} + +func (x *Bucket) GetOwner() *Owner { + if x != nil { + return x.Owner + } + return nil +} + +func (x *Bucket) GetEncryption() *Bucket_Encryption { + if x != nil { + return x.Encryption + } + return nil +} + +func (x *Bucket) GetBilling() *Bucket_Billing { + if x != nil { + return x.Billing + } + return nil +} + +func (x *Bucket) GetRetentionPolicy() *Bucket_RetentionPolicy { + if x != nil { + return x.RetentionPolicy + } + return nil +} + +func (x *Bucket) GetIamConfig() *Bucket_IamConfig { + if x != nil { + return x.IamConfig + } + return nil +} + +func (x *Bucket) GetSatisfiesPzs() bool { + if x != nil { + return x.SatisfiesPzs + } + return false +} + +func (x *Bucket) GetCustomPlacementConfig() *Bucket_CustomPlacementConfig { + if x != nil { + return x.CustomPlacementConfig + } + return nil +} + +func (x *Bucket) GetAutoclass() *Bucket_Autoclass { + if x != nil { + return x.Autoclass + } + return nil +} + +// An access-control entry. +type BucketAccessControl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The access permission for the entity. + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + // The ID of the access-control entry. 
+ Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + // The entity holding the permission, in one of the following forms: + // * `user-{userid}` + // * `user-{email}` + // * `group-{groupid}` + // * `group-{email}` + // * `domain-{domain}` + // * `project-{team}-{projectnumber}` + // * `project-{team}-{projectid}` + // * `allUsers` + // * `allAuthenticatedUsers` + // Examples: + // * The user `liz@example.com` would be `user-liz@example.com`. + // * The group `example@googlegroups.com` would be + // `group-example@googlegroups.com` + // * All members of the Google Apps for Business domain `example.com` would be + // `domain-example.com` + // For project entities, `project-{team}-{projectnumber}` format will be + // returned on response. + Entity string `protobuf:"bytes,3,opt,name=entity,proto3" json:"entity,omitempty"` + // Output only. The alternative entity format, if exists. For project entities, + // `project-{team}-{projectid}` format will be returned on response. + EntityAlt string `protobuf:"bytes,9,opt,name=entity_alt,json=entityAlt,proto3" json:"entity_alt,omitempty"` + // The ID for the entity, if any. + EntityId string `protobuf:"bytes,4,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` + // The etag of the BucketAccessControl. + // If included in the metadata of an update or delete request message, the + // operation operation will only be performed if the etag matches that of the + // bucket's BucketAccessControl. + Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` + // The email address associated with the entity, if any. + Email string `protobuf:"bytes,5,opt,name=email,proto3" json:"email,omitempty"` + // The domain associated with the entity, if any. + Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` + // The project team associated with the entity, if any. + ProjectTeam *ProjectTeam `protobuf:"bytes,7,opt,name=project_team,json=projectTeam,proto3" json:"project_team,omitempty"` +} + +func (x *BucketAccessControl) Reset() { + *x = BucketAccessControl{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BucketAccessControl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BucketAccessControl) ProtoMessage() {} + +func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead. 
+func (*BucketAccessControl) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} +} + +func (x *BucketAccessControl) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *BucketAccessControl) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *BucketAccessControl) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *BucketAccessControl) GetEntityAlt() string { + if x != nil { + return x.EntityAlt + } + return "" +} + +func (x *BucketAccessControl) GetEntityId() string { + if x != nil { + return x.EntityId + } + return "" +} + +func (x *BucketAccessControl) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *BucketAccessControl) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *BucketAccessControl) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *BucketAccessControl) GetProjectTeam() *ProjectTeam { + if x != nil { + return x.ProjectTeam + } + return nil +} + +// Message used to convey content being read or written, along with an optional +// checksum. +type ChecksummedData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The data. + Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // If set, the CRC32C digest of the content field. + Crc32C *uint32 `protobuf:"fixed32,2,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` +} + +func (x *ChecksummedData) Reset() { + *x = ChecksummedData{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ChecksummedData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChecksummedData) ProtoMessage() {} + +func (x *ChecksummedData) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead. +func (*ChecksummedData) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} +} + +func (x *ChecksummedData) GetContent() []byte { + if x != nil { + return x.Content + } + return nil +} + +func (x *ChecksummedData) GetCrc32C() uint32 { + if x != nil && x.Crc32C != nil { + return *x.Crc32C + } + return 0 +} + +// Message used for storing full (not subrange) object checksums. +type ObjectChecksums struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // CRC32C digest of the object data. Computed by the Cloud Storage service for + // all written objects. + // If set in an WriteObjectRequest, service will validate that the stored + // object matches this checksum. + Crc32C *uint32 `protobuf:"fixed32,1,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` + // 128 bit MD5 hash of the object data. + // For more information about using the MD5 hash, see + // [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes and + // ETags: Best Practices]. + // Not all objects will provide an MD5 hash. 
For example, composite objects + // provide only crc32c hashes. + // This value is equivalent to running `cat object.txt | openssl md5 -binary` + Md5Hash []byte `protobuf:"bytes,2,opt,name=md5_hash,json=md5Hash,proto3" json:"md5_hash,omitempty"` +} + +func (x *ObjectChecksums) Reset() { + *x = ObjectChecksums{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectChecksums) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectChecksums) ProtoMessage() {} + +func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. +func (*ObjectChecksums) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} +} + +func (x *ObjectChecksums) GetCrc32C() uint32 { + if x != nil && x.Crc32C != nil { + return *x.Crc32C + } + return 0 +} + +func (x *ObjectChecksums) GetMd5Hash() []byte { + if x != nil { + return x.Md5Hash + } + return nil +} + +// Hmac Key Metadata, which includes all information other than the secret. +type HmacKeyMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Immutable. Resource name ID of the key in the format + // /. + // can be the project ID or project number. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Immutable. Globally unique id for keys. + AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` + // Immutable. Identifies the project that owns the service account of the specified HMAC + // key, in the format "projects/". can + // be the project ID or project number. + Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` + // Output only. Email of the service account the key authenticates as. + ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` + // State of the key. One of ACTIVE, INACTIVE, or DELETED. + // Writable, can be updated by UpdateHmacKey operation. + State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` + // Output only. The creation time of the HMAC key. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. The last modification time of the HMAC key metadata. + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + // The etag of the HMAC key. 
+ Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` +} + +func (x *HmacKeyMetadata) Reset() { + *x = HmacKeyMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HmacKeyMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HmacKeyMetadata) ProtoMessage() {} + +func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead. +func (*HmacKeyMetadata) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} +} + +func (x *HmacKeyMetadata) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *HmacKeyMetadata) GetAccessId() string { + if x != nil { + return x.AccessId + } + return "" +} + +func (x *HmacKeyMetadata) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *HmacKeyMetadata) GetServiceAccountEmail() string { + if x != nil { + return x.ServiceAccountEmail + } + return "" +} + +func (x *HmacKeyMetadata) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.UpdateTime + } + return nil +} + +func (x *HmacKeyMetadata) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +// A directive to publish Pub/Sub notifications upon changes to a bucket. +type Notification struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of this notification. + // Format: + // `projects/{project}/buckets/{bucket}/notificationConfigs/{notification}` + // The `{project}` portion may be `_` for globally unique buckets. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The Pub/Sub topic to which this subscription publishes. Formatted as: + // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + // The etag of the Notification. + // If included in the metadata of GetNotificationRequest, the operation will + // only be performed if the etag matches that of the Notification. + Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"` + // If present, only send notifications about listed event types. If empty, + // sent notifications for all event types. + EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"` + // A list of additional attributes to attach to each Pub/Sub + // message published for this notification subscription. 
+ CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If present, only apply this notification config to object names that + // begin with this prefix. + ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"` + // Required. The desired content of the Payload. + PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"` +} + +func (x *Notification) Reset() { + *x = Notification{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Notification) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Notification) ProtoMessage() {} + +func (x *Notification) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Notification.ProtoReflect.Descriptor instead. +func (*Notification) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} +} + +func (x *Notification) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Notification) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *Notification) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *Notification) GetEventTypes() []string { + if x != nil { + return x.EventTypes + } + return nil +} + +func (x *Notification) GetCustomAttributes() map[string]string { + if x != nil { + return x.CustomAttributes + } + return nil +} + +func (x *Notification) GetObjectNamePrefix() string { + if x != nil { + return x.ObjectNamePrefix + } + return "" +} + +func (x *Notification) GetPayloadFormat() string { + if x != nil { + return x.PayloadFormat + } + return "" +} + +// Describes the Customer-Supplied Encryption Key mechanism used to store an +// Object's data at rest. +type CustomerEncryption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encryption algorithm. + EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` + // SHA256 hash value of the encryption key. + // In raw bytes format (not base64-encoded). 
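// ---------------------------------------------------------------------------
// Illustrative sketch (editorial note, not part of the generated code): a
// Notification message, as defined above, describing Pub/Sub notifications
// for a bucket. The Name and Topic strings follow the formats given in the
// field comments; the event type and payload format values are assumptions
// for illustration and are not documented in this file.
//
//	cfg := &Notification{
//		Name:             "projects/_/buckets/my-bucket/notificationConfigs/42", // placeholder
//		Topic:            "//pubsub.googleapis.com/projects/my-project/topics/my-topic",
//		EventTypes:       []string{"OBJECT_FINALIZE"}, // assumed event type
//		ObjectNamePrefix: "logs/",
//		PayloadFormat:    "JSON_API_V1", // assumed payload format value
//		CustomAttributes: map[string]string{"team": "storage"},
//	}
// ---------------------------------------------------------------------------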
+ KeySha256Bytes []byte `protobuf:"bytes,3,opt,name=key_sha256_bytes,json=keySha256Bytes,proto3" json:"key_sha256_bytes,omitempty"` +} + +func (x *CustomerEncryption) Reset() { + *x = CustomerEncryption{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomerEncryption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomerEncryption) ProtoMessage() {} + +func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead. +func (*CustomerEncryption) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} +} + +func (x *CustomerEncryption) GetEncryptionAlgorithm() string { + if x != nil { + return x.EncryptionAlgorithm + } + return "" +} + +func (x *CustomerEncryption) GetKeySha256Bytes() []byte { + if x != nil { + return x.KeySha256Bytes + } + return nil +} + +// An object. +type Object struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Immutable. The name of this object. Nearly any sequence of unicode characters is + // valid. See + // [Guidelines](https://cloud.google.com/storage/docs/naming-objects). + // Example: `test.txt` + // The `name` field by itself does not uniquely identify a Cloud Storage + // object. A Cloud Storage object is uniquely identified by the tuple of + // (bucket, object, generation). + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Immutable. The name of the bucket containing this object. + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // The etag of the object. + // If included in the metadata of an update or delete request message, the + // operation will only be performed if the etag matches that of the live + // object. + Etag string `protobuf:"bytes,27,opt,name=etag,proto3" json:"etag,omitempty"` + // Immutable. The content generation of this object. Used for object versioning. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // Output only. The version of the metadata for this generation of this object. Used for + // preconditions and for detecting changes in metadata. A metageneration + // number is only meaningful in the context of a particular generation of a + // particular object. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` + // Storage class of the object. + StorageClass string `protobuf:"bytes,5,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + // Output only. Content-Length of the object data in bytes, matching + // [https://tools.ietf.org/html/rfc7230#section-3.3.2][RFC 7230 §3.3.2]. 
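// ---------------------------------------------------------------------------
// Illustrative sketch (editorial note, not part of the generated code): the
// CustomerEncryption message above carries the key's SHA-256 as raw bytes,
// which can be derived from a customer-supplied key with the standard
// library. The helper name and the "AES256" algorithm string are assumptions
// for illustration.
//
//	import "crypto/sha256"
//
//	func customerEncryptionFor(key []byte) *CustomerEncryption {
//		sum := sha256.Sum256(key) // raw bytes, not base64-encoded
//		return &CustomerEncryption{
//			EncryptionAlgorithm: "AES256", // assumed algorithm identifier
//			KeySha256Bytes:      sum[:],
//		}
//	}
// ---------------------------------------------------------------------------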
+ // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + Size int64 `protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` + // Content-Encoding of the object data, matching + // [https://tools.ietf.org/html/rfc7231#section-3.1.2.2][RFC 7231 §3.1.2.2] + ContentEncoding string `protobuf:"bytes,7,opt,name=content_encoding,json=contentEncoding,proto3" json:"content_encoding,omitempty"` + // Content-Disposition of the object data, matching + // [https://tools.ietf.org/html/rfc6266][RFC 6266]. + ContentDisposition string `protobuf:"bytes,8,opt,name=content_disposition,json=contentDisposition,proto3" json:"content_disposition,omitempty"` + // Cache-Control directive for the object data, matching + // [https://tools.ietf.org/html/rfc7234#section-5.2"][RFC 7234 §5.2]. + // If omitted, and the object is accessible to all anonymous users, the + // default will be `public, max-age=3600`. + CacheControl string `protobuf:"bytes,9,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"` + // Access controls on the object. + // If iamConfig.uniformBucketLevelAccess is enabled on the parent + // bucket, requests to set, read, or modify acl is an error. + Acl []*ObjectAccessControl `protobuf:"bytes,10,rep,name=acl,proto3" json:"acl,omitempty"` + // Content-Language of the object data, matching + // [https://tools.ietf.org/html/rfc7231#section-3.1.3.2][RFC 7231 §3.1.3.2]. + ContentLanguage string `protobuf:"bytes,11,opt,name=content_language,json=contentLanguage,proto3" json:"content_language,omitempty"` + // Output only. The deletion time of the object. Will be returned if and only if this + // version of the object has been deleted. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` + // Content-Type of the object data, matching + // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5]. + // If an object is stored without a Content-Type, it is served as + // `application/octet-stream`. + ContentType string `protobuf:"bytes,13,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // Output only. The creation time of the object. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. Number of underlying components that make up this object. Components are + // accumulated by compose operations. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"` + // Output only. Hashes for the data part of this object. + Checksums *ObjectChecksums `protobuf:"bytes,16,opt,name=checksums,proto3" json:"checksums,omitempty"` + // Output only. The modification time of the object metadata. + // Set initially to object creation time and then updated whenever any + // metadata of the object changes. 
This includes changes made by a requester, + // such as modifying custom metadata, as well as changes made by Cloud Storage + // on behalf of a requester, such as changing the storage class based on an + // Object Lifecycle Configuration. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + // Cloud KMS Key used to encrypt this object, if the object is encrypted by + // such a key. + KmsKey string `protobuf:"bytes,18,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` + // Output only. The time at which the object's storage class was last changed. When the + // object is initially created, it will be set to time_created. + // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + UpdateStorageClassTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` + // Whether an object is under temporary hold. While this flag is set to true, + // the object is protected against deletion and overwrites. A common use case + // of this flag is regulatory investigations where objects need to be retained + // while the investigation is ongoing. Note that unlike event-based hold, + // temporary hold does not impact retention expiration time of an object. + TemporaryHold bool `protobuf:"varint,20,opt,name=temporary_hold,json=temporaryHold,proto3" json:"temporary_hold,omitempty"` + // A server-determined value that specifies the earliest time that the + // object's retention period expires. + // Note 1: This field is not provided for objects with an active event-based + // hold, since retention expiration is unknown until the hold is removed. + // Note 2: This value can be provided even when temporary hold is set (so that + // the user can reason about policy without having to first unset the + // temporary hold). + RetentionExpireTime *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=retention_expire_time,json=retentionExpireTime,proto3" json:"retention_expire_time,omitempty"` + // User-provided metadata, in key/value pairs. + Metadata map[string]string `protobuf:"bytes,22,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Whether an object is under event-based hold. + // An event-based hold is a way to force the retention of an object until + // after some event occurs. Once the hold is released by explicitly setting + // this field to false, the object will become subject to any bucket-level + // retention policy, except that the retention duration will be calculated + // from the time the event based hold was lifted, rather than the time the + // object was created. + // + // In a WriteObject request, not setting this field implies that the value + // should be taken from the parent bucket's "default_event_based_hold" field. + // In a response, this field will always be set to true or false. + EventBasedHold *bool `protobuf:"varint,23,opt,name=event_based_hold,json=eventBasedHold,proto3,oneof" json:"event_based_hold,omitempty"` + // Output only. The owner of the object. This will always be the uploader of the object. 
+ // Attempting to set or update this field will result in a + // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + Owner *Owner `protobuf:"bytes,24,opt,name=owner,proto3" json:"owner,omitempty"` + // Metadata of Customer-Supplied Encryption Key, if the object is encrypted by + // such a key. + CustomerEncryption *CustomerEncryption `protobuf:"bytes,25,opt,name=customer_encryption,json=customerEncryption,proto3" json:"customer_encryption,omitempty"` + // A user-specified timestamp set on an object. + CustomTime *timestamppb.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"` +} + +func (x *Object) Reset() { + *x = Object{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Object) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Object) ProtoMessage() {} + +func (x *Object) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Object.ProtoReflect.Descriptor instead. +func (*Object) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} +} + +func (x *Object) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Object) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *Object) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *Object) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *Object) GetMetageneration() int64 { + if x != nil { + return x.Metageneration + } + return 0 +} + +func (x *Object) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *Object) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *Object) GetContentEncoding() string { + if x != nil { + return x.ContentEncoding + } + return "" +} + +func (x *Object) GetContentDisposition() string { + if x != nil { + return x.ContentDisposition + } + return "" +} + +func (x *Object) GetCacheControl() string { + if x != nil { + return x.CacheControl + } + return "" +} + +func (x *Object) GetAcl() []*ObjectAccessControl { + if x != nil { + return x.Acl + } + return nil +} + +func (x *Object) GetContentLanguage() string { + if x != nil { + return x.ContentLanguage + } + return "" +} + +func (x *Object) GetDeleteTime() *timestamppb.Timestamp { + if x != nil { + return x.DeleteTime + } + return nil +} + +func (x *Object) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *Object) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *Object) GetComponentCount() int32 { + if x != nil { + return x.ComponentCount + } + return 0 +} + +func (x *Object) GetChecksums() *ObjectChecksums { + if x != nil { + return x.Checksums + } + return nil +} + +func (x *Object) GetUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.UpdateTime + } + return nil +} + +func (x *Object) GetKmsKey() string { + if x != nil { + return x.KmsKey + } + return "" +} + +func (x *Object) GetUpdateStorageClassTime() 
*timestamppb.Timestamp { + if x != nil { + return x.UpdateStorageClassTime + } + return nil +} + +func (x *Object) GetTemporaryHold() bool { + if x != nil { + return x.TemporaryHold + } + return false +} + +func (x *Object) GetRetentionExpireTime() *timestamppb.Timestamp { + if x != nil { + return x.RetentionExpireTime + } + return nil +} + +func (x *Object) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Object) GetEventBasedHold() bool { + if x != nil && x.EventBasedHold != nil { + return *x.EventBasedHold + } + return false +} + +func (x *Object) GetOwner() *Owner { + if x != nil { + return x.Owner + } + return nil +} + +func (x *Object) GetCustomerEncryption() *CustomerEncryption { + if x != nil { + return x.CustomerEncryption + } + return nil +} + +func (x *Object) GetCustomTime() *timestamppb.Timestamp { + if x != nil { + return x.CustomTime + } + return nil +} + +// An access-control entry. +type ObjectAccessControl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The access permission for the entity. + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + // The ID of the access-control entry. + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + // The entity holding the permission, in one of the following forms: + // * `user-{userid}` + // * `user-{email}` + // * `group-{groupid}` + // * `group-{email}` + // * `domain-{domain}` + // * `project-{team}-{projectnumber}` + // * `project-{team}-{projectid}` + // * `allUsers` + // * `allAuthenticatedUsers` + // Examples: + // * The user `liz@example.com` would be `user-liz@example.com`. + // * The group `example@googlegroups.com` would be + // `group-example@googlegroups.com`. + // * All members of the Google Apps for Business domain `example.com` would be + // `domain-example.com`. + // For project entities, `project-{team}-{projectnumber}` format will be + // returned on response. + Entity string `protobuf:"bytes,3,opt,name=entity,proto3" json:"entity,omitempty"` + // Output only. The alternative entity format, if exists. For project entities, + // `project-{team}-{projectid}` format will be returned on response. + EntityAlt string `protobuf:"bytes,9,opt,name=entity_alt,json=entityAlt,proto3" json:"entity_alt,omitempty"` + // The ID for the entity, if any. + EntityId string `protobuf:"bytes,4,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` + // The etag of the ObjectAccessControl. + // If included in the metadata of an update or delete request message, the + // operation will only be performed if the etag matches that of the live + // object's ObjectAccessControl. + Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` + // The email address associated with the entity, if any. + Email string `protobuf:"bytes,5,opt,name=email,proto3" json:"email,omitempty"` + // The domain associated with the entity, if any. + Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` + // The project team associated with the entity, if any. 
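// ---------------------------------------------------------------------------
// Illustrative sketch (editorial note, not part of the generated code):
// Object.EventBasedHold above is an optional bool, so "unset" and "false"
// are distinct. Per the field comment, leaving it nil in a write means the
// value is taken from the bucket's default_event_based_hold, while
// GetEventBasedHold alone cannot distinguish the two cases.
//
//	func describeHold(o *Object) string {
//		if o.EventBasedHold == nil {
//			return "unset: falls back to the bucket default"
//		}
//		if o.GetEventBasedHold() {
//			return "event-based hold is on"
//		}
//		return "event-based hold is explicitly off"
//	}
// ---------------------------------------------------------------------------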
+ ProjectTeam *ProjectTeam `protobuf:"bytes,7,opt,name=project_team,json=projectTeam,proto3" json:"project_team,omitempty"` +} + +func (x *ObjectAccessControl) Reset() { + *x = ObjectAccessControl{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectAccessControl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectAccessControl) ProtoMessage() {} + +func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead. +func (*ObjectAccessControl) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} +} + +func (x *ObjectAccessControl) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *ObjectAccessControl) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ObjectAccessControl) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *ObjectAccessControl) GetEntityAlt() string { + if x != nil { + return x.EntityAlt + } + return "" +} + +func (x *ObjectAccessControl) GetEntityId() string { + if x != nil { + return x.EntityId + } + return "" +} + +func (x *ObjectAccessControl) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *ObjectAccessControl) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *ObjectAccessControl) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ObjectAccessControl) GetProjectTeam() *ProjectTeam { + if x != nil { + return x.ProjectTeam + } + return nil +} + +// The result of a call to Objects.ListObjects +type ListObjectsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of items. + Objects []*Object `protobuf:"bytes,1,rep,name=objects,proto3" json:"objects,omitempty"` + // The list of prefixes of objects matching-but-not-listed up to and including + // the requested delimiter. + Prefixes []string `protobuf:"bytes,2,rep,name=prefixes,proto3" json:"prefixes,omitempty"` + // The continuation token, used to page through large result sets. Provide + // this value in a subsequent request to return the next page of results. 
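// ---------------------------------------------------------------------------
// Illustrative sketch (editorial note, not part of the generated code): an
// ObjectAccessControl entry using one of the entity forms listed in the
// message above. Only the entity formats are documented in this file; the
// "READER" role string is an assumption for illustration.
//
//	acl := &ObjectAccessControl{
//		Entity: "user-liz@example.com",
//		Role:   "READER", // assumed role value
//	}
// ---------------------------------------------------------------------------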
+ NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListObjectsResponse) Reset() { + *x = ListObjectsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListObjectsResponse) ProtoMessage() {} + +func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. +func (*ListObjectsResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} +} + +func (x *ListObjectsResponse) GetObjects() []*Object { + if x != nil { + return x.Objects + } + return nil +} + +func (x *ListObjectsResponse) GetPrefixes() []string { + if x != nil { + return x.Prefixes + } + return nil +} + +func (x *ListObjectsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Represents the Viewers, Editors, or Owners of a given project. +type ProjectTeam struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The project number. + ProjectNumber string `protobuf:"bytes,1,opt,name=project_number,json=projectNumber,proto3" json:"project_number,omitempty"` + // The team. + Team string `protobuf:"bytes,2,opt,name=team,proto3" json:"team,omitempty"` +} + +func (x *ProjectTeam) Reset() { + *x = ProjectTeam{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProjectTeam) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProjectTeam) ProtoMessage() {} + +func (x *ProjectTeam) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead. +func (*ProjectTeam) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} +} + +func (x *ProjectTeam) GetProjectNumber() string { + if x != nil { + return x.ProjectNumber + } + return "" +} + +func (x *ProjectTeam) GetTeam() string { + if x != nil { + return x.Team + } + return "" +} + +// A service account, owned by Cloud Storage, which may be used when taking +// action on behalf of a given project, for example to publish Pub/Sub +// notifications or to retrieve security keys. +type ServiceAccount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ID of the notification. 
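// ---------------------------------------------------------------------------
// Illustrative sketch (editorial note, not part of the generated code): the
// NextPageToken field above drives pagination. The listPage callback below is
// hypothetical and stands in for whatever issues the ListObjects RPC; the
// loop feeds each response's token into the next request until it is empty.
//
//	func collectAll(listPage func(pageToken string) (*ListObjectsResponse, error)) ([]*Object, error) {
//		var all []*Object
//		token := ""
//		for {
//			resp, err := listPage(token)
//			if err != nil {
//				return nil, err
//			}
//			all = append(all, resp.GetObjects()...)
//			token = resp.GetNextPageToken()
//			if token == "" {
//				return all, nil
//			}
//		}
//	}
// ---------------------------------------------------------------------------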
+ EmailAddress string `protobuf:"bytes,1,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"` +} + +func (x *ServiceAccount) Reset() { + *x = ServiceAccount{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceAccount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceAccount) ProtoMessage() {} + +func (x *ServiceAccount) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead. +func (*ServiceAccount) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} +} + +func (x *ServiceAccount) GetEmailAddress() string { + if x != nil { + return x.EmailAddress + } + return "" +} + +// The owner of a specific resource. +type Owner struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The entity, in the form `user-`*userId*. + Entity string `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"` + // The ID for the entity. + EntityId string `protobuf:"bytes,2,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` +} + +func (x *Owner) Reset() { + *x = Owner{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Owner) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Owner) ProtoMessage() {} + +func (x *Owner) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Owner.ProtoReflect.Descriptor instead. +func (*Owner) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} +} + +func (x *Owner) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *Owner) GetEntityId() string { + if x != nil { + return x.EntityId + } + return "" +} + +// Specifies a requested range of bytes to download. +type ContentRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The starting offset of the object data. + Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + // The ending offset of the object data. + End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` + // The complete length of the object data. 
+ CompleteLength int64 `protobuf:"varint,3,opt,name=complete_length,json=completeLength,proto3" json:"complete_length,omitempty"` +} + +func (x *ContentRange) Reset() { + *x = ContentRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContentRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContentRange) ProtoMessage() {} + +func (x *ContentRange) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContentRange.ProtoReflect.Descriptor instead. +func (*ContentRange) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} +} + +func (x *ContentRange) GetStart() int64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *ContentRange) GetEnd() int64 { + if x != nil { + return x.End + } + return 0 +} + +func (x *ContentRange) GetCompleteLength() int64 { + if x != nil { + return x.CompleteLength + } + return 0 +} + +// Description of a source object for a composition request. +type ComposeObjectRequest_SourceObject struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The source object's name. All source objects must reside in the same + // bucket. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The generation of this object to use as the source. + Generation int64 `protobuf:"varint,2,opt,name=generation,proto3" json:"generation,omitempty"` + // Conditions that must be met for this operation to execute. + ObjectPreconditions *ComposeObjectRequest_SourceObject_ObjectPreconditions `protobuf:"bytes,3,opt,name=object_preconditions,json=objectPreconditions,proto3" json:"object_preconditions,omitempty"` +} + +func (x *ComposeObjectRequest_SourceObject) Reset() { + *x = ComposeObjectRequest_SourceObject{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComposeObjectRequest_SourceObject) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComposeObjectRequest_SourceObject) ProtoMessage() {} + +func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComposeObjectRequest_SourceObject.ProtoReflect.Descriptor instead. 
+func (*ComposeObjectRequest_SourceObject) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *ComposeObjectRequest_SourceObject) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ComposeObjectRequest_SourceObject) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *ComposeObjectRequest_SourceObject) GetObjectPreconditions() *ComposeObjectRequest_SourceObject_ObjectPreconditions { + if x != nil { + return x.ObjectPreconditions + } + return nil +} + +// Preconditions for a source object of a composition request. +type ComposeObjectRequest_SourceObject_ObjectPreconditions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Only perform the composition if the generation of the source object + // that would be used matches this value. If this value and a generation + // are both specified, they must be the same value or the call will fail. + IfGenerationMatch *int64 `protobuf:"varint,1,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` +} + +func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() { + *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {} + +func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComposeObjectRequest_SourceObject_ObjectPreconditions.ProtoReflect.Descriptor instead. +func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0, 0} +} + +func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +// Billing properties of a bucket. +type Bucket_Billing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // When set to true, Requester Pays is enabled for this bucket. 
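// ---------------------------------------------------------------------------
// Illustrative sketch (editorial note, not part of the generated code):
// source objects for a compose request, built from the SourceObject and
// ObjectPreconditions messages above. google.golang.org/protobuf/proto is
// assumed for the *int64 wrapper; the object names and generation number are
// placeholders.
//
//	import "google.golang.org/protobuf/proto"
//
//	sources := []*ComposeObjectRequest_SourceObject{
//		{Name: "part-1.txt"},
//		{
//			Name: "part-2.txt",
//			ObjectPreconditions: &ComposeObjectRequest_SourceObject_ObjectPreconditions{
//				IfGenerationMatch: proto.Int64(1234567890), // placeholder generation
//			},
//		},
//	}
// ---------------------------------------------------------------------------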
+ RequesterPays bool `protobuf:"varint,1,opt,name=requester_pays,json=requesterPays,proto3" json:"requester_pays,omitempty"` +} + +func (x *Bucket_Billing) Reset() { + *x = Bucket_Billing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Billing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Billing) ProtoMessage() {} + +func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead. +func (*Bucket_Billing) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 0} +} + +func (x *Bucket_Billing) GetRequesterPays() bool { + if x != nil { + return x.RequesterPays + } + return false +} + +// Cross-Origin Response sharing (CORS) properties for a bucket. +// For more on Cloud Storage and CORS, see +// https://cloud.google.com/storage/docs/cross-origin. +// For more on CORS in general, see https://tools.ietf.org/html/rfc6454. +type Bucket_Cors struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of Origins eligible to receive CORS response headers. See + // [https://tools.ietf.org/html/rfc6454][RFC 6454] for more on origins. + // Note: "*" is permitted in the list of origins, and means "any Origin". + Origin []string `protobuf:"bytes,1,rep,name=origin,proto3" json:"origin,omitempty"` + // The list of HTTP methods on which to include CORS response headers, + // (`GET`, `OPTIONS`, `POST`, etc) Note: "*" is permitted in the list of + // methods, and means "any method". + Method []string `protobuf:"bytes,2,rep,name=method,proto3" json:"method,omitempty"` + // The list of HTTP headers other than the + // [https://www.w3.org/TR/cors/#simple-response-header][simple response + // headers] to give permission for the user-agent to share across domains. + ResponseHeader []string `protobuf:"bytes,3,rep,name=response_header,json=responseHeader,proto3" json:"response_header,omitempty"` + // The value, in seconds, to return in the + // [https://www.w3.org/TR/cors/#access-control-max-age-response-header][Access-Control-Max-Age + // header] used in preflight responses. + MaxAgeSeconds int32 `protobuf:"varint,4,opt,name=max_age_seconds,json=maxAgeSeconds,proto3" json:"max_age_seconds,omitempty"` +} + +func (x *Bucket_Cors) Reset() { + *x = Bucket_Cors{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Cors) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Cors) ProtoMessage() {} + +func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead. 
+func (*Bucket_Cors) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 1} +} + +func (x *Bucket_Cors) GetOrigin() []string { + if x != nil { + return x.Origin + } + return nil +} + +func (x *Bucket_Cors) GetMethod() []string { + if x != nil { + return x.Method + } + return nil +} + +func (x *Bucket_Cors) GetResponseHeader() []string { + if x != nil { + return x.ResponseHeader + } + return nil +} + +func (x *Bucket_Cors) GetMaxAgeSeconds() int32 { + if x != nil { + return x.MaxAgeSeconds + } + return 0 +} + +// Encryption properties of a bucket. +type Bucket_Encryption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Cloud KMS key that will be used to encrypt objects + // inserted into this bucket, if no encryption method is specified. + DefaultKmsKey string `protobuf:"bytes,1,opt,name=default_kms_key,json=defaultKmsKey,proto3" json:"default_kms_key,omitempty"` +} + +func (x *Bucket_Encryption) Reset() { + *x = Bucket_Encryption{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Encryption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Encryption) ProtoMessage() {} + +func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead. +func (*Bucket_Encryption) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 2} +} + +func (x *Bucket_Encryption) GetDefaultKmsKey() string { + if x != nil { + return x.DefaultKmsKey + } + return "" +} + +// Bucket restriction options. +type Bucket_IamConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Bucket restriction options currently enforced on the bucket. + UniformBucketLevelAccess *Bucket_IamConfig_UniformBucketLevelAccess `protobuf:"bytes,1,opt,name=uniform_bucket_level_access,json=uniformBucketLevelAccess,proto3" json:"uniform_bucket_level_access,omitempty"` + // Whether IAM will enforce public access prevention. Valid values are + // "enforced" or "inherited". 
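// ---------------------------------------------------------------------------
// Illustrative sketch (editorial note, not part of the generated code): a
// Bucket_Cors entry using the fields defined above. The origin, methods,
// response header, and max age below are placeholders; per the field
// comments, "*" is also permitted for origins and methods.
//
//	cors := &Bucket_Cors{
//		Origin:         []string{"https://example.com"},
//		Method:         []string{"GET", "HEAD"},
//		ResponseHeader: []string{"Content-Type"},
//		MaxAgeSeconds:  3600,
//	}
// ---------------------------------------------------------------------------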
+ PublicAccessPrevention string `protobuf:"bytes,3,opt,name=public_access_prevention,json=publicAccessPrevention,proto3" json:"public_access_prevention,omitempty"` +} + +func (x *Bucket_IamConfig) Reset() { + *x = Bucket_IamConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_IamConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_IamConfig) ProtoMessage() {} + +func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead. +func (*Bucket_IamConfig) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3} +} + +func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess { + if x != nil { + return x.UniformBucketLevelAccess + } + return nil +} + +func (x *Bucket_IamConfig) GetPublicAccessPrevention() string { + if x != nil { + return x.PublicAccessPrevention + } + return "" +} + +// Lifecycle properties of a bucket. +// For more information, see https://cloud.google.com/storage/docs/lifecycle. +type Bucket_Lifecycle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A lifecycle management rule, which is made of an action to take and the + // condition(s) under which the action will be taken. + Rule []*Bucket_Lifecycle_Rule `protobuf:"bytes,1,rep,name=rule,proto3" json:"rule,omitempty"` +} + +func (x *Bucket_Lifecycle) Reset() { + *x = Bucket_Lifecycle{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Lifecycle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Lifecycle) ProtoMessage() {} + +func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead. +func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4} +} + +func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule { + if x != nil { + return x.Rule + } + return nil +} + +// Logging-related properties of a bucket. +type Bucket_Logging struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The destination bucket where the current bucket's logs should be placed, + // using path format (like `projects/123456/buckets/foo`). + LogBucket string `protobuf:"bytes,1,opt,name=log_bucket,json=logBucket,proto3" json:"log_bucket,omitempty"` + // A prefix for log object names. 
+ LogObjectPrefix string `protobuf:"bytes,2,opt,name=log_object_prefix,json=logObjectPrefix,proto3" json:"log_object_prefix,omitempty"` +} + +func (x *Bucket_Logging) Reset() { + *x = Bucket_Logging{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Logging) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Logging) ProtoMessage() {} + +func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead. +func (*Bucket_Logging) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 5} +} + +func (x *Bucket_Logging) GetLogBucket() string { + if x != nil { + return x.LogBucket + } + return "" +} + +func (x *Bucket_Logging) GetLogObjectPrefix() string { + if x != nil { + return x.LogObjectPrefix + } + return "" +} + +// Retention policy properties of a bucket. +type Bucket_RetentionPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Server-determined value that indicates the time from which policy was + // enforced and effective. + EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"` + // Once locked, an object retention policy cannot be modified. + IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"` + // The duration in seconds that objects need to be retained. Retention + // duration must be greater than zero and less than 100 years. Note that + // enforcement of retention periods less than a day is not guaranteed. Such + // periods should only be used for testing purposes. + RetentionPeriod *int64 `protobuf:"varint,3,opt,name=retention_period,json=retentionPeriod,proto3,oneof" json:"retention_period,omitempty"` +} + +func (x *Bucket_RetentionPolicy) Reset() { + *x = Bucket_RetentionPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_RetentionPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_RetentionPolicy) ProtoMessage() {} + +func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead. 
+func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 6} +} + +func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { + if x != nil { + return x.EffectiveTime + } + return nil +} + +func (x *Bucket_RetentionPolicy) GetIsLocked() bool { + if x != nil { + return x.IsLocked + } + return false +} + +func (x *Bucket_RetentionPolicy) GetRetentionPeriod() int64 { + if x != nil && x.RetentionPeriod != nil { + return *x.RetentionPeriod + } + return 0 +} + +// Properties of a bucket related to versioning. +// For more on Cloud Storage versioning, see +// https://cloud.google.com/storage/docs/object-versioning. +type Bucket_Versioning struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // While set to true, versioning is fully enabled for this bucket. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` +} + +func (x *Bucket_Versioning) Reset() { + *x = Bucket_Versioning{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Versioning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Versioning) ProtoMessage() {} + +func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead. +func (*Bucket_Versioning) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 7} +} + +func (x *Bucket_Versioning) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +// Properties of a bucket related to accessing the contents as a static +// website. For more on hosting a static website via Cloud Storage, see +// https://cloud.google.com/storage/docs/hosting-static-website. +type Bucket_Website struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If the requested object path is missing, the service will ensure the path + // has a trailing '/', append this suffix, and attempt to retrieve the + // resulting object. This allows the creation of `index.html` + // objects to represent directory pages. + MainPageSuffix string `protobuf:"bytes,1,opt,name=main_page_suffix,json=mainPageSuffix,proto3" json:"main_page_suffix,omitempty"` + // If the requested object path is missing, and any + // `mainPageSuffix` object is missing, if applicable, the service + // will return the named object from this bucket as the content for a + // [https://tools.ietf.org/html/rfc7231#section-6.5.4][404 Not Found] + // result. 
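// ---------------------------------------------------------------------------
// Illustrative sketch (editorial note, not part of the generated code): a
// Bucket_RetentionPolicy with a 30-day retention period expressed in seconds
// (30 * 24 * 60 * 60 = 2592000), within the "greater than zero and less than
// 100 years" bound noted above. google.golang.org/protobuf/proto is assumed
// for the *int64 wrapper; EffectiveTime is server-determined per its comment
// and IsLocked is left unset here.
//
//	import "google.golang.org/protobuf/proto"
//
//	policy := &Bucket_RetentionPolicy{
//		RetentionPeriod: proto.Int64(30 * 24 * 60 * 60),
//	}
// ---------------------------------------------------------------------------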
+ NotFoundPage string `protobuf:"bytes,2,opt,name=not_found_page,json=notFoundPage,proto3" json:"not_found_page,omitempty"` +} + +func (x *Bucket_Website) Reset() { + *x = Bucket_Website{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Website) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Website) ProtoMessage() {} + +func (x *Bucket_Website) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead. +func (*Bucket_Website) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 8} +} + +func (x *Bucket_Website) GetMainPageSuffix() string { + if x != nil { + return x.MainPageSuffix + } + return "" +} + +func (x *Bucket_Website) GetNotFoundPage() string { + if x != nil { + return x.NotFoundPage + } + return "" +} + +// Configuration for Custom Dual Regions. It should specify precisely two +// eligible regions within the same Multiregion. More information on regions +// may be found [https://cloud.google.com/storage/docs/locations][here]. +type Bucket_CustomPlacementConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of locations to use for data placement. + DataLocations []string `protobuf:"bytes,1,rep,name=data_locations,json=dataLocations,proto3" json:"data_locations,omitempty"` +} + +func (x *Bucket_CustomPlacementConfig) Reset() { + *x = Bucket_CustomPlacementConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_CustomPlacementConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_CustomPlacementConfig) ProtoMessage() {} + +func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead. +func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 9} +} + +func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string { + if x != nil { + return x.DataLocations + } + return nil +} + +// Configuration for a bucket's Autoclass feature. +type Bucket_Autoclass struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables Autoclass. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Output only. Latest instant at which the `enabled` field was set to true after being + // disabled/unconfigured or set to false after being enabled. If Autoclass + // is enabled when the bucket is created, the toggle_time is set to the + // bucket creation time. 
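// ---------------------------------------------------------------------------
// Illustrative sketch (editorial note, not part of the generated code): a
// Bucket_Website configuration using the fields above, serving index.html
// objects as directory pages and a custom not-found page. The 404 object
// name is a placeholder.
//
//	website := &Bucket_Website{
//		MainPageSuffix: "index.html",
//		NotFoundPage:   "404.html",
//	}
// ---------------------------------------------------------------------------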
+ ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"` +} + +func (x *Bucket_Autoclass) Reset() { + *x = Bucket_Autoclass{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Autoclass) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Autoclass) ProtoMessage() {} + +func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead. +func (*Bucket_Autoclass) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 10} +} + +func (x *Bucket_Autoclass) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp { + if x != nil { + return x.ToggleTime + } + return nil +} + +// Settings for Uniform Bucket level access. +// See https://cloud.google.com/storage/docs/uniform-bucket-level-access. +type Bucket_IamConfig_UniformBucketLevelAccess struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set, access checks only use bucket-level IAM policies or above. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + // The deadline time for changing + // `iamConfig.uniformBucketLevelAccess.enabled` from `true` to `false`. + // Mutable until the specified deadline is reached, but not afterward. + LockTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"` +} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { + *x = Bucket_IamConfig_UniformBucketLevelAccess{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead. +func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3, 0} +} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamppb.Timestamp { + if x != nil { + return x.LockTime + } + return nil +} + +// A lifecycle Rule, combining an action to take on an object and a +// condition which will trigger that action. 
+type Bucket_Lifecycle_Rule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The action to take. + Action *Bucket_Lifecycle_Rule_Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` + // The condition(s) under which the action will be taken. + Condition *Bucket_Lifecycle_Rule_Condition `protobuf:"bytes,2,opt,name=condition,proto3" json:"condition,omitempty"` +} + +func (x *Bucket_Lifecycle_Rule) Reset() { + *x = Bucket_Lifecycle_Rule{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Lifecycle_Rule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Lifecycle_Rule) ProtoMessage() {} + +func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[69] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead. +func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0} +} + +func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action { + if x != nil { + return x.Action + } + return nil +} + +func (x *Bucket_Lifecycle_Rule) GetCondition() *Bucket_Lifecycle_Rule_Condition { + if x != nil { + return x.Condition + } + return nil +} + +// An action to take on an object. +type Bucket_Lifecycle_Rule_Action struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of the action. Currently, only `Delete`, `SetStorageClass`, and + // `AbortIncompleteMultipartUpload` are supported. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Target storage class. Required iff the type of the action is + // SetStorageClass. + StorageClass string `protobuf:"bytes,2,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` +} + +func (x *Bucket_Lifecycle_Rule_Action) Reset() { + *x = Bucket_Lifecycle_Rule_Action{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Lifecycle_Rule_Action) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {} + +func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead. 
+func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 0} +} + +func (x *Bucket_Lifecycle_Rule_Action) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Bucket_Lifecycle_Rule_Action) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +// A condition of an object which triggers some action. +type Bucket_Lifecycle_Rule_Condition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Age of an object (in days). This condition is satisfied when an + // object reaches the specified age. + // A value of 0 indicates that all objects immediately match this + // condition. + AgeDays *int32 `protobuf:"varint,1,opt,name=age_days,json=ageDays,proto3,oneof" json:"age_days,omitempty"` + // This condition is satisfied when an object is created before midnight + // of the specified date in UTC. + CreatedBefore *date.Date `protobuf:"bytes,2,opt,name=created_before,json=createdBefore,proto3" json:"created_before,omitempty"` + // Relevant only for versioned objects. If the value is + // `true`, this condition matches live objects; if the value + // is `false`, it matches archived objects. + IsLive *bool `protobuf:"varint,3,opt,name=is_live,json=isLive,proto3,oneof" json:"is_live,omitempty"` + // Relevant only for versioned objects. If the value is N, this + // condition is satisfied when there are at least N versions (including + // the live version) newer than this version of the object. + NumNewerVersions *int32 `protobuf:"varint,4,opt,name=num_newer_versions,json=numNewerVersions,proto3,oneof" json:"num_newer_versions,omitempty"` + // Objects having any of the storage classes specified by this condition + // will be matched. Values include `MULTI_REGIONAL`, `REGIONAL`, + // `NEARLINE`, `COLDLINE`, `STANDARD`, and + // `DURABLE_REDUCED_AVAILABILITY`. + MatchesStorageClass []string `protobuf:"bytes,5,rep,name=matches_storage_class,json=matchesStorageClass,proto3" json:"matches_storage_class,omitempty"` + // Number of days that have elapsed since the custom timestamp set on an + // object. + // The value of the field must be a nonnegative integer. + DaysSinceCustomTime *int32 `protobuf:"varint,7,opt,name=days_since_custom_time,json=daysSinceCustomTime,proto3,oneof" json:"days_since_custom_time,omitempty"` + // An object matches this condition if the custom timestamp set on the + // object is before the specified date in UTC. + CustomTimeBefore *date.Date `protobuf:"bytes,8,opt,name=custom_time_before,json=customTimeBefore,proto3" json:"custom_time_before,omitempty"` + // This condition is relevant only for versioned objects. An object + // version satisfies this condition only if these many days have been + // passed since it became noncurrent. The value of the field must be a + // nonnegative integer. If it's zero, the object version will become + // eligible for Lifecycle action as soon as it becomes noncurrent. + DaysSinceNoncurrentTime *int32 `protobuf:"varint,9,opt,name=days_since_noncurrent_time,json=daysSinceNoncurrentTime,proto3,oneof" json:"days_since_noncurrent_time,omitempty"` + // This condition is relevant only for versioned objects. An object + // version satisfies this condition only if it became noncurrent before + // the specified date in UTC. 
+ NoncurrentTimeBefore *date.Date `protobuf:"bytes,10,opt,name=noncurrent_time_before,json=noncurrentTimeBefore,proto3" json:"noncurrent_time_before,omitempty"` + // List of object name prefixes. If any prefix exactly matches the + // beginning of the object name, the condition evaluates to true. + MatchesPrefix []string `protobuf:"bytes,11,rep,name=matches_prefix,json=matchesPrefix,proto3" json:"matches_prefix,omitempty"` + // List of object name suffixes. If any suffix exactly matches the + // end of the object name, the condition evaluates to true. + MatchesSuffix []string `protobuf:"bytes,12,rep,name=matches_suffix,json=matchesSuffix,proto3" json:"matches_suffix,omitempty"` +} + +func (x *Bucket_Lifecycle_Rule_Condition) Reset() { + *x = Bucket_Lifecycle_Rule_Condition{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Lifecycle_Rule_Condition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {} + +func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead. +func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 1} +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 { + if x != nil && x.AgeDays != nil { + return *x.AgeDays + } + return 0 +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetCreatedBefore() *date.Date { + if x != nil { + return x.CreatedBefore + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetIsLive() bool { + if x != nil && x.IsLive != nil { + return *x.IsLive + } + return false +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetNumNewerVersions() int32 { + if x != nil && x.NumNewerVersions != nil { + return *x.NumNewerVersions + } + return 0 +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesStorageClass() []string { + if x != nil { + return x.MatchesStorageClass + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceCustomTime() int32 { + if x != nil && x.DaysSinceCustomTime != nil { + return *x.DaysSinceCustomTime + } + return 0 +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetCustomTimeBefore() *date.Date { + if x != nil { + return x.CustomTimeBefore + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceNoncurrentTime() int32 { + if x != nil && x.DaysSinceNoncurrentTime != nil { + return *x.DaysSinceNoncurrentTime + } + return 0 +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetNoncurrentTimeBefore() *date.Date { + if x != nil { + return x.NoncurrentTimeBefore + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesPrefix() []string { + if x != nil { + return x.MatchesPrefix + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesSuffix() []string { + if x != nil { + return x.MatchesSuffix + } + return nil +} + +var File_google_storage_v2_storage_proto protoreflect.FileDescriptor + +var file_google_storage_v2_storage_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, + 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, + 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 
0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x02, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x22, 0xa1, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, + 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, + 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 
0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, + 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, + 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, + 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x41, 0x63, 0x6c, 0x22, 0x81, 0x02, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3c, 0x0a, 0x09, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, + 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, + 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, + 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9e, 0x01, 0x0a, + 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xb6, 0x03, + 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, + 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, + 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, + 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, + 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x5c, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 
0x22, 0x53, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x19, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, + 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x95, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, + 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, + 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf4, 0x06, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, + 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0d, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x3c, 0x0a, + 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, + 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, + 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x6d, + 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0xa8, 0x02, + 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 
0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xc0, 0x04, 0x0a, + 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, + 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, + 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 
0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, + 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, + 0x3f, 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, + 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, + 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, + 0x22, 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, + 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0xca, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, + 0x11, 0x69, 0x66, 0x47, 
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, + 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, + 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x89, 0x05, + 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 
0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, + 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, + 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, + 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, + 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, + 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 
0x0a, 0x12, 0x52, 0x65, + 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, + 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, + 0x3a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, + 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, + 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, + 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, + 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, + 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x43, 0x68, 0x65, 
0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, + 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, + 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, + 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, + 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, + 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, + 0xc9, 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, + 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, + 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, + 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 
0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, + 0x2f, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, + 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x65, + 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x2b, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, + 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x78, + 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x42, 0x0c, 0x0a, + 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, + 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, + 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xc4, 0x0d, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, + 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, + 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, + 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, + 
0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, + 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, + 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, + 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, + 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, + 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, + 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, + 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, 
0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xe0, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, + 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, + 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, + 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, + 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, + 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 
0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, + 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, + 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x50, 0x72, 0x6f, 
0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, + 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, + 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, + 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, + 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, + 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, + 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 
0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, + 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, + 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, + 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, + 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, + 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, + 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, + 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, + 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, + 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, + 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, + 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, + 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, + 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, + 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, + 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, + 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, + 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, + 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, + 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, + 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 
0x4f, 0x4e, 0x5f, 0x43, + 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, + 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, + 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, + 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, + 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, + 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, + 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, + 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, + 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, + 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, + 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, + 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, + 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, + 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, + 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, + 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, + 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xa6, + 0x1e, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, + 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 
0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, + 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, + 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, + 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, + 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, + 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x06, 0x6c, 
0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, + 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, + 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, + 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, + 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, + 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, + 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, + 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, + 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, + 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, + 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 
0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, + 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0x30, 0x0a, 0x07, + 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, + 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, + 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, + 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, + 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 
0x63, 0x63, 0x65, 0x73, + 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, + 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, + 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, + 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, + 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, + 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, + 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, + 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, + 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, + 
0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, + 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, + 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, + 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, + 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, + 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, + 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, + 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, + 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, + 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, + 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, + 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, + 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, + 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, + 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x5f, 
0x74, 0x69, 0x6d, 0x65, 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, + 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, + 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xb6, + 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, + 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0f, + 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x88, + 0x01, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, + 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, + 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, 0x75, + 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, + 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, + 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, + 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x67, 0x0a, 0x09, 0x41, 0x75, + 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 
0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, + 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, + 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, + 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, + 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, + 0x6d, 0x22, 0x53, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, + 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, + 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, + 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, + 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 
0x22, 0xfe, 0x02, 0x0a, + 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, + 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0xec, 0x03, + 0x0a, 0x0c, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x62, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, + 
0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x70, 0xea, 0x41, 0x6d, 0x0a, + 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0x71, 0x0a, 0x12, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, + 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, + 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, + 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, + 0x65, 0x74, 0x61, 0x67, 
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, + 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, + 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, + 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 
0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, + 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, + 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, + 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, + 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, + 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, + 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, + 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, + 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, + 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x0d, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x22, 0x97, + 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, + 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, + 
0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, + 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6d, + 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, 0x05, 0x4f, 0x77, + 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, + 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, + 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x97, 0x26, 0x0a, 0x07, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6f, 0x0a, 0x09, 0x47, 0x65, 0x74, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8b, 0x01, 0x0a, 0x0c, 0x43, + 0x72, 0x65, 0x61, 0x74, 
0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x38, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, + 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, + 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x26, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x22, 0x60, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 
0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, + 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, + 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, 0x65, + 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x14, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, + 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x12, 0x93, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, + 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x96, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x98, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x33, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x96, 0x01, 0x0a, 0x11, 0x4c, + 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, + 
0x65, 0x6e, 0x74, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, + 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xba, + 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, + 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, + 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, + 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x95, 0x01, 0x0a, 0x09, + 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 
0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, + 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, + 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, + 0x39, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0xc7, 0x01, 0x0a, 0x0b, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x61, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, + 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x2a, 0x2a, 0x28, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x98, 0x01, 0x0a, 0x0d, + 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, + 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, + 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 
0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x09, 0x75, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x31, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, + 0x61, 0x69, 0x6c, 0x12, 0x77, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, + 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, + 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, + 
0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, + 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3f, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, + 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, + 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, + 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, + 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x42, 
0xdc, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x76, 0x32, 0x3b, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, + 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, + 0x79, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_storage_v2_storage_proto_rawDescOnce sync.Once + file_google_storage_v2_storage_proto_rawDescData = file_google_storage_v2_storage_proto_rawDesc +) + +func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { + file_google_storage_v2_storage_proto_rawDescOnce.Do(func() { + file_google_storage_v2_storage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_storage_v2_storage_proto_rawDescData) + }) + return file_google_storage_v2_storage_proto_rawDescData +} + +var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 74) +var file_google_storage_v2_storage_proto_goTypes = []interface{}{ + (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values + (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest + (*GetBucketRequest)(nil), // 2: google.storage.v2.GetBucketRequest + (*CreateBucketRequest)(nil), // 3: google.storage.v2.CreateBucketRequest + (*ListBucketsRequest)(nil), // 4: google.storage.v2.ListBucketsRequest + (*ListBucketsResponse)(nil), // 5: google.storage.v2.ListBucketsResponse + (*LockBucketRetentionPolicyRequest)(nil), // 6: google.storage.v2.LockBucketRetentionPolicyRequest + (*UpdateBucketRequest)(nil), // 7: google.storage.v2.UpdateBucketRequest + (*DeleteNotificationRequest)(nil), // 8: google.storage.v2.DeleteNotificationRequest + (*GetNotificationRequest)(nil), // 9: google.storage.v2.GetNotificationRequest + (*CreateNotificationRequest)(nil), // 10: google.storage.v2.CreateNotificationRequest + (*ListNotificationsRequest)(nil), // 11: google.storage.v2.ListNotificationsRequest + (*ListNotificationsResponse)(nil), // 12: google.storage.v2.ListNotificationsResponse + (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest + (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest + (*CancelResumableWriteRequest)(nil), // 15: google.storage.v2.CancelResumableWriteRequest + (*CancelResumableWriteResponse)(nil), // 16: google.storage.v2.CancelResumableWriteResponse + 
(*ReadObjectRequest)(nil), // 17: google.storage.v2.ReadObjectRequest + (*GetObjectRequest)(nil), // 18: google.storage.v2.GetObjectRequest + (*ReadObjectResponse)(nil), // 19: google.storage.v2.ReadObjectResponse + (*WriteObjectSpec)(nil), // 20: google.storage.v2.WriteObjectSpec + (*WriteObjectRequest)(nil), // 21: google.storage.v2.WriteObjectRequest + (*WriteObjectResponse)(nil), // 22: google.storage.v2.WriteObjectResponse + (*ListObjectsRequest)(nil), // 23: google.storage.v2.ListObjectsRequest + (*QueryWriteStatusRequest)(nil), // 24: google.storage.v2.QueryWriteStatusRequest + (*QueryWriteStatusResponse)(nil), // 25: google.storage.v2.QueryWriteStatusResponse + (*RewriteObjectRequest)(nil), // 26: google.storage.v2.RewriteObjectRequest + (*RewriteResponse)(nil), // 27: google.storage.v2.RewriteResponse + (*StartResumableWriteRequest)(nil), // 28: google.storage.v2.StartResumableWriteRequest + (*StartResumableWriteResponse)(nil), // 29: google.storage.v2.StartResumableWriteResponse + (*UpdateObjectRequest)(nil), // 30: google.storage.v2.UpdateObjectRequest + (*GetServiceAccountRequest)(nil), // 31: google.storage.v2.GetServiceAccountRequest + (*CreateHmacKeyRequest)(nil), // 32: google.storage.v2.CreateHmacKeyRequest + (*CreateHmacKeyResponse)(nil), // 33: google.storage.v2.CreateHmacKeyResponse + (*DeleteHmacKeyRequest)(nil), // 34: google.storage.v2.DeleteHmacKeyRequest + (*GetHmacKeyRequest)(nil), // 35: google.storage.v2.GetHmacKeyRequest + (*ListHmacKeysRequest)(nil), // 36: google.storage.v2.ListHmacKeysRequest + (*ListHmacKeysResponse)(nil), // 37: google.storage.v2.ListHmacKeysResponse + (*UpdateHmacKeyRequest)(nil), // 38: google.storage.v2.UpdateHmacKeyRequest + (*CommonObjectRequestParams)(nil), // 39: google.storage.v2.CommonObjectRequestParams + (*ServiceConstants)(nil), // 40: google.storage.v2.ServiceConstants + (*Bucket)(nil), // 41: google.storage.v2.Bucket + (*BucketAccessControl)(nil), // 42: google.storage.v2.BucketAccessControl + (*ChecksummedData)(nil), // 43: google.storage.v2.ChecksummedData + (*ObjectChecksums)(nil), // 44: google.storage.v2.ObjectChecksums + (*HmacKeyMetadata)(nil), // 45: google.storage.v2.HmacKeyMetadata + (*Notification)(nil), // 46: google.storage.v2.Notification + (*CustomerEncryption)(nil), // 47: google.storage.v2.CustomerEncryption + (*Object)(nil), // 48: google.storage.v2.Object + (*ObjectAccessControl)(nil), // 49: google.storage.v2.ObjectAccessControl + (*ListObjectsResponse)(nil), // 50: google.storage.v2.ListObjectsResponse + (*ProjectTeam)(nil), // 51: google.storage.v2.ProjectTeam + (*ServiceAccount)(nil), // 52: google.storage.v2.ServiceAccount + (*Owner)(nil), // 53: google.storage.v2.Owner + (*ContentRange)(nil), // 54: google.storage.v2.ContentRange + (*ComposeObjectRequest_SourceObject)(nil), // 55: google.storage.v2.ComposeObjectRequest.SourceObject + (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 56: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + (*Bucket_Billing)(nil), // 57: google.storage.v2.Bucket.Billing + (*Bucket_Cors)(nil), // 58: google.storage.v2.Bucket.Cors + (*Bucket_Encryption)(nil), // 59: google.storage.v2.Bucket.Encryption + (*Bucket_IamConfig)(nil), // 60: google.storage.v2.Bucket.IamConfig + (*Bucket_Lifecycle)(nil), // 61: google.storage.v2.Bucket.Lifecycle + (*Bucket_Logging)(nil), // 62: google.storage.v2.Bucket.Logging + (*Bucket_RetentionPolicy)(nil), // 63: google.storage.v2.Bucket.RetentionPolicy + (*Bucket_Versioning)(nil), // 64: 
google.storage.v2.Bucket.Versioning + (*Bucket_Website)(nil), // 65: google.storage.v2.Bucket.Website + (*Bucket_CustomPlacementConfig)(nil), // 66: google.storage.v2.Bucket.CustomPlacementConfig + (*Bucket_Autoclass)(nil), // 67: google.storage.v2.Bucket.Autoclass + nil, // 68: google.storage.v2.Bucket.LabelsEntry + (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 69: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + (*Bucket_Lifecycle_Rule)(nil), // 70: google.storage.v2.Bucket.Lifecycle.Rule + (*Bucket_Lifecycle_Rule_Action)(nil), // 71: google.storage.v2.Bucket.Lifecycle.Rule.Action + (*Bucket_Lifecycle_Rule_Condition)(nil), // 72: google.storage.v2.Bucket.Lifecycle.Rule.Condition + nil, // 73: google.storage.v2.Notification.CustomAttributesEntry + nil, // 74: google.storage.v2.Object.MetadataEntry + (*fieldmaskpb.FieldMask)(nil), // 75: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 76: google.protobuf.Timestamp + (*date.Date)(nil), // 77: google.type.Date + (*v1.GetIamPolicyRequest)(nil), // 78: google.iam.v1.GetIamPolicyRequest + (*v1.SetIamPolicyRequest)(nil), // 79: google.iam.v1.SetIamPolicyRequest + (*v1.TestIamPermissionsRequest)(nil), // 80: google.iam.v1.TestIamPermissionsRequest + (*emptypb.Empty)(nil), // 81: google.protobuf.Empty + (*v1.Policy)(nil), // 82: google.iam.v1.Policy + (*v1.TestIamPermissionsResponse)(nil), // 83: google.iam.v1.TestIamPermissionsResponse +} +var file_google_storage_v2_storage_proto_depIdxs = []int32{ + 75, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask + 41, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 75, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask + 41, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket + 41, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 75, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask + 46, // 6: google.storage.v2.CreateNotificationRequest.notification:type_name -> google.storage.v2.Notification + 46, // 7: google.storage.v2.ListNotificationsResponse.notifications:type_name -> google.storage.v2.Notification + 48, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object + 55, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject + 39, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 39, // 11: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 39, // 12: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 75, // 13: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 39, // 14: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 75, // 15: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 43, // 16: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 44, // 17: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 54, // 18: 
google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange + 48, // 19: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object + 48, // 20: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object + 20, // 21: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 43, // 22: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 44, // 23: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 39, // 24: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 25: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object + 75, // 26: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask + 39, // 27: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 28: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object + 48, // 29: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object + 39, // 30: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 31: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object + 20, // 32: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 39, // 33: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 34: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object + 75, // 35: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask + 39, // 36: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 45, // 37: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata + 45, // 38: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata + 45, // 39: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata + 75, // 40: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask + 42, // 41: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl + 49, // 42: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl + 61, // 43: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle + 76, // 44: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp + 58, // 45: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors + 76, // 46: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp + 68, // 47: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry + 65, // 48: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website + 64, // 49: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning + 62, // 50: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging + 53, // 51: 
google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner + 59, // 52: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption + 57, // 53: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing + 63, // 54: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy + 60, // 55: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig + 66, // 56: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig + 67, // 57: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass + 51, // 58: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 76, // 59: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp + 76, // 60: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp + 73, // 61: google.storage.v2.Notification.custom_attributes:type_name -> google.storage.v2.Notification.CustomAttributesEntry + 49, // 62: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl + 76, // 63: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp + 76, // 64: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp + 44, // 65: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums + 76, // 66: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp + 76, // 67: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp + 76, // 68: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp + 74, // 69: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry + 53, // 70: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner + 47, // 71: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption + 76, // 72: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp + 51, // 73: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 48, // 74: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object + 56, // 75: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + 69, // 76: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + 70, // 77: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule + 76, // 78: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp + 76, // 79: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp + 76, // 80: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp + 71, // 81: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action + 72, // 82: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition + 77, // 83: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date + 77, // 84: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> 
google.type.Date + 77, // 85: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date + 1, // 86: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest + 2, // 87: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest + 3, // 88: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest + 4, // 89: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest + 6, // 90: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest + 78, // 91: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 79, // 92: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 80, // 93: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 7, // 94: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest + 8, // 95: google.storage.v2.Storage.DeleteNotification:input_type -> google.storage.v2.DeleteNotificationRequest + 9, // 96: google.storage.v2.Storage.GetNotification:input_type -> google.storage.v2.GetNotificationRequest + 10, // 97: google.storage.v2.Storage.CreateNotification:input_type -> google.storage.v2.CreateNotificationRequest + 11, // 98: google.storage.v2.Storage.ListNotifications:input_type -> google.storage.v2.ListNotificationsRequest + 13, // 99: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest + 14, // 100: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest + 15, // 101: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest + 18, // 102: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest + 17, // 103: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest + 30, // 104: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest + 21, // 105: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest + 23, // 106: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest + 26, // 107: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest + 28, // 108: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest + 24, // 109: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest + 31, // 110: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest + 32, // 111: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest + 34, // 112: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest + 35, // 113: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest + 36, // 114: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest + 38, // 115: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest + 81, // 116: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty + 41, // 117: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket + 41, // 118: 
google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket + 5, // 119: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse + 41, // 120: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket + 82, // 121: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy + 82, // 122: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy + 83, // 123: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 41, // 124: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket + 81, // 125: google.storage.v2.Storage.DeleteNotification:output_type -> google.protobuf.Empty + 46, // 126: google.storage.v2.Storage.GetNotification:output_type -> google.storage.v2.Notification + 46, // 127: google.storage.v2.Storage.CreateNotification:output_type -> google.storage.v2.Notification + 12, // 128: google.storage.v2.Storage.ListNotifications:output_type -> google.storage.v2.ListNotificationsResponse + 48, // 129: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object + 81, // 130: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty + 16, // 131: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse + 48, // 132: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object + 19, // 133: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse + 48, // 134: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object + 22, // 135: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse + 50, // 136: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse + 27, // 137: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse + 29, // 138: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse + 25, // 139: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse + 52, // 140: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount + 33, // 141: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse + 81, // 142: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty + 45, // 143: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 37, // 144: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse + 45, // 145: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 116, // [116:146] is the sub-list for method output_type + 86, // [86:116] is the sub-list for method input_type + 86, // [86:86] is the sub-list for extension type_name + 86, // [86:86] is the sub-list for extension extendee + 0, // [0:86] is the sub-list for field type_name +} + +func init() { file_google_storage_v2_storage_proto_init() } +func file_google_storage_v2_storage_proto_init() { + if File_google_storage_v2_storage_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_storage_v2_storage_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + 
case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListBucketsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListBucketsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LockBucketRetentionPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteNotificationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetNotificationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateNotificationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListNotificationsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListNotificationsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ComposeObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteObjectRequest); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelResumableWriteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelResumableWriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadObjectResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteObjectSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteObjectResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListObjectsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryWriteStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryWriteStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RewriteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} 
{ + switch v := v.(*RewriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartResumableWriteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartResumableWriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetServiceAccountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateHmacKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateHmacKeyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteHmacKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetHmacKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListHmacKeysRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListHmacKeysResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateHmacKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonObjectRequestParams); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceConstants); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BucketAccessControl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChecksummedData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectChecksums); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HmacKeyMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Notification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomerEncryption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Object); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectAccessControl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListObjectsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProjectTeam); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceAccount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Owner); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContentRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ComposeObjectRequest_SourceObject); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Billing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Cors); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Encryption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_IamConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Lifecycle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Logging); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_RetentionPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Versioning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Website); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_CustomPlacementConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Autoclass); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Lifecycle_Rule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Lifecycle_Rule_Action); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[16].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{ + (*WriteObjectRequest_UploadId)(nil), + (*WriteObjectRequest_WriteObjectSpec)(nil), + (*WriteObjectRequest_ChecksummedData)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{ + (*WriteObjectResponse_PersistedSize)(nil), + (*WriteObjectResponse_Resource)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []interface{}{ + (*QueryWriteStatusResponse_PersistedSize)(nil), + (*QueryWriteStatusResponse_Resource)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[29].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[42].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[43].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[47].OneofWrappers = 
[]interface{}{} + file_google_storage_v2_storage_proto_msgTypes[55].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[62].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[71].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, + NumEnums: 1, + NumMessages: 74, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_storage_v2_storage_proto_goTypes, + DependencyIndexes: file_google_storage_v2_storage_proto_depIdxs, + EnumInfos: file_google_storage_v2_storage_proto_enumTypes, + MessageInfos: file_google_storage_v2_storage_proto_msgTypes, + }.Build() + File_google_storage_v2_storage_proto = out.File + file_google_storage_v2_storage_proto_rawDesc = nil + file_google_storage_v2_storage_proto_goTypes = nil + file_google_storage_v2_storage_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// StorageClient is the client API for Storage service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type StorageClient interface { + // Permanently deletes an empty bucket. + DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Returns metadata for the specified bucket. + GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error) + // Creates a new bucket. + CreateBucket(ctx context.Context, in *CreateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) + // Retrieves a list of buckets for a given project. + ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) + // Locks retention policy on a bucket. + LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) + // Gets the IAM policy for a specified bucket or object. + GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) + // Updates an IAM policy for the specified bucket or object. + SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) + // Tests a set of permissions on the given bucket or object to see which, if + // any, are held by the caller. + TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) + // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. + UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) + // Permanently deletes a notification subscription. + DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // View a notification config. + GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error) + // Creates a notification subscription for a given bucket. 
+ // These notifications, when triggered, publish messages to the specified + // Pub/Sub topics. + // See https://cloud.google.com/storage/docs/pubsub-notifications. + CreateNotification(ctx context.Context, in *CreateNotificationRequest, opts ...grpc.CallOption) (*Notification, error) + // Retrieves a list of notification subscriptions for a given bucket. + ListNotifications(ctx context.Context, in *ListNotificationsRequest, opts ...grpc.CallOption) (*ListNotificationsResponse, error) + // Concatenates a list of existing objects into a new object in the same + // bucket. + ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) + // Deletes an object and its metadata. Deletions are permanent if versioning + // is not enabled for the bucket, or if the `generation` parameter is used. + DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Cancels an in-progress resumable upload. + CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) + // Retrieves an object's metadata. + GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) + // Reads an object's data. + ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error) + // Updates an object's metadata. + // Equivalent to JSON API's storage.objects.patch. + UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) + // Stores a new object and metadata. + // + // An object can be written either in a single message stream or in a + // resumable sequence of message streams. To write using a single stream, + // the client should include in the first message of the stream an + // `WriteObjectSpec` describing the destination bucket, object, and any + // preconditions. Additionally, the final message must set 'finish_write' to + // true, or else it is an error. + // + // For a resumable write, the client should instead call + // `StartResumableWrite()`, populating a `WriteObjectSpec` into that request. + // They should then attach the returned `upload_id` to the first message of + // each following call to `WriteObject`. If the stream is closed before + // finishing the upload (either explicitly by the client or due to a network + // error or an error response from the server), the client should do as + // follows: + // - Check the result Status of the stream, to determine if writing can be + // resumed on this stream or must be restarted from scratch (by calling + // `StartResumableWrite()`). The resumable errors are DEADLINE_EXCEEDED, + // INTERNAL, and UNAVAILABLE. For each case, the client should use binary + // exponential backoff before retrying. Additionally, writes can be + // resumed after RESOURCE_EXHAUSTED errors, but only after taking + // appropriate measures, which may include reducing aggregate send rate + // across clients and/or requesting a quota increase for your project. + // - If the call to `WriteObject` returns `ABORTED`, that indicates + // concurrent attempts to update the resumable write, caused either by + // multiple racing clients or by a single client where the previous + // request was timed out on the client side but nonetheless reached the + // server. 
In this case the client should take steps to prevent further + // concurrent writes (e.g., increase the timeouts, stop using more than + // one process to perform the upload, etc.), and then should follow the + // steps below for resuming the upload. + // - For resumable errors, the client should call `QueryWriteStatus()` and + // then continue writing from the returned `persisted_size`. This may be + // less than the amount of data the client previously sent. Note also that + // it is acceptable to send data starting at an offset earlier than the + // returned `persisted_size`; in this case, the service will skip data at + // offsets that were already persisted (without checking that it matches + // the previously written data), and write only the data starting from the + // persisted offset. This behavior can make client-side handling simpler + // in some cases. + // + // The service will not view the object as complete until the client has + // sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any + // requests on a stream after sending a request with `finish_write` set to + // `true` will cause an error. The client **should** check the response it + // receives to determine how much data the service was able to commit and + // whether the service views the object as complete. + // + // Attempting to resume an already finalized object will result in an OK + // status, with a WriteObjectResponse containing the finalized object's + // metadata. + WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) + // Retrieves a list of objects matching the criteria. + ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) + // Rewrites a source object to a destination object. Optionally overrides + // metadata. + RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error) + // Starts a resumable write. How long the write operation remains valid, and + // what happens when the write operation becomes invalid, are + // service-dependent. + StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error) + // Determines the `persisted_size` for an object that is being written, which + // can then be used as the `write_offset` for the next `Write()` call. + // + // If the object does not exist (i.e., the object has been deleted, or the + // first `Write()` has not yet reached the service), this method returns the + // error `NOT_FOUND`. + // + // The client **may** call `QueryWriteStatus()` at any time to determine how + // much data has been processed for this object. This is useful if the + // client is buffering data and needs to know which data can be safely + // evicted. For any sequence of `QueryWriteStatus()` calls for a given + // object name, the sequence of returned `persisted_size` values will be + // non-decreasing. + QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) + // Retrieves the name of a project's Google Cloud Storage service account. + GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) + // Creates a new HMAC key for the given service account. + CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) + // Deletes a given HMAC key. 
Key must be in an INACTIVE state. + DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Gets an existing HMAC key metadata for the given id. + GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) + // Lists HMAC keys under a given project with the additional filters provided. + ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) + // Updates a given HMAC key state between ACTIVE and INACTIVE. + UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) +} + +type storageClient struct { + cc grpc.ClientConnInterface +} + +func NewStorageClient(cc grpc.ClientConnInterface) StorageClient { + return &storageClient{cc} +} + +func (c *storageClient) DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteBucket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error) { + out := new(Bucket) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetBucket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) CreateBucket(ctx context.Context, in *CreateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) { + out := new(Bucket) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateBucket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) { + out := new(ListBucketsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListBuckets", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) { + out := new(Bucket) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/LockBucketRetentionPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) { + out := new(v1.Policy) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetIamPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) { + out := new(v1.Policy) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/SetIamPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) { + out := new(v1.TestIamPermissionsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/TestIamPermissions", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) { + out := new(Bucket) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateBucket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotification", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error) { + out := new(Notification) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotification", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) CreateNotification(ctx context.Context, in *CreateNotificationRequest, opts ...grpc.CallOption) (*Notification, error) { + out := new(Notification) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotification", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ListNotifications(ctx context.Context, in *ListNotificationsRequest, opts ...grpc.CallOption) (*ListNotificationsResponse, error) { + out := new(ListNotificationsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotifications", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) { + out := new(Object) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ComposeObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) { + out := new(CancelResumableWriteResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CancelResumableWrite", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) { + out := new(Object) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[0], "/google.storage.v2.Storage/ReadObject", opts...) 
+ if err != nil { + return nil, err + } + x := &storageReadObjectClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Storage_ReadObjectClient interface { + Recv() (*ReadObjectResponse, error) + grpc.ClientStream +} + +type storageReadObjectClient struct { + grpc.ClientStream +} + +func (x *storageReadObjectClient) Recv() (*ReadObjectResponse, error) { + m := new(ReadObjectResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storageClient) UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) { + out := new(Object) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[1], "/google.storage.v2.Storage/WriteObject", opts...) + if err != nil { + return nil, err + } + x := &storageWriteObjectClient{stream} + return x, nil +} + +type Storage_WriteObjectClient interface { + Send(*WriteObjectRequest) error + CloseAndRecv() (*WriteObjectResponse, error) + grpc.ClientStream +} + +type storageWriteObjectClient struct { + grpc.ClientStream +} + +func (x *storageWriteObjectClient) Send(m *WriteObjectRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *storageWriteObjectClient) CloseAndRecv() (*WriteObjectResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(WriteObjectResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storageClient) ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) { + out := new(ListObjectsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListObjects", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error) { + out := new(RewriteResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/RewriteObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error) { + out := new(StartResumableWriteResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/StartResumableWrite", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) { + out := new(QueryWriteStatusResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/QueryWriteStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) { + out := new(ServiceAccount) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetServiceAccount", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) { + out := new(CreateHmacKeyResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateHmacKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) { + out := new(HmacKeyMetadata) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetHmacKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) { + out := new(ListHmacKeysResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListHmacKeys", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) { + out := new(HmacKeyMetadata) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateHmacKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// StorageServer is the server API for Storage service. +type StorageServer interface { + // Permanently deletes an empty bucket. + DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) + // Returns metadata for the specified bucket. + GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) + // Creates a new bucket. + CreateBucket(context.Context, *CreateBucketRequest) (*Bucket, error) + // Retrieves a list of buckets for a given project. + ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) + // Locks retention policy on a bucket. + LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) + // Gets the IAM policy for a specified bucket or object. + GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) + // Updates an IAM policy for the specified bucket or object. + SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) + // Tests a set of permissions on the given bucket or object to see which, if + // any, are held by the caller. + TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) + // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. + UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) + // Permanently deletes a notification subscription. + DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) + // View a notification config. + GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) + // Creates a notification subscription for a given bucket. + // These notifications, when triggered, publish messages to the specified + // Pub/Sub topics. + // See https://cloud.google.com/storage/docs/pubsub-notifications. 
+ CreateNotification(context.Context, *CreateNotificationRequest) (*Notification, error) + // Retrieves a list of notification subscriptions for a given bucket. + ListNotifications(context.Context, *ListNotificationsRequest) (*ListNotificationsResponse, error) + // Concatenates a list of existing objects into a new object in the same + // bucket. + ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) + // Deletes an object and its metadata. Deletions are permanent if versioning + // is not enabled for the bucket, or if the `generation` parameter is used. + DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) + // Cancels an in-progress resumable upload. + CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) + // Retrieves an object's metadata. + GetObject(context.Context, *GetObjectRequest) (*Object, error) + // Reads an object's data. + ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error + // Updates an object's metadata. + // Equivalent to JSON API's storage.objects.patch. + UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) + // Stores a new object and metadata. + // + // An object can be written either in a single message stream or in a + // resumable sequence of message streams. To write using a single stream, + // the client should include in the first message of the stream an + // `WriteObjectSpec` describing the destination bucket, object, and any + // preconditions. Additionally, the final message must set 'finish_write' to + // true, or else it is an error. + // + // For a resumable write, the client should instead call + // `StartResumableWrite()`, populating a `WriteObjectSpec` into that request. + // They should then attach the returned `upload_id` to the first message of + // each following call to `WriteObject`. If the stream is closed before + // finishing the upload (either explicitly by the client or due to a network + // error or an error response from the server), the client should do as + // follows: + // - Check the result Status of the stream, to determine if writing can be + // resumed on this stream or must be restarted from scratch (by calling + // `StartResumableWrite()`). The resumable errors are DEADLINE_EXCEEDED, + // INTERNAL, and UNAVAILABLE. For each case, the client should use binary + // exponential backoff before retrying. Additionally, writes can be + // resumed after RESOURCE_EXHAUSTED errors, but only after taking + // appropriate measures, which may include reducing aggregate send rate + // across clients and/or requesting a quota increase for your project. + // - If the call to `WriteObject` returns `ABORTED`, that indicates + // concurrent attempts to update the resumable write, caused either by + // multiple racing clients or by a single client where the previous + // request was timed out on the client side but nonetheless reached the + // server. In this case the client should take steps to prevent further + // concurrent writes (e.g., increase the timeouts, stop using more than + // one process to perform the upload, etc.), and then should follow the + // steps below for resuming the upload. + // - For resumable errors, the client should call `QueryWriteStatus()` and + // then continue writing from the returned `persisted_size`. This may be + // less than the amount of data the client previously sent. 
Note also that + // it is acceptable to send data starting at an offset earlier than the + // returned `persisted_size`; in this case, the service will skip data at + // offsets that were already persisted (without checking that it matches + // the previously written data), and write only the data starting from the + // persisted offset. This behavior can make client-side handling simpler + // in some cases. + // + // The service will not view the object as complete until the client has + // sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any + // requests on a stream after sending a request with `finish_write` set to + // `true` will cause an error. The client **should** check the response it + // receives to determine how much data the service was able to commit and + // whether the service views the object as complete. + // + // Attempting to resume an already finalized object will result in an OK + // status, with a WriteObjectResponse containing the finalized object's + // metadata. + WriteObject(Storage_WriteObjectServer) error + // Retrieves a list of objects matching the criteria. + ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) + // Rewrites a source object to a destination object. Optionally overrides + // metadata. + RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) + // Starts a resumable write. How long the write operation remains valid, and + // what happens when the write operation becomes invalid, are + // service-dependent. + StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) + // Determines the `persisted_size` for an object that is being written, which + // can then be used as the `write_offset` for the next `Write()` call. + // + // If the object does not exist (i.e., the object has been deleted, or the + // first `Write()` has not yet reached the service), this method returns the + // error `NOT_FOUND`. + // + // The client **may** call `QueryWriteStatus()` at any time to determine how + // much data has been processed for this object. This is useful if the + // client is buffering data and needs to know which data can be safely + // evicted. For any sequence of `QueryWriteStatus()` calls for a given + // object name, the sequence of returned `persisted_size` values will be + // non-decreasing. + QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) + // Retrieves the name of a project's Google Cloud Storage service account. + GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) + // Creates a new HMAC key for the given service account. + CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) + // Deletes a given HMAC key. Key must be in an INACTIVE state. + DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) + // Gets an existing HMAC key metadata for the given id. + GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) + // Lists HMAC keys under a given project with the additional filters provided. + ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) + // Updates a given HMAC key state between ACTIVE and INACTIVE. + UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) +} + +// UnimplementedStorageServer can be embedded to have forward compatible implementations. 
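The WriteObject documentation above spells out the resumable-write protocol from the caller's side. Below is a minimal sketch of that flow against the generated client; it assumes the standard protoc-gen-go constructs for this proto (the NewStorageClient constructor, the Storage_WriteObjectClient stream, and oneof wrapper types such as WriteObjectRequest_UploadId), the storagepb import alias used elsewhere in this patch, and a pre-dialed *grpc.ClientConn. Checksums and real retry handling are elided.

import (
	"context"

	storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
	"google.golang.org/grpc"
)

func uploadResumable(ctx context.Context, conn *grpc.ClientConn, payload []byte) error {
	client := storagepb.NewStorageClient(conn)

	// Start the resumable write and record the upload_id.
	start, err := client.StartResumableWrite(ctx, &storagepb.StartResumableWriteRequest{
		WriteObjectSpec: &storagepb.WriteObjectSpec{
			Resource: &storagepb.Object{Bucket: "projects/_/buckets/my-bucket", Name: "my-object"},
		},
	})
	if err != nil {
		return err
	}

	// Stream the data: the first message carries the upload_id, and the last
	// message sets finish_write.
	stream, err := client.WriteObject(ctx)
	if err != nil {
		return err
	}
	_ = stream.Send(&storagepb.WriteObjectRequest{
		FirstMessage: &storagepb.WriteObjectRequest_UploadId{UploadId: start.GetUploadId()},
		WriteOffset:  0,
		Data: &storagepb.WriteObjectRequest_ChecksummedData{
			ChecksummedData: &storagepb.ChecksummedData{Content: payload},
		},
		FinishWrite: true,
	}) // a send error surfaces as the stream status on CloseAndRecv
	if _, err := stream.CloseAndRecv(); err != nil {
		// On a retryable failure, ask how much was persisted and resume the
		// upload from that offset on a new WriteObject stream.
		st, qErr := client.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{
			UploadId: start.GetUploadId(),
		})
		if qErr != nil {
			return qErr
		}
		_ = st.GetPersistedSize() // next write_offset to resume from
		return err
	}
	return nil
}

In practice the higher-level storage.Writer drives this protocol on the caller's behalf; the sketch only mirrors the steps the comment describes.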
+type UnimplementedStorageServer struct { +} + +func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteBucket not implemented") +} +func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetBucket not implemented") +} +func (*UnimplementedStorageServer) CreateBucket(context.Context, *CreateBucketRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateBucket not implemented") +} +func (*UnimplementedStorageServer) ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListBuckets not implemented") +} +func (*UnimplementedStorageServer) LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method LockBucketRetentionPolicy not implemented") +} +func (*UnimplementedStorageServer) GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") +} +func (*UnimplementedStorageServer) SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") +} +func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") +} +func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") +} +func (*UnimplementedStorageServer) DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteNotification not implemented") +} +func (*UnimplementedStorageServer) GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotification not implemented") +} +func (*UnimplementedStorageServer) CreateNotification(context.Context, *CreateNotificationRequest) (*Notification, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateNotification not implemented") +} +func (*UnimplementedStorageServer) ListNotifications(context.Context, *ListNotificationsRequest) (*ListNotificationsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNotifications not implemented") +} +func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") +} +func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") +} +func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented") +} +func (*UnimplementedStorageServer) GetObject(context.Context, *GetObjectRequest) 
(*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented") +} +func (*UnimplementedStorageServer) ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error { + return status.Errorf(codes.Unimplemented, "method ReadObject not implemented") +} +func (*UnimplementedStorageServer) UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateObject not implemented") +} +func (*UnimplementedStorageServer) WriteObject(Storage_WriteObjectServer) error { + return status.Errorf(codes.Unimplemented, "method WriteObject not implemented") +} +func (*UnimplementedStorageServer) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListObjects not implemented") +} +func (*UnimplementedStorageServer) RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RewriteObject not implemented") +} +func (*UnimplementedStorageServer) StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartResumableWrite not implemented") +} +func (*UnimplementedStorageServer) QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method QueryWriteStatus not implemented") +} +func (*UnimplementedStorageServer) GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetServiceAccount not implemented") +} +func (*UnimplementedStorageServer) CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateHmacKey not implemented") +} +func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteHmacKey not implemented") +} +func (*UnimplementedStorageServer) GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetHmacKey not implemented") +} +func (*UnimplementedStorageServer) ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListHmacKeys not implemented") +} +func (*UnimplementedStorageServer) UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateHmacKey not implemented") +} + +func RegisterStorageServer(s *grpc.Server, srv StorageServer) { + s.RegisterService(&_Storage_serviceDesc, srv) +} + +func _Storage_DeleteBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteBucketRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).DeleteBucket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/DeleteBucket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).DeleteBucket(ctx, req.(*DeleteBucketRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _Storage_GetBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBucketRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetBucket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetBucket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetBucket(ctx, req.(*GetBucketRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_CreateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateBucketRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).CreateBucket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/CreateBucket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).CreateBucket(ctx, req.(*CreateBucketRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ListBuckets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBucketsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ListBuckets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ListBuckets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ListBuckets(ctx, req.(*ListBucketsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_LockBucketRetentionPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LockBucketRetentionPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).LockBucketRetentionPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/LockBucketRetentionPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).LockBucketRetentionPolicy(ctx, req.(*LockBucketRetentionPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1.GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetIamPolicy(ctx, req.(*v1.GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(v1.SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).SetIamPolicy(ctx, req.(*v1.SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1.TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).TestIamPermissions(ctx, req.(*v1.TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_UpdateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateBucketRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).UpdateBucket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/UpdateBucket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).UpdateBucket(ctx, req.(*UpdateBucketRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_DeleteNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNotificationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).DeleteNotification(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/DeleteNotification", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).DeleteNotification(ctx, req.(*DeleteNotificationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetNotification(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetNotification", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetNotification(ctx, req.(*GetNotificationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_CreateNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNotificationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).CreateNotification(ctx, in) + } + 
info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/CreateNotification", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).CreateNotification(ctx, req.(*CreateNotificationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ListNotifications_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ListNotifications(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ListNotifications", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ListNotifications(ctx, req.(*ListNotificationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ComposeObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ComposeObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ComposeObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ComposeObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ComposeObject(ctx, req.(*ComposeObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_DeleteObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).DeleteObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/DeleteObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).DeleteObject(ctx, req.(*DeleteObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_CancelResumableWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelResumableWriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).CancelResumableWrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/CancelResumableWrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).CancelResumableWrite(ctx, req.(*CancelResumableWriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(StorageServer).GetObject(ctx, req.(*GetObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ReadObject_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadObjectRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageServer).ReadObject(m, &storageReadObjectServer{stream}) +} + +type Storage_ReadObjectServer interface { + Send(*ReadObjectResponse) error + grpc.ServerStream +} + +type storageReadObjectServer struct { + grpc.ServerStream +} + +func (x *storageReadObjectServer) Send(m *ReadObjectResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Storage_UpdateObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).UpdateObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/UpdateObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).UpdateObject(ctx, req.(*UpdateObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_WriteObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StorageServer).WriteObject(&storageWriteObjectServer{stream}) +} + +type Storage_WriteObjectServer interface { + SendAndClose(*WriteObjectResponse) error + Recv() (*WriteObjectRequest, error) + grpc.ServerStream +} + +type storageWriteObjectServer struct { + grpc.ServerStream +} + +func (x *storageWriteObjectServer) SendAndClose(m *WriteObjectResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *storageWriteObjectServer) Recv() (*WriteObjectRequest, error) { + m := new(WriteObjectRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Storage_ListObjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListObjectsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ListObjects(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ListObjects", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ListObjects(ctx, req.(*ListObjectsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_RewriteObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RewriteObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).RewriteObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/RewriteObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).RewriteObject(ctx, req.(*RewriteObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_StartResumableWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartResumableWriteRequest) + if err := dec(in); err 
!= nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).StartResumableWrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/StartResumableWrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).StartResumableWrite(ctx, req.(*StartResumableWriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_QueryWriteStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryWriteStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).QueryWriteStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/QueryWriteStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).QueryWriteStatus(ctx, req.(*QueryWriteStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetServiceAccount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetServiceAccount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetServiceAccount(ctx, req.(*GetServiceAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_CreateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateHmacKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).CreateHmacKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/CreateHmacKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).CreateHmacKey(ctx, req.(*CreateHmacKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_DeleteHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteHmacKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).DeleteHmacKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/DeleteHmacKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).DeleteHmacKey(ctx, req.(*DeleteHmacKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetHmacKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetHmacKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.storage.v2.Storage/GetHmacKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetHmacKey(ctx, req.(*GetHmacKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ListHmacKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListHmacKeysRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ListHmacKeys(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ListHmacKeys", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ListHmacKeys(ctx, req.(*ListHmacKeysRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_UpdateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateHmacKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).UpdateHmacKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/UpdateHmacKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).UpdateHmacKey(ctx, req.(*UpdateHmacKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Storage_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.storage.v2.Storage", + HandlerType: (*StorageServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DeleteBucket", + Handler: _Storage_DeleteBucket_Handler, + }, + { + MethodName: "GetBucket", + Handler: _Storage_GetBucket_Handler, + }, + { + MethodName: "CreateBucket", + Handler: _Storage_CreateBucket_Handler, + }, + { + MethodName: "ListBuckets", + Handler: _Storage_ListBuckets_Handler, + }, + { + MethodName: "LockBucketRetentionPolicy", + Handler: _Storage_LockBucketRetentionPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _Storage_GetIamPolicy_Handler, + }, + { + MethodName: "SetIamPolicy", + Handler: _Storage_SetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _Storage_TestIamPermissions_Handler, + }, + { + MethodName: "UpdateBucket", + Handler: _Storage_UpdateBucket_Handler, + }, + { + MethodName: "DeleteNotification", + Handler: _Storage_DeleteNotification_Handler, + }, + { + MethodName: "GetNotification", + Handler: _Storage_GetNotification_Handler, + }, + { + MethodName: "CreateNotification", + Handler: _Storage_CreateNotification_Handler, + }, + { + MethodName: "ListNotifications", + Handler: _Storage_ListNotifications_Handler, + }, + { + MethodName: "ComposeObject", + Handler: _Storage_ComposeObject_Handler, + }, + { + MethodName: "DeleteObject", + Handler: _Storage_DeleteObject_Handler, + }, + { + MethodName: "CancelResumableWrite", + Handler: _Storage_CancelResumableWrite_Handler, + }, + { + MethodName: "GetObject", + Handler: _Storage_GetObject_Handler, + }, + { + MethodName: "UpdateObject", + Handler: _Storage_UpdateObject_Handler, + }, + { + MethodName: "ListObjects", + Handler: _Storage_ListObjects_Handler, + }, + { + MethodName: "RewriteObject", + Handler: _Storage_RewriteObject_Handler, + }, + { + MethodName: "StartResumableWrite", + Handler: _Storage_StartResumableWrite_Handler, + }, + { + MethodName: 
"QueryWriteStatus", + Handler: _Storage_QueryWriteStatus_Handler, + }, + { + MethodName: "GetServiceAccount", + Handler: _Storage_GetServiceAccount_Handler, + }, + { + MethodName: "CreateHmacKey", + Handler: _Storage_CreateHmacKey_Handler, + }, + { + MethodName: "DeleteHmacKey", + Handler: _Storage_DeleteHmacKey_Handler, + }, + { + MethodName: "GetHmacKey", + Handler: _Storage_GetHmacKey_Handler, + }, + { + MethodName: "ListHmacKeys", + Handler: _Storage_ListHmacKeys_Handler, + }, + { + MethodName: "UpdateHmacKey", + Handler: _Storage_UpdateHmacKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ReadObject", + Handler: _Storage_ReadObject_Handler, + ServerStreams: true, + }, + { + StreamName: "WriteObject", + Handler: _Storage_WriteObject_Handler, + ClientStreams: true, + }, + }, + Metadata: "google/storage/v2/storage.proto", +} diff --git a/vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go b/vendor/cloud.google.com/go/storage/internal/apiv2/version.go similarity index 62% rename from vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go rename to vendor/cloud.google.com/go/storage/internal/apiv2/version.go index 7df7a1d7155a..fd9c94596c89 100644 --- a/vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/version.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the cloud.google.com/go import, won't actually become part of -// the resultant binary. -// +build modhack +// Code generated by gapicgen. DO NOT EDIT. package storage -// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "cloud.google.com/go" +import "cloud.google.com/go/storage/internal" + +func init() { + versionClient = internal.Version +} diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go new file mode 100644 index 000000000000..008568b40584 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. 
+const Version = "1.28.1" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index e755f197de80..810d64285d0c 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -16,22 +16,131 @@ package storage import ( "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strings" "cloud.google.com/go/internal" + "cloud.google.com/go/internal/version" + sinternal "cloud.google.com/go/storage/internal" + "github.com/google/uuid" gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) -// runWithRetry calls the function until it returns nil or a non-retryable error, or -// the context is done. -func runWithRetry(ctx context.Context, call func() error) error { - return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { +var defaultRetry *retryConfig = &retryConfig{} +var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version) + +// run determines whether a retry is necessary based on the config and +// idempotency information. It then calls the function with or without retries +// as appropriate, using the configured settings. +func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool, setHeader func(string, int)) error { + attempts := 1 + invocationID := uuid.New().String() + + if retry == nil { + retry = defaultRetry + } + if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever { + setHeader(invocationID, attempts) + return call() + } + bo := gax.Backoff{} + if retry.backoff != nil { + bo.Multiplier = retry.backoff.Multiplier + bo.Initial = retry.backoff.Initial + bo.Max = retry.backoff.Max + } + var errorFunc func(err error) bool = ShouldRetry + if retry.shouldRetry != nil { + errorFunc = retry.shouldRetry + } + + return internal.Retry(ctx, bo, func() (stop bool, err error) { + setHeader(invocationID, attempts) err = call() - if err == nil { - return true, nil + attempts++ + return !errorFunc(err), err + }) +} + +func setRetryHeaderHTTP(req interface{ Header() http.Header }) func(string, int) { + return func(invocationID string, attempts int) { + if req == nil { + return } - if shouldRetry(err) { - return false, nil + header := req.Header() + invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") + header.Set("x-goog-api-client", xGoogHeader) + } +} + +// TODO: Implement method setting header via context for gRPC +func setRetryHeaderGRPC(_ context.Context) func(string, int) { + return func(_ string, _ int) { + return + } +} + +// ShouldRetry returns true if an error is retryable, based on best practice +// guidance from GCS. See +// https://cloud.google.com/storage/docs/retry-strategy#go for more information +// on what errors are considered retryable. +// +// If you would like to customize retryable errors, use the WithErrorFunc to +// supply a RetryOption to your library calls. For example, to retry additional +// errors, you can write a custom func that wraps ShouldRetry and also specifies +// additional errors that should return true. 
+func ShouldRetry(err error) bool { + if err == nil { + return false + } + if errors.Is(err, io.ErrUnexpectedEOF) { + return true + } + + switch e := err.(type) { + case *net.OpError: + if strings.Contains(e.Error(), "use of closed network connection") { + // TODO: check against net.ErrClosed (go 1.16+) instead of string + return true } - return true, err - }) + case *googleapi.Error: + // Retry on 408, 429, and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. + return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case *url.Error: + // Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall). + // Unfortunately the error type is unexported, so we resort to string + // matching. + retriable := []string{"connection refused", "connection reset"} + for _, s := range retriable { + if strings.Contains(e.Error(), s) { + return true + } + } + case interface{ Temporary() bool }: + if e.Temporary() { + return true + } + } + // HTTP 429, 502, 503, and 504 all map to gRPC UNAVAILABLE per + // https://grpc.github.io/grpc/core/md_doc_http-grpc-status-mapping.html. + // + // This is only necessary for the experimental gRPC-based media operations. + if st, ok := status.FromError(err); ok && st.Code() == codes.Unavailable { + return true + } + // Unwrap is only supported in go1.13.x+ + if e, ok := err.(interface{ Unwrap() error }); ok { + return ShouldRetry(e.Unwrap()) + } + return false } diff --git a/vendor/cloud.google.com/go/storage/not_go110.go b/vendor/cloud.google.com/go/storage/not_go110.go deleted file mode 100644 index 66fa45bea2fe..000000000000 --- a/vendor/cloud.google.com/go/storage/not_go110.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !go1.10 - -package storage - -import ( - "net/url" - "strings" - - "google.golang.org/api/googleapi" -) - -func shouldRetry(err error) bool { - switch e := err.(type) { - case *googleapi.Error: - // Retry on 429 and 5xx, according to - // https://cloud.google.com/storage/docs/exponential-backoff. - return e.Code == 429 || (e.Code >= 500 && e.Code < 600) - case *url.Error: - // Retry on REFUSED_STREAM. - // Unfortunately the error type is unexported, so we resort to string - // matching. 
- return strings.Contains(e.Error(), "REFUSED_STREAM") - case interface{ Temporary() bool }: - return e.Temporary() - default: - return false - } -} diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go index 84619b6d58c7..614feb7b6daf 100644 --- a/vendor/cloud.google.com/go/storage/notifications.go +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -21,6 +21,7 @@ import ( "regexp" "cloud.google.com/go/internal/trace" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" raw "google.golang.org/api/storage/v1" ) @@ -91,6 +92,30 @@ func toNotification(rn *raw.Notification) *Notification { return n } +func toNotificationFromProto(pbn *storagepb.Notification) *Notification { + n := &Notification{ + ID: pbn.GetName(), + EventTypes: pbn.GetEventTypes(), + ObjectNamePrefix: pbn.GetObjectNamePrefix(), + CustomAttributes: pbn.GetCustomAttributes(), + PayloadFormat: pbn.GetPayloadFormat(), + } + n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic) + return n +} + +func toProtoNotification(n *Notification) *storagepb.Notification { + return &storagepb.Notification{ + Name: n.ID, + Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", + n.TopicProjectID, n.TopicID), + EventTypes: n.EventTypes, + ObjectNamePrefix: n.ObjectNamePrefix, + CustomAttributes: n.CustomAttributes, + PayloadFormat: n.PayloadFormat, + } +} + var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)") // parseNotificationTopic extracts the project and topic IDs from from the full @@ -132,16 +157,10 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re if n.TopicID == "" { return nil, errors.New("storage: AddNotification: missing TopicID") } - call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n)) - setClientHeader(call.Header()) - if b.userProject != "" { - call.UserProject(b.userProject) - } - rn, err := call.Context(ctx).Do() - if err != nil { - return nil, err - } - return toNotification(rn), nil + + opts := makeStorageOpts(false, b.retry, b.userProject) + ret, err = b.c.tc.CreateNotification(ctx, b.name, n, opts...) + return ret, err } // Notifications returns all the Notifications configured for this bucket, as a map @@ -150,20 +169,9 @@ func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notific ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications") defer func() { trace.EndSpan(ctx, err) }() - call := b.c.raw.Notifications.List(b.name) - setClientHeader(call.Header()) - if b.userProject != "" { - call.UserProject(b.userProject) - } - var res *raw.Notifications - err = runWithRetry(ctx, func() error { - res, err = call.Context(ctx).Do() - return err - }) - if err != nil { - return nil, err - } - return notificationsToMap(res.Items), nil + opts := makeStorageOpts(true, b.retry, b.userProject) + n, err = b.c.tc.ListNotifications(ctx, b.name, opts...) + return n, err } func notificationsToMap(rns []*raw.Notification) map[string]*Notification { @@ -174,15 +182,19 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification { return m } +func notificationsToMapFromProto(ns []*storagepb.Notification) map[string]*Notification { + m := map[string]*Notification{} + for _, n := range ns { + m[n.Name] = toNotificationFromProto(n) + } + return m +} + // DeleteNotification deletes the notification with the given ID. 
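AddNotification and toProtoNotification above show the shape the service expects for a notification config; the matching DeleteNotification wrapper follows this sketch. A minimal example of configuring a bucket notification through the public API, assuming an existing *storage.Client and the package's ObjectFinalizeEvent and JSONPayload constants:

import (
	"context"

	"cloud.google.com/go/storage"
)

func addFinalizeNotification(ctx context.Context, client *storage.Client) (*storage.Notification, error) {
	// Publishes a Pub/Sub message to projects/my-project/topics/my-topic
	// whenever an object in my-bucket is finalized.
	return client.Bucket("my-bucket").AddNotification(ctx, &storage.Notification{
		TopicProjectID: "my-project",
		TopicID:        "my-topic",
		PayloadFormat:  storage.JSONPayload,
		EventTypes:     []string{storage.ObjectFinalizeEvent},
	})
}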
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") defer func() { trace.EndSpan(ctx, err) }() - call := b.c.raw.Notifications.Delete(b.name, id) - setClientHeader(call.Header()) - if b.userProject != "" { - call.UserProject(b.userProject) - } - return call.Context(ctx).Do() + opts := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.DeleteNotification(ctx, b.name, id, opts...) } diff --git a/vendor/cloud.google.com/go/storage/post_policy_v4.go b/vendor/cloud.google.com/go/storage/post_policy_v4.go index b9df7db95813..2961aca20628 100644 --- a/vendor/cloud.google.com/go/storage/post_policy_v4.go +++ b/vendor/cloud.google.com/go/storage/post_policy_v4.go @@ -52,22 +52,38 @@ type PostPolicyV4Options struct { // Exactly one of PrivateKey or SignBytes must be non-nil. PrivateKey []byte - // SignBytes is a function for implementing custom signing. For example, if + // SignBytes is a function for implementing custom signing. + // + // Deprecated: Use SignRawBytes. If both SignBytes and SignRawBytes are defined, + // SignBytes will be ignored. + // This SignBytes function expects the bytes it receives to be hashed, while + // SignRawBytes accepts the raw bytes without hashing, allowing more flexibility. + // Add the following to the top of your signing function to hash the bytes + // to use SignRawBytes instead: + // shaSum := sha256.Sum256(bytes) + // bytes = shaSum[:] + // + SignBytes func(hashBytes []byte) (signature []byte, err error) + + // SignRawBytes is a function for implementing custom signing. For example, if // your application is running on Google App Engine, you can use // appengine's internal signing function: - // ctx := appengine.NewContext(request) - // acc, _ := appengine.ServiceAccount(ctx) - // url, err := SignedURL("bucket", "object", &SignedURLOptions{ - // GoogleAccessID: acc, - // SignBytes: func(b []byte) ([]byte, error) { - // _, signedBytes, err := appengine.SignBytes(ctx, b) - // return signedBytes, err - // }, - // // etc. - // }) + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // &PostPolicyV4Options{ + // GoogleAccessID: acc, + // SignRawBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) // - // Exactly one of PrivateKey or SignBytes must be non-nil. - SignBytes func(hashBytes []byte) (signature []byte, err error) + // SignRawBytes is equivalent to the SignBytes field on SignedURLOptions; + // that is, you may use the same signing function for the two. + // + // Exactly one of PrivateKey or SignRawBytes must be non-nil. + SignRawBytes func(bytes []byte) (signature []byte, err error) // Expires is the expiration time on the signed URL. // It must be a time in the future. @@ -96,6 +112,23 @@ type PostPolicyV4Options struct { // a 4XX status code, back with the message describing the problem. // Optional. 
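The deprecation note above says an existing digest-based signer can keep working with SignRawBytes by hashing the raw policy bytes itself. A minimal sketch of a PostPolicyV4Options value doing that, assuming a hypothetical signDigest helper (for example a KMS-backed or App Engine signer) that expects a SHA-256 digest:

import (
	"crypto/sha256"
	"time"

	"cloud.google.com/go/storage"
)

// signDigest is a placeholder for an external signer that expects a SHA-256
// digest rather than the raw policy bytes.
func signDigest(digest []byte) ([]byte, error) { /* ... */ return nil, nil }

func postPolicyOpts() *storage.PostPolicyV4Options {
	return &storage.PostPolicyV4Options{
		GoogleAccessID: "uploader@my-project.iam.gserviceaccount.com",
		// SignRawBytes receives the un-hashed policy, so hash it here because
		// signDigest (like the deprecated SignBytes) wants a digest.
		SignRawBytes: func(b []byte) ([]byte, error) {
			sum := sha256.Sum256(b)
			return signDigest(sum[:])
		},
		Expires: time.Now().Add(15 * time.Minute),
	}
}

The resulting options would then be passed to GenerateSignedPostPolicyV4 to produce the policy URL and form fields.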
Conditions []PostPolicyV4Condition + + shouldHashSignBytes bool +} + +func (opts *PostPolicyV4Options) clone() *PostPolicyV4Options { + return &PostPolicyV4Options{ + GoogleAccessID: opts.GoogleAccessID, + PrivateKey: opts.PrivateKey, + SignBytes: opts.SignBytes, + SignRawBytes: opts.SignRawBytes, + Expires: opts.Expires, + Style: opts.Style, + Insecure: opts.Insecure, + Fields: opts.Fields, + Conditions: opts.Conditions, + shouldHashSignBytes: opts.shouldHashSignBytes, + } } // PolicyV4Fields describes the attributes for a PostPolicyV4 request. @@ -206,6 +239,8 @@ func conditionStatusCodeOnSuccess(statusCode int) PostPolicyV4Condition { // GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. // The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. +// If initializing a Storage Client, instead use the Bucket.GenerateSignedPostPolicyV4 +// method which uses the Client's credentials to handle authentication. func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { if bucket == "" { return nil, errors.New("storage: bucket must be non-empty") @@ -220,20 +255,22 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options var signingFn func(hashedBytes []byte) ([]byte, error) switch { - case opts.SignBytes != nil: + case opts.SignRawBytes != nil: + signingFn = opts.SignRawBytes + case opts.shouldHashSignBytes: signingFn = opts.SignBytes - case len(opts.PrivateKey) != 0: parsedRSAPrivKey, err := parseKey(opts.PrivateKey) if err != nil { return nil, err } - signingFn = func(hashedBytes []byte) ([]byte, error) { - return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, hashedBytes) + signingFn = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, sum[:]) } default: - return nil, errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + return nil, errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set") } var descFields PolicyV4Fields @@ -249,10 +286,16 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options conds := make([]PostPolicyV4Condition, len(opts.Conditions)) copy(conds, opts.Conditions) conds = append(conds, - conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), - conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), + // These are ordered lexicographically. Technically the order doesn't matter + // for creating the policy, but we use this order to match the + // cross-language conformance tests for this feature. 
&singleValueCondition{"acl", descFields.ACL}, &singleValueCondition{"cache-control", descFields.CacheControl}, + &singleValueCondition{"content-disposition", descFields.ContentDisposition}, + &singleValueCondition{"content-encoding", descFields.ContentEncoding}, + &singleValueCondition{"content-type", descFields.ContentType}, + conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), + conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), ) YYYYMMDD := now.Format(yearMonthDay) @@ -261,8 +304,12 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options "x-goog-date": now.Format(iso8601), "x-goog-credential": opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", "x-goog-algorithm": "GOOG4-RSA-SHA256", - "success_action_redirect": descFields.RedirectToURLOnSuccess, "acl": descFields.ACL, + "cache-control": descFields.CacheControl, + "content-disposition": descFields.ContentDisposition, + "content-encoding": descFields.ContentEncoding, + "content-type": descFields.ContentType, + "success_action_redirect": descFields.RedirectToURLOnSuccess, } for key, value := range descFields.Metadata { conds = append(conds, &singleValueCondition{key, value}) @@ -293,14 +340,22 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options "expiration": opts.Expires.Format(time.RFC3339), }) if err != nil { - return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %v", err) + return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %w", err) } b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON) - shaSum := sha256.Sum256([]byte(b64Policy)) - signature, err := signingFn(shaSum[:]) - if err != nil { - return nil, err + var signature []byte + var signErr error + + if opts.shouldHashSignBytes { + // SignBytes expects hashed bytes as input instead of raw bytes, so we hash them + shaSum := sha256.Sum256([]byte(b64Policy)) + signature, signErr = signingFn(shaSum[:]) + } else { + signature, signErr = signingFn([]byte(b64Policy)) + } + if signErr != nil { + return nil, signErr } policyFields["policy"] = b64Policy @@ -338,15 +393,16 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options // validatePostPolicyV4Options checks that: // * GoogleAccessID is set -// * either but not both PrivateKey and SignBytes are set or nil, but not both -// * Expires, the deadline is not in the past +// * either PrivateKey or SignRawBytes/SignBytes is set, but not both +// * the deadline set in Expires is not in the past // * if Style is not set, it'll use PathStyle +// * sets shouldHashSignBytes to true if opts.SignBytes should be used func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error { if opts == nil || opts.GoogleAccessID == "" { return errors.New("storage: missing required GoogleAccessID") } - if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil; privBlank == signBlank { - return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil && opts.SignRawBytes == nil; privBlank == signBlank { + return errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set") } if opts.Expires.Before(now) { return errors.New("storage: expecting Expires to be in the future") @@ -354,6 +410,9 @@ func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error if opts.Style == nil { opts.Style = PathStyle() } + if 
opts.SignRawBytes == nil && opts.SignBytes != nil { + opts.shouldHashSignBytes = true + } return nil } diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index d64f5ec778c3..46487d2b77de 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -16,20 +16,15 @@ package storage import ( "context" - "errors" "fmt" "hash/crc32" "io" "io/ioutil" "net/http" - "net/url" - "reflect" - "strconv" "strings" "time" "cloud.google.com/go/internal/trace" - "google.golang.org/api/googleapi" ) var crc32cTable = crc32.MakeTable(crc32.Castagnoli) @@ -106,194 +101,31 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) return nil, err } } - u := &url.URL{ - Scheme: o.c.scheme, - Host: o.c.readHost, - Path: fmt.Sprintf("/%s/%s", o.bucket, o.object), - } - verb := "GET" - if length == 0 { - verb = "HEAD" - } - req, err := http.NewRequest(verb, u.String(), nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - if o.userProject != "" { - req.Header.Set("X-Goog-User-Project", o.userProject) - } - if o.readCompressed { - req.Header.Set("Accept-Encoding", "gzip") - } - if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { - return nil, err - } - - gen := o.gen - - // Define a function that initiates a Read with offset and length, assuming we - // have already read seen bytes. - reopen := func(seen int64) (*http.Response, error) { - start := offset + seen - if length < 0 && start < 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d", start)) - } else if length < 0 && start > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) - } else if length > 0 { - // The end character isn't affected by how many bytes we've seen. - req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1)) - } - // We wait to assign conditions here because the generation number can change in between reopen() runs. - req.URL.RawQuery = conditionsQuery(gen, o.conds) - var res *http.Response - err = runWithRetry(ctx, func() error { - res, err = o.c.hc.Do(req) - if err != nil { - return err - } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return ErrObjectNotExist - } - if res.StatusCode < 200 || res.StatusCode > 299 { - body, _ := ioutil.ReadAll(res.Body) - res.Body.Close() - return &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - Body: string(body), - } - } - - partialContentNotSatisfied := - !decompressiveTranscoding(res) && - start > 0 && length != 0 && - res.StatusCode != http.StatusPartialContent - - if partialContentNotSatisfied { - res.Body.Close() - return errors.New("storage: partial request not satisfied") - } - - // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves - // back the whole file regardless of the range count passed in as per: - // https://cloud.google.com/storage/docs/transcoding#range, - // thus we have to manually move the body forward by seen bytes. - if decompressiveTranscoding(res) && seen > 0 { - _, _ = io.CopyN(ioutil.Discard, res.Body, seen) - } - - // If a generation hasn't been specified, and this is the first response we get, let's record the - // generation. In future requests we'll use this generation as a precondition to avoid data races. 
- if gen < 0 && res.Header.Get("X-Goog-Generation") != "" { - gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) - if err != nil { - return err - } - gen = gen64 - } - return nil - }) - if err != nil { - return nil, err - } - return res, nil - } - - res, err := reopen(0) - if err != nil { - return nil, err - } - var ( - size int64 // total size of object, even if a range was requested. - checkCRC bool - crc uint32 - startOffset int64 // non-zero if range request. - ) - if res.StatusCode == http.StatusPartialContent { - cr := strings.TrimSpace(res.Header.Get("Content-Range")) - if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } - size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } - - dashIndex := strings.Index(cr, "-") - if dashIndex >= 0 { - startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err) - } - } - } else { - size = res.ContentLength - // Check the CRC iff all of the following hold: - // - We asked for content (length != 0). - // - We got all the content (status != PartialContent). - // - The server sent a CRC header. - // - The Go http stack did not uncompress the file. - // - We were not served compressed data that was uncompressed on download. - // The problem with the last two cases is that the CRC will not match -- GCS - // computes it on the compressed contents, but we compute it on the - // uncompressed contents. - if length != 0 && !res.Uncompressed && !uncompressedByServer(res) { - crc, checkCRC = parseCRC32c(res) - } - } - remain := res.ContentLength - body := res.Body - if length == 0 { - remain = 0 - body.Close() - body = emptyBody - } - var metaGen int64 - if res.Header.Get("X-Goog-Metageneration") != "" { - metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) - if err != nil { - return nil, err - } + opts := makeStorageOpts(true, o.retry, o.userProject) + + params := &newRangeReaderParams{ + bucket: o.bucket, + object: o.object, + gen: o.gen, + offset: offset, + length: length, + encryptionKey: o.encryptionKey, + conds: o.conds, + readCompressed: o.readCompressed, } - var lm time.Time - if res.Header.Get("Last-Modified") != "" { - lm, err = http.ParseTime(res.Header.Get("Last-Modified")) - if err != nil { - return nil, err - } - } + r, err = o.c.tc.NewRangeReader(ctx, params, opts...) - attrs := ReaderObjectAttrs{ - Size: size, - ContentType: res.Header.Get("Content-Type"), - ContentEncoding: res.Header.Get("Content-Encoding"), - CacheControl: res.Header.Get("Cache-Control"), - LastModified: lm, - StartOffset: startOffset, - Generation: gen, - Metageneration: metaGen, - } - return &Reader{ - Attrs: attrs, - body: body, - size: size, - remain: remain, - wantCRC: crc, - checkCRC: checkCRC, - reopen: reopen, - }, nil + return r, err } // decompressiveTranscoding returns true if the request was served decompressed // and different than its original storage form. This happens when the "Content-Encoding" // header is "gzip". 
// See: -// * https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip -// * https://github.com/googleapis/google-cloud-go/issues/1800 +// - https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip +// - https://github.com/googleapis/google-cloud-go/issues/1800 func decompressiveTranscoding(res *http.Response) bool { // Decompressive Transcoding. return res.Header.Get("Content-Encoding") == "gzip" || @@ -320,6 +152,34 @@ func parseCRC32c(res *http.Response) (uint32, bool) { return 0, false } +// setConditionsHeaders sets precondition request headers for downloads +// using the XML API. It assumes that the conditions have been validated. +func setConditionsHeaders(headers http.Header, conds *Conditions) error { + if conds == nil { + return nil + } + if conds.MetagenerationMatch != 0 { + headers.Set("x-goog-if-metageneration-match", fmt.Sprint(conds.MetagenerationMatch)) + } + switch { + case conds.GenerationMatch != 0: + headers.Set("x-goog-if-generation-match", fmt.Sprint(conds.GenerationMatch)) + case conds.DoesNotExist: + headers.Set("x-goog-if-generation-match", "0") + } + return nil +} + +// Wrap a request to look similar to an apiary library request, in order to +// be used by run(). +type readerRequestWrapper struct { + req *http.Request +} + +func (w *readerRequestWrapper) Header() http.Header { + return w.req.Header +} + var emptyBody = ioutil.NopCloser(strings.NewReader("")) // Reader reads a Cloud Storage object. @@ -330,21 +190,21 @@ var emptyBody = ioutil.NopCloser(strings.NewReader("")) // is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding. type Reader struct { Attrs ReaderObjectAttrs - body io.ReadCloser seen, remain, size int64 checkCRC bool // should we check the CRC? wantCRC uint32 // the CRC32c value the server sent in the header gotCRC uint32 // running crc - reopen func(seen int64) (*http.Response, error) + + reader io.ReadCloser } // Close closes the Reader. It must be called when done reading. func (r *Reader) Close() error { - return r.body.Close() + return r.reader.Close() } func (r *Reader) Read(p []byte) (int, error) { - n, err := r.readWithRetry(p) + n, err := r.reader.Read(p) if r.remain != -1 { r.remain -= int64(n) } @@ -363,35 +223,6 @@ func (r *Reader) Read(p []byte) (int, error) { return n, err } -func (r *Reader) readWithRetry(p []byte) (int, error) { - n := 0 - for len(p[n:]) > 0 { - m, err := r.body.Read(p[n:]) - n += m - r.seen += int64(m) - if !shouldRetryRead(err) { - return n, err - } - // Read failed, but we will try again. Send a ranged read request that takes - // into account the number of bytes we've already seen. - res, err := r.reopen(r.seen) - if err != nil { - // reopen already retries - return n, err - } - r.body.Close() - r.body = res.Body - } - return n, nil -} - -func shouldRetryRead(err error) bool { - if err == nil { - return false - } - return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2") -} - // Size returns the size of the object in bytes. // The returned value is always the same and is not affected by // calls to Read or Close. 
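After this hunk, NewRangeReader delegates to the transport-agnostic storageClient (o.c.tc) instead of building the XML/HTTP request inline, and Reader wraps a plain io.ReadCloser, but the exported surface is unchanged. A minimal usage sketch for reading a byte range; the bucket and object names are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Read the first 1024 bytes of the object.
	r, err := client.Bucket("my-bucket").Object("my-object").NewRangeReader(ctx, 0, 1024)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
	// Attrs.Size reports the total object size even for a range request.
	fmt.Fprintln(os.Stderr, "total object size:", r.Attrs.Size)
}
```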
diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 20d9518a42da..b5c10efc8a75 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -40,11 +40,20 @@ import ( "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" - "cloud.google.com/go/internal/version" + "cloud.google.com/go/storage/internal" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "github.com/googleapis/gax-go/v2" + "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" raw "google.golang.org/api/storage/v1" + "google.golang.org/api/transport" htransport "google.golang.org/api/transport/http" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/known/fieldmaskpb" + "google.golang.org/protobuf/types/known/timestamppb" ) // Methods which can be used in signed URLs. @@ -55,11 +64,14 @@ var ( ErrBucketNotExist = errors.New("storage: bucket doesn't exist") // ErrObjectNotExist indicates that the object does not exist. ErrObjectNotExist = errors.New("storage: object doesn't exist") + // errMethodNotSupported indicates that the method called is not currently supported by the client. + // TODO: Export this error when launching the transport-agnostic client. + errMethodNotSupported = errors.New("storage: method is not currently supported") // errMethodNotValid indicates that given HTTP method is not valid. errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys()) ) -var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", version.Repo) +var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", internal.Version) const ( // ScopeFullControl grants permissions to manage your @@ -73,12 +85,19 @@ const ( // ScopeReadWrite grants permissions to manage your // data in Google Cloud Storage. ScopeReadWrite = raw.DevstorageReadWriteScope -) -var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo) + // aes256Algorithm is the AES256 encryption algorithm used with the + // Customer-Supplied Encryption Keys feature. + aes256Algorithm = "AES256" + + // defaultGen indicates the latest object generation by default, + // using a negative value. + defaultGen = int64(-1) +) +// TODO: remove this once header with invocation ID is applied to all methods. func setClientHeader(headers http.Header) { - headers.Set("x-goog-api-client", xGoogHeader) + headers.Set("x-goog-api-client", xGoogDefaultHeader) } // Client is a client for interacting with Google Cloud Storage. @@ -90,62 +109,127 @@ type Client struct { raw *raw.Service // Scheme describes the scheme under the current host. scheme string - // EnvHost is the host set on the STORAGE_EMULATOR_HOST variable. - envHost string // ReadHost is the default host used on the reader. readHost string + // May be nil. + creds *google.Credentials + retry *retryConfig + + // tc is the transport-agnostic client implemented with either gRPC or HTTP. + tc storageClient + // useGRPC flags whether the client uses gRPC. This is needed while the + // integration piece is only partially complete. + // TODO: remove before merging to main. + useGRPC bool } // NewClient creates a new Google Cloud Storage client. -// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. 
+// The default scope is ScopeFullControl. To use a different scope, like +// ScopeReadOnly, use option.WithScopes. +// +// Clients should be reused instead of created as needed. The methods of Client +// are safe for concurrent use by multiple goroutines. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - var host, readHost, scheme string - if host = os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { - scheme = "https" - readHost = "storage.googleapis.com" + // Use the experimental gRPC client if the env var is set. + // This is an experimental API and not intended for public use. + if withGRPC := os.Getenv("STORAGE_USE_GRPC"); withGRPC != "" { + return newGRPCClient(ctx, opts...) + } + + var creds *google.Credentials + // In general, it is recommended to use raw.NewService instead of htransport.NewClient + // since raw.NewService configures the correct default endpoints when initializing the + // internal http client. However, in our case, "NewRangeReader" in reader.go needs to + // access the http client directly to make requests, so we create the client manually + // here so it can be re-used by both reader.go and raw.NewService. This means we need to + // manually configure the default endpoint options on the http client. Furthermore, we + // need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints. + if host := os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { // Prepend default options to avoid overriding options passed by the user. - opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl), option.WithUserAgent(userAgent)}, opts...) + opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, opts...) + + opts = append(opts, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/")) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/")) + + // Don't error out here. The user may have passed in their own HTTP + // client which does not auth with ADC or other common conventions. + c, err := transport.Creds(ctx, opts...) + if err == nil { + creds = c + opts = append(opts, internaloption.WithCredentials(creds)) + } } else { - scheme = "http" - readHost = host + var hostURL *url.URL + if strings.Contains(host, "://") { + h, err := url.Parse(host) + if err != nil { + return nil, err + } + hostURL = h + } else { + // Add scheme for user if not supplied in STORAGE_EMULATOR_HOST + // URL is only parsed correctly if it has a scheme, so we build it ourselves + hostURL = &url.URL{Scheme: "http", Host: host} + } + + hostURL.Path = "storage/v1/" + endpoint := hostURL.String() + + // Append the emulator host as default endpoint for the user opts = append([]option.ClientOption{option.WithoutAuthentication()}, opts...) + + opts = append(opts, internaloption.WithDefaultEndpoint(endpoint)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(endpoint)) } + // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. hc, ep, err := htransport.NewClient(ctx, opts...) if err != nil { - return nil, fmt.Errorf("dialing: %v", err) + return nil, fmt.Errorf("dialing: %w", err) } - rawService, err := raw.NewService(ctx, option.WithHTTPClient(hc)) + // RawService should be created with the chosen endpoint to take account of user override. 
+ rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc)) if err != nil { - return nil, fmt.Errorf("storage client: %v", err) + return nil, fmt.Errorf("storage client: %w", err) } - if ep == "" { - // Override the default value for BasePath from the raw client. - // TODO: remove when the raw client uses this endpoint as its default (~end of 2020) - rawService.BasePath = "https://storage.googleapis.com/storage/v1/" - } else { - // If the endpoint has been set explicitly, use this for the BasePath - // as well as readHost - rawService.BasePath = ep - u, err := url.Parse(ep) - if err != nil { - return nil, fmt.Errorf("supplied endpoint %v is not valid: %v", ep, err) - } - readHost = u.Host + // Update readHost and scheme with the chosen endpoint. + u, err := url.Parse(ep) + if err != nil { + return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) + } + + tc, err := newHTTPStorageClient(ctx, withClientOptions(opts...)) + if err != nil { + return nil, fmt.Errorf("storage: %w", err) } return &Client{ hc: hc, raw: rawService, - scheme: scheme, - envHost: host, - readHost: readHost, + scheme: u.Scheme, + readHost: u.Host, + creds: creds, + tc: tc, }, nil } +// newGRPCClient creates a new Storage client that initializes a gRPC-based +// client. Calls that have not been implemented in gRPC will panic. +// +// This is an experimental API and not intended for public use. +func newGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + opts = append(defaultGRPCOptions(), opts...) + tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...)) + if err != nil { + return nil, err + } + + return &Client{tc: tc, useGRPC: true}, nil +} + // Close closes the Client. // // Close need not be called at program exit. @@ -153,6 +237,10 @@ func (c *Client) Close() error { // Set fields to nil so that subsequent uses will panic. c.hc = nil c.raw = nil + c.creds = nil + if c.tc != nil { + return c.tc.Close() + } return nil } @@ -193,10 +281,18 @@ type bucketBoundHostname struct { } func (s pathStyle) host(bucket string) string { + if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { + return stripScheme(host) + } + return "storage.googleapis.com" } func (s virtualHostedStyle) host(bucket string) string { + if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { + return bucket + "." + stripScheme(host) + } + return bucket + ".storage.googleapis.com" } @@ -244,6 +340,14 @@ func BucketBoundHostname(hostname string) URLStyle { return bucketBoundHostname{hostname: hostname} } +// Strips the scheme from a host if it contains it +func stripScheme(host string) string { + if strings.Contains(host, "://") { + host = strings.SplitN(host, "://", 2)[1] + } + return host +} + // SignedURLOptions allows you to restrict the access to the signed URL. type SignedURLOptions struct { // GoogleAccessID represents the authorizer of the signed URL generation. @@ -336,6 +440,23 @@ type SignedURLOptions struct { Scheme SigningScheme } +func (opts *SignedURLOptions) clone() *SignedURLOptions { + return &SignedURLOptions{ + GoogleAccessID: opts.GoogleAccessID, + SignBytes: opts.SignBytes, + PrivateKey: opts.PrivateKey, + Method: opts.Method, + Expires: opts.Expires, + ContentType: opts.ContentType, + Headers: opts.Headers, + QueryParameters: opts.QueryParameters, + MD5: opts.MD5, + Style: opts.Style, + Insecure: opts.Insecure, + Scheme: opts.Scheme, + } +} + var ( tabRegex = regexp.MustCompile(`[\t]+`) // I was tempted to call this spacex. 
:) @@ -349,7 +470,7 @@ var ( ) // v2SanitizeHeaders applies the specifications for canonical extension headers at -// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +// https://cloud.google.com/storage/docs/access-control/signed-urls-v2#about-canonical-extension-headers func v2SanitizeHeaders(hdrs []string) []string { headerMap := map[string][]string{} for _, hdr := range hdrs { @@ -397,16 +518,16 @@ func v2SanitizeHeaders(hdrs []string) []string { } // v4SanitizeHeaders applies the specifications for canonical extension headers -// at https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +// at https://cloud.google.com/storage/docs/authentication/canonical-requests#about-headers. // // V4 does a couple things differently from V2: -// - Headers get sorted by key, instead of by key:value. We do this in -// signedURLV4. -// - There's no canonical regexp: we simply split headers on :. -// - We don't exclude canonical headers. -// - We replace leading and trailing spaces in header values, like v2, but also -// all intermediate space duplicates get stripped. That is, there's only ever -// a single consecutive space. +// - Headers get sorted by key, instead of by key:value. We do this in +// signedURLV4. +// - There's no canonical regexp: we simply split headers on :. +// - We don't exclude canonical headers. +// - We replace leading and trailing spaces in header values, like v2, but also +// all intermediate space duplicates get stripped. That is, there's only ever +// a single consecutive space. func v4SanitizeHeaders(hdrs []string) []string { headerMap := map[string][]string{} for _, hdr := range hdrs { @@ -449,11 +570,13 @@ func v4SanitizeHeaders(hdrs []string) []string { return sanitizedHeaders } -// SignedURL returns a URL for the specified object. Signed URLs allow -// the users access to a restricted resource for a limited time without having a -// Google account or signing in. For more information about the signed -// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs. -func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { +// SignedURL returns a URL for the specified object. Signed URLs allow anyone +// access to a restricted resource for a limited time without needing a +// Google account or signing in. For more information about signed URLs, see +// https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication +// If initializing a Storage Client, instead use the Bucket.SignedURL method +// which uses the Client's credentials to handle authentication. 
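The updated SignedURL doc comment above points callers who already hold a Client at BucketHandle.SignedURL, which signs with the Client's own credentials rather than requiring GoogleAccessID/PrivateKey in the options. A minimal V4 sketch, assuming an initialized *storage.Client named client; the bucket and object names are placeholders.

```go
// Assumes an initialized *storage.Client named client.
opts := &storage.SignedURLOptions{
	Scheme:  storage.SigningSchemeV4,
	Method:  "GET",
	Expires: time.Now().Add(15 * time.Minute),
}
url, err := client.Bucket("my-bucket").SignedURL("my-object", opts)
if err != nil {
	log.Fatal(err)
}
fmt.Println(url)
```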
+func SignedURL(bucket, object string, opts *SignedURLOptions) (string, error) { now := utcNow() if err := validateOptions(opts, now); err != nil { return "", err @@ -462,13 +585,13 @@ func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { switch opts.Scheme { case SigningSchemeV2: opts.Headers = v2SanitizeHeaders(opts.Headers) - return signedURLV2(bucket, name, opts) + return signedURLV2(bucket, object, opts) case SigningSchemeV4: opts.Headers = v4SanitizeHeaders(opts.Headers) - return signedURLV4(bucket, name, opts, now) + return signedURLV4(bucket, object, opts, now) default: // SigningSchemeDefault opts.Headers = v2SanitizeHeaders(opts.Headers) - return signedURLV2(bucket, name, opts) + return signedURLV2(bucket, object, opts) } } @@ -583,8 +706,10 @@ func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (st for k, v := range opts.QueryParameters { canonicalQueryString[k] = append(canonicalQueryString[k], v...) } - - fmt.Fprintf(buf, "%s\n", canonicalQueryString.Encode()) + // url.Values.Encode escaping is correct, except that a space must be replaced + // by `%20` rather than `+`. + escapedQuery := strings.Replace(canonicalQueryString.Encode(), "+", "%20", -1) + fmt.Fprintf(buf, "%s\n", escapedQuery) // Fill in the hostname based on the desired URL style. u.Host = opts.Style.host(bucket) @@ -721,7 +846,7 @@ func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) { } encoded := base64.StdEncoding.EncodeToString(b) u.Scheme = "https" - u.Host = "storage.googleapis.com" + u.Host = PathStyle().host(bucket) q := u.Query() q.Set("GoogleAccessId", opts.GoogleAccessID) q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) @@ -742,6 +867,7 @@ type ObjectHandle struct { encryptionKey []byte // AES-256 key userProject string // for requester-pays buckets readCompressed bool // Accept-Encoding: gzip + retry *retryConfig } // ACL provides access to the object's access control list. @@ -793,30 +919,12 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error if err := o.validate(); err != nil { return nil, err } - call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx) - if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { - return nil, err - } - if o.userProject != "" { - call.UserProject(o.userProject) - } - if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - setClientHeader(call.Header()) - err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(obj), nil + opts := makeStorageOpts(true, o.retry, o.userProject) + return o.c.tc.GetObject(ctx, o.bucket, o.object, o.gen, o.encryptionKey, o.conds, opts...) } -// Update updates an object with the provided attributes. -// All zero-value attributes are ignored. +// Update updates an object with the provided attributes. See +// ObjectAttrsToUpdate docs for details on treatment of zero values. // ErrObjectNotExist will be returned if the object is not found. 
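For the object-metadata paths touched below, a short usage sketch of Update; as with Attrs, a missing object surfaces as the ErrObjectNotExist sentinel defined earlier in this file. The client, context, bucket and object names are assumed placeholders.

```go
// Assumes an initialized *storage.Client named client and a context ctx.
o := client.Bucket("my-bucket").Object("config.json")
attrs, err := o.Update(ctx, storage.ObjectAttrsToUpdate{
	ContentType:  "application/json",
	CacheControl: "no-cache",
})
switch {
case errors.Is(err, storage.ErrObjectNotExist):
	log.Println("object was deleted or never existed")
case err != nil:
	log.Fatal(err)
default:
	log.Println("new metageneration:", attrs.Metageneration)
}
```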
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update") @@ -825,90 +933,9 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) ( if err := o.validate(); err != nil { return nil, err } - var attrs ObjectAttrs - // Lists of fields to send, and set to null, in the JSON. - var forceSendFields, nullFields []string - if uattrs.ContentType != nil { - attrs.ContentType = optional.ToString(uattrs.ContentType) - // For ContentType, sending the empty string is a no-op. - // Instead we send a null. - if attrs.ContentType == "" { - nullFields = append(nullFields, "ContentType") - } else { - forceSendFields = append(forceSendFields, "ContentType") - } - } - if uattrs.ContentLanguage != nil { - attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) - // For ContentLanguage it's an error to send the empty string. - // Instead we send a null. - if attrs.ContentLanguage == "" { - nullFields = append(nullFields, "ContentLanguage") - } else { - forceSendFields = append(forceSendFields, "ContentLanguage") - } - } - if uattrs.ContentEncoding != nil { - attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) - forceSendFields = append(forceSendFields, "ContentEncoding") - } - if uattrs.ContentDisposition != nil { - attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) - forceSendFields = append(forceSendFields, "ContentDisposition") - } - if uattrs.CacheControl != nil { - attrs.CacheControl = optional.ToString(uattrs.CacheControl) - forceSendFields = append(forceSendFields, "CacheControl") - } - if uattrs.EventBasedHold != nil { - attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold) - forceSendFields = append(forceSendFields, "EventBasedHold") - } - if uattrs.TemporaryHold != nil { - attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) - forceSendFields = append(forceSendFields, "TemporaryHold") - } - if uattrs.Metadata != nil { - attrs.Metadata = uattrs.Metadata - if len(attrs.Metadata) == 0 { - // Sending the empty map is a no-op. We send null instead. - nullFields = append(nullFields, "Metadata") - } else { - forceSendFields = append(forceSendFields, "Metadata") - } - } - if uattrs.ACL != nil { - attrs.ACL = uattrs.ACL - // It's an error to attempt to delete the ACL, so - // we don't append to nullFields here. 
- forceSendFields = append(forceSendFields, "Acl") - } - rawObj := attrs.toRawObject(o.bucket) - rawObj.ForceSendFields = forceSendFields - rawObj.NullFields = nullFields - call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx) - if err := applyConds("Update", o.gen, o.conds, call); err != nil { - return nil, err - } - if o.userProject != "" { - call.UserProject(o.userProject) - } - if uattrs.PredefinedACL != "" { - call.PredefinedAcl(uattrs.PredefinedACL) - } - if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - setClientHeader(call.Header()) - err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(obj), nil + isIdempotent := o.conds != nil && o.conds.MetagenerationMatch != 0 + opts := makeStorageOpts(isIdempotent, o.retry, o.userProject) + return o.c.tc.UpdateObject(ctx, o.bucket, o.object, &uattrs, o.gen, o.encryptionKey, o.conds, opts...) } // BucketName returns the name of the bucket. @@ -923,15 +950,17 @@ func (o *ObjectHandle) ObjectName() string { // ObjectAttrsToUpdate is used to update the attributes of an object. // Only fields set to non-nil values will be updated. -// Set a field to its zero value to delete it. +// For all fields except CustomTime, set the field to its zero value to delete +// it. CustomTime cannot be deleted or changed to an earlier time once set. // // For example, to change ContentType and delete ContentEncoding and // Metadata, use -// ObjectAttrsToUpdate{ -// ContentType: "text/html", -// ContentEncoding: "", -// Metadata: map[string]string{}, -// } +// +// ObjectAttrsToUpdate{ +// ContentType: "text/html", +// ContentEncoding: "", +// Metadata: map[string]string{}, +// } type ObjectAttrsToUpdate struct { EventBasedHold optional.Bool TemporaryHold optional.Bool @@ -940,7 +969,8 @@ type ObjectAttrsToUpdate struct { ContentEncoding optional.String ContentDisposition optional.String CacheControl optional.String - Metadata map[string]string // set to map[string]string{} to delete + CustomTime time.Time // Cannot be deleted or backdated from its current value. + Metadata map[string]string // Set to map[string]string{} to delete. ACL []ACLRule // If not empty, applies a predefined set of access controls. ACL must be nil. @@ -953,25 +983,11 @@ func (o *ObjectHandle) Delete(ctx context.Context) error { if err := o.validate(); err != nil { return err } - call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx) - if err := applyConds("Delete", o.gen, o.conds, call); err != nil { - return err - } - if o.userProject != "" { - call.UserProject(o.userProject) - } - // Encryption doesn't apply to Delete. - setClientHeader(call.Header()) - err := runWithRetry(ctx, func() error { return call.Do() }) - switch e := err.(type) { - case nil: - return nil - case *googleapi.Error: - if e.Code == http.StatusNotFound { - return ErrObjectNotExist - } - } - return err + // Delete is idempotent if GenerationMatch or Generation have been passed in. + // The default generation is negative to get the latest version of the object. + isIdempotent := (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 + opts := makeStorageOpts(isIdempotent, o.retry, o.userProject) + return o.c.tc.DeleteObject(ctx, o.bucket, o.object, o.gen, o.conds, opts...) 
} // ReadCompressed when true causes the read to happen without decompressing. @@ -994,6 +1010,9 @@ func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle { // attribute is specified, the content type will be automatically sniffed // using net/http.DetectContentType. // +// Note that each Writer allocates an internal buffer of size Writer.ChunkSize. +// See the ChunkSize docs for more information. +// // It is the caller's responsibility to call Close when writing is done. To // stop writing without saving the data, cancel the context. func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { @@ -1047,6 +1066,10 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { if !o.RetentionExpirationTime.IsZero() { ret = o.RetentionExpirationTime.Format(time.RFC3339) } + var ct string + if !o.CustomTime.IsZero() { + ct = o.CustomTime.Format(time.RFC3339) + } return &raw.Object{ Bucket: bucket, Name: o.Name, @@ -1061,7 +1084,84 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { StorageClass: o.StorageClass, Acl: toRawObjectACL(o.ACL), Metadata: o.Metadata, + CustomTime: ct, + } +} + +// toProtoObject copies the editable attributes from o to the proto library's Object type. +func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object { + // For now, there are only globally unique buckets, and "_" is the alias + // project ID for such buckets. If the bucket is not provided, like in the + // destination ObjectAttrs of a Copy, do not attempt to format it. + if b != "" { + b = bucketResourceName(globalProjectAlias, b) + } + + return &storagepb.Object{ + Bucket: b, + Name: o.Name, + EventBasedHold: proto.Bool(o.EventBasedHold), + TemporaryHold: o.TemporaryHold, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ContentDisposition: o.ContentDisposition, + StorageClass: o.StorageClass, + Acl: toProtoObjectACL(o.ACL), + Metadata: o.Metadata, + CreateTime: toProtoTimestamp(o.Created), + CustomTime: toProtoTimestamp(o.CustomTime), + DeleteTime: toProtoTimestamp(o.Deleted), + RetentionExpireTime: toProtoTimestamp(o.RetentionExpirationTime), + UpdateTime: toProtoTimestamp(o.Updated), + KmsKey: o.KMSKeyName, + Generation: o.Generation, + Size: o.Size, + } +} + +// toProtoObject copies the attributes to update from uattrs to the proto library's Object type. +func (uattrs *ObjectAttrsToUpdate) toProtoObject(bucket, object string) *storagepb.Object { + o := &storagepb.Object{ + Name: object, + Bucket: bucket, + } + if uattrs == nil { + return o + } + + if uattrs.EventBasedHold != nil { + o.EventBasedHold = proto.Bool(optional.ToBool(uattrs.EventBasedHold)) + } + if uattrs.TemporaryHold != nil { + o.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) + } + if uattrs.ContentType != nil { + o.ContentType = optional.ToString(uattrs.ContentType) + } + if uattrs.ContentLanguage != nil { + o.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + } + if uattrs.ContentEncoding != nil { + o.ContentEncoding = optional.ToString(uattrs.ContentEncoding) } + if uattrs.ContentDisposition != nil { + o.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + } + if uattrs.CacheControl != nil { + o.CacheControl = optional.ToString(uattrs.CacheControl) + } + if !uattrs.CustomTime.IsZero() { + o.CustomTime = toProtoTimestamp(uattrs.CustomTime) + } + if uattrs.ACL != nil { + o.Acl = toProtoObjectACL(uattrs.ACL) + } + + // TODO(cathyo): Handle metadata. Pending b/230510191. 
+ + return o } // ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object. @@ -1135,6 +1235,9 @@ type ObjectAttrs struct { // Composer. In those cases, if the SendCRC32C field in the Writer or Composer // is set to is true, the uploaded data is rejected if its CRC32C hash does // not match this field. + // + // Note: For a Writer, SendCRC32C must be set to true BEFORE the first call to + // Writer.Write() in order to send the checksum. CRC32C uint32 // MediaLink is an URL to the object's content. This field is read-only. @@ -1142,6 +1245,10 @@ type ObjectAttrs struct { // Metadata represents user-provided metadata, in key/value pairs. // It can be nil if no metadata is provided. + // + // For object downloads using Reader, metadata keys are sent as headers. + // Therefore, avoid setting metadata keys using characters that are not valid + // for headers. See https://www.rfc-editor.org/rfc/rfc7230#section-3.2.6. Metadata map[string]string // Generation is the generation number of the object's content. @@ -1199,6 +1306,15 @@ type ObjectAttrs struct { // Etag is the HTTP/1.1 Entity tag for the object. // This field is read-only. Etag string + + // A user-specified timestamp which can be applied to an object. This is + // typically set in order to use the CustomTimeBefore and DaysSinceCustomTime + // LifecycleConditions to manage object lifecycles. + // + // CustomTime cannot be removed once set on an object. It can be updated to a + // later value but not to an earlier one. For more information see + // https://cloud.google.com/storage/docs/metadata#custom-time . + CustomTime time.Time } // convertTime converts a time in RFC3339 format to time.Time. @@ -1211,6 +1327,22 @@ func convertTime(t string) time.Time { return r } +func convertProtoTime(t *timestamppb.Timestamp) time.Time { + var r time.Time + if t != nil { + r = t.AsTime() + } + return r +} + +func toProtoTimestamp(t time.Time) *timestamppb.Timestamp { + if t.IsZero() { + return nil + } + + return timestamppb.New(t) +} + func newObject(o *raw.Object) *ObjectAttrs { if o == nil { return nil @@ -1252,6 +1384,41 @@ func newObject(o *raw.Object) *ObjectAttrs { Deleted: convertTime(o.TimeDeleted), Updated: convertTime(o.Updated), Etag: o.Etag, + CustomTime: convertTime(o.CustomTime), + } +} + +func newObjectFromProto(o *storagepb.Object) *ObjectAttrs { + if o == nil { + return nil + } + return &ObjectAttrs{ + Bucket: parseBucketName(o.Bucket), + Name: o.Name, + ContentType: o.ContentType, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + EventBasedHold: o.GetEventBasedHold(), + TemporaryHold: o.TemporaryHold, + RetentionExpirationTime: convertProtoTime(o.GetRetentionExpireTime()), + ACL: toObjectACLRulesFromProto(o.GetAcl()), + Owner: o.GetOwner().GetEntity(), + ContentEncoding: o.ContentEncoding, + ContentDisposition: o.ContentDisposition, + Size: int64(o.Size), + MD5: o.GetChecksums().GetMd5Hash(), + CRC32C: o.GetChecksums().GetCrc32C(), + Metadata: o.Metadata, + Generation: o.Generation, + Metageneration: o.Metageneration, + StorageClass: o.StorageClass, + // CustomerKeySHA256 needs to be presented as base64 encoded, but the response from gRPC is not. 
+ CustomerKeySHA256: base64.StdEncoding.EncodeToString(o.GetCustomerEncryption().GetKeySha256Bytes()), + KMSKeyName: o.GetKmsKey(), + Created: convertProtoTime(o.GetCreateTime()), + Deleted: convertProtoTime(o.GetDeleteTime()), + Updated: convertProtoTime(o.GetUpdateTime()), + CustomTime: convertProtoTime(o.GetCustomTime()), } } @@ -1273,6 +1440,31 @@ func encodeUint32(u uint32) string { return base64.StdEncoding.EncodeToString(b) } +// Projection is enumerated type for Query.Projection. +type Projection int + +const ( + // ProjectionDefault returns all fields of objects. + ProjectionDefault Projection = iota + + // ProjectionFull returns all fields of objects. + ProjectionFull + + // ProjectionNoACL returns all fields of objects except for Owner and ACL. + ProjectionNoACL +) + +func (p Projection) String() string { + switch p { + case ProjectionFull: + return "full" + case ProjectionNoACL: + return "noAcl" + default: + return "" + } +} + // Query represents a query to filter objects from a bucket. type Query struct { // Delimiter returns results in a directory-like fashion. @@ -1293,10 +1485,35 @@ type Query struct { // object will be included in the results. Versions bool - // fieldSelection is used to select only specific fields to be returned by - // the query. It's used internally and is populated for the user by - // calling Query.SetAttrSelection - fieldSelection string + // attrSelection is used to select only specific fields to be returned by + // the query. It is set by the user calling calling SetAttrSelection. These + // are used by toFieldMask and toFieldSelection for gRPC and HTTP/JSON + // clients repsectively. + attrSelection []string + + // StartOffset is used to filter results to objects whose names are + // lexicographically equal to or after startOffset. If endOffset is also set, + // the objects listed will have names between startOffset (inclusive) and + // endOffset (exclusive). + StartOffset string + + // EndOffset is used to filter results to objects whose names are + // lexicographically before endOffset. If startOffset is also set, the objects + // listed will have names between startOffset (inclusive) and endOffset (exclusive). + EndOffset string + + // Projection defines the set of properties to return. It will default to ProjectionFull, + // which returns all properties. Passing ProjectionNoACL will omit Owner and ACL, + // which may improve performance when listing many objects. + Projection Projection + + // IncludeTrailingDelimiter controls how objects which end in a single + // instance of Delimiter (for example, if Query.Delimiter = "/" and the + // object name is "foo/bar/") are included in the results. By default, these + // objects only show up as prefixes. If IncludeTrailingDelimiter is set to + // true, they will also be included as objects and their metadata will be + // populated in the returned ObjectAttrs. + IncludeTrailingDelimiter bool } // attrToFieldMap maps the field names of ObjectAttrs to the underlying field @@ -1329,6 +1546,40 @@ var attrToFieldMap = map[string]string{ "Deleted": "timeDeleted", "Updated": "updated", "Etag": "etag", + "CustomTime": "customTime", +} + +// attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field +// names in the protobuf Object message. 
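The new Query fields above (StartOffset, EndOffset, Projection, IncludeTrailingDelimiter) and SetAttrSelection combine when listing objects. A minimal listing sketch under the assumption of an initialized client and context; bucket and prefix values are placeholders, and the loop uses the standard google.golang.org/api/iterator package.

```go
// Assumes an initialized *storage.Client named client and a context ctx.
q := &storage.Query{
	Prefix:      "logs/2023/",
	StartOffset: "logs/2023/02/",         // names lexicographically >= this
	Projection:  storage.ProjectionNoACL, // omit Owner and ACL for faster listings
}
// Optionally trim the returned fields even further.
if err := q.SetAttrSelection([]string{"Name", "Size", "Updated"}); err != nil {
	log.Fatal(err)
}
it := client.Bucket("my-bucket").Objects(ctx, q)
for {
	attrs, err := it.Next()
	if err == iterator.Done { // import "google.golang.org/api/iterator"
		break
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(attrs.Name, attrs.Size)
}
```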
+var attrToProtoFieldMap = map[string]string{ + "Name": "name", + "Bucket": "bucket", + "Etag": "etag", + "Generation": "generation", + "Metageneration": "metageneration", + "StorageClass": "storage_class", + "Size": "size", + "ContentEncoding": "content_encoding", + "ContentDisposition": "content_disposition", + "CacheControl": "cache_control", + "ACL": "acl", + "ContentLanguage": "content_language", + "Deleted": "delete_time", + "ContentType": "content_type", + "Created": "create_time", + "CRC32C": "checksums.crc32c", + "MD5": "checksums.md5_hash", + "Updated": "update_time", + "KMSKeyName": "kms_key", + "TemporaryHold": "temporary_hold", + "RetentionExpirationTime": "retention_expire_time", + "Metadata": "metadata", + "EventBasedHold": "event_based_hold", + "Owner": "owner", + "CustomerKeySHA256": "customer_encryption", + "CustomTime": "custom_time", + // MediaLink was explicitly excluded from the proto as it is an HTTP-ism. + // "MediaLink": "mediaLink", } // SetAttrSelection makes the query populate only specific attributes of @@ -1339,19 +1590,45 @@ var attrToFieldMap = map[string]string{ // optimization; for more information, see // https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance func (q *Query) SetAttrSelection(attrs []string) error { + // Validate selections. + for _, attr := range attrs { + // If the attr is acceptable for one of the two sets, then it is OK. + // If it is not acceptable for either, then return an error. + // The respective masking implementations ignore unknown attrs which + // makes switching between transports a little easier. + _, okJSON := attrToFieldMap[attr] + _, okGRPC := attrToProtoFieldMap[attr] + + if !okJSON && !okGRPC { + return fmt.Errorf("storage: attr %v is not valid", attr) + } + } + + q.attrSelection = attrs + + return nil +} + +func (q *Query) toFieldSelection() string { + if q == nil || len(q.attrSelection) == 0 { + return "" + } fieldSet := make(map[string]bool) - for _, attr := range attrs { + for _, attr := range q.attrSelection { field, ok := attrToFieldMap[attr] if !ok { - return fmt.Errorf("storage: attr %v is not valid", attr) + // Future proofing, skip unknown fields, let SetAttrSelection handle + // error modes. + continue } fieldSet[field] = true } + var s string if len(fieldSet) > 0 { var b bytes.Buffer - b.WriteString("items(") + b.WriteString("prefixes,items(") first := true for field := range fieldSet { if !first { @@ -1361,9 +1638,50 @@ func (q *Query) SetAttrSelection(attrs []string) error { b.WriteString(field) } b.WriteString(")") - q.fieldSelection = b.String() + s = b.String() } - return nil + return s +} + +func (q *Query) toFieldMask() *fieldmaskpb.FieldMask { + // The default behavior with no Query is ProjectionDefault (i.e. ProjectionFull). + if q == nil { + return &fieldmaskpb.FieldMask{Paths: []string{"*"}} + } + + // User selected attributes via q.SetAttrSeleciton. This takes precedence + // over the Projection. + if numSelected := len(q.attrSelection); numSelected > 0 { + protoFieldPaths := make([]string, 0, numSelected) + + for _, attr := range q.attrSelection { + pf, ok := attrToProtoFieldMap[attr] + if !ok { + // Future proofing, skip unknown fields, let SetAttrSelection + // handle error modes. + continue + } + protoFieldPaths = append(protoFieldPaths, pf) + } + + return &fieldmaskpb.FieldMask{Paths: protoFieldPaths} + } + + // ProjectDefault == ProjectionFull which means all fields. 
+ fm := &fieldmaskpb.FieldMask{Paths: []string{"*"}} + if q.Projection == ProjectionNoACL { + paths := make([]string, 0, len(attrToProtoFieldMap)-2) // omitting two fields + for _, f := range attrToProtoFieldMap { + // Skip the acl and owner fields for "NoACL". + if f == "acl" || f == "owner" { + continue + } + paths = append(paths, f) + } + fm.Paths = paths + } + + return fm } // Conditions constrain methods to act on specific generations of @@ -1507,6 +1825,33 @@ func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall return nil } +func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.RewriteObjectRequest) error { + if gen >= 0 { + call.SourceGeneration = gen + } + if conds == nil { + return nil + } + if err := conds.validate("CopyTo source"); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + call.IfSourceGenerationMatch = proto.Int64(conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + call.IfSourceGenerationNotMatch = proto.Int64(conds.GenerationNotMatch) + case conds.DoesNotExist: + call.IfSourceGenerationMatch = proto.Int64(0) + } + switch { + case conds.MetagenerationMatch != 0: + call.IfSourceMetagenerationMatch = proto.Int64(conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + call.IfSourceMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch) + } + return nil +} + // setConditionField sets a field on a *raw.WhateverCall. // We can't use anonymous interfaces because the return type is // different, since the field setters are builders. @@ -1519,42 +1864,168 @@ func setConditionField(call reflect.Value, name string, value interface{}) bool return true } -// conditionsQuery returns the generation and conditions as a URL query -// string suitable for URL.RawQuery. It assumes that the conditions -// have been validated. -func conditionsQuery(gen int64, conds *Conditions) string { - // URL escapes are elided because integer strings are URL-safe. - var buf []byte +// Retryer returns an object handle that is configured with custom retry +// behavior as specified by the options that are passed to it. All operations +// on the new handle will use the customized retry configuration. +// These retry options will merge with the bucket's retryer (if set) for the +// returned handle. Options passed into this method will take precedence over +// retry options on the bucket and client. Note that you must explicitly pass in +// each option you want to override. +func (o *ObjectHandle) Retryer(opts ...RetryOption) *ObjectHandle { + o2 := *o + var retry *retryConfig + if o.retry != nil { + // merge the options with the existing retry + retry = o.retry + } else { + retry = &retryConfig{} + } + for _, opt := range opts { + opt.apply(retry) + } + o2.retry = retry + o2.acl.retry = retry + return &o2 +} - appendParam := func(s string, n int64) { - if len(buf) > 0 { - buf = append(buf, '&') - } - buf = append(buf, s...) - buf = strconv.AppendInt(buf, n, 10) +// SetRetry configures the client with custom retry behavior as specified by the +// options that are passed to it. All operations using this client will use the +// customized retry configuration. +// This should be called once before using the client for network operations, as +// there could be indeterminate behaviour with operations in progress. +// Retry options set on a bucket or object handle will take precedence over +// these options. 
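The retry configuration introduced here layers client-wide defaults (SetRetry) under bucket- and object-level overrides (Retryer), with WithBackoff and WithPolicy adjusting timing and idempotency behavior. A brief sketch of both levels, assuming an initialized client and context; names are placeholders.

```go
// Assumes an initialized *storage.Client named client and a context ctx.
client.SetRetry(
	storage.WithBackoff(gax.Backoff{ // import "github.com/googleapis/gax-go/v2"
		Initial:    2 * time.Second,
		Max:        30 * time.Second,
		Multiplier: 3,
	}),
	storage.WithPolicy(storage.RetryAlways),
)

// Handle-level options take precedence over the client-wide settings above.
o := client.Bucket("my-bucket").Object("my-object").
	Retryer(storage.WithPolicy(storage.RetryNever))
if err := o.Delete(ctx); err != nil {
	log.Fatal(err)
}
```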
+func (c *Client) SetRetry(opts ...RetryOption) { + var retry *retryConfig + if c.retry != nil { + // merge the options with the existing retry + retry = c.retry + } else { + retry = &retryConfig{} } + for _, opt := range opts { + opt.apply(retry) + } + c.retry = retry +} - if gen >= 0 { - appendParam("generation=", gen) +// RetryOption allows users to configure non-default retry behavior for API +// calls made to GCS. +type RetryOption interface { + apply(config *retryConfig) +} + +// WithBackoff allows configuration of the backoff timing used for retries. +// Available configuration options (Initial, Max and Multiplier) are described +// at https://pkg.go.dev/github.com/googleapis/gax-go/v2#Backoff. If any fields +// are not supplied by the user, gax default values will be used. +func WithBackoff(backoff gax.Backoff) RetryOption { + return &withBackoff{ + backoff: backoff, } - if conds == nil { - return string(buf) +} + +type withBackoff struct { + backoff gax.Backoff +} + +func (wb *withBackoff) apply(config *retryConfig) { + config.backoff = &wb.backoff +} + +// RetryPolicy describes the available policies for which operations should be +// retried. The default is `RetryIdempotent`. +type RetryPolicy int + +const ( + // RetryIdempotent causes only idempotent operations to be retried when the + // service returns a transient error. Using this policy, fully idempotent + // operations (such as `ObjectHandle.Attrs()`) will always be retried. + // Conditionally idempotent operations (for example `ObjectHandle.Update()`) + // will be retried only if the necessary conditions have been supplied (in + // the case of `ObjectHandle.Update()` this would mean supplying a + // `Conditions.MetagenerationMatch` condition is required). + RetryIdempotent RetryPolicy = iota + + // RetryAlways causes all operations to be retried when the service returns a + // transient error, regardless of idempotency considerations. + RetryAlways + + // RetryNever causes the client to not perform retries on failed operations. + RetryNever +) + +// WithPolicy allows the configuration of which operations should be performed +// with retries for transient errors. +func WithPolicy(policy RetryPolicy) RetryOption { + return &withPolicy{ + policy: policy, } - switch { - case conds.GenerationMatch != 0: - appendParam("ifGenerationMatch=", conds.GenerationMatch) - case conds.GenerationNotMatch != 0: - appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch) - case conds.DoesNotExist: - appendParam("ifGenerationMatch=", 0) +} + +type withPolicy struct { + policy RetryPolicy +} + +func (ws *withPolicy) apply(config *retryConfig) { + config.policy = ws.policy +} + +// WithErrorFunc allows users to pass a custom function to the retryer. Errors +// will be retried if and only if `shouldRetry(err)` returns true. +// By default, the following errors are retried (see ShouldRetry for the default +// function): +// +// - HTTP responses with codes 408, 429, 502, 503, and 504. +// +// - Transient network errors such as connection reset and io.ErrUnexpectedEOF. +// +// - Errors which are considered transient using the Temporary() interface. +// +// - Wrapped versions of these errors. +// +// This option can be used to retry on a different set of errors than the +// default. Users can use the default ShouldRetry function inside their custom +// function if they only want to make minor modifications to default behavior. 
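WithErrorFunc, described above, swaps in a custom retry predicate; the doc comment suggests wrapping the exported default ShouldRetry when only minor changes are wanted. A short sketch along those lines; errFlakyProxy is a hypothetical application-specific sentinel error, not part of this patch.

```go
// Assumes an initialized *storage.Client named client; errFlakyProxy is a
// hypothetical sentinel error specific to the application.
client.SetRetry(
	storage.WithErrorFunc(func(err error) bool {
		if storage.ShouldRetry(err) { // keep the library's default behavior
			return true
		}
		return errors.Is(err, errFlakyProxy) // and retry one extra case
	}),
)
```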
+func WithErrorFunc(shouldRetry func(err error) bool) RetryOption { + return &withErrorFunc{ + shouldRetry: shouldRetry, } - switch { - case conds.MetagenerationMatch != 0: - appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch) - case conds.MetagenerationNotMatch != 0: - appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch) +} + +type withErrorFunc struct { + shouldRetry func(err error) bool +} + +func (wef *withErrorFunc) apply(config *retryConfig) { + config.shouldRetry = wef.shouldRetry +} + +type retryConfig struct { + backoff *gax.Backoff + policy RetryPolicy + shouldRetry func(err error) bool +} + +func (r *retryConfig) clone() *retryConfig { + if r == nil { + return nil + } + + var bo *gax.Backoff + if r.backoff != nil { + bo = &gax.Backoff{ + Initial: r.backoff.Initial, + Max: r.backoff.Max, + Multiplier: r.backoff.Multiplier, + } + } + + return &retryConfig{ + backoff: bo, + policy: r.policy, + shouldRetry: r.shouldRetry, } - return string(buf) } // composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods @@ -1588,19 +2059,127 @@ func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) erro if copySource { cs = "copy-source-" } - headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256") + headers.Set("x-goog-"+cs+"encryption-algorithm", aes256Algorithm) headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key)) keyHash := sha256.Sum256(key) headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:])) return nil } +// toProtoCommonObjectRequestParams sets customer-supplied encryption to the proto library's CommonObjectRequestParams. +func toProtoCommonObjectRequestParams(key []byte) *storagepb.CommonObjectRequestParams { + if key == nil { + return nil + } + keyHash := sha256.Sum256(key) + return &storagepb.CommonObjectRequestParams{ + EncryptionAlgorithm: aes256Algorithm, + EncryptionKeyBytes: key, + EncryptionKeySha256Bytes: keyHash[:], + } +} + // ServiceAccount fetches the email address of the given project's Google Cloud Storage service account. func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) { - r := c.raw.Projects.ServiceAccount.Get(projectID) - res, err := r.Context(ctx).Do() - if err != nil { - return "", err + o := makeStorageOpts(true, c.retry, "") + return c.tc.GetServiceAccount(ctx, projectID, o...) + +} + +// bucketResourceName formats the given project ID and bucketResourceName ID +// into a Bucket resource name. This is the format necessary for the gRPC API as +// it conforms to the Resource-oriented design practices in https://google.aip.dev/121. +func bucketResourceName(p, b string) string { + return fmt.Sprintf("projects/%s/buckets/%s", p, b) +} + +// parseBucketName strips the leading resource path segment and returns the +// bucket ID, which is the simple Bucket name typical of the v1 API. +func parseBucketName(b string) string { + sep := strings.LastIndex(b, "/") + return b[sep+1:] +} + +// parseProjectNumber consume the given resource name and parses out the project +// number if one is present i.e. it is not a project ID. +func parseProjectNumber(r string) uint64 { + projectID := regexp.MustCompile(`projects\/([0-9]+)\/?`) + if matches := projectID.FindStringSubmatch(r); len(matches) > 0 { + // Capture group follows the matched segment. 
For example: + // input: projects/123/bars/456 + // output: [projects/123/, 123] + number, err := strconv.ParseUint(matches[1], 10, 64) + if err != nil { + return 0 + } + return number } - return res.EmailAddress, nil + + return 0 +} + +// toProjectResource accepts a project ID and formats it as a Project resource +// name. +func toProjectResource(project string) string { + return fmt.Sprintf("projects/%s", project) +} + +// setConditionProtoField uses protobuf reflection to set named condition field +// to the given condition value if supported on the protobuf message. +// +// This is an experimental API and not intended for public use. +func setConditionProtoField(m protoreflect.Message, f string, v int64) bool { + fields := m.Descriptor().Fields() + if rf := fields.ByName(protoreflect.Name(f)); rf != nil { + m.Set(rf, protoreflect.ValueOfInt64(v)) + return true + } + + return false +} + +// applyCondsProto validates and attempts to set the conditions on a protobuf +// message using protobuf reflection. +// +// This is an experimental API and not intended for public use. +func applyCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error { + rmsg := msg.ProtoReflect() + + if gen >= 0 { + if !setConditionProtoField(rmsg, "generation", gen) { + return fmt.Errorf("storage: %s: generation not supported", method) + } + } + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + + switch { + case conds.GenerationMatch != 0: + if !setConditionProtoField(rmsg, "if_generation_match", conds.GenerationMatch) { + return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) + } + case conds.GenerationNotMatch != 0: + if !setConditionProtoField(rmsg, "if_generation_not_match", conds.GenerationNotMatch) { + return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) + } + case conds.DoesNotExist: + if !setConditionProtoField(rmsg, "if_generation_match", int64(0)) { + return fmt.Errorf("storage: %s: DoesNotExist not supported", method) + } + } + switch { + case conds.MetagenerationMatch != 0: + if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil } diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index 1843a8141559..3a6a1ce0f56c 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -16,15 +16,12 @@ package storage import ( "context" - "encoding/base64" "errors" "fmt" "io" "sync" + "time" "unicode/utf8" - - "google.golang.org/api/googleapi" - raw "google.golang.org/api/storage/v1" ) // A Writer writes a Cloud Storage object. @@ -34,35 +31,61 @@ type Writer struct { // attributes are ignored. ObjectAttrs - // SendCRC specifies whether to transmit a CRC32C field. It should be set + // SendCRC32C specifies whether to transmit a CRC32C field. It should be set // to true in addition to setting the Writer's CRC32C field, because zero // is a valid CRC and normally a zero would not be transmitted. // If a CRC32C is sent, and the data written does not match the checksum, // the write will be rejected. 
+ // + // Note: SendCRC32C must be set to true BEFORE the first call to + // Writer.Write() in order to send the checksum. If it is set after that + // point, the checksum will be ignored. SendCRC32C bool // ChunkSize controls the maximum number of bytes of the object that the // Writer will attempt to send to the server in a single request. Objects // smaller than the size will be sent in a single request, while larger - // objects will be split over multiple requests. The size will be rounded up - // to the nearest multiple of 256K. + // objects will be split over multiple requests. The value will be rounded up + // to the nearest multiple of 256K. The default ChunkSize is 16MiB. + // + // Each Writer will internally allocate a buffer of size ChunkSize. This is + // used to buffer input data and allow for the input to be sent again if a + // request must be retried. // - // ChunkSize will default to a reasonable value. If you perform many - // concurrent writes of small objects (under ~8MB), you may wish set ChunkSize - // to a value that matches your objects' sizes to avoid consuming large - // amounts of memory. See + // If you upload small objects (< 16MiB), you should set ChunkSize + // to a value slightly larger than the objects' sizes to avoid memory bloat. + // This is especially important if you are uploading many small objects + // concurrently. See // https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#size // for more information about performance trade-offs related to ChunkSize. // // If ChunkSize is set to zero, chunking will be disabled and the object will // be uploaded in a single request without the use of a buffer. This will // further reduce memory used during uploads, but will also prevent the writer - // from retrying in case of a transient error from the server, since a buffer - // is required in order to retry the failed request. + // from retrying in case of a transient error from the server or resuming an + // upload that fails midway through, since the buffer is required in order to + // retry the failed request. // // ChunkSize must be set before the first Write call. ChunkSize int + // ChunkRetryDeadline sets a per-chunk retry deadline for multi-chunk + // resumable uploads. + // + // For uploads of larger files, the Writer will attempt to retry if the + // request to upload a particular chunk fails with a transient error. + // If a single chunk has been attempting to upload for longer than this + // deadline and the request fails, it will no longer be retried, and the error + // will be returned to the caller. This is only applicable for files which are + // large enough to require a multi-chunk resumable upload. The default value + // is 32s. Users may want to pick a longer deadline if they are using larger + // values for ChunkSize or if they expect to have a slow or unreliable + // internet connection. + // + // To set a deadline on the entire upload, use context timeout or + // cancellation. + ChunkRetryDeadline time.Duration + // ProgressFunc can be used to monitor the progress of a large write. // operation. If ProgressFunc is not nil and writing requires multiple // calls to the underlying service (see @@ -86,96 +109,6 @@ type Writer struct { err error } -func (w *Writer) open() error { - attrs := w.ObjectAttrs - // Check the developer didn't change the object Name (this is unfortunate, but - // we don't want to store an object under the wrong name). 
- if attrs.Name != w.o.object { - return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) - } - if !utf8.ValidString(attrs.Name) { - return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) - } - if attrs.KMSKeyName != "" && w.o.encryptionKey != nil { - return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key") - } - pr, pw := io.Pipe() - w.pw = pw - w.opened = true - - go w.monitorCancel() - - if w.ChunkSize < 0 { - return errors.New("storage: Writer.ChunkSize must be non-negative") - } - mediaOpts := []googleapi.MediaOption{ - googleapi.ChunkSize(w.ChunkSize), - } - if c := attrs.ContentType; c != "" { - mediaOpts = append(mediaOpts, googleapi.ContentType(c)) - } - - go func() { - defer close(w.donec) - - rawObj := attrs.toRawObject(w.o.bucket) - if w.SendCRC32C { - rawObj.Crc32c = encodeUint32(attrs.CRC32C) - } - if w.MD5 != nil { - rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5) - } - if w.o.c.envHost != "" { - w.o.c.raw.BasePath = fmt.Sprintf("%s://%s", w.o.c.scheme, w.o.c.envHost) - } - call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj). - Media(pr, mediaOpts...). - Projection("full"). - Context(w.ctx). - Name(w.o.object) - - if w.ProgressFunc != nil { - call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) }) - } - if attrs.KMSKeyName != "" { - call.KmsKeyName(attrs.KMSKeyName) - } - if attrs.PredefinedACL != "" { - call.PredefinedAcl(attrs.PredefinedACL) - } - if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil { - w.mu.Lock() - w.err = err - w.mu.Unlock() - pr.CloseWithError(err) - return - } - var resp *raw.Object - err := applyConds("NewWriter", w.o.gen, w.o.conds, call) - if err == nil { - if w.o.userProject != "" { - call.UserProject(w.o.userProject) - } - setClientHeader(call.Header()) - - // The internals that perform call.Do automatically retry both the initial - // call to set up the upload as well as calls to upload individual chunks - // for a resumable upload (as long as the chunk size is non-zero). Hence - // there is no need to add retries here. - resp, err = call.Do() - } - if err != nil { - w.mu.Lock() - w.err = err - w.mu.Unlock() - pr.CloseWithError(err) - return - } - w.obj = newObject(resp) - }() - return nil -} - // Write appends to w. It implements the io.Writer interface. // // Since writes happen asynchronously, Write may return a nil @@ -193,7 +126,7 @@ func (w *Writer) Write(p []byte) (n int, err error) { return 0, werr } if !w.opened { - if err := w.open(); err != nil { + if err := w.openWriter(); err != nil { return 0, err } } @@ -205,7 +138,7 @@ func (w *Writer) Write(p []byte) (n int, err error) { // Preserve existing functionality that when context is canceled, Write will return // context.Canceled instead of "io: read/write on closed pipe". This hides the // pipe implementation detail from users and makes Write seem as though it's an RPC. - if werr == context.Canceled || werr == context.DeadlineExceeded { + if errors.Is(werr, context.Canceled) || errors.Is(werr, context.DeadlineExceeded) { return n, werr } } @@ -217,7 +150,7 @@ func (w *Writer) Write(p []byte) (n int, err error) { // can be retrieved by calling Attrs. 
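The ChunkSize, ChunkRetryDeadline and SendCRC32C semantics documented above are easiest to see from the caller's side. A minimal sketch, assuming a hypothetical bucket "my-bucket", object "my-object" and an already-constructed *storage.Client:

```go
package main

import (
	"context"
	"hash/crc32"
	"time"

	"cloud.google.com/go/storage"
)

// upload writes data with an 8 MiB per-request buffer, a longer per-chunk
// retry deadline, and a client-computed CRC32C checksum.
func upload(ctx context.Context, client *storage.Client, data []byte) error {
	w := client.Bucket("my-bucket").Object("my-object").NewWriter(ctx)
	w.ChunkSize = 8 << 20                   // rounded up to a multiple of 256K
	w.ChunkRetryDeadline = 60 * time.Second // per-chunk retry budget for resumable uploads
	w.SendCRC32C = true                     // must be set before the first Write
	w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
	if _, err := w.Write(data); err != nil {
		return err
	}
	// The object is not committed until Close returns nil.
	return w.Close()
}
```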
func (w *Writer) Close() error { if !w.opened { - if err := w.open(); err != nil { + if err := w.openWriter(); err != nil { return err } } @@ -233,6 +166,43 @@ func (w *Writer) Close() error { return w.err } +func (w *Writer) openWriter() (err error) { + if err := w.validateWriteAttrs(); err != nil { + return err + } + if w.o.gen != defaultGen { + return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen) + } + + isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true) + opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject) + params := &openWriterParams{ + ctx: w.ctx, + chunkSize: w.ChunkSize, + chunkRetryDeadline: w.ChunkRetryDeadline, + bucket: w.o.bucket, + attrs: &w.ObjectAttrs, + conds: w.o.conds, + encryptionKey: w.o.encryptionKey, + sendCRC32C: w.SendCRC32C, + donec: w.donec, + setError: w.error, + progress: w.progress, + setObj: func(o *ObjectAttrs) { w.obj = o }, + } + if err := w.ctx.Err(); err != nil { + return err // short-circuit + } + w.pw, err = w.o.c.tc.OpenWriter(params, opts...) + if err != nil { + return err + } + w.opened = true + go w.monitorCancel() + + return nil +} + // monitorCancel is intended to be used as a background goroutine. It monitors the // context, and when it observes that the context has been canceled, it manually // closes things that do not take a context. @@ -266,3 +236,38 @@ func (w *Writer) CloseWithError(err error) error { func (w *Writer) Attrs() *ObjectAttrs { return w.obj } + +func (w *Writer) validateWriteAttrs() error { + attrs := w.ObjectAttrs + // Check the developer didn't change the object Name (this is unfortunate, but + // we don't want to store an object under the wrong name). + if attrs.Name != w.o.object { + return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) + } + if !utf8.ValidString(attrs.Name) { + return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) + } + if attrs.KMSKeyName != "" && w.o.encryptionKey != nil { + return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key") + } + if w.ChunkSize < 0 { + return errors.New("storage: Writer.ChunkSize must be non-negative") + } + return nil +} + +// progress is a convenience wrapper that reports write progress to the Writer +// ProgressFunc if it is set and progress is non-zero. +func (w *Writer) progress(p int64) { + if w.ProgressFunc != nil && p != 0 { + w.ProgressFunc(p) + } +} + +// error acquires the Writer's lock, sets the Writer's err to the given error, +// then relinquishes the lock. +func (w *Writer) error(err error) { + w.mu.Lock() + w.err = err + w.mu.Unlock() +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md index fec416a9c415..b11eb07884b0 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -1,3 +1,5 @@ +# NOTE: This module will go out of support by March 31, 2023. For authenticating with Azure AD, use module [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) instead. For help migrating from `adal` to `azidentiy` please consult the [migration guide](https://aka.ms/azsdk/go/identity/migration). 
General information about the retirement of this and other legacy modules can be found [here](https://azure.microsoft.com/updates/support-for-azure-sdk-libraries-that-do-not-conform-to-our-current-azure-sdk-guidelines-will-be-retired-as-of-31-march-2023/). + # Azure Active Directory authentication for Go This is a standalone package for authenticating with Azure Active @@ -18,7 +20,7 @@ go get -u github.com/Azure/go-autorest/autorest/adal ## Usage -An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). +An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). ### Register an Azure AD Application with secret @@ -88,7 +90,7 @@ An Active Directory application is required in order to use this library. An app ### Grant the necessary permissions Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained -level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles) +level. There is a set of [pre-defined roles](https://docs.microsoft.com/azure/active-directory/role-based-access-built-in-roles) which can be assigned to a service principal of an Azure AD application depending of your needs. ``` @@ -104,7 +106,7 @@ It is also possible to define custom role definitions. az role definition create --role-definition role-definition.json ``` -* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file. +* Check [custom roles](https://docs.microsoft.com/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file. ### Acquire Access Token diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 310be07ec335..1a9c8ab537f0 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -365,6 +365,25 @@ func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, err }) } +// ServicePrincipalFederatedSecret implements ServicePrincipalSecret for Federated JWTs. +type ServicePrincipalFederatedSecret struct { + jwt string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during OAuth Token Acquisition using a JWT signed by an OIDC issuer. +func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + + v.Set("client_assertion", secret.jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (secret ServicePrincipalFederatedSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalFederatedSecret is not supported") +} + // ServicePrincipalToken encapsulates a Token created for a Service Principal. type ServicePrincipalToken struct { inner servicePrincipalToken @@ -419,6 +438,8 @@ func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{} case "ServicePrincipalAuthorizationCodeSecret": spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{} + case "ServicePrincipalFederatedSecret": + return errors.New("unmarshalling ServicePrincipalFederatedSecret is not supported") default: return fmt.Errorf("unrecognized token type '%s'", secret["type"]) } @@ -665,6 +686,31 @@ func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clie ) } +// NewServicePrincipalTokenFromFederatedToken creates a ServicePrincipalToken from the supplied federated OIDC JWT. +func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientID string, jwt string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if jwt == "" { + return nil, fmt.Errorf("parameter 'jwt' cannot be empty") + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalFederatedSecret{ + jwt: jwt, + }, + callbacks..., + ) +} + type msiType int const ( @@ -1058,8 +1104,8 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource // AAD returns expires_in as a string, ADFS returns it as an int ExpiresIn json.Number `json:"expires_in"` - // expires_on can be in two formats, a UTC time stamp or the number of seconds. - ExpiresOn string `json:"expires_on"` + // expires_on can be in three formats, a UTC time stamp, or the number of seconds as a string *or* int. 
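A hedged sketch of the federated-credential flow added above, assuming the client ID, tenant ID and OIDC-issued JWT arrive via hypothetical environment variables:

```go
package main

import (
	"log"
	"os"

	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	oauthCfg, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, os.Getenv("AZURE_TENANT_ID"))
	if err != nil {
		log.Fatal(err)
	}
	// The JWT is issued by an external OIDC provider (e.g. a workload identity token).
	spt, err := adal.NewServicePrincipalTokenFromFederatedToken(
		*oauthCfg,
		os.Getenv("AZURE_CLIENT_ID"),
		os.Getenv("AZURE_FEDERATED_TOKEN"),
		azure.PublicCloud.ResourceManagerEndpoint,
	)
	if err != nil {
		log.Fatal(err)
	}
	if err := spt.EnsureFresh(); err != nil {
		log.Fatal(err)
	}
	log.Printf("access token acquired; expires_on=%s", spt.Token().ExpiresOn)
}
```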
+ ExpiresOn interface{} `json:"expires_on"` NotBefore json.Number `json:"not_before"` Resource string `json:"resource"` @@ -1072,7 +1118,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource } expiresOn := json.Number("") // ADFS doesn't include the expires_on field - if token.ExpiresOn != "" { + if token.ExpiresOn != nil { if expiresOn, err = parseExpiresOn(token.ExpiresOn); err != nil { return newTokenRefreshError(fmt.Sprintf("adal: failed to parse expires_on: %v value '%s'", err, token.ExpiresOn), resp) } @@ -1089,18 +1135,27 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource } // converts expires_on to the number of seconds -func parseExpiresOn(s string) (json.Number, error) { - // convert the expiration date to the number of seconds from now +func parseExpiresOn(s interface{}) (json.Number, error) { + // the JSON unmarshaler treats JSON numbers unmarshaled into an interface{} as float64 + asFloat64, ok := s.(float64) + if ok { + // this is the number of seconds as int case + return json.Number(strconv.FormatInt(int64(asFloat64), 10)), nil + } + asStr, ok := s.(string) + if !ok { + return "", fmt.Errorf("unexpected expires_on type %T", s) + } + // convert the expiration date to the number of seconds from the unix epoch timeToDuration := func(t time.Time) json.Number { - dur := t.Sub(time.Now().UTC()) - return json.Number(strconv.FormatInt(int64(dur.Round(time.Second).Seconds()), 10)) + return json.Number(strconv.FormatInt(t.UTC().Unix(), 10)) } - if _, err := strconv.ParseInt(s, 10, 64); err == nil { + if _, err := json.Number(asStr).Int64(); err == nil { // this is the number of seconds case, no conversion required - return json.Number(s), nil - } else if eo, err := time.Parse(expiresOnDateFormatPM, s); err == nil { + return json.Number(asStr), nil + } else if eo, err := time.Parse(expiresOnDateFormatPM, asStr); err == nil { return timeToDuration(eo), nil - } else if eo, err := time.Parse(expiresOnDateFormat, s); err == nil { + } else if eo, err := time.Parse(expiresOnDateFormat, asStr); err == nil { return timeToDuration(eo), nil } else { // unknown format @@ -1317,12 +1372,25 @@ func NewMultiTenantServicePrincipalTokenFromCertificate(multiTenantCfg MultiTena // MSIAvailable returns true if the MSI endpoint is available for authentication. func MSIAvailable(ctx context.Context, s Sender) bool { + msiType, _, err := getMSIType() + + if err != nil { + return false + } + + if msiType != msiTypeIMDS { + return true + } + if s == nil { s = sender() } + resp, err := getMSIEndpoint(ctx, s) + if err == nil { resp.Body.Close() } + return err == nil } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go index 85acf1c9bc98..2f1a99818d10 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go @@ -16,6 +16,7 @@ package auth import ( "bytes" + "context" "encoding/binary" "encoding/json" "errors" @@ -232,6 +233,9 @@ func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) } // 4. 
MSI + if !adal.MSIAvailable(context.Background(), nil) { + return nil, errors.New("MSI not available") + } logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetAuthorizer() using MSI authentication") return settings.GetMSI().Authorizer() } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go index 38e4900ad0fb..f7eb26fd366b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go @@ -1,3 +1,4 @@ +//go:build modhack // +build modhack package auth diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go index 861ce2984e64..50d6f0391a5e 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go @@ -1,3 +1,4 @@ +//go:build modhack // +build modhack package cli diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go index 44ff446f6697..486619111c67 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go @@ -124,24 +124,91 @@ func LoadTokens(path string) ([]Token, error) { // GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios. func GetTokenFromCLI(resource string) (*Token, error) { - // This is the path that a developer can set to tell this class what the install path for Azure CLI is. - const azureCLIPath = "AzureCLIPath" + return GetTokenFromCLIWithParams(GetAccessTokenParams{Resource: resource}) +} - // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI. - azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles")) +// GetAccessTokenParams is the parameter struct of GetTokenFromCLIWithParams +type GetAccessTokenParams struct { + Resource string + ResourceType string + Subscription string + Tenant string +} - // Default path for non-Windows. - const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin" +// GetTokenFromCLIWithParams gets a token using Azure CLI 2.0 for local development scenarios. +func GetTokenFromCLIWithParams(params GetAccessTokenParams) (*Token, error) { + cliCmd := GetAzureCLICommand() - // Validate resource, since it gets sent as a command line argument to Azure CLI - const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed." 
- match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json") + if params.Resource != "" { + if err := validateParameter(params.Resource); err != nil { + return nil, err + } + cliCmd.Args = append(cliCmd.Args, "--resource", params.Resource) + } + if params.ResourceType != "" { + if err := validateParameter(params.ResourceType); err != nil { + return nil, err + } + cliCmd.Args = append(cliCmd.Args, "--resource-type", params.ResourceType) + } + if params.Subscription != "" { + if err := validateParameter(params.Subscription); err != nil { + return nil, err + } + cliCmd.Args = append(cliCmd.Args, "--subscription", params.Subscription) + } + if params.Tenant != "" { + if err := validateParameter(params.Tenant); err != nil { + return nil, err + } + cliCmd.Args = append(cliCmd.Args, "--tenant", params.Tenant) + } + + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + if stderr.Len() > 0 { + return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String()) + } + + return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", err.Error()) + } + + tokenResponse := Token{} + err = json.Unmarshal(output, &tokenResponse) if err != nil { return nil, err } + + return &tokenResponse, err +} + +func validateParameter(param string) error { + // Validate parameters, since it gets sent as a command line argument to Azure CLI + const invalidResourceErrorTemplate = "Parameter %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed." + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", param) + if err != nil { + return err + } if !match { - return nil, fmt.Errorf(invalidResourceErrorTemplate, resource) + return fmt.Errorf(invalidResourceErrorTemplate, param) } + return nil +} + +// GetAzureCLICommand can be used to run arbitrary Azure CLI command +func GetAzureCLICommand() *exec.Cmd { + // This is the path that a developer can set to tell this class what the install path for Azure CLI is. + const azureCLIPath = "AzureCLIPath" + + // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI. + azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles")) + + // Default path for non-Windows. 
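A rough usage sketch of the parameterised CLI helper introduced above; the resource, subscription and tenant values are hypothetical placeholders:

```go
package main

import (
	"log"

	"github.com/Azure/go-autorest/autorest/azure/cli"
)

func main() {
	// Roughly equivalent to:
	//   az account get-access-token -o json --resource ... --subscription ... --tenant ...
	token, err := cli.GetTokenFromCLIWithParams(cli.GetAccessTokenParams{
		Resource:     "https://management.azure.com/",
		Subscription: "00000000-0000-0000-0000-000000000000",
		Tenant:       "contoso.onmicrosoft.com",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token acquired; expires:", token.ExpiresOn)
}
```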
+ const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin" // Execute Azure CLI to get token var cliCmd *exec.Cmd @@ -155,21 +222,6 @@ func GetTokenFromCLI(resource string) (*Token, error) { cliCmd.Env = os.Environ() cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath)) } - cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json", "--resource", resource) - var stderr bytes.Buffer - cliCmd.Stderr = &stderr - - output, err := cliCmd.Output() - if err != nil { - return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String()) - } - - tokenResponse := Token{} - err = json.Unmarshal(output, &tokenResponse) - if err != nil { - return nil, err - } - - return &tokenResponse, err + return cliCmd } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 737950eb36b0..b0a53769f26b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -34,8 +34,10 @@ const ( var environments = map[string]Environment{ "AZURECHINACLOUD": ChinaCloud, "AZUREGERMANCLOUD": GermanCloud, + "AZURECLOUD": PublicCloud, "AZUREPUBLICCLOUD": PublicCloud, - "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, + "AZUREUSGOVERNMENT": USGovernmentCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, //TODO: deprecate } // ResourceIdentifier contains a set of Azure resource IDs. @@ -51,6 +53,8 @@ type ResourceIdentifier struct { ServiceBus string `json:"serviceBus"` SQLDatabase string `json:"sqlDatabase"` CosmosDB string `json:"cosmosDB"` + ManagedHSM string `json:"managedHSM"` + MicrosoftGraph string `json:"microsoftGraph"` } // Environment represents a set of endpoints for each of Azure's Clouds. 
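A small sketch showing the effect of the added environment-name entries: the new "AzureUSGovernment" alias resolves to the same Environment value as the older "AzureUSGovernmentCloud" (the Microsoft Graph field referenced here is added in the hunk that follows):

```go
package main

import (
	"log"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// "AzureUSGovernment" is newly accepted alongside the older "AzureUSGovernmentCloud".
	env, err := azure.EnvironmentFromName("AzureUSGovernment")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("ARM endpoint:", env.ResourceManagerEndpoint)
	log.Println("Microsoft Graph:", env.MicrosoftGraphEndpoint)
}
```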
@@ -63,9 +67,11 @@ type Environment struct { ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` GalleryEndpoint string `json:"galleryEndpoint"` KeyVaultEndpoint string `json:"keyVaultEndpoint"` + ManagedHSMEndpoint string `json:"managedHSMEndpoint"` GraphEndpoint string `json:"graphEndpoint"` ServiceBusEndpoint string `json:"serviceBusEndpoint"` BatchManagementEndpoint string `json:"batchManagementEndpoint"` + MicrosoftGraphEndpoint string `json:"microsoftGraphEndpoint"` StorageEndpointSuffix string `json:"storageEndpointSuffix"` CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` MariaDBDNSSuffix string `json:"mariaDBDNSSuffix"` @@ -74,6 +80,7 @@ type Environment struct { SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ManagedHSMDNSSuffix string `json:"managedHSMDNSSuffix"` ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` @@ -81,6 +88,7 @@ type Environment struct { TokenAudience string `json:"tokenAudience"` APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"` SynapseEndpointSuffix string `json:"synapseEndpointSuffix"` + DatalakeSuffix string `json:"datalakeSuffix"` ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` } @@ -95,9 +103,11 @@ var ( ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", GalleryEndpoint: "https://gallery.azure.com/", KeyVaultEndpoint: "https://vault.azure.net/", + ManagedHSMEndpoint: "https://managedhsm.azure.net/", GraphEndpoint: "https://graph.windows.net/", ServiceBusEndpoint: "https://servicebus.windows.net/", BatchManagementEndpoint: "https://batch.core.windows.net/", + MicrosoftGraphEndpoint: "https://graph.microsoft.com/", StorageEndpointSuffix: "core.windows.net", CosmosDBDNSSuffix: "documents.azure.com", MariaDBDNSSuffix: "mariadb.database.azure.com", @@ -106,6 +116,7 @@ var ( SQLDatabaseDNSSuffix: "database.windows.net", TrafficManagerDNSSuffix: "trafficmanager.net", KeyVaultDNSSuffix: "vault.azure.net", + ManagedHSMDNSSuffix: "managedhsm.azure.net", ServiceBusEndpointSuffix: "servicebus.windows.net", ServiceManagementVMDNSSuffix: "cloudapp.net", ResourceManagerVMDNSSuffix: "cloudapp.azure.com", @@ -113,6 +124,7 @@ var ( TokenAudience: "https://management.azure.com/", APIManagementHostNameSuffix: "azure-api.net", SynapseEndpointSuffix: "dev.azuresynapse.net", + DatalakeSuffix: "azuredatalakestore.net", ResourceIdentifiers: ResourceIdentifier{ Graph: "https://graph.windows.net/", KeyVault: "https://vault.azure.net", @@ -125,6 +137,8 @@ var ( ServiceBus: "https://servicebus.azure.net/", SQLDatabase: "https://database.windows.net/", CosmosDB: "https://cosmos.azure.com", + ManagedHSM: "https://managedhsm.azure.net", + MicrosoftGraph: "https://graph.microsoft.com/", }, } @@ -138,9 +152,11 @@ var ( ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", GalleryEndpoint: "https://gallery.usgovcloudapi.net/", KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + ManagedHSMEndpoint: NotAvailable, GraphEndpoint: "https://graph.windows.net/", ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", + MicrosoftGraphEndpoint: "https://graph.microsoft.us/", StorageEndpointSuffix: "core.usgovcloudapi.net", CosmosDBDNSSuffix: "documents.azure.us", 
MariaDBDNSSuffix: "mariadb.database.usgovcloudapi.net", @@ -149,13 +165,15 @@ var ( SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", TrafficManagerDNSSuffix: "usgovtrafficmanager.net", KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ManagedHSMDNSSuffix: NotAvailable, ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", ServiceManagementVMDNSSuffix: "usgovcloudapp.net", ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net", ContainerRegistryDNSSuffix: "azurecr.us", TokenAudience: "https://management.usgovcloudapi.net/", APIManagementHostNameSuffix: "azure-api.us", - SynapseEndpointSuffix: NotAvailable, + SynapseEndpointSuffix: "dev.azuresynapse.usgovcloudapi.net", + DatalakeSuffix: NotAvailable, ResourceIdentifiers: ResourceIdentifier{ Graph: "https://graph.windows.net/", KeyVault: "https://vault.usgovcloudapi.net", @@ -164,10 +182,12 @@ var ( OperationalInsights: "https://api.loganalytics.us", OSSRDBMS: "https://ossrdbms-aad.database.usgovcloudapi.net", Storage: "https://storage.azure.com/", - Synapse: NotAvailable, + Synapse: "https://dev.azuresynapse.usgovcloudapi.net", ServiceBus: "https://servicebus.azure.net/", SQLDatabase: "https://database.usgovcloudapi.net/", CosmosDB: "https://cosmos.azure.com", + ManagedHSM: NotAvailable, + MicrosoftGraph: "https://graph.microsoft.us/", }, } @@ -181,9 +201,11 @@ var ( ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", GalleryEndpoint: "https://gallery.chinacloudapi.cn/", KeyVaultEndpoint: "https://vault.azure.cn/", + ManagedHSMEndpoint: NotAvailable, GraphEndpoint: "https://graph.chinacloudapi.cn/", ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", + MicrosoftGraphEndpoint: "https://microsoftgraph.chinacloudapi.cn/", StorageEndpointSuffix: "core.chinacloudapi.cn", CosmosDBDNSSuffix: "documents.azure.cn", MariaDBDNSSuffix: "mariadb.database.chinacloudapi.cn", @@ -192,6 +214,7 @@ var ( SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", TrafficManagerDNSSuffix: "trafficmanager.cn", KeyVaultDNSSuffix: "vault.azure.cn", + ManagedHSMDNSSuffix: NotAvailable, ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", ServiceManagementVMDNSSuffix: "chinacloudapp.cn", ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn", @@ -199,6 +222,7 @@ var ( TokenAudience: "https://management.chinacloudapi.cn/", APIManagementHostNameSuffix: "azure-api.cn", SynapseEndpointSuffix: "dev.azuresynapse.azure.cn", + DatalakeSuffix: NotAvailable, ResourceIdentifiers: ResourceIdentifier{ Graph: "https://graph.chinacloudapi.cn/", KeyVault: "https://vault.azure.cn", @@ -211,6 +235,8 @@ var ( ServiceBus: "https://servicebus.azure.net/", SQLDatabase: "https://database.chinacloudapi.cn/", CosmosDB: "https://cosmos.azure.com", + ManagedHSM: NotAvailable, + MicrosoftGraph: "https://microsoftgraph.chinacloudapi.cn", }, } @@ -224,9 +250,11 @@ var ( ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", GalleryEndpoint: "https://gallery.cloudapi.de/", KeyVaultEndpoint: "https://vault.microsoftazure.de/", + ManagedHSMEndpoint: NotAvailable, GraphEndpoint: "https://graph.cloudapi.de/", ServiceBusEndpoint: "https://servicebus.cloudapi.de/", BatchManagementEndpoint: "https://batch.cloudapi.de/", + MicrosoftGraphEndpoint: NotAvailable, StorageEndpointSuffix: "core.cloudapi.de", CosmosDBDNSSuffix: "documents.microsoftazure.de", MariaDBDNSSuffix: "mariadb.database.cloudapi.de", @@ -235,6 +263,7 @@ var ( SQLDatabaseDNSSuffix: "database.cloudapi.de", TrafficManagerDNSSuffix: 
"azuretrafficmanager.de", KeyVaultDNSSuffix: "vault.microsoftazure.de", + ManagedHSMDNSSuffix: NotAvailable, ServiceBusEndpointSuffix: "servicebus.cloudapi.de", ServiceManagementVMDNSSuffix: "azurecloudapp.de", ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", @@ -242,6 +271,7 @@ var ( TokenAudience: "https://management.microsoftazure.de/", APIManagementHostNameSuffix: NotAvailable, SynapseEndpointSuffix: NotAvailable, + DatalakeSuffix: NotAvailable, ResourceIdentifiers: ResourceIdentifier{ Graph: "https://graph.cloudapi.de/", KeyVault: "https://vault.microsoftazure.de", @@ -254,6 +284,8 @@ var ( ServiceBus: "https://servicebus.azure.net/", SQLDatabase: "https://database.cloudapi.de/", CosmosDB: "https://cosmos.azure.com", + ManagedHSM: NotAvailable, + MicrosoftGraph: NotAvailable, }, } ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go index c6d39f686655..5b52357f951d 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -64,7 +64,7 @@ func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { regErr := register(client, r, re) if regErr != nil { - return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err) + return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %w", regErr, err) } } } diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md index 5680010575b7..683be1dcf9c8 100644 --- a/vendor/github.com/Microsoft/go-winio/README.md +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -1,4 +1,4 @@ -# go-winio +# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) This repository contains utilities for efficiently performing Win32 IO operations in Go. Currently, this is focused on accessing named pipes and other file handles, and @@ -11,12 +11,27 @@ package. Please see the LICENSE file for licensing information. -This project has adopted the [Microsoft Open Source Code of -Conduct](https://opensource.microsoft.com/codeofconduct/). For more information -see the [Code of Conduct -FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact -[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional -questions or comments. +## Contributing +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) +declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR +appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves +or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can +attest to the rules listed. 
Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. + + +## Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + + + +## Special Thanks Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 0385e4108129..293ab54c80c3 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -143,6 +144,11 @@ func (f *win32File) Close() error { return nil } +// IsClosed checks if the file has been closed +func (f *win32File) IsClosed() bool { + return f.closing.isSet() +} + // prepareIo prepares for a new IO operation. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. func (f *win32File) prepareIo() (*ioOperation, error) { diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index b632f8f8bb98..b2b644d002aa 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -252,15 +253,23 @@ func (conn *HvsockConn) Close() error { return conn.sock.Close() } +func (conn *HvsockConn) IsClosed() bool { + return conn.sock.IsClosed() +} + func (conn *HvsockConn) shutdown(how int) error { - err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) + if conn.IsClosed() { + return ErrFileClosed + } + + err := syscall.Shutdown(conn.sock.handle, how) if err != nil { return os.NewSyscallError("shutdown", err) } return nil } -// CloseRead shuts down the read end of the socket. +// CloseRead shuts down the read end of the socket, preventing future read operations. func (conn *HvsockConn) CloseRead() error { err := conn.shutdown(syscall.SHUT_RD) if err != nil { @@ -269,8 +278,8 @@ func (conn *HvsockConn) CloseRead() error { return nil } -// CloseWrite shuts down the write end of the socket, notifying the other endpoint that -// no more data will be written. +// CloseWrite shuts down the write end of the socket, preventing future write operations and +// notifying the other endpoint that no more data will be written. func (conn *HvsockConn) CloseWrite() error { err := conn.shutdown(syscall.SHUT_WR) if err != nil { diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go index f497c0e39178..2d9161e2deee 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -14,8 +14,6 @@ import ( "encoding/binary" "fmt" "strconv" - - "golang.org/x/sys/windows" ) // Variant specifies which GUID variant (or "type") of the GUID. It determines @@ -41,13 +39,6 @@ type Version uint8 var _ = (encoding.TextMarshaler)(GUID{}) var _ = (encoding.TextUnmarshaler)(&GUID{}) -// GUID represents a GUID/UUID. It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. 
It is defined as its own type so that stringification and -// marshaling can be supported. The representation matches that used by native -// Windows code. -type GUID windows.GUID - // NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. func NewV4() (GUID, error) { var b [16]byte diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go new file mode 100644 index 000000000000..f64d828c0ba4 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go @@ -0,0 +1,15 @@ +// +build !windows + +package guid + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type as that is only available to builds +// targeted at `windows`. The representation matches that used by native Windows +// code. +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go new file mode 100644 index 000000000000..83617f4eee9a --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go @@ -0,0 +1,10 @@ +package guid + +import "golang.org/x/sys/windows" + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go index 9c83d36fe533..c3dd7c217697 100644 --- a/vendor/github.com/Microsoft/go-winio/privilege.go +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -28,8 +28,9 @@ const ( ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 - SeBackupPrivilege = "SeBackupPrivilege" - SeRestorePrivilege = "SeRestorePrivilege" + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" + SeSecurityPrivilege = "SeSecurityPrivilege" ) const ( diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore deleted file mode 100644 index 748e4c8073ce..000000000000 --- a/vendor/github.com/PuerkitoBio/purell/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.sublime-* -.DS_Store -*.swp -*.swo -tags diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml deleted file mode 100644 index cf31e6af6d50..000000000000 --- a/vendor/github.com/PuerkitoBio/purell/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - "1.10.x" - - "1.11.x" - - tip diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE deleted file mode 100644 index 4b9986dea714..000000000000 --- a/vendor/github.com/PuerkitoBio/purell/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2012, Martin Angers -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
- -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md deleted file mode 100644 index 07de0c498668..000000000000 --- a/vendor/github.com/PuerkitoBio/purell/README.md +++ /dev/null @@ -1,188 +0,0 @@ -# Purell - -Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... - -Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. - -[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) - -## Install - -`go get github.com/PuerkitoBio/purell` - -## Changelog - -* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor). -* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121). -* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). -* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). -* **v0.2.0** : Add benchmarks, Attempt IDN support. -* **v0.1.0** : Initial release. 
- -## Examples - -From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): - -```go -package purell - -import ( - "fmt" - "net/url" -) - -func ExampleNormalizeURLString() { - if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", - FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { - panic(err) - } else { - fmt.Print(normalized) - } - // Output: http://somewebsite.com:80/Amazing%3F/url/ -} - -func ExampleMustNormalizeURLString() { - normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", - FlagsUnsafeGreedy) - fmt.Print(normalized) - - // Output: http://somewebsite.com/Amazing%FA/url -} - -func ExampleNormalizeURL() { - if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { - panic(err) - } else { - normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) - fmt.Print(normalized) - } - - // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 -} -``` - -## API - -As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags: - -```go -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". - - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) -``` - -For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. - -The [full godoc reference is available on gopkgdoc][godoc]. 
- -Some things to note: - -* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. - -* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): - - %24 -> $ - - %26 -> & - - %2B-%3B -> +,-./0123456789:; - - %3D -> = - - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ - - %5F -> _ - - %61-%7A -> abcdefghijklmnopqrstuvwxyz - - %7E -> ~ - - -* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). - -* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. - -* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. - -### Safe vs Usually Safe vs Unsafe - -Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. - -Consider the following URL: - -`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -Normalizing with the `FlagsSafe` gives: - -`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -With the `FlagsUsuallySafeGreedy`: - -`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` - -And with `FlagsUnsafeGreedy`: - -`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` - -## TODOs - -* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. - -## Thanks / Contributions - -@rogpeppe -@jehiah -@opennota -@pchristopher1275 -@zenovich -@beeker1121 - -## License - -The [BSD 3-Clause license][bsd]. - -[bsd]: http://opensource.org/licenses/BSD-3-Clause -[wiki]: http://en.wikipedia.org/wiki/URL_normalization -[rfc]: http://tools.ietf.org/html/rfc3986#section-6 -[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell -[pr5]: https://github.com/PuerkitoBio/purell/pull/5 -[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go deleted file mode 100644 index 6d0fc190a188..000000000000 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ /dev/null @@ -1,379 +0,0 @@ -/* -Package purell offers URL normalization as described on the wikipedia page: -http://en.wikipedia.org/wiki/URL_normalization -*/ -package purell - -import ( - "bytes" - "fmt" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/PuerkitoBio/urlesc" - "golang.org/x/net/idna" - "golang.org/x/text/unicode/norm" - "golang.org/x/text/width" -) - -// A set of normalization flags determines how a URL will -// be normalized. 
-type NormalizationFlags uint - -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
- - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) - -const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" -) - -// Regular expressions used by the normalizations -var rxPort = regexp.MustCompile(`(:\d+)/?$`) -var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) -var rxDupSlashes = regexp.MustCompile(`/{2,}`) -var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) -var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) -var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) -var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) -var rxEmptyPort = regexp.MustCompile(`:+$`) - -// Map of flags to implementation function. -// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically -// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. - -// Since maps have undefined traversing order, make a slice of ordered keys -var flagsOrder = []NormalizationFlags{ - FlagLowercaseScheme, - FlagLowercaseHost, - FlagRemoveDefaultPort, - FlagRemoveDirectoryIndex, - FlagRemoveDotSegments, - FlagRemoveFragment, - FlagForceHTTP, // Must be after remove default port (because https=443/http=80) - FlagRemoveDuplicateSlashes, - FlagRemoveWWW, - FlagAddWWW, - FlagSortQuery, - FlagDecodeDWORDHost, - FlagDecodeOctalHost, - FlagDecodeHexHost, - FlagRemoveUnnecessaryHostDots, - FlagRemoveEmptyPortSeparator, - FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last - FlagAddTrailingSlash, -} - -// ... 
and then the map, where order is unimportant -var flags = map[NormalizationFlags]func(*url.URL){ - FlagLowercaseScheme: lowercaseScheme, - FlagLowercaseHost: lowercaseHost, - FlagRemoveDefaultPort: removeDefaultPort, - FlagRemoveDirectoryIndex: removeDirectoryIndex, - FlagRemoveDotSegments: removeDotSegments, - FlagRemoveFragment: removeFragment, - FlagForceHTTP: forceHTTP, - FlagRemoveDuplicateSlashes: removeDuplicateSlashes, - FlagRemoveWWW: removeWWW, - FlagAddWWW: addWWW, - FlagSortQuery: sortQuery, - FlagDecodeDWORDHost: decodeDWORDHost, - FlagDecodeOctalHost: decodeOctalHost, - FlagDecodeHexHost: decodeHexHost, - FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, - FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, - FlagRemoveTrailingSlash: removeTrailingSlash, - FlagAddTrailingSlash: addTrailingSlash, -} - -// MustNormalizeURLString returns the normalized string, and panics if an error occurs. -// It takes an URL string as input, as well as the normalization flags. -func MustNormalizeURLString(u string, f NormalizationFlags) string { - result, e := NormalizeURLString(u, f) - if e != nil { - panic(e) - } - return result -} - -// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. -// It takes an URL string as input, as well as the normalization flags. -func NormalizeURLString(u string, f NormalizationFlags) (string, error) { - parsed, err := url.Parse(u) - if err != nil { - return "", err - } - - if f&FlagLowercaseHost == FlagLowercaseHost { - parsed.Host = strings.ToLower(parsed.Host) - } - - // The idna package doesn't fully conform to RFC 5895 - // (https://tools.ietf.org/html/rfc5895), so we do it here. - // Taken from Go 1.8 cycle source, courtesy of bradfitz. - // TODO: Remove when (if?) idna package conforms to RFC 5895. - parsed.Host = width.Fold.String(parsed.Host) - parsed.Host = norm.NFC.String(parsed.Host) - if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil { - return "", err - } - - return NormalizeURL(parsed, f), nil -} - -// NormalizeURL returns the normalized string. -// It takes a parsed URL object as input, as well as the normalization flags. -func NormalizeURL(u *url.URL, f NormalizationFlags) string { - for _, k := range flagsOrder { - if f&k == k { - flags[k](u) - } - } - return urlesc.Escape(u) -} - -func lowercaseScheme(u *url.URL) { - if len(u.Scheme) > 0 { - u.Scheme = strings.ToLower(u.Scheme) - } -} - -func lowercaseHost(u *url.URL) { - if len(u.Host) > 0 { - u.Host = strings.ToLower(u.Host) - } -} - -func removeDefaultPort(u *url.URL) { - if len(u.Host) > 0 { - scheme := strings.ToLower(u.Scheme) - u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { - return "" - } - return val - }) - } -} - -func removeTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if strings.HasSuffix(u.Path, "/") { - u.Path = u.Path[:l-1] - } - } else if l = len(u.Host); l > 0 { - if strings.HasSuffix(u.Host, "/") { - u.Host = u.Host[:l-1] - } - } -} - -func addTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } else if l = len(u.Host); l > 0 { - if !strings.HasSuffix(u.Host, "/") { - u.Host += "/" - } - } -} - -func removeDotSegments(u *url.URL) { - if len(u.Path) > 0 { - var dotFree []string - var lastIsDot bool - - sections := strings.Split(u.Path, "/") - for _, s := range sections { - if s == ".." 
{ - if len(dotFree) > 0 { - dotFree = dotFree[:len(dotFree)-1] - } - } else if s != "." { - dotFree = append(dotFree, s) - } - lastIsDot = (s == "." || s == "..") - } - // Special case if host does not end with / and new path does not begin with / - u.Path = strings.Join(dotFree, "/") - if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - // Special case if the last segment was a dot, make sure the path ends with a slash - if lastIsDot && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } -} - -func removeDirectoryIndex(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") - } -} - -func removeFragment(u *url.URL) { - u.Fragment = "" -} - -func forceHTTP(u *url.URL) { - if strings.ToLower(u.Scheme) == "https" { - u.Scheme = "http" - } -} - -func removeDuplicateSlashes(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") - } -} - -func removeWWW(u *url.URL) { - if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = u.Host[4:] - } -} - -func addWWW(u *url.URL) { - if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = "www." + u.Host - } -} - -func sortQuery(u *url.URL) { - q := u.Query() - - if len(q) > 0 { - arKeys := make([]string, len(q)) - i := 0 - for k := range q { - arKeys[i] = k - i++ - } - sort.Strings(arKeys) - buf := new(bytes.Buffer) - for _, k := range arKeys { - sort.Strings(q[k]) - for _, v := range q[k] { - if buf.Len() > 0 { - buf.WriteRune('&') - } - buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) - } - } - - // Rebuild the raw query string - u.RawQuery = buf.String() - } -} - -func decodeDWORDHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { - var parts [4]int64 - - dword, _ := strconv.ParseInt(matches[1], 10, 0) - for i, shift := range []uint{24, 16, 8, 0} { - parts[i] = dword >> shift & 0xFF - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) - } - } -} - -func decodeOctalHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { - var parts [4]int64 - - for i := 1; i <= 4; i++ { - parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) - } - } -} - -func decodeHexHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { - // Conversion is safe because of regex validation - parsed, _ := strconv.ParseInt(matches[1], 16, 0) - // Set host as DWORD (base 10) encoded host - u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) - // The rest is the same as decoding a DWORD host - decodeDWORDHost(u) - } - } -} - -func removeUnncessaryHostDots(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { - // Trim the leading and trailing dots - u.Host = strings.Trim(matches[1], ".") - if len(matches) > 2 { - u.Host += matches[2] - } - } - } -} - -func removeEmptyPortSeparator(u *url.URL) { - if len(u.Host) > 0 { - u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") - } -} diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml deleted file mode 100644 index ba6b225f91e2..000000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml +++ 
/dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - tip - -install: - - go build . - -script: - - go test -v diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE deleted file mode 100644 index 74487567632c..000000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md deleted file mode 100644 index 57aff0a5396d..000000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/README.md +++ /dev/null @@ -1,16 +0,0 @@ -urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) -====== - -Package urlesc implements query escaping as per RFC 3986. - -It contains some parts of the net/url package, modified so as to allow -some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). - -## Install - - go get github.com/PuerkitoBio/urlesc - -## License - -Go license (BSD-3-Clause) - diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go deleted file mode 100644 index 1b84624594d0..000000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package urlesc implements query escaping as per RFC 3986. -// It contains some parts of the net/url package, modified so as to allow -// some reserved characters incorrectly escaped by net/url. 
-// See https://github.com/golang/go/issues/5684 -package urlesc - -import ( - "bytes" - "net/url" - "strings" -) - -type encoding int - -const ( - encodePath encoding = 1 + iota - encodeUserPassword - encodeQueryComponent - encodeFragment -) - -// Return true if the specified character should be escaped when -// appearing in a URL string, according to RFC 3986. -func shouldEscape(c byte, mode encoding) bool { - // §2.3 Unreserved characters (alphanum) - if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { - return false - } - - switch c { - case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) - return false - - // §2.2 Reserved characters (reserved) - case ':', '/', '?', '#', '[', ']', '@', // gen-delims - '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims - // Different sections of the URL allow a few of - // the reserved characters to appear unescaped. - switch mode { - case encodePath: // §3.3 - // The RFC allows sub-delims and : @. - // '/', '[' and ']' can be used to assign meaning to individual path - // segments. This package only manipulates the path as a whole, - // so we allow those as well. That leaves only ? and # to escape. - return c == '?' || c == '#' - - case encodeUserPassword: // §3.2.1 - // The RFC allows : and sub-delims in - // userinfo. The parsing of userinfo treats ':' as special so we must escape - // all the gen-delims. - return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' - - case encodeQueryComponent: // §3.4 - // The RFC allows / and ?. - return c != '/' && c != '?' - - case encodeFragment: // §4.1 - // The RFC text is silent but the grammar allows - // everything, so escape nothing but # - return c == '#' - } - } - - // Everything else must be escaped. - return true -} - -// QueryEscape escapes the string so it can be safely placed -// inside a URL query. -func QueryEscape(s string) string { - return escape(s, encodeQueryComponent) -} - -func escape(s string, mode encoding) string { - spaceCount, hexCount := 0, 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(c, mode) { - if c == ' ' && mode == encodeQueryComponent { - spaceCount++ - } else { - hexCount++ - } - } - } - - if spaceCount == 0 && hexCount == 0 { - return s - } - - t := make([]byte, len(s)+2*hexCount) - j := 0 - for i := 0; i < len(s); i++ { - switch c := s[i]; { - case c == ' ' && mode == encodeQueryComponent: - t[j] = '+' - j++ - case shouldEscape(c, mode): - t[j] = '%' - t[j+1] = "0123456789ABCDEF"[c>>4] - t[j+2] = "0123456789ABCDEF"[c&15] - j += 3 - default: - t[j] = s[i] - j++ - } - } - return string(t) -} - -var uiReplacer = strings.NewReplacer( - "%21", "!", - "%27", "'", - "%28", "(", - "%29", ")", - "%2A", "*", -) - -// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. -func unescapeUserinfo(s string) string { - return uiReplacer.Replace(s) -} - -// Escape reassembles the URL into a valid URL string. -// The general form of the result is one of: -// -// scheme:opaque -// scheme://userinfo@host/path?query#fragment -// -// If u.Opaque is non-empty, String uses the first form; -// otherwise it uses the second form. -// -// In the second form, the following rules apply: -// - if u.Scheme is empty, scheme: is omitted. -// - if u.User is nil, userinfo@ is omitted. -// - if u.Host is empty, host/ is omitted. -// - if u.Scheme and u.Host are empty and u.User is nil, -// the entire scheme://userinfo@host/ is omitted. 
-// - if u.Host is non-empty and u.Path begins with a /, -// the form host/path does not add its own /. -// - if u.RawQuery is empty, ?query is omitted. -// - if u.Fragment is empty, #fragment is omitted. -func Escape(u *url.URL) string { - var buf bytes.Buffer - if u.Scheme != "" { - buf.WriteString(u.Scheme) - buf.WriteByte(':') - } - if u.Opaque != "" { - buf.WriteString(u.Opaque) - } else { - if u.Scheme != "" || u.Host != "" || u.User != nil { - buf.WriteString("//") - if ui := u.User; ui != nil { - buf.WriteString(unescapeUserinfo(ui.String())) - buf.WriteByte('@') - } - if h := u.Host; h != "" { - buf.WriteString(h) - } - } - if u.Path != "" && u.Path[0] != '/' && u.Host != "" { - buf.WriteByte('/') - } - buf.WriteString(escape(u.Path, encodePath)) - } - if u.RawQuery != "" { - buf.WriteByte('?') - buf.WriteString(u.RawQuery) - } - if u.Fragment != "" { - buf.WriteByte('#') - buf.WriteString(escape(u.Fragment, encodeFragment)) - } - return buf.String() -} diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore index 2c9adc20b319..f88795748dee 100644 --- a/vendor/github.com/Shopify/sarama/.gitignore +++ b/vendor/github.com/Shopify/sarama/.gitignore @@ -23,7 +23,9 @@ _testmain.go *.exe -coverage.txt -profile.out +/bin +/coverage.txt +/profile.out +/output.json -simplest-uncommitted-msg-0.1-jar-with-dependencies.jar +.idea diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/Shopify/sarama/.golangci.yml index f92a7167c232..0b419abbfaec 100644 --- a/vendor/github.com/Shopify/sarama/.golangci.yml +++ b/vendor/github.com/Shopify/sarama/.golangci.yml @@ -23,13 +23,18 @@ linters-settings: gocritic: enabled-tags: - diagnostic - - experimental - - opinionated - - performance - - style + # - experimental + # - opinionated + # - performance + # - style disabled-checks: - - wrapperFunc + - assignOp + - appendAssign + - commentedOutCode - ifElseChain + - singleCaseSwitch + - sloppyReassign + - wrapperFunc funlen: lines: 300 statements: 300 @@ -44,10 +49,11 @@ linters: - dogsled # - dupl - errcheck + - errorlint - funlen - gochecknoinits # - goconst - # - gocritic + - gocritic - gocyclo - gofmt - goimports @@ -59,6 +65,7 @@ linters: - misspell # - nakedret - nilerr + # - paralleltest # - scopelint - staticcheck - structcheck @@ -72,5 +79,10 @@ linters: issues: exclude: - "G404: Use of weak random number generator" + exclude-rules: + # exclude some linters from running on certains files. + - path: functional.*_test\.go + linters: + - paralleltest # maximum count of issues with the same text. set to 0 for unlimited. default is 3. 
max-same-issues: 0 diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md index dd826e273df7..c2f92ec9a101 100644 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md @@ -1,12 +1,176 @@ # Changelog -#### Unreleased +## Version 1.31.1 (2022-02-01) + +- #2126 - @bai - Populate missing kafka versions +- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image +- #2123 - @bai - Update klauspost/compress to 0.14 +- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy +- #2119 - @bai - Add Kafka 3.1.0 version number +- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption +- #2051 - @seveas - Expose the TLS connection state of a broker connection +- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys +- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup +- #2113 - @mosceo - Fix typo + +## Version 1.31.0 (2022-01-18) + +## What's Changed +### :tada: New Features / Improvements +* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/Shopify/sarama/pull/2088 +* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/Shopify/sarama/pull/1686 +* Support request pipelining in AsyncProducer by @slaunay in https://github.com/Shopify/sarama/pull/2094 +### :bug: Fixes +* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/Shopify/sarama/pull/2080 +* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/Shopify/sarama/pull/2081 +* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/Shopify/sarama/pull/2082 +* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/Shopify/sarama/pull/2096 +* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/Shopify/sarama/pull/2107 +* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/Shopify/sarama/pull/2108 +* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/Shopify/sarama/pull/2078 +* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/Shopify/sarama/pull/2111 +### :wrench: Maintenance +* chore: bump runtime and test dependencies by @dnwe in https://github.com/Shopify/sarama/pull/2100 +### :memo: Documentation +* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/Shopify/sarama/pull/2099 +### :heavy_plus_sign: Other Changes +* Fix typo by @mosceo in https://github.com/Shopify/sarama/pull/2084 + +## New Contributors +* @grongor made their first contribution in https://github.com/Shopify/sarama/pull/2080 +* @fengyinqiao made their first contribution in https://github.com/Shopify/sarama/pull/2088 +* @xujianhai666 made their first contribution in https://github.com/Shopify/sarama/pull/1686 +* @mosceo made their first contribution in https://github.com/Shopify/sarama/pull/2084 + +**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.1...v1.31.0 + +## Version 1.30.1 (2021-12-04) + +## What's Changed +### :tada: New Features / Improvements +* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/Shopify/sarama/pull/2045 +### :bug: Fixes +* fix: set min-go-version to 1.16 by @troyanov in 
https://github.com/Shopify/sarama/pull/2048 +* logger: fix debug logs' formatting directives by @utrack in https://github.com/Shopify/sarama/pull/2054 +* fix: stuck on the batch with zero records length by @pachmu in https://github.com/Shopify/sarama/pull/2057 +* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/Shopify/sarama/pull/2076 +### :wrench: Maintenance +* chore: add release notes configuration by @dnwe in https://github.com/Shopify/sarama/pull/2046 +* chore: confluent platform version bump by @lizthegrey in https://github.com/Shopify/sarama/pull/2070 + +## Notes +* ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x + +## New Contributors +* @troyanov made their first contribution in https://github.com/Shopify/sarama/pull/2048 +* @lizthegrey made their first contribution in https://github.com/Shopify/sarama/pull/2045 +* @utrack made their first contribution in https://github.com/Shopify/sarama/pull/2054 +* @pachmu made their first contribution in https://github.com/Shopify/sarama/pull/2057 + +**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.0...v1.30.1 + +## Version 1.30.0 (2021-09-29) + +⚠️ This release has been superseded by v1.30.1 and should _not_ be used. + +**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076 + +--- + +ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x** + +--- + +# New Features / Improvements + +- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh +- #2000 - @matzew - Using xdg-go module for SCRAM +- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures +- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM +- #2006 - @faillefer - Add support for DeleteOffsets operation +- #1909 - @agriffaut - KIP-546 Client quota APIs +- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state +- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger +- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log +- #2019 - @dnwe - feat: add logging & a metric for producer throttle +- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface +- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol +- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open +- #2034 - @bai - Add support for kafka 3.0.0 -# Improvements +# Fixes + +- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest +- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation +- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls +- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true +- #2007 - @bai - Add support for Go 1.17 +- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks +- #2010 - @dnwe - chore: enable exportloopref and misspell linters +- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements +- #2015 - @bai - Change default branch to main +- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID() +- #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0 +- #2016 - @dnwe - chore: replace deprecated Go calls +- #2017 - @dnwe - chore: delete legacy vagrant script +- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test +- #2024 - @dnwe - chore: 
bump toxiproxy container to v2.1.5 +- #2033 - @bai - Update dependencies +- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method +- #2035 - @dnwe - chore: populate the missing kafka versions +- #2038 - @dnwe - feat: add a fuzzing workflow to github actions + +## New Contributors +* @zifengyu made their first contribution in https://github.com/Shopify/sarama/pull/1983 +* @doxsch made their first contribution in https://github.com/Shopify/sarama/pull/1990 +* @LubergAlexander made their first contribution in https://github.com/Shopify/sarama/pull/1988 +* @HurSungYun made their first contribution in https://github.com/Shopify/sarama/pull/2001 +* @gdm85 made their first contribution in https://github.com/Shopify/sarama/pull/2003 +* @qiangmzsx made their first contribution in https://github.com/Shopify/sarama/pull/1973 +* @zhaomoran made their first contribution in https://github.com/Shopify/sarama/pull/1992 +* @faillefer made their first contribution in https://github.com/Shopify/sarama/pull/2006 +* @crivera-fastly made their first contribution in https://github.com/Shopify/sarama/pull/1718 +* @null-sleep made their first contribution in https://github.com/Shopify/sarama/pull/1984 + +**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.29.1...v1.30.0 + +## Version 1.29.1 (2021-06-24) + +# New Features / Improvements + +- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API +- #1964 - @ajanikow - Add DelegationToken ResourceType + +# Fixes + +- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire +- #1971 - @KerryJava - fix kafka-producer-performance throughput panic +- #1968 - @dnwe - chore: bump golang.org/x versions +- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers +- #1963 - @dnwe - fix: ensure backoff timer is re-used +- #1949 - @dnwe - fix: explicitly use uint64 for payload length + +## Version 1.29.0 (2021-05-07) + +### New Features / Improvements -- #1912 - @faillefer Support for --delete-offsets for consumer group topic +- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API +- #1869 - @wyndhblb - zstd: encode+decode performance improvements +- #1541 - @izolight - add String, (Un)MarshalText for acl types. +- #1921 - @bai - Add support for Kafka 2.8.0 -#### Version 1.28.0 (2021-02-15) +### Fixes +- #1936 - @dnwe - fix(consumer): follow preferred broker +- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication +- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response) +- #1926 - @dnwe - fix: correct initial CodeQL findings +- #1925 - @bai - Test out CodeQL +- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos +- #1922 - @bai - Update go dependencies +- #1898 - @mmaslankaprv - Parsing only known control batches value +- #1887 - @withshubh - Fix: issues affecting code quality + +## Version 1.28.0 (2021-02-15) **Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. 
See #1788 for details.** @@ -20,19 +184,19 @@ - #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev - #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica -#### Version 1.27.2 (2020-10-21) +## Version 1.27.2 (2020-10-21) -# Improvements +### Improvements #1750 - @krantideep95 Adds missing mock responses for mocking consumer group -# Fixes +## Fixes #1817 - reverts #1785 - Add private method to Client interface to prevent implementation -#### Version 1.27.1 (2020-10-07) +## Version 1.27.1 (2020-10-07) -# Improvements +### Improvements #1775 - @d1egoaz - Adds a Producer Interceptor example #1781 - @justin-chen - Refresh brokers given list of seed brokers @@ -42,13 +206,13 @@ #1785 - @justin-chen - Add private method to Client interface to prevent implementation #1802 - @uvw - Support Go 1.13 error unwrapping -# Fixes +## Fixes #1791 - @stanislavkozlovski - bump default version to 1.0.0 -#### Version 1.27.0 (2020-08-11) +## Version 1.27.0 (2020-08-11) -# Improvements +### Improvements #1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration #1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests @@ -62,31 +226,31 @@ #1763 - @alrs - remove deprecated tls options from test #1769 - @bai - Add support for Kafka 2.6.0 -# Fixes +## Fixes #1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication #1744 - @alrs - Fix isBalanced Function Signature -#### Version 1.26.4 (2020-05-19) +## Version 1.26.4 (2020-05-19) -# Fixes +## Fixes - #1701 - @d1egoaz - Set server name only for the current broker - #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka -#### Version 1.26.3 (2020-05-07) +## Version 1.26.3 (2020-05-07) -# Fixes +## Fixes - #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config -#### Version 1.26.2 (2020-05-06) +## Version 1.26.2 (2020-05-06) -# ⚠️ Known Issues +## ⚠️ Known Issues This release has been marked as not ready for production and may be unstable, please use v1.26.4. -# Improvements +### Improvements - #1560 - @iyacontrol - add sync pool for gzip 1-9 - #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID @@ -96,7 +260,7 @@ This release has been marked as not ready for production and may be unstable, pl - #1646 - @mimaison - Add DescribeLogDirs to admin client - #1667 - @bai - Add support for kafka 2.5.0 -# Fixes +## Fixes - #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0 - #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine @@ -113,7 +277,7 @@ This release has been marked as not ready for production and may be unstable, pl - #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die - #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy. 
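An aside on the 1.28.0 note above: the `RoundRobinBalancer` behaviour change only matters if that strategy is explicitly selected, since the default assignment remains range-based. A minimal sketch of opting in (config field names assumed from the sarama consumer-group settings, not taken from this changelog):

```go
package main

import "github.com/Shopify/sarama"

// newRoundRobinGroupConfig opts a consumer group into the round-robin
// partition assignment whose behaviour the 1.28.0 note above refers to.
func newRoundRobinGroupConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_8_0_0 // placeholder broker version
	cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
	return cfg
}
```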
-#### Version 1.26.1 (2020-02-04) +## Version 1.26.1 (2020-02-04) Improvements: - Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539)) @@ -125,7 +289,7 @@ Bug Fixes: - Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590)) - Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589)) -#### Version 1.26.0 (2020-01-24) +## Version 1.26.0 (2020-01-24) New Features: - Enable zstd compression @@ -156,7 +320,7 @@ Bug Fixes: - Retry topic request on ControllerNotAvailable ([1586](https://github.com/Shopify/sarama/pull/1586)). -#### Version 1.25.0 (2020-01-13) +## Version 1.25.0 (2020-01-13) New Features: - Support TLS protocol in kafka-producer-performance @@ -180,7 +344,7 @@ Bug Fixes: - Fix possible faulty metrics in TestFuncProducing ([1545](https://github.com/Shopify/sarama/pull/1545)). -#### Version 1.24.1 (2019-10-31) +## Version 1.24.1 (2019-10-31) New Features: - Add DescribeLogDirs Request/Response pair @@ -194,7 +358,7 @@ Bug Fixes: - Ensure consistent use of read/write deadlines ([1529](https://github.com/Shopify/sarama/pull/1529)). -#### Version 1.24.0 (2019-10-09) +## Version 1.24.0 (2019-10-09) New Features: - Add sticky partition assignor @@ -236,7 +400,7 @@ Known Issues: - Please **don't** use Zstd, as it doesn't work right now. See https://github.com/Shopify/sarama/issues/1252 -#### Version 1.23.1 (2019-07-22) +## Version 1.23.1 (2019-07-22) Bug Fixes: - Fix fetch delete bug record @@ -244,7 +408,7 @@ Bug Fixes: - Handle SASL/OAUTHBEARER token rejection ([1428](https://github.com/Shopify/sarama/pull/1428)). -#### Version 1.23.0 (2019-07-02) +## Version 1.23.0 (2019-07-02) New Features: - Add support for Kafka 2.3.0 @@ -280,7 +444,7 @@ Bug Fixes: - Refactor misspelled word Resouce to Resource ([1368](https://github.com/Shopify/sarama/pull/1368)). -#### Version 1.22.1 (2019-04-29) +## Version 1.22.1 (2019-04-29) Improvements: - Use zstd 1.3.8 @@ -296,7 +460,7 @@ Bug Fixes: - Fix AllowAutoTopicCreation for MetadataRequest greater than v3 ([1344](https://github.com/Shopify/sarama/pull/1344)). -#### Version 1.22.0 (2019-04-09) +## Version 1.22.0 (2019-04-09) New Features: - Add Offline Replicas Operation to Client @@ -340,7 +504,7 @@ Bug Fixes: - Fix rate condition in PartitionConsumer ([1156](https://github.com/Shopify/sarama/pull/1156)). -#### Version 1.21.0 (2019-02-24) +## Version 1.21.0 (2019-02-24) New Features: - Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest @@ -376,7 +540,7 @@ Bug Fixes: - Update kafka test version ([1273](https://github.com/Shopify/sarama/pull/1273)). -#### Version 1.20.1 (2019-01-10) +## Version 1.20.1 (2019-01-10) New Features: - Add optional replica id in offset request @@ -400,7 +564,7 @@ Bug Fixes: - Respect MaxMessageBytes limit for uncompressed messages ([1141](https://github.com/Shopify/sarama/pull/1141)). -#### Version 1.20.0 (2018-12-10) +## Version 1.20.0 (2018-12-10) New Features: - Add support for zstd compression @@ -430,7 +594,7 @@ Bug Fixes: - Fix typos in code and comments ([#1228](https://github.com/Shopify/sarama/pull/1228)). -#### Version 1.19.0 (2018-09-27) +## Version 1.19.0 (2018-09-27) New Features: - Implement a higher-level consumer group @@ -446,7 +610,7 @@ Bug Fixes: - Fix race condition in mock async producer ([#1174](https://github.com/Shopify/sarama/pull/1174)). 
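Several releases above add and later stabilise zstd support (1.20.0, 1.22.1, the 1.24.0 known issue, 1.26.x). If you do enable it, the producer needs both the codec and a `config.Version` new enough for the newer record-batch format; a rough sketch, assuming the standard sarama config fields:

```go
package main

import "github.com/Shopify/sarama"

// newZstdProducerConfig sketches enabling zstd compression. The codec only
// works against brokers that understand it (Kafka 2.1.0+), so a matching
// version must be declared before selecting it.
func newZstdProducerConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0
	cfg.Producer.Compression = sarama.CompressionZSTD
	return cfg
}
```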
-#### Version 1.18.0 (2018-09-07) +## Version 1.18.0 (2018-09-07) New Features: - Make `Partitioner.RequiresConsistency` vary per-message @@ -482,7 +646,7 @@ Bug Fixes: - Fix producer topic metadata on-demand fetch when topic error happens in metadata response ([#1125](https://github.com/Shopify/sarama/pull/1125)). -#### Version 1.17.0 (2018-05-30) +## Version 1.17.0 (2018-05-30) New Features: - Add support for gzip compression levels @@ -520,7 +684,7 @@ Bug Fixes: - Make `PartitionConsumer.Close` idempotent ([#1092](https://github.com/Shopify/sarama/pull/1092)). -#### Version 1.16.0 (2018-02-12) +## Version 1.16.0 (2018-02-12) New Features: - Add support for the Create/Delete Topics request/response pairs @@ -562,7 +726,7 @@ Bug Fixes: - Fix expectation-checking in the mock of `SyncProducer.SendMessages` ([#1035](https://github.com/Shopify/sarama/pull/1035)). -#### Version 1.15.0 (2017-12-08) +## Version 1.15.0 (2017-12-08) New Features: - Claim official support for Kafka 1.0, though it did already work @@ -588,7 +752,7 @@ Bug Fixes: - Fix leaking metrics when closing brokers ([#991](https://github.com/Shopify/sarama/pull/991)). -#### Version 1.14.0 (2017-11-13) +## Version 1.14.0 (2017-11-13) New Features: - Add support for the new Kafka 0.11 record-batch format, including the wire @@ -605,7 +769,7 @@ Bug Fixes: - Return partial replicas list when we have it ([#975](https://github.com/Shopify/sarama/pull/975)). -#### Version 1.13.0 (2017-10-04) +## Version 1.13.0 (2017-10-04) New Features: - Support for FetchRequest version 3 @@ -634,7 +798,7 @@ Bug Fixes: - Raise a proper error when encountering an unknown message version ([#940](https://github.com/Shopify/sarama/pull/940)). -#### Version 1.12.0 (2017-05-08) +## Version 1.12.0 (2017-05-08) New Features: - Added support for the `ApiVersions` request and response pair, and Kafka @@ -668,7 +832,7 @@ Bug Fixes: - Fix an alignment-related issue with atomics on 32-bit architectures ([#859](https://github.com/Shopify/sarama/pull/859)). -#### Version 1.11.0 (2016-12-20) +## Version 1.11.0 (2016-12-20) _Important:_ As of Sarama 1.11 it is necessary to set the config value of `Producer.Return.Successes` to true in order to use the SyncProducer. Previous @@ -700,7 +864,7 @@ Bug Fixes: - Fix crash on SASL initialization failure ([#795](https://github.com/Shopify/sarama/pull/795)). -#### Version 1.10.1 (2016-08-30) +## Version 1.10.1 (2016-08-30) Bug Fixes: - Fix the documentation for `HashPartitioner` which was incorrect @@ -715,7 +879,7 @@ Bug Fixes: - Handle consuming compressed relative offsets with Kafka 0.10 ([#735](https://github.com/Shopify/sarama/pull/735)). -#### Version 1.10.0 (2016-08-02) +## Version 1.10.0 (2016-08-02) _Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of Kafka you are running against (via the `config.Version` value) in order to use @@ -764,7 +928,7 @@ Bug Fixes: - Fix possible negative partition value from the HashPartitioner ([#709](https://github.com/Shopify/sarama/pull/709)). -#### Version 1.9.0 (2016-05-16) +## Version 1.9.0 (2016-05-16) New Features: - Add support for custom offset manager retention durations @@ -788,7 +952,7 @@ Bug Fixes: - Fix race condition shutting down the OffsetManager ([#658](https://github.com/Shopify/sarama/pull/658)). 
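The 1.10.0 and 1.11.0 notes above introduce two configuration requirements that still apply: `config.Version` must be set to the Kafka version you run against, and the SyncProducer needs `Producer.Return.Successes = true`. A minimal sketch satisfying both (broker address, topic, and version are placeholders):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Required since 1.10.0: tell Sarama which Kafka version it is talking to.
	cfg.Version = sarama.V2_8_0_0
	// Required since 1.11.0 in order to use the SyncProducer.
	cfg.Producer.Return.Successes = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)
}
```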
-#### Version 1.8.0 (2016-02-01) +## Version 1.8.0 (2016-02-01) New Features: - Full support for Kafka 0.9: @@ -807,7 +971,7 @@ Improvements: - Automatically retry `InvalidMessage` errors to match upstream behaviour ([#589](https://github.com/Shopify/sarama/pull/589)). -#### Version 1.7.0 (2015-12-11) +## Version 1.7.0 (2015-12-11) New Features: - Preliminary support for Kafka 0.9 @@ -838,13 +1002,13 @@ Bug Fixes: - Fix race condition in consumer test mock ([#553](https://github.com/Shopify/sarama/pull/553)). -#### Version 1.6.1 (2015-09-25) +## Version 1.6.1 (2015-09-25) Bug Fixes: - Fix panic that could occur if a user-supplied message value failed to encode ([#449](https://github.com/Shopify/sarama/pull/449)). -#### Version 1.6.0 (2015-09-04) +## Version 1.6.0 (2015-09-04) New Features: - Implementation of a consumer offset manager using the APIs introduced in @@ -866,7 +1030,7 @@ Bug Fixes: - Fix a potential race condition panic in the consumer on shutdown ([#529](https://github.com/Shopify/sarama/pull/529)). -#### Version 1.5.0 (2015-08-17) +## Version 1.5.0 (2015-08-17) New Features: - TLS-encrypted network connections are now supported. This feature is subject @@ -887,7 +1051,7 @@ Bug Fixes: - Fix a potential deadlock in the consumer on shutdown ([#475](https://github.com/Shopify/sarama/pull/475)). -#### Version 1.4.3 (2015-07-21) +## Version 1.4.3 (2015-07-21) Bug Fixes: - Don't include the partitioner in the producer's "fetch partitions" @@ -897,13 +1061,13 @@ Bug Fixes: - Update the import path for snappy-go, it has moved again and the API has changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). -#### Version 1.4.2 (2015-05-27) +## Version 1.4.2 (2015-05-27) Bug Fixes: - Update the import path for snappy-go, it has moved from google code to github ([#456](https://github.com/Shopify/sarama/pull/456)). -#### Version 1.4.1 (2015-05-25) +## Version 1.4.1 (2015-05-25) Improvements: - Optimizations when decoding snappy messages, thanks to John Potocny @@ -914,7 +1078,7 @@ Bug Fixes: ([#450](https://github.com/Shopify/sarama/pull/450), [#451](https://github.com/Shopify/sarama/pull/451)). -#### Version 1.4.0 (2015-05-01) +## Version 1.4.0 (2015-05-01) New Features: - The consumer now implements `Topics()` and `Partitions()` methods to enable @@ -944,7 +1108,7 @@ Bug Fixes: making it much more resilient to specific user code ordering ([#325](https://github.com/Shopify/sarama/pull/325)). -#### Version 1.3.0 (2015-04-16) +## Version 1.3.0 (2015-04-16) New Features: - The client now tracks consumer group coordinators using @@ -972,7 +1136,7 @@ Bug Fixes: it happens to be activated while the client is being closed ([#422](https://github.com/Shopify/sarama/pull/422)). -#### Version 1.2.0 (2015-04-07) +## Version 1.2.0 (2015-04-07) Improvements: - The producer's behaviour when `Flush.Frequency` is set is now more intuitive @@ -993,7 +1157,7 @@ Bug Fixes: API versions ([#390](https://github.com/Shopify/sarama/pull/390), [#400](https://github.com/Shopify/sarama/pull/400)). -#### Version 1.1.0 (2015-03-20) +## Version 1.1.0 (2015-03-20) Improvements: - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly @@ -1011,7 +1175,7 @@ Bug Fixes: metadata for a non-existant topic ([#372](https://github.com/Shopify/sarama/pull/372)). -#### Version 1.0.0 (2015-03-17) +## Version 1.0.0 (2015-03-17) Version 1.0.0 is the first tagged version, and is almost a complete rewrite. 
The primary differences with previous untagged versions are: diff --git a/vendor/github.com/Shopify/sarama/Dockerfile.kafka b/vendor/github.com/Shopify/sarama/Dockerfile.kafka new file mode 100644 index 000000000000..c361f6d122c2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/Dockerfile.kafka @@ -0,0 +1,27 @@ +FROM registry.access.redhat.com/ubi8/ubi-minimal:latest + +USER root + +RUN microdnf update \ + && microdnf install curl gzip java-11-openjdk-headless tar \ + && microdnf clean all + +ENV JAVA_HOME=/usr/lib/jvm/jre-11 + +# https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html +# Ensure Java doesn't cache any dns results +RUN cd /etc/java/java-11-openjdk/*/conf/security \ + && sed -e '/networkaddress.cache.ttl/d' -e '/networkaddress.cache.negative.ttl/d' -i java.security \ + && echo 'networkaddress.cache.ttl=0' >> java.security \ + && echo 'networkaddress.cache.negative.ttl=0' >> java.security + +# https://github.com/apache/kafka/blob/0d518aaed158896ee9ee6949b8f38128d1d73634/tests/docker/Dockerfile#L65-L67 +ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" +RUN mkdir -p "/opt/kafka-2.8.2" && chmod a+rw /opt/kafka-2.8.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-2.8.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-2.8.2" +RUN mkdir -p "/opt/kafka-3.0.2" && chmod a+rw /opt/kafka-3.0.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.0.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.0.2" +RUN mkdir -p "/opt/kafka-3.1.2" && chmod a+rw /opt/kafka-3.1.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.1.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.1.2" +RUN mkdir -p "/opt/kafka-3.2.3" && chmod a+rw /opt/kafka-3.2.3 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.2.3.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.2.3" + +COPY entrypoint.sh / + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile index 8f8fc6bdb0eb..7cefc2a2c36b 100644 --- a/vendor/github.com/Shopify/sarama/Makefile +++ b/vendor/github.com/Shopify/sarama/Makefile @@ -1,12 +1,15 @@ default: fmt get update test lint GO := go +GOBIN := $(shell pwd)/bin GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG) -GOTEST := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 10m -coverprofile=profile.out -covermode=atomic +GOTEST := $(GO) test -v -race -coverprofile=profile.out -covermode=atomic FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go') TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go') +$(GOBIN)/tparse: + GOBIN=$(GOBIN) go install github.com/mfridman/tparse@v0.11.1 get: $(GO) get ./... $(GO) mod verify @@ -23,9 +26,14 @@ fmt: lint: GOFLAGS="-tags=functional" golangci-lint run -test: - $(GOTEST) ./... - +test: $(GOBIN)/tparse + $(GOTEST) -timeout 2m -json ./... \ + | tee output.json | $(GOBIN)/tparse -follow -all + [ -z "$${GITHUB_STEP_SUMMARY}" ] \ + || NO_COLOR=1 $(GOBIN)/tparse -format markdown -file output.json -all >"$${GITHUB_STEP_SUMMARY:-/dev/null}" .PHONY: test_functional -test_functional: - $(GOTEST) -tags=functional ./... +test_functional: $(GOBIN)/tparse + $(GOTEST) -timeout 15m -tags=functional -json ./... 
\ + | tee output.json | $(GOBIN)/tparse -follow -all + [ -z "$${GITHUB_STEP_SUMMARY:-}" ] \ + || NO_COLOR=1 $(GOBIN)/tparse -format markdown -file output.json -all >"$${GITHUB_STEP_SUMMARY:-/dev/null}" diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md index f66550a8b063..0ee6e6a7f68c 100644 --- a/vendor/github.com/Shopify/sarama/README.md +++ b/vendor/github.com/Shopify/sarama/README.md @@ -3,7 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama) [![Coverage](https://codecov.io/gh/Shopify/sarama/branch/main/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) -Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later). +Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/). ## Getting started @@ -18,9 +18,7 @@ You might also want to look at the [Frequently Asked Questions](https://github.c Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest stable releases of Kafka and Go, and we provide a two month -grace period for older releases. This means we currently officially support -Go 1.15 through 1.16, and Kafka 2.7 through 2.8, although older releases are -still likely to work. +grace period for older releases. However, older releases of Kafka are still likely to work. Sarama follows semantic versioning and provides API stability via the gopkg.in service. You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go index 59fd9983e116..f06198a65294 100644 --- a/vendor/github.com/Shopify/sarama/admin.go +++ b/vendor/github.com/Shopify/sarama/admin.go @@ -70,11 +70,24 @@ type ClusterAdmin interface { // for some resources while fail for others. The configs for a particular resource are updated automatically. AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error + // IncrementalAlterConfig Incrementally Update the configuration for the specified resources with the default options. + // This operation is supported by brokers with version 2.3.0.0 or higher. + // Updates are not transactional so they may succeed for some resources while fail for others. + // The configs for a particular resource are updated automatically. + IncrementalAlterConfig(resourceType ConfigResourceType, name string, entries map[string]IncrementalAlterConfigsEntry, validateOnly bool) error + + // Creates an access control list (ACL) which is bound to a specific resource. + // This operation is not transactional so it may succeed or fail. + // If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but + // no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher. + // Deprecated: Use CreateACLs instead. + CreateACL(resource Resource, acl Acl) error + // Creates access control lists (ACLs) which are bound to specific resources. // This operation is not transactional so it may succeed for some ACLs while fail for others. // If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but // no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher. 
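As a rough usage sketch of the batched ACL creation described above (broker address, principal, and topic names are invented for illustration; the version gate mirrors the request construction in this file):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_0_0_0 // CreateAcls v1 is selected at 2.0.0+

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// Grant read access on one topic; several ACLs can be batched per resource.
	err = admin.CreateACLs([]*sarama.ResourceAcls{{
		Resource: sarama.Resource{
			ResourceType:        sarama.AclResourceTopic,
			ResourceName:        "example-topic",
			ResourcePatternType: sarama.AclPatternLiteral,
		},
		Acls: []*sarama.Acl{{
			Principal:      "User:example",
			Host:           "*",
			Operation:      sarama.AclOperationRead,
			PermissionType: sarama.AclPermissionAllow,
		}},
	}})
	if err != nil {
		log.Fatal(err)
	}
}
```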
- CreateACL(resource Resource, acl Acl) error + CreateACLs([]*ResourceAcls) error // Lists access control lists (ACLs) according to the supplied filter. // it may take some time for changes made by createAcls or deleteAcls to be reflected in the output of ListAcls @@ -128,6 +141,11 @@ type ClusterAdmin interface { // locally cached value if it's available. Controller() (*Broker, error) + // Remove members from the consumer group by given member identities. + // This operation is supported by brokers with version 2.3 or higher + // This is for static membership feature. KIP-345 + RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) (*LeaveGroupResponse, error) + // Close shuts down the admin and closes underlying client. Close() error } @@ -143,7 +161,11 @@ func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) { if err != nil { return nil, err } - return NewClusterAdminFromClient(client) + admin, err := NewClusterAdminFromClient(client) + if err != nil { + client.Close() + } + return admin, err } // NewClusterAdminFromClient creates a new ClusterAdmin using the given client. @@ -177,15 +199,7 @@ func (ca *clusterAdmin) refreshController() (*Broker, error) { // isErrNoController returns `true` if the given error type unwraps to an // `ErrNotController` response from Kafka func isErrNoController(err error) bool { - switch e := err.(type) { - case *TopicError: - return e.Err == ErrNotController - case *TopicPartitionError: - return e.Err == ErrNotController - case KError: - return e == ErrNotController - } - return false + return errors.Is(err, ErrNotController) } // retryOnError will repeatedly call the given (error-returning) func in the @@ -249,8 +263,8 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO return ErrIncompleteResponse } - if topicErr.Err != ErrNoError { - if topicErr.Err == ErrNotController { + if !errors.Is(topicErr.Err, ErrNoError) { + if errors.Is(topicErr.Err, ErrNotController) { _, _ = ca.refreshController() } return topicErr @@ -436,8 +450,8 @@ func (ca *clusterAdmin) DeleteTopic(topic string) error { return ErrIncompleteResponse } - if topicErr != ErrNoError { - if topicErr == ErrNotController { + if !errors.Is(topicErr, ErrNoError) { + if errors.Is(topicErr, ErrNotController) { _, _ = ca.refreshController() } return topicErr @@ -477,8 +491,8 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [ return ErrIncompleteResponse } - if topicErr.Err != ErrNoError { - if topicErr.Err == ErrNotController { + if !errors.Is(topicErr.Err, ErrNoError) { + if errors.Is(topicErr.Err, ErrNotController) { _, _ = ca.refreshController() } return topicErr @@ -516,21 +530,20 @@ func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][ errs = append(errs, err) } else { if rsp.ErrorCode > 0 { - errs = append(errs, errors.New(rsp.ErrorCode.Error())) + errs = append(errs, rsp.ErrorCode) } for topic, topicErrors := range rsp.Errors { for partition, partitionError := range topicErrors { - if partitionError.errorCode != ErrNoError { - errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error()) - errs = append(errs, errors.New(errStr)) + if !errors.Is(partitionError.errorCode, ErrNoError) { + errs = append(errs, fmt.Errorf("[%s-%d]: %w", topic, partition, partitionError.errorCode)) } } } } if len(errs) > 0 { - return ErrReassignPartitions{MultiError{&errs}} + return Wrap(ErrReassignPartitions, errs...) 
} return nil @@ -568,45 +581,50 @@ func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]i if topic == "" { return ErrInvalidTopic } + errs := make([]error, 0) partitionPerBroker := make(map[*Broker][]int32) for partition := range partitionOffsets { broker, err := ca.client.Leader(topic, partition) if err != nil { - return err + errs = append(errs, err) + continue } partitionPerBroker[broker] = append(partitionPerBroker[broker], partition) } - errs := make([]error, 0) for broker, partitions := range partitionPerBroker { topics := make(map[string]*DeleteRecordsRequestTopic) recordsToDelete := make(map[int32]int64) for _, p := range partitions { recordsToDelete[p] = partitionOffsets[p] } - topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete} + topics[topic] = &DeleteRecordsRequestTopic{ + PartitionOffsets: recordsToDelete, + } request := &DeleteRecordsRequest{ Topics: topics, Timeout: ca.conf.Admin.Timeout, } - rsp, err := broker.DeleteRecords(request) if err != nil { errs = append(errs, err) - } else { - deleteRecordsResponseTopic, ok := rsp.Topics[topic] - if !ok { - errs = append(errs, ErrIncompleteResponse) - } else { - for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions { - if deleteRecordsResponsePartition.Err != ErrNoError { - errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error())) - } - } + continue + } + + deleteRecordsResponseTopic, ok := rsp.Topics[topic] + if !ok { + errs = append(errs, ErrIncompleteResponse) + continue + } + + for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions { + if !errors.Is(deleteRecordsResponsePartition.Err, ErrNoError) { + errs = append(errs, deleteRecordsResponsePartition.Err) + continue } } } if len(errs) > 0 { - return ErrDeleteRecords{MultiError{&errs}} + return Wrap(ErrDeleteRecords, errs...) 
} // todo since we are dealing with couple of partitions it would be good if we return slice of errors // for each partition instead of one error @@ -731,6 +749,58 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string return nil } +func (ca *clusterAdmin) IncrementalAlterConfig(resourceType ConfigResourceType, name string, entries map[string]IncrementalAlterConfigsEntry, validateOnly bool) error { + var resources []*IncrementalAlterConfigsResource + resources = append(resources, &IncrementalAlterConfigsResource{ + Type: resourceType, + Name: name, + ConfigEntries: entries, + }) + + request := &IncrementalAlterConfigsRequest{ + Resources: resources, + ValidateOnly: validateOnly, + } + + var ( + b *Broker + err error + ) + + // AlterConfig of broker/broker logger must be sent to the broker in question + if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) { + var id int64 + id, err = strconv.ParseInt(name, 10, 32) + if err != nil { + return err + } + b, err = ca.findBroker(int32(id)) + } else { + b, err = ca.findAnyBroker() + } + if err != nil { + return err + } + + _ = b.Open(ca.client.Config()) + rsp, err := b.IncrementalAlterConfigs(request) + if err != nil { + return err + } + + for _, rspResource := range rsp.Resources { + if rspResource.Name == name { + if rspResource.ErrorMsg != "" { + return errors.New(rspResource.ErrorMsg) + } + if rspResource.ErrorCode != 0 { + return KError(rspResource.ErrorCode) + } + } + } + return nil +} + func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error { var acls []*AclCreation acls = append(acls, &AclCreation{resource, acl}) @@ -749,6 +819,28 @@ func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error { return err } +func (ca *clusterAdmin) CreateACLs(resourceACLs []*ResourceAcls) error { + var acls []*AclCreation + for _, resourceACL := range resourceACLs { + for _, acl := range resourceACL.Acls { + acls = append(acls, &AclCreation{resourceACL.Resource, *acl}) + } + } + request := &CreateAclsRequest{AclCreations: acls} + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + + b, err := ca.Controller() + if err != nil { + return err + } + + _, err = b.CreateAcls(request) + return err +} + func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) { request := &DescribeAclsRequest{AclFilter: filter} @@ -813,9 +905,13 @@ func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*Group } for broker, brokerGroups := range groupsPerBroker { - response, err := broker.DescribeGroups(&DescribeGroupsRequest{ + describeReq := &DescribeGroupsRequest{ Groups: brokerGroups, - }) + } + if ca.conf.Version.IsAtLeast(V2_3_0_0) { + describeReq.Version = 4 + } + response, err := broker.DescribeGroups(describeReq) if err != nil { return nil, err } @@ -908,11 +1004,11 @@ func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, pa return err } - if resp.ErrorCode != ErrNoError { + if !errors.Is(resp.ErrorCode, ErrNoError) { return resp.ErrorCode } - if resp.Errors[topic][partition] != ErrNoError { + if !errors.Is(resp.Errors[topic][partition], ErrNoError) { return resp.Errors[topic][partition] } return nil @@ -938,7 +1034,7 @@ func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { return ErrIncompleteResponse } - if groupErr != ErrNoError { + if !errors.Is(groupErr, ErrNoError) { return groupErr } @@ -954,12 +1050,12 @@ func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32 wg := sync.WaitGroup{} 
for _, b := range brokerIds { - wg.Add(1) broker, err := ca.findBroker(b) if err != nil { Logger.Printf("Unable to find broker with ID = %v\n", b) continue } + wg.Add(1) go func(b *Broker, conf *Config) { defer wg.Done() _ = b.Open(conf) // Ensure that broker is opened @@ -1067,10 +1163,10 @@ func (ca *clusterAdmin) DescribeClientQuotas(components []QuotaFilterComponent, return nil, err } - if rsp.ErrorMsg != nil { + if rsp.ErrorMsg != nil && len(*rsp.ErrorMsg) > 0 { return nil, errors.New(*rsp.ErrorMsg) } - if rsp.ErrorCode != ErrNoError { + if !errors.Is(rsp.ErrorCode, ErrNoError) { return nil, rsp.ErrorCode } @@ -1099,10 +1195,31 @@ func (ca *clusterAdmin) AlterClientQuotas(entity []QuotaEntityComponent, op Clie } for _, entry := range rsp.Entries { - if entry.ErrorCode != ErrNoError { + if entry.ErrorMsg != nil && len(*entry.ErrorMsg) > 0 { + return errors.New(*entry.ErrorMsg) + } + if !errors.Is(entry.ErrorCode, ErrNoError) { return entry.ErrorCode } } return nil } + +func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) (*LeaveGroupResponse, error) { + controller, err := ca.client.Coordinator(groupId) + if err != nil { + return nil, err + } + request := &LeaveGroupRequest{ + Version: 3, + GroupId: groupId, + } + for _, instanceId := range groupInstanceIds { + groupInstanceId := instanceId + request.Members = append(request.Members, MemberIdentity{ + GroupInstanceId: &groupInstanceId, + }) + } + return controller.LeaveGroup(request) +} diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go index c807aea7208f..5c23ac77515a 100644 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/Shopify/sarama/async_producer.go @@ -2,12 +2,15 @@ package sarama import ( "encoding/binary" + "errors" "fmt" + "math" "sync" "time" "github.com/eapache/go-resiliency/breaker" "github.com/eapache/queue" + "github.com/rcrowley/go-metrics" ) // AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages @@ -46,65 +49,27 @@ type AsyncProducer interface { // you can set Producer.Return.Errors in your config to false, which prevents // errors to be returned. Errors() <-chan *ProducerError -} -// transactionManager keeps the state necessary to ensure idempotent production -type transactionManager struct { - producerID int64 - producerEpoch int16 - sequenceNumbers map[string]int32 - mutex sync.Mutex -} + // IsTransactional return true when current producer is is transactional. + IsTransactional() bool -const ( - noProducerID = -1 - noProducerEpoch = -1 -) + // TxnStatus return current producer transaction status. + TxnStatus() ProducerTxnStatusFlag -func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) { - key := fmt.Sprintf("%s-%d", topic, partition) - t.mutex.Lock() - defer t.mutex.Unlock() - sequence := t.sequenceNumbers[key] - t.sequenceNumbers[key] = sequence + 1 - return sequence, t.producerEpoch -} + // BeginTxn mark current transaction as ready. + BeginTxn() error -func (t *transactionManager) bumpEpoch() { - t.mutex.Lock() - defer t.mutex.Unlock() - t.producerEpoch++ - for k := range t.sequenceNumbers { - t.sequenceNumbers[k] = 0 - } -} + // CommitTxn commit current transaction. + CommitTxn() error -func (t *transactionManager) getProducerID() (int64, int16) { - t.mutex.Lock() - defer t.mutex.Unlock() - return t.producerID, t.producerEpoch -} + // AbortTxn abort current transaction. 
+ AbortTxn() error -func newTransactionManager(conf *Config, client Client) (*transactionManager, error) { - txnmgr := &transactionManager{ - producerID: noProducerID, - producerEpoch: noProducerEpoch, - } - - if conf.Producer.Idempotent { - initProducerIDResponse, err := client.InitProducerID() - if err != nil { - return nil, err - } - txnmgr.producerID = initProducerIDResponse.ProducerID - txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch - txnmgr.sequenceNumbers = make(map[string]int32) - txnmgr.mutex = sync.Mutex{} + // AddOffsetsToTxn add associated offsets to current transaction. + AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error - Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch) - } - - return txnmgr, nil + // AddMessageToTxn add message offsets to current transaction. + AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error } type asyncProducer struct { @@ -120,6 +85,9 @@ type asyncProducer struct { brokerLock sync.Mutex txnmgr *transactionManager + txLock sync.Mutex + + metricsRegistry metrics.Registry } // NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration. @@ -152,15 +120,16 @@ func newAsyncProducer(client Client) (AsyncProducer, error) { } p := &asyncProducer{ - client: client, - conf: client.Config(), - errors: make(chan *ProducerError), - input: make(chan *ProducerMessage), - successes: make(chan *ProducerMessage), - retries: make(chan *ProducerMessage), - brokers: make(map[*Broker]*brokerProducer), - brokerRefs: make(map[*brokerProducer]int), - txnmgr: txnmgr, + client: client, + conf: client.Config(), + errors: make(chan *ProducerError), + input: make(chan *ProducerMessage), + successes: make(chan *ProducerMessage), + retries: make(chan *ProducerMessage), + brokers: make(map[*Broker]*brokerProducer), + brokerRefs: make(map[*brokerProducer]int), + txnmgr: txnmgr, + metricsRegistry: newCleanupRegistry(client.Config().MetricRegistry), } // launch our singleton dispatchers @@ -173,9 +142,12 @@ func newAsyncProducer(client Client) (AsyncProducer, error) { type flagSet int8 const ( - syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer - fin // final message from partitionProducer to brokerProducer and back - shutdown // start the shutdown process + syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer + fin // final message from partitionProducer to brokerProducer and back + shutdown // start the shutdown process + endtxn // endtxn + committxn // endtxn + aborttxn // endtxn ) // ProducerMessage is the collection of elements passed to the Producer in order to send a message. @@ -230,7 +202,7 @@ type ProducerMessage struct { const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. 
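The transactional surface added to `AsyncProducer` above (`BeginTxn`, `CommitTxn`, `AbortTxn`, `AddOffsetsToTxn`) composes roughly as follows. This is an illustrative sketch only: the transactional ID, topic, and broker address are invented, and the `Producer.Transaction.ID` field is assumed from the transactional-producer configuration that accompanies this API.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_5_0_0
	// Transactions build on idempotent production.
	cfg.Producer.Idempotent = true
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Retry.Max = 5
	cfg.Net.MaxOpenRequests = 1
	cfg.Producer.Transaction.ID = "example-txn" // assumed field, invented ID

	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	// Drain async errors so the producer never blocks on its error channel.
	go func() {
		for prodErr := range producer.Errors() {
			log.Println("produce error:", prodErr)
		}
	}()

	// Records sent between BeginTxn and CommitTxn only become visible to
	// read-committed consumers once the transaction commits.
	if err := producer.BeginTxn(); err != nil {
		log.Fatal(err)
	}
	producer.Input() <- &sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	}
	if err := producer.CommitTxn(); err != nil {
		_ = producer.AbortTxn() // roll back and leave the producer usable again
		log.Fatal(err)
	}
}
```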
-func (m *ProducerMessage) byteSize(version int) int { +func (m *ProducerMessage) ByteSize(version int) int { var size int if version >= 2 { size = maximumRecordOverhead @@ -281,6 +253,97 @@ func (pe ProducerErrors) Error() string { return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe)) } +func (p *asyncProducer) IsTransactional() bool { + return p.txnmgr.isTransactional() +} + +func (p *asyncProducer) AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error { + offsets := make(map[string][]*PartitionOffsetMetadata) + offsets[msg.Topic] = []*PartitionOffsetMetadata{ + { + Partition: msg.Partition, + Offset: msg.Offset + 1, + Metadata: metadata, + }, + } + return p.AddOffsetsToTxn(offsets, groupId) +} + +func (p *asyncProducer) AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error { + p.txLock.Lock() + defer p.txLock.Unlock() + + if !p.IsTransactional() { + DebugLogger.Printf("producer/txnmgr [%s] attempt to call AddOffsetsToTxn on a non-transactional producer\n", p.txnmgr.transactionalID) + return ErrNonTransactedProducer + } + + DebugLogger.Printf("producer/txnmgr [%s] add offsets to transaction\n", p.txnmgr.transactionalID) + return p.txnmgr.addOffsetsToTxn(offsets, groupId) +} + +func (p *asyncProducer) TxnStatus() ProducerTxnStatusFlag { + return p.txnmgr.currentTxnStatus() +} + +func (p *asyncProducer) BeginTxn() error { + p.txLock.Lock() + defer p.txLock.Unlock() + + if !p.IsTransactional() { + DebugLogger.Println("producer/txnmgr attempt to call BeginTxn on a non-transactional producer") + return ErrNonTransactedProducer + } + + return p.txnmgr.transitionTo(ProducerTxnFlagInTransaction, nil) +} + +func (p *asyncProducer) CommitTxn() error { + p.txLock.Lock() + defer p.txLock.Unlock() + + if !p.IsTransactional() { + DebugLogger.Printf("producer/txnmgr [%s] attempt to call CommitTxn on a non-transactional producer\n", p.txnmgr.transactionalID) + return ErrNonTransactedProducer + } + + DebugLogger.Printf("producer/txnmgr [%s] committing transaction\n", p.txnmgr.transactionalID) + err := p.finishTransaction(true) + if err != nil { + return err + } + DebugLogger.Printf("producer/txnmgr [%s] transaction committed\n", p.txnmgr.transactionalID) + return nil +} + +func (p *asyncProducer) AbortTxn() error { + p.txLock.Lock() + defer p.txLock.Unlock() + + if !p.IsTransactional() { + DebugLogger.Printf("producer/txnmgr [%s] attempt to call AbortTxn on a non-transactional producer\n", p.txnmgr.transactionalID) + return ErrNonTransactedProducer + } + DebugLogger.Printf("producer/txnmgr [%s] aborting transaction\n", p.txnmgr.transactionalID) + err := p.finishTransaction(false) + if err != nil { + return err + } + DebugLogger.Printf("producer/txnmgr [%s] transaction aborted\n", p.txnmgr.transactionalID) + return nil +} + +func (p *asyncProducer) finishTransaction(commit bool) error { + p.inFlight.Add(1) + if commit { + p.input <- &ProducerMessage{flags: endtxn | committxn} + } else { + p.input <- &ProducerMessage{flags: endtxn | aborttxn} + } + p.inFlight.Wait() + return p.txnmgr.finishTransaction(commit) +} + func (p *asyncProducer) Errors() <-chan *ProducerError { return p.errors } @@ -334,11 +397,27 @@ func (p *asyncProducer) dispatcher() { continue } + if msg.flags&endtxn != 0 { + var err error + if msg.flags&committxn != 0 { + err = p.txnmgr.transitionTo(ProducerTxnFlagEndTransaction|ProducerTxnFlagCommittingTransaction, nil) + } else { + err = 
p.txnmgr.transitionTo(ProducerTxnFlagEndTransaction|ProducerTxnFlagAbortingTransaction, nil) + } + if err != nil { + Logger.Printf("producer/txnmgr unable to end transaction %s", err) + } + p.inFlight.Done() + continue + } + if msg.flags&shutdown != 0 { shuttingDown = true p.inFlight.Done() continue - } else if msg.retries == 0 { + } + + if msg.retries == 0 { if shuttingDown { // we can't just call returnError here because that decrements the wait group, // which hasn't been incremented yet for this message, and shouldn't be @@ -351,6 +430,13 @@ func (p *asyncProducer) dispatcher() { continue } p.inFlight.Add(1) + // Ignore retried msg, there are already in txn. + // Can't produce new record when transaction is not started. + if p.IsTransactional() && p.txnmgr.currentTxnStatus()&ProducerTxnFlagInTransaction == 0 { + Logger.Printf("attempt to send message when transaction is not started or is in ending state, got %d, expect %d\n", p.txnmgr.currentTxnStatus(), ProducerTxnFlagInTransaction) + p.returnError(msg, ErrTransactionNotReady) + continue + } } for _, interceptor := range p.conf.Producer.Interceptors { @@ -364,7 +450,7 @@ func (p *asyncProducer) dispatcher() { p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11")) continue } - if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes { + if msg.ByteSize(version) > p.conf.Producer.MaxMessageBytes { p.returnError(msg, ErrMessageSizeTooLarge) continue } @@ -603,6 +689,10 @@ func (pp *partitionProducer) dispatch() { msg.hasSequence = true } + if pp.parent.IsTransactional() { + pp.parent.txnmgr.maybeAddPartitionToCurrentTxn(pp.topic, pp.partition) + } + pp.brokerProducer.input <- msg } } @@ -675,6 +765,7 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer { var ( input = make(chan *ProducerMessage) bridge = make(chan *produceSet) + pending = make(chan *brokerProducerResponse) responses = make(chan *brokerProducerResponse) ) @@ -684,7 +775,6 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer { input: input, output: bridge, responses: responses, - stopchan: make(chan struct{}), buffer: newProduceSet(p), currentRetries: make(map[string]map[int32]error), } @@ -692,18 +782,88 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer { // minimal bridge to make the network response `select`able go withRecover(func() { + // Use a wait group to know if we still have in flight requests + var wg sync.WaitGroup + for set := range bridge { request := set.buildRequest() - response, err := broker.Produce(request) + // Count the in flight requests to know when we can close the pending channel safely + wg.Add(1) + // Capture the current set to forward in the callback + sendResponse := func(set *produceSet) ProduceCallback { + return func(response *ProduceResponse, err error) { + // Forward the response to make sure we do not block the responseReceiver + pending <- &brokerProducerResponse{ + set: set, + err: err, + res: response, + } + wg.Done() + } + }(set) + + if p.IsTransactional() { + // Add partition to tx before sending current batch + err := p.txnmgr.publishTxnPartitions() + if err != nil { + // Request failed to be sent + sendResponse(nil, err) + continue + } + } - responses <- &brokerProducerResponse{ - set: set, - err: err, - res: response, + // Use AsyncProduce vs Produce to not block waiting for the response + // so that we can pipeline multiple produce requests and achieve higher throughput, see: + // 
https://kafka.apache.org/protocol#protocol_network + err := broker.AsyncProduce(request, sendResponse) + if err != nil { + // Request failed to be sent + sendResponse(nil, err) + continue + } + // Callback is not called when using NoResponse + if p.conf.Producer.RequiredAcks == NoResponse { + // Provide the expected nil response + sendResponse(nil, nil) + } + } + // Wait for all in flight requests to close the pending channel safely + wg.Wait() + close(pending) + }) + + // In order to avoid a deadlock when closing the broker on network or malformed response error + // we use an intermediate channel to buffer and send pending responses in order + // This is because the AsyncProduce callback inside the bridge is invoked from the broker + // responseReceiver goroutine and closing the broker requires such goroutine to be finished + go withRecover(func() { + buf := queue.New() + for { + if buf.Length() == 0 { + res, ok := <-pending + if !ok { + // We are done forwarding the last pending response + close(responses) + return + } + buf.Add(res) + } + // Send the head pending response or buffer another one + // so that we never block the callback + headRes := buf.Peek().(*brokerProducerResponse) + select { + case res, ok := <-pending: + if !ok { + continue + } + buf.Add(res) + continue + case responses <- headRes: + buf.Remove() + continue } } - close(responses) }) if p.conf.Producer.Retry.Max <= 0 { @@ -729,10 +889,9 @@ type brokerProducer struct { output chan<- *produceSet responses <-chan *brokerProducerResponse abandoned chan struct{} - stopchan chan struct{} buffer *produceSet - timer <-chan time.Time + timer *time.Timer timerFired bool closing error @@ -741,6 +900,7 @@ type brokerProducer struct { func (bp *brokerProducer) run() { var output chan<- *produceSet + var timerChan <-chan time.Time Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID()) for { @@ -780,6 +940,14 @@ func (bp *brokerProducer) run() { continue } + if msg.flags&fin == fin { + // New broker producer that was caught up by the retry loop + bp.parent.retryMessage(msg, ErrShuttingDown) + DebugLogger.Printf("producer/broker/%d state change to [dying-%d] on %s/%d\n", + bp.broker.ID(), msg.retries, msg.Topic, msg.Partition) + continue + } + if bp.buffer.wouldOverflow(msg) { Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) if err := bp.waitForSpace(msg, false); err != nil { @@ -802,20 +970,18 @@ func (bp *brokerProducer) run() { } if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil { - bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency) + bp.timer = time.NewTimer(bp.parent.conf.Producer.Flush.Frequency) + timerChan = bp.timer.C } - case <-bp.timer: + case <-timerChan: bp.timerFired = true case output <- bp.buffer: bp.rollOver() + timerChan = nil case response, ok := <-bp.responses: if ok { bp.handleResponse(response) } - case <-bp.stopchan: - Logger.Printf( - "producer/broker/%d run loop asked to stop\n", bp.broker.ID()) - return } if bp.timerFired || bp.buffer.readyToFlush() { @@ -836,10 +1002,11 @@ func (bp *brokerProducer) shutdown() { } } close(bp.output) + // Drain responses from the bridge goroutine for response := range bp.responses { bp.handleResponse(response) } - close(bp.stopchan) + // No more brokerProducer related goroutine should be running Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID()) } @@ -870,6 +1037,9 @@ func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) } func (bp 
*brokerProducer) rollOver() { + if bp.timer != nil { + bp.timer.Stop() + } bp.timer = nil bp.timerFired = false bp.buffer = newProduceSet(bp.parent) @@ -999,15 +1169,16 @@ func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitio } bp := p.getBrokerProducer(leader) bp.output <- produceSet + p.unrefBrokerProducer(leader, bp) } func (bp *brokerProducer) handleError(sent *produceSet, err error) { - switch err.(type) { - case PacketEncodingError: + var target PacketEncodingError + if errors.As(err, &target) { sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { bp.parent.returnErrors(pSet.msgs, err) }) - default: + } else { Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err) bp.parent.abandonBrokerConnection(bp.broker) _ = bp.broker.Close() @@ -1067,15 +1238,50 @@ func (p *asyncProducer) shutdown() { close(p.retries) close(p.errors) close(p.successes) + + p.metricsRegistry.UnregisterAll() +} + +func (p *asyncProducer) bumpIdempotentProducerEpoch() { + _, epoch := p.txnmgr.getProducerID() + if epoch == math.MaxInt16 { + Logger.Println("producer/txnmanager epoch exhausted, requesting new producer ID") + txnmgr, err := newTransactionManager(p.conf, p.client) + if err != nil { + Logger.Println(err) + return + } + + p.txnmgr = txnmgr + } else { + p.txnmgr.bumpEpoch() + } +} + +func (p *asyncProducer) maybeTransitionToErrorState(err error) error { + if errors.Is(err, ErrClusterAuthorizationFailed) || + errors.Is(err, ErrProducerFenced) || + errors.Is(err, ErrUnsupportedVersion) || + errors.Is(err, ErrTransactionalIDAuthorizationFailed) { + return p.txnmgr.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, err) + } + if p.txnmgr.coordinatorSupportsBumpingEpoch && p.txnmgr.currentTxnStatus()&ProducerTxnFlagEndTransaction == 0 { + p.txnmgr.epochBumpRequired = true + } + return p.txnmgr.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, err) } func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { + if p.IsTransactional() { + _ = p.maybeTransitionToErrorState(err) + } // We need to reset the producer ID epoch if we set a sequence number on it, because the broker // will never see a message with this number, so we can never continue the sequence. - if msg.hasSequence { + if !p.IsTransactional() && msg.hasSequence { Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition) - p.txnmgr.bumpEpoch() + p.bumpIdempotentProducerEpoch() } + msg.clear() pErr := &ProducerError{Msg: msg, Err: err} if p.conf.Producer.Return.Errors { diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go index 9855bf44398d..4594df6f6d64 100644 --- a/vendor/github.com/Shopify/sarama/balance_strategy.go +++ b/vendor/github.com/Shopify/sarama/balance_strategy.go @@ -58,18 +58,28 @@ type BalanceStrategy interface { // -------------------------------------------------------------------- // BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members. 
-// Example with one topic T with six partitions (0..5) and two members (M1, M2): -// M1: {T: [0, 1, 2]} -// M2: {T: [3, 4, 5]} +// This follows the same logic as +// https://kafka.apache.org/31/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html +// +// Example with two topics T1 and T2 with six partitions each (0..5) and two members (M1, M2): +// +// M1: {T1: [0, 1, 2], T2: [0, 1, 2]} +// M2: {T2: [3, 4, 5], T2: [3, 4, 5]} var BalanceStrategyRange = &balanceStrategy{ name: RangeBalanceStrategyName, coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { - step := float64(len(partitions)) / float64(len(memberIDs)) + partitionsPerConsumer := len(partitions) / len(memberIDs) + consumersWithExtraPartition := len(partitions) % len(memberIDs) + + sort.Strings(memberIDs) for i, memberID := range memberIDs { - pos := float64(i) - min := int(math.Floor(pos*step + 0.5)) - max := int(math.Floor((pos+1)*step + 0.5)) + min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i))) + extra := 0 + if i < consumersWithExtraPartition { + extra = 1 + } + max := min + partitionsPerConsumer + extra plan.Add(memberID, topic, partitions[min:max]...) } }, @@ -78,14 +88,15 @@ var BalanceStrategyRange = &balanceStrategy{ // BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments // while maintain a balanced partition distribution. // Example with topic T with six partitions (0..5) and two members (M1, M2): -// M1: {T: [0, 2, 4]} -// M2: {T: [1, 3, 5]} +// +// M1: {T: [0, 2, 4]} +// M2: {T: [1, 3, 5]} // // On reassignment with an additional consumer, you might get an assignment plan like: -// M1: {T: [0, 2]} -// M2: {T: [1, 3]} -// M3: {T: [4, 5]} // +// M1: {T: [0, 2]} +// M2: {T: [1, 3]} +// M3: {T: [4, 5]} var BalanceStrategySticky = &stickyBalanceStrategy{} // -------------------------------------------------------------------- @@ -108,18 +119,27 @@ func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, t } } - // Sort members for each topic - for topic, memberIDs := range mbt { - sort.Sort(&balanceStrategySortable{ - topic: topic, - memberIDs: memberIDs, - }) + // func to sort and de-duplicate a StringSlice + uniq := func(ss sort.StringSlice) []string { + if ss.Len() < 2 { + return ss + } + sort.Sort(ss) + var i, j int + for i = 1; i < ss.Len(); i++ { + if ss[i] == ss[j] { + continue + } + j++ + ss.Swap(i, j) + } + return ss[:j+1] } // Assemble plan plan := make(BalanceStrategyPlan, len(members)) for topic, memberIDs := range mbt { - s.coreFn(plan, memberIDs, topic, topics[topic]) + s.coreFn(plan, uniq(memberIDs), topic, topics[topic]) } return plan, nil } @@ -129,31 +149,6 @@ func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]in return nil, nil } -type balanceStrategySortable struct { - topic string - memberIDs []string -} - -func (p balanceStrategySortable) Len() int { return len(p.memberIDs) } -func (p balanceStrategySortable) Swap(i, j int) { - p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i] -} - -func (p balanceStrategySortable) Less(i, j int) bool { - return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j]) -} - -func balanceStrategyHashValue(vv ...string) uint32 { - h := uint32(2166136261) - for _, s := range vv { - for _, c := range s { - h ^= uint32(c) - h *= 16777619 - } - } - return h -} - type stickyBalanceStrategy struct { movements partitionMovements } 
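The BalanceStrategyRange change earlier in this file replaces the floating-point `step` arithmetic with the integer scheme of Kafka's Java RangeAssignor: each member receives `len(partitions)/len(members)` partitions, and the first `len(partitions)%len(members)` members (in lexicographic order) receive one extra. The following is a minimal standalone sketch of that arithmetic, not part of the patch or the Sarama API; `rangeAssign` is a hypothetical helper shown only to illustrate the computation on a seven-partition, three-member example.

package main

import (
	"fmt"
	"sort"
)

// rangeAssign mirrors the arithmetic of the patched coreFn: contiguous ranges,
// with the first len(partitions)%len(memberIDs) members (sorted) getting one
// extra partition. Hypothetical helper for illustration only.
func rangeAssign(memberIDs []string, partitions []int32) map[string][]int32 {
	perMember := len(partitions) / len(memberIDs)
	withExtra := len(partitions) % len(memberIDs)

	sort.Strings(memberIDs)
	plan := make(map[string][]int32, len(memberIDs))
	for i, member := range memberIDs {
		// start = i*perMember + min(i, withExtra): members before index
		// withExtra shift one slot per extra partition already handed out.
		start := i * perMember
		if i < withExtra {
			start += i
		} else {
			start += withExtra
		}
		extra := 0
		if i < withExtra {
			extra = 1
		}
		plan[member] = partitions[start : start+perMember+extra]
	}
	return plan
}

func main() {
	plan := rangeAssign([]string{"M2", "M1", "M3"}, []int32{0, 1, 2, 3, 4, 5, 6})
	// Expected: M1 -> [0 1 2], M2 -> [3 4], M3 -> [5 6]
	fmt.Println(plan)
}

With seven partitions and three members this yields 3/2/2 rather than the 3/3/1 or uneven splits the old rounding could produce, which is the behavioral point of the patched coreFn.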
@@ -614,9 +609,9 @@ func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscripti // Deserialize topic partition assignment data to aid with creation of a sticky assignment. func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) { userDataV1 := &StickyAssignorUserDataV1{} - if err := decode(userDataBytes, userDataV1); err != nil { + if err := decode(userDataBytes, userDataV1, nil); err != nil { userDataV0 := &StickyAssignorUserDataV0{} - if err := decode(userDataBytes, userDataV0); err != nil { + if err := decode(userDataBytes, userDataV0, nil); err != nil { return nil, err } return userDataV0, nil diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go index ed92a669d850..5815fd567915 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/Shopify/sarama/broker.go @@ -3,8 +3,10 @@ package sarama import ( "crypto/tls" "encoding/binary" + "errors" "fmt" "io" + "math/rand" "net" "sort" "strconv" @@ -28,13 +30,13 @@ type Broker struct { connErr error lock sync.Mutex opened int32 - responses chan responsePromise + responses chan *responsePromise done chan bool - registeredMetrics []string - + metricRegistry metrics.Registry incomingByteRate metrics.Meter requestRate metrics.Meter + fetchRate metrics.Meter requestSize metrics.Histogram requestLatency metrics.Histogram outgoingByteRate metrics.Meter @@ -43,6 +45,7 @@ type Broker struct { requestsInFlight metrics.Counter brokerIncomingByteRate metrics.Meter brokerRequestRate metrics.Meter + brokerFetchRate metrics.Meter brokerRequestSize metrics.Histogram brokerRequestLatency metrics.Histogram brokerOutgoingByteRate metrics.Meter @@ -51,7 +54,8 @@ type Broker struct { brokerRequestsInFlight metrics.Counter brokerThrottleTime metrics.Histogram - kerberosAuthenticator GSSAPIKerberosAuth + kerberosAuthenticator GSSAPIKerberosAuth + clientSessionReauthenticationTimeMs int64 } // SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker @@ -121,10 +125,25 @@ type responsePromise struct { requestTime time.Time correlationID int32 headerVersion int16 + handler func([]byte, error) packets chan []byte errors chan error } +func (p *responsePromise) handle(packets []byte, err error) { + // Use callback when provided + if p.handler != nil { + p.handler(packets, err) + return + } + // Otherwise fallback to using channels + if err != nil { + p.errors <- err + return + } + p.packets <- packets +} + // NewBroker creates and returns a Broker targeting the given host:port address. // This does not attempt to actually connect, you have to call Open() for that. 
func NewBroker(addr string) *Broker { @@ -154,6 +173,8 @@ func (b *Broker) Open(conf *Config) error { b.lock.Lock() + b.metricRegistry = newCleanupRegistry(conf.MetricRegistry) + go withRecover(func() { defer func() { b.lock.Unlock() @@ -188,22 +209,28 @@ func (b *Broker) Open(conf *Config) error { b.conf = conf // Create or reuse the global metrics shared between brokers - b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry) - b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry) - b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry) - b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry) - b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry) - b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry) - b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry) - b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry) + b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", b.metricRegistry) + b.requestRate = metrics.GetOrRegisterMeter("request-rate", b.metricRegistry) + b.fetchRate = metrics.GetOrRegisterMeter("consumer-fetch-rate", b.metricRegistry) + b.requestSize = getOrRegisterHistogram("request-size", b.metricRegistry) + b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", b.metricRegistry) + b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", b.metricRegistry) + b.responseRate = metrics.GetOrRegisterMeter("response-rate", b.metricRegistry) + b.responseSize = getOrRegisterHistogram("response-size", b.metricRegistry) + b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", b.metricRegistry) // Do not gather metrics for seeded broker (only used during bootstrap) because they share // the same id (-1) and are already exposed through the global metrics above if b.id >= 0 && !metrics.UseNilMetrics { b.registerMetrics() } - if conf.Net.SASL.Enable { - b.connErr = b.authenticateViaSASL() + if conf.Net.SASL.Mechanism == SASLTypeOAuth && conf.Net.SASL.Version == SASLHandshakeV0 { + conf.Net.SASL.Version = SASLHandshakeV1 + } + + useSaslV0 := conf.Net.SASL.Version == SASLHandshakeV0 || conf.Net.SASL.Mechanism == SASLTypeGSSAPI + if conf.Net.SASL.Enable && useSaslV0 { + b.connErr = b.authenticateViaSASLv0() if b.connErr != nil { err = b.conn.Close() @@ -219,19 +246,41 @@ func (b *Broker) Open(conf *Config) error { } b.done = make(chan bool) - b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) + b.responses = make(chan *responsePromise, b.conf.Net.MaxOpenRequests-1) + go withRecover(b.responseReceiver) + if conf.Net.SASL.Enable && !useSaslV0 { + b.connErr = b.authenticateViaSASLv1() + if b.connErr != nil { + close(b.responses) + err = b.conn.Close() + if err == nil { + DebugLogger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + return + } + } if b.id >= 0 { DebugLogger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) } else { DebugLogger.Printf("Connected to broker at %s (unregistered)\n", b.addr) } - go withRecover(b.responseReceiver) }) return nil } +func (b *Broker) ResponseSize() int { + b.lock.Lock() + defer b.lock.Unlock() + + return len(b.responses) +} + // Connected returns true if the broker is 
connected and false otherwise. If the broker is not // connected but it had tried to connect, the error from that connection attempt is also returned. func (b *Broker) Connected() (bool, error) { @@ -241,6 +290,24 @@ func (b *Broker) Connected() (bool, error) { return b.conn != nil, b.connErr } +// TLSConnectionState returns the client's TLS connection state. The second return value is false if this is not a tls connection or the connection has not yet been established. +func (b *Broker) TLSConnectionState() (state tls.ConnectionState, ok bool) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.conn == nil { + return state, false + } + conn := b.conn + if bconn, ok := b.conn.(*bufConn); ok { + conn = bconn.Conn + } + if tc, ok := conn.(*tls.Conn); ok { + return tc.ConnectionState(), true + } + return state, false +} + // Close closes the broker resources func (b *Broker) Close() error { b.lock.Lock() @@ -260,7 +327,7 @@ func (b *Broker) Close() error { b.done = nil b.responses = nil - b.unregisterMetrics() + b.metricRegistry.UnregisterAll() if err == nil { DebugLogger.Printf("Closed connection to broker %s\n", b.addr) @@ -342,6 +409,56 @@ func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, e return response, nil } +// ProduceCallback function is called once the produce response has been parsed +// or could not be read. +type ProduceCallback func(*ProduceResponse, error) + +// AsyncProduce sends a produce request and eventually call the provided callback +// with a produce response or an error. +// +// Waiting for the response is generally not blocking on the contrary to using Produce. +// If the maximum number of in flight request configured is reached then +// the request will be blocked till a previous response is received. +// +// When configured with RequiredAcks == NoResponse, the callback will not be invoked. +// If an error is returned because the request could not be sent then the callback +// will not be invoked either. +// +// Make sure not to Close the broker in the callback as it will lead to a deadlock. 
+func (b *Broker) AsyncProduce(request *ProduceRequest, cb ProduceCallback) error { + needAcks := request.RequiredAcks != NoResponse + // Use a nil promise when no acks is required + var promise *responsePromise + + if needAcks { + // Create ProduceResponse early to provide the header version + res := new(ProduceResponse) + promise = &responsePromise{ + headerVersion: res.headerVersion(), + // Packets will be converted to a ProduceResponse in the responseReceiver goroutine + handler: func(packets []byte, err error) { + if err != nil { + // Failed request + cb(nil, err) + return + } + + if err := versionedDecode(packets, res, request.version(), b.metricRegistry); err != nil { + // Malformed response + cb(nil, err) + return + } + + // Wellformed response + b.updateThrottleMetric(res.ThrottleTime) + cb(res, nil) + }, + } + } + + return b.sendWithPromise(request, promise) +} + // Produce returns a produce response or error func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { var ( @@ -354,15 +471,7 @@ func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { } else { response = new(ProduceResponse) err = b.sendAndReceive(request, response) - if response.ThrottleTime != time.Duration(0) { - DebugLogger.Printf( - "producer/broker/%d ProduceResponse throttled %v\n", - b.ID(), response.ThrottleTime) - if b.brokerThrottleTime != nil { - throttleTimeInMs := int64(response.ThrottleTime / time.Millisecond) - b.brokerThrottleTime.Update(throttleTimeInMs) - } - } + b.updateThrottleMetric(response.ThrottleTime) } if err != nil { @@ -374,6 +483,15 @@ func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { // Fetch returns a FetchResponse or error func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { + defer func() { + if b.fetchRate != nil { + b.fetchRate.Mark(1) + } + if b.brokerFetchRate != nil { + b.brokerFetchRate.Mark(1) + } + }() + response := new(FetchResponse) err := b.sendAndReceive(request, response) @@ -590,6 +708,17 @@ func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, er return nil, err } + errs := make([]error, 0) + for _, res := range response.AclCreationResponses { + if !errors.Is(res.Err, ErrNoError) { + errs = append(errs, res.Err) + } + } + + if len(errs) > 0 { + return response, Wrap(ErrCreateACLs, errs...) 
+ } + return response, nil } @@ -608,6 +737,7 @@ func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, er // InitProducerID sends an init producer request and returns a response or error func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) { response := new(InitProducerIDResponse) + response.Version = request.version() err := b.sendAndReceive(request, response) if err != nil { @@ -807,24 +937,60 @@ func (b *Broker) write(buf []byte) (n int, err error) { } func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) { + var promise *responsePromise + if promiseResponse { + // Packets or error will be sent to the following channels + // once the response is received + promise = makeResponsePromise(responseHeaderVersion) + } + + if err := b.sendWithPromise(rb, promise); err != nil { + return nil, err + } + + return promise, nil +} + +func makeResponsePromise(responseHeaderVersion int16) *responsePromise { + promise := &responsePromise{ + headerVersion: responseHeaderVersion, + packets: make(chan []byte), + errors: make(chan error), + } + return promise +} + +func (b *Broker) sendWithPromise(rb protocolBody, promise *responsePromise) error { b.lock.Lock() defer b.lock.Unlock() if b.conn == nil { if b.connErr != nil { - return nil, b.connErr + return b.connErr + } + return ErrNotConnected + } + + if b.clientSessionReauthenticationTimeMs > 0 && currentUnixMilli() > b.clientSessionReauthenticationTimeMs { + err := b.authenticateViaSASLv1() + if err != nil { + return err } - return nil, ErrNotConnected } + return b.sendInternal(rb, promise) +} + +// b.lock must be held by caller +func (b *Broker) sendInternal(rb protocolBody, promise *responsePromise) error { if !b.conf.Version.IsAtLeast(rb.requiredVersion()) { - return nil, ErrUnsupportedVersion + return ErrUnsupportedVersion } req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req, b.conf.MetricRegistry) + buf, err := encode(req, b.metricRegistry) if err != nil { - return nil, err + return err } requestTime := time.Now() @@ -834,20 +1000,21 @@ func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersi b.updateOutgoingCommunicationMetrics(bytes) if err != nil { b.addRequestInFlightMetrics(-1) - return nil, err + return err } b.correlationID++ - if !promiseResponse { + if promise == nil { // Record request latency without the response b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime)) - return nil, nil + return nil } - promise := responsePromise{requestTime, req.correlationID, responseHeaderVersion, make(chan []byte), make(chan error)} + promise.requestTime = requestTime + promise.correlationID = req.correlationID b.responses <- promise - return &promise, nil + return nil } func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { @@ -865,10 +1032,14 @@ func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { return nil } + return b.handleResponsePromise(req, res, promise) +} + +func (b *Broker) handleResponsePromise(req protocolBody, res protocolBody, promise *responsePromise) error { select { case buf := <-promise.packets: - return versionedDecode(buf, res, req.version()) - case err = <-promise.errors: + return versionedDecode(buf, res, req.version(), b.metricRegistry) + case err := <-promise.errors: return err } } @@ -942,7 +1113,7 @@ func (b *Broker) responseReceiver() { // This was previously 
incremented in send() and // we are not calling updateIncomingCommunicationMetrics() b.addRequestInFlightMetrics(-1) - response.errors <- dead + response.handle(nil, dead) continue } @@ -954,16 +1125,16 @@ func (b *Broker) responseReceiver() { if err != nil { b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err - response.errors <- err + response.handle(nil, err) continue } decodedHeader := responseHeader{} - err = versionedDecode(header, &decodedHeader, response.headerVersion) + err = versionedDecode(header, &decodedHeader, response.headerVersion, b.metricRegistry) if err != nil { b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err - response.errors <- err + response.handle(nil, err) continue } if decodedHeader.correlationID != response.correlationID { @@ -971,7 +1142,7 @@ func (b *Broker) responseReceiver() { // TODO if decoded ID < cur ID, discard until we catch up // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} - response.errors <- dead + response.handle(nil, dead) continue } @@ -980,11 +1151,11 @@ func (b *Broker) responseReceiver() { b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) if err != nil { dead = err - response.errors <- err + response.handle(nil, err) continue } - response.packets <- buf + response.handle(buf, nil) } close(b.done) } @@ -998,16 +1169,74 @@ func getHeaderLength(headerVersion int16) int8 { } } -func (b *Broker) authenticateViaSASL() error { +func (b *Broker) authenticateViaSASLv0() error { switch b.conf.Net.SASL.Mechanism { - case SASLTypeOAuth: - return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider) case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: - return b.sendAndReceiveSASLSCRAM() + return b.sendAndReceiveSASLSCRAMv0() case SASLTypeGSSAPI: return b.sendAndReceiveKerberos() default: - return b.sendAndReceiveSASLPlainAuth() + return b.sendAndReceiveSASLPlainAuthV0() + } +} + +func (b *Broker) authenticateViaSASLv1() error { + if b.conf.Net.SASL.Handshake { + handshakeRequest := &SaslHandshakeRequest{Mechanism: string(b.conf.Net.SASL.Mechanism), Version: b.conf.Net.SASL.Version} + handshakeResponse := new(SaslHandshakeResponse) + prom := makeResponsePromise(handshakeResponse.version()) + + handshakeErr := b.sendInternal(handshakeRequest, prom) + if handshakeErr != nil { + Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + return handshakeErr + } + handshakeErr = b.handleResponsePromise(handshakeRequest, handshakeResponse, prom) + if handshakeErr != nil { + Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + return handshakeErr + } + + if !errors.Is(handshakeResponse.Err, ErrNoError) { + return handshakeResponse.Err + } + } + + authSendReceiver := func(authBytes []byte) (*SaslAuthenticateResponse, error) { + authenticateRequest := b.createSaslAuthenticateRequest(authBytes) + authenticateResponse := new(SaslAuthenticateResponse) + prom := makeResponsePromise(authenticateResponse.version()) + authErr := b.sendInternal(authenticateRequest, prom) + if authErr != nil { + Logger.Printf("Error while performing SASL Auth %s\n", b.addr) + return nil, authErr + } + authErr = b.handleResponsePromise(authenticateRequest, authenticateResponse, prom) + if authErr != nil { + Logger.Printf("Error while performing SASL Auth %s\n", b.addr) + return nil, authErr + } + 
+ if !errors.Is(authenticateResponse.Err, ErrNoError) { + var err error = authenticateResponse.Err + if authenticateResponse.ErrorMessage != nil { + err = Wrap(authenticateResponse.Err, errors.New(*authenticateResponse.ErrorMessage)) + } + return nil, err + } + + b.computeSaslSessionLifetime(authenticateResponse) + return authenticateResponse, nil + } + + switch b.conf.Net.SASL.Mechanism { + case SASLTypeOAuth: + provider := b.conf.Net.SASL.TokenProvider + return b.sendAndReceiveSASLOAuth(authSendReceiver, provider) + case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: + return b.sendAndReceiveSASLSCRAMv1(authSendReceiver, b.conf.Net.SASL.SCRAMClientGeneratorFunc()) + default: + return b.sendAndReceiveSASLPlainAuthV1(authSendReceiver) } } @@ -1023,7 +1252,7 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version} req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req, b.conf.MetricRegistry) + buf, err := encode(req, b.metricRegistry) if err != nil { return err } @@ -1060,25 +1289,21 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) res := &SaslHandshakeResponse{} - err = versionedDecode(payload, res, 0) + err = versionedDecode(payload, res, 0, b.metricRegistry) if err != nil { Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error()) return err } - if res.Err != ErrNoError { + if !errors.Is(res.Err, ErrNoError) { Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error()) return res.Err } - DebugLogger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms) + DebugLogger.Print("Completed pre-auth SASL handshake. Available mechanisms: ", res.EnabledMechanisms) return nil } -// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43). -// Kafka 1.x.x onward added a SaslAuthenticate request/response message which -// wraps the SASL flow in the Kafka protocol, which allows for returning -// meaningful errors on authentication failure. // // In SASL Plain, Kafka expects the auth header to be in the following format // Message format (from https://tools.ietf.org/html/rfc4616): @@ -1092,14 +1317,15 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int // SAFE = UTF1 / UTF2 / UTF3 / UTF4 // ;; any UTF-8 encoded Unicode character except NUL // +// + +// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43). +// sendAndReceiveSASLPlainAuthV0 flows the v0 sasl auth NOT wrapped in the kafka protocol +// // With SASL v0 handshake and auth then: // When credentials are valid, Kafka returns a 4 byte array of null characters. // When credentials are invalid, Kafka closes the connection. -// -// With SASL v1 handshake and auth then: -// When credentials are invalid, Kafka replies with a SaslAuthenticate response -// containing an error code and message detailing the authentication failure. 
-func (b *Broker) sendAndReceiveSASLPlainAuth() error { +func (b *Broker) sendAndReceiveSASLPlainAuthV0() error { // default to V0 to allow for backward compatibility when SASL is enabled // but not the handshake if b.conf.Net.SASL.Handshake { @@ -1110,14 +1336,6 @@ func (b *Broker) sendAndReceiveSASLPlainAuth() error { } } - if b.conf.Net.SASL.Version == SASLHandshakeV1 { - return b.sendAndReceiveV1SASLPlainAuth() - } - return b.sendAndReceiveV0SASLPlainAuth() -} - -// sendAndReceiveV0SASLPlainAuth flows the v0 sasl auth NOT wrapped in the kafka protocol -func (b *Broker) sendAndReceiveV0SASLPlainAuth() error { length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) authBytes := make([]byte, length+4) // 4 byte length header + auth data binary.BigEndian.PutUint32(authBytes, uint32(length)) @@ -1148,44 +1366,25 @@ func (b *Broker) sendAndReceiveV0SASLPlainAuth() error { return nil } -// sendAndReceiveV1SASLPlainAuth flows the v1 sasl authentication using the kafka protocol -func (b *Broker) sendAndReceiveV1SASLPlainAuth() error { - correlationID := b.correlationID - - requestTime := time.Now() - - // Will be decremented in updateIncomingCommunicationMetrics (except error) - b.addRequestInFlightMetrics(1) - bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID) - b.updateOutgoingCommunicationMetrics(bytesWritten) - - if err != nil { - b.addRequestInFlightMetrics(-1) - Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) - return err - } - - b.correlationID++ - - bytesRead, err := b.receiveSASLServerResponse(&SaslAuthenticateResponse{}, correlationID) - b.updateIncomingCommunicationMetrics(bytesRead, time.Since(requestTime)) - - // With v1 sasl we get an error message set in the response we can return +// Kafka 1.x.x onward added a SaslAuthenticate request/response message which +// wraps the SASL flow in the Kafka protocol, which allows for returning +// meaningful errors on authentication failure. +func (b *Broker) sendAndReceiveSASLPlainAuthV1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error)) error { + authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password) + _, err := authSendReceiver(authBytes) if err != nil { - Logger.Printf("Error returned from broker during SASL flow %s: %s\n", b.addr, err.Error()) return err } + return err +} - return nil +func currentUnixMilli() int64 { + return time.Now().UnixNano() / int64(time.Millisecond) } // sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255 // https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876 -func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error { - if err := b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil { - return err - } - +func (b *Broker) sendAndReceiveSASLOAuth(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error), provider AccessTokenProvider) error { token, err := provider.Token() if err != nil { return err @@ -1196,59 +1395,19 @@ func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error { return err } - challenged, err := b.sendClientMessage(message) + res, err := authSendReceiver(message) if err != nil { return err } + isChallenge := len(res.SaslAuthBytes) > 0 - if challenged { + if isChallenge { // Abort the token exchange. The broker returns the failure code. 
- _, err = b.sendClientMessage([]byte(`\x01`)) + _, err = authSendReceiver([]byte(`\x01`)) } - return err } -// sendClientMessage sends a SASL/OAUTHBEARER client message and returns true -// if the broker responds with a challenge, in which case the token is -// rejected. -func (b *Broker) sendClientMessage(message []byte) (bool, error) { - requestTime := time.Now() - // Will be decremented in updateIncomingCommunicationMetrics (except error) - b.addRequestInFlightMetrics(1) - correlationID := b.correlationID - - bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID) - b.updateOutgoingCommunicationMetrics(bytesWritten) - if err != nil { - b.addRequestInFlightMetrics(-1) - return false, err - } - - b.correlationID++ - - res := &SaslAuthenticateResponse{} - bytesRead, err := b.receiveSASLServerResponse(res, correlationID) - - requestLatency := time.Since(requestTime) - b.updateIncomingCommunicationMetrics(bytesRead, requestLatency) - - isChallenge := len(res.SaslAuthBytes) > 0 - - if isChallenge && err != nil { - Logger.Printf("Broker rejected authentication token: %s", res.SaslAuthBytes) - } - - return isChallenge, err -} - -func (b *Broker) sendAndReceiveSASLSCRAM() error { - if b.conf.Net.SASL.Version == SASLHandshakeV0 { - return b.sendAndReceiveSASLSCRAMv0() - } - return b.sendAndReceiveSASLSCRAMv1() -} - func (b *Broker) sendAndReceiveSASLSCRAMv0() error { if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV0); err != nil { return err @@ -1256,12 +1415,12 @@ func (b *Broker) sendAndReceiveSASLSCRAMv0() error { scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc() if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil { - return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error()) + return fmt.Errorf("failed to start SCRAM exchange with the server: %w", err) } msg, err := scramClient.Step("") if err != nil { - return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error()) + return fmt.Errorf("failed to advance the SCRAM exchange: %w", err) } for !scramClient.Done() { @@ -1269,7 +1428,7 @@ func (b *Broker) sendAndReceiveSASLSCRAMv0() error { // Will be decremented in updateIncomingCommunicationMetrics (except error) b.addRequestInFlightMetrics(1) length := len(msg) - authBytes := make([]byte, length+4) //4 byte length header + auth data + authBytes := make([]byte, length+4) // 4 byte length header + auth data binary.BigEndian.PutUint32(authBytes, uint32(length)) copy(authBytes[4:], []byte(msg)) _, err := b.write(authBytes) @@ -1306,44 +1465,23 @@ func (b *Broker) sendAndReceiveSASLSCRAMv0() error { return nil } -func (b *Broker) sendAndReceiveSASLSCRAMv1() error { - if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil { - return err - } - - scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc() +func (b *Broker) sendAndReceiveSASLSCRAMv1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error), scramClient SCRAMClient) error { if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil { - return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error()) + return fmt.Errorf("failed to start SCRAM exchange with the server: %w", err) } msg, err := scramClient.Step("") if err != nil { - return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error()) + return fmt.Errorf("failed to 
advance the SCRAM exchange: %w", err) } for !scramClient.Done() { - requestTime := time.Now() - // Will be decremented in updateIncomingCommunicationMetrics (except error) - b.addRequestInFlightMetrics(1) - correlationID := b.correlationID - bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg)) - b.updateOutgoingCommunicationMetrics(bytesWritten) - if err != nil { - b.addRequestInFlightMetrics(-1) - Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) - return err - } - - b.correlationID++ - challenge, err := b.receiveSaslAuthenticateResponse(correlationID) + res, err := authSendReceiver([]byte(msg)) if err != nil { - b.addRequestInFlightMetrics(-1) - Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) return err } - b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime)) - msg, err = scramClient.Step(string(challenge)) + msg, err = scramClient.Step(string(res.SaslAuthBytes)) if err != nil { Logger.Println("SASL authentication failed", err) return err @@ -1351,51 +1489,17 @@ func (b *Broker) sendAndReceiveSASLSCRAMv1() error { } DebugLogger.Println("SASL authentication succeeded") - return nil -} - -func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) { - rb := &SaslAuthenticateRequest{msg} - req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req, b.conf.MetricRegistry) - if err != nil { - return 0, err - } - return b.write(buf) + return nil } -func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) { - buf := make([]byte, responseLengthSize+correlationIDSize) - _, err := b.readFull(buf) - if err != nil { - return nil, err - } - - header := responseHeader{} - err = versionedDecode(buf, &header, 0) - if err != nil { - return nil, err - } - - if header.correlationID != correlationID { - return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID) - } - - buf = make([]byte, header.length-correlationIDSize) - _, err = b.readFull(buf) - if err != nil { - return nil, err +func (b *Broker) createSaslAuthenticateRequest(msg []byte) *SaslAuthenticateRequest { + authenticateRequest := SaslAuthenticateRequest{SaslAuthBytes: msg} + if b.conf.Version.IsAtLeast(V2_2_0_0) { + authenticateRequest.Version = 1 } - res := &SaslAuthenticateResponse{} - if err := versionedDecode(buf, res, 0); err != nil { - return nil, err - } - if res.Err != ErrNoError { - return nil, res.Err - } - return res.SaslAuthBytes, nil + return &authenticateRequest } // Build SASL/OAUTHBEARER initial client response as described by RFC-7628 @@ -1429,64 +1533,21 @@ func mapToString(extensions map[string]string, keyValSep string, elemSep string) return strings.Join(buf, elemSep) } -func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) { - authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password) - rb := &SaslAuthenticateRequest{authBytes} - req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req, b.conf.MetricRegistry) - if err != nil { - return 0, err - } - - return b.write(buf) -} - -func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) { - rb := &SaslAuthenticateRequest{initialResp} - - req := &request{correlationID: correlationID, 
clientID: b.conf.ClientID, body: rb} - - buf, err := encode(req, b.conf.MetricRegistry) - if err != nil { - return 0, err - } - - return b.write(buf) -} - -func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) { - buf := make([]byte, responseLengthSize+correlationIDSize) - bytesRead, err := b.readFull(buf) - if err != nil { - return bytesRead, err - } - - header := responseHeader{} - err = versionedDecode(buf, &header, 0) - if err != nil { - return bytesRead, err - } - - if header.correlationID != correlationID { - return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID) - } - - buf = make([]byte, header.length-correlationIDSize) - c, err := b.readFull(buf) - bytesRead += c - if err != nil { - return bytesRead, err - } - - if err := versionedDecode(buf, res, 0); err != nil { - return bytesRead, err - } - - if res.Err != ErrNoError { - return bytesRead, res.Err +func (b *Broker) computeSaslSessionLifetime(res *SaslAuthenticateResponse) { + if res.SessionLifetimeMs > 0 { + // Follows the Java Kafka implementation from SaslClientAuthenticator.ReauthInfo#setAuthenticationEndAndSessionReauthenticationTimes + // pick a random percentage between 85% and 95% for session re-authentication + positiveSessionLifetimeMs := res.SessionLifetimeMs + authenticationEndMs := currentUnixMilli() + pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount := 0.85 + pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously := 0.10 + pctToUse := pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount + rand.Float64()*pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously + sessionLifetimeMsToUse := int64(float64(positiveSessionLifetimeMs) * pctToUse) + DebugLogger.Printf("Session expiration in %d ms and session re-authentication on or after %d ms", positiveSessionLifetimeMs, sessionLifetimeMsToUse) + b.clientSessionReauthenticationTimeMs = authenticationEndMs + sessionLifetimeMsToUse + } else { + b.clientSessionReauthenticationTimeMs = 0 } - - return bytesRead, nil } func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { @@ -1545,9 +1606,22 @@ func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { } } +func (b *Broker) updateThrottleMetric(throttleTime time.Duration) { + if throttleTime != time.Duration(0) { + DebugLogger.Printf( + "producer/broker/%d ProduceResponse throttled %v\n", + b.ID(), throttleTime) + if b.brokerThrottleTime != nil { + throttleTimeInMs := int64(throttleTime / time.Millisecond) + b.brokerThrottleTime.Update(throttleTimeInMs) + } + } +} + func (b *Broker) registerMetrics() { b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate") b.brokerRequestRate = b.registerMeter("request-rate") + b.brokerFetchRate = b.registerMeter("consumer-fetch-rate") b.brokerRequestSize = b.registerHistogram("request-size") b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms") b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate") @@ -1557,29 +1631,19 @@ func (b *Broker) registerMetrics() { b.brokerThrottleTime = b.registerHistogram("throttle-time-in-ms") } -func (b *Broker) unregisterMetrics() { - for _, name := range b.registeredMetrics { - b.conf.MetricRegistry.Unregister(name) - } - b.registeredMetrics = nil -} - func (b *Broker) registerMeter(name string) metrics.Meter { nameForBroker := getMetricNameForBroker(name, b) - b.registeredMetrics = append(b.registeredMetrics, 
nameForBroker) - return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry) + return metrics.GetOrRegisterMeter(nameForBroker, b.metricRegistry) } func (b *Broker) registerHistogram(name string) metrics.Histogram { nameForBroker := getMetricNameForBroker(name, b) - b.registeredMetrics = append(b.registeredMetrics, nameForBroker) - return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry) + return getOrRegisterHistogram(nameForBroker, b.metricRegistry) } func (b *Broker) registerCounter(name string) metrics.Counter { nameForBroker := getMetricNameForBroker(name, b) - b.registeredMetrics = append(b.registeredMetrics, nameForBroker) - return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry) + return metrics.GetOrRegisterCounter(nameForBroker, b.metricRegistry) } func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config { diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go index 1c69cb8d60d2..b3b7dfbcca7b 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/Shopify/sarama/client.go @@ -1,9 +1,12 @@ package sarama import ( + "errors" + "math" "math/rand" "sort" "sync" + "sync/atomic" "time" ) @@ -85,9 +88,22 @@ type Client interface { // in local cache. This function only works on Kafka 0.8.2 and higher. RefreshCoordinator(consumerGroup string) error + // Coordinator returns the coordinating broker for a transaction id. It will + // return a locally cached value if it's available. You can call + // RefreshCoordinator to update the cached value. This function only works on + // Kafka 0.11.0.0 and higher. + TransactionCoordinator(transactionID string) (*Broker, error) + + // RefreshCoordinator retrieves the coordinator for a transaction id and stores it + // in local cache. This function only works on Kafka 0.11.0.0 and higher. + RefreshTransactionCoordinator(transactionID string) error + // InitProducerID retrieves information required for Idempotent Producer InitProducerID() (*InitProducerIDResponse, error) + // LeastLoadedBroker retrieves broker that has the least responses pending + LeastLoadedBroker() *Broker + // Close shuts down all broker connections managed by this client. It is required // to call this function before a client object passes out of scope, as it will // otherwise leak memory. You must close any Producers or Consumers using a client @@ -112,6 +128,11 @@ const ( ) type client struct { + // updateMetaDataMs stores the time at which metadata was lasted updated. 
+ // Note: this accessed atomically so must be the first word in the struct + // as per golang/go#41970 + updateMetaDataMs int64 + conf *Config closer, closed chan none // for shutting down background metadata updater @@ -121,17 +142,19 @@ type client struct { seedBrokers []*Broker deadSeeds []*Broker - controllerID int32 // cluster controller broker id - brokers map[int32]*Broker // maps broker ids to brokers - metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata - metadataTopics map[string]none // topics that need to collect metadata - coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs + controllerID int32 // cluster controller broker id + brokers map[int32]*Broker // maps broker ids to brokers + metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata + metadataTopics map[string]none // topics that need to collect metadata + coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs + transactionCoordinators map[string]int32 // Maps transaction ids to coordinating broker IDs // If the number of partitions is large, we can get some churn calling cachedPartitions, // so the result is cached. It is important to update this value whenever metadata is changed cachedPartitionsResults map[string][maxPartitionIndex][]int32 lock sync.RWMutex // protects access to the maps that hold cluster state. + } // NewClient creates a new Client. It connects to one of the given broker addresses @@ -161,6 +184,7 @@ func NewClient(addrs []string, conf *Config) (Client, error) { metadataTopics: make(map[string]none), cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), coordinators: make(map[string]int32), + transactionCoordinators: make(map[string]int32), } client.randomizeSeedBrokers(addrs) @@ -168,13 +192,11 @@ func NewClient(addrs []string, conf *Config) (Client, error) { if conf.Metadata.Full { // do an initial fetch of all cluster metadata by specifying an empty list of topics err := client.RefreshMetadata() - switch err { - case nil: - break - case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: + if err == nil { + } else if errors.Is(err, ErrLeaderNotAvailable) || errors.Is(err, ErrReplicaNotAvailable) || errors.Is(err, ErrTopicAuthorizationFailed) || errors.Is(err, ErrClusterAuthorizationFailed) { // indicates that maybe part of the cluster is down, but is not fatal to creating the client Logger.Println(err) - default: + } else { close(client.closed) // we haven't started the background updater yet, so we have to do this manually _ = client.Close() return nil, err @@ -213,24 +235,24 @@ func (client *client) Broker(brokerID int32) (*Broker, error) { } func (client *client) InitProducerID() (*InitProducerIDResponse, error) { - err := ErrOutOfBrokers - for broker := client.any(); broker != nil; broker = client.any() { + brokerErrors := make([]error, 0) + for broker := client.anyBroker(); broker != nil; broker = client.anyBroker() { var response *InitProducerIDResponse req := &InitProducerIDRequest{} - response, err = broker.InitProducerID(req) - switch err.(type) { - case nil: + response, err := broker.InitProducerID(req) + if err == nil { return response, nil - default: + } else { // some error, remove that broker and try again Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err) _ = broker.Close() + brokerErrors = append(brokerErrors, err) 
client.deregisterBroker(broker) } } - return nil, err + return nil, Wrap(ErrOutOfBrokers, brokerErrors...) } func (client *client) Close() error { @@ -373,7 +395,7 @@ func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) return nil, ErrUnknownTopicOrPartition } - if metadata.Err == ErrReplicaNotAvailable { + if errors.Is(metadata.Err, ErrReplicaNotAvailable) { return dupInt32Slice(metadata.Replicas), metadata.Err } return dupInt32Slice(metadata.Replicas), nil @@ -398,7 +420,7 @@ func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, return nil, ErrUnknownTopicOrPartition } - if metadata.Err == ErrReplicaNotAvailable { + if errors.Is(metadata.Err, ErrReplicaNotAvailable) { return dupInt32Slice(metadata.Isr), metadata.Err } return dupInt32Slice(metadata.Isr), nil @@ -423,7 +445,7 @@ func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, return nil, ErrUnknownTopicOrPartition } - if metadata.Err == ErrReplicaNotAvailable { + if errors.Is(metadata.Err, ErrReplicaNotAvailable) { return dupInt32Slice(metadata.OfflineReplicas), metadata.Err } return dupInt32Slice(metadata.OfflineReplicas), nil @@ -460,6 +482,14 @@ func (client *client) RefreshBrokers(addrs []string) error { delete(client.brokers, broker.ID()) } + for _, broker := range client.seedBrokers { + _ = broker.Close() + } + + for _, broker := range client.deadSeeds { + _ = broker.Close() + } + client.seedBrokers = nil client.deadSeeds = nil @@ -534,7 +564,10 @@ func (client *client) Controller() (*Broker, error) { func (client *client) deregisterController() { client.lock.Lock() defer client.lock.Unlock() - delete(client.brokers, client.controllerID) + if controller, ok := client.brokers[client.controllerID]; ok { + _ = controller.Close() + delete(client.brokers, client.controllerID) + } } // RefreshController retrieves the cluster controller from fresh metadata @@ -586,7 +619,7 @@ func (client *client) RefreshCoordinator(consumerGroup string) error { return ErrClosedClient } - response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max) + response, err := client.findCoordinator(consumerGroup, CoordinatorGroup, client.conf.Metadata.Retry.Max) if err != nil { return err } @@ -598,6 +631,45 @@ func (client *client) RefreshCoordinator(consumerGroup string) error { return nil } +func (client *client) TransactionCoordinator(transactionID string) (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + coordinator := client.cachedTransactionCoordinator(transactionID) + + if coordinator == nil { + if err := client.RefreshTransactionCoordinator(transactionID); err != nil { + return nil, err + } + coordinator = client.cachedTransactionCoordinator(transactionID) + } + + if coordinator == nil { + return nil, ErrConsumerCoordinatorNotAvailable + } + + _ = coordinator.Open(client.conf) + return coordinator, nil +} + +func (client *client) RefreshTransactionCoordinator(transactionID string) error { + if client.Closed() { + return ErrClosedClient + } + + response, err := client.findCoordinator(transactionID, CoordinatorTransaction, client.conf.Metadata.Retry.Max) + if err != nil { + return err + } + + client.lock.Lock() + defer client.lock.Unlock() + client.registerBroker(response.Coordinator) + client.transactionCoordinators[transactionID] = response.Coordinator.ID() + return nil +} + // private broker management helpers func (client *client) randomizeSeedBrokers(addrs []string) { @@ -678,7 +750,7 @@ func (client *client) 
resurrectDeadBrokers() { client.deadSeeds = nil } -func (client *client) any() *Broker { +func (client *client) anyBroker() *Broker { client.lock.RLock() defer client.lock.RUnlock() @@ -696,6 +768,30 @@ func (client *client) any() *Broker { return nil } +func (client *client) LeastLoadedBroker() *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + + if len(client.seedBrokers) > 0 { + _ = client.seedBrokers[0].Open(client.conf) + return client.seedBrokers[0] + } + + var leastLoadedBroker *Broker + pendingRequests := math.MaxInt + for _, broker := range client.brokers { + if pendingRequests > broker.ResponseSize() { + pendingRequests = broker.ResponseSize() + leastLoadedBroker = broker + } + } + + if leastLoadedBroker != nil { + _ = leastLoadedBroker.Open(client.conf) + } + return leastLoadedBroker +} + // private caching/lazy metadata helpers type partitionType int @@ -742,7 +838,7 @@ func (client *client) setPartitionCache(topic string, partitionSet partitionType ret := make([]int32, 0, len(partitions)) for _, partition := range partitions { - if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable { + if partitionSet == writablePartitions && errors.Is(partition.Err, ErrLeaderNotAvailable) { continue } ret = append(ret, partition.ID) @@ -760,7 +856,7 @@ func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, er if partitions != nil { metadata, ok := partitions[partitionID] if ok { - if metadata.Err == ErrLeaderNotAvailable { + if errors.Is(metadata.Err, ErrLeaderNotAvailable) { return nil, ErrLeaderNotAvailable } b := client.brokers[metadata.Leader] @@ -798,7 +894,7 @@ func (client *client) getOffset(topic string, partitionID int32, time int64) (in _ = broker.Close() return -1, ErrIncompleteResponse } - if block.Err != ErrNoError { + if !errors.Is(block.Err, ErrNoError) { return -1, block.Err } if len(block.Offsets) != 1 { @@ -867,17 +963,24 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout") return err } - Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) if backoff > 0 { time.Sleep(backoff) } + + t := atomic.LoadInt64(&client.updateMetaDataMs) + if time.Since(time.Unix(t/1e3, 0)) < backoff { + return err + } + Logger.Printf("client/metadata retrying after %dms... 
(%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) + return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline) } return err } - broker := client.any() - for ; broker != nil && !pastDeadline(0); broker = client.any() { + broker := client.anyBroker() + brokerErrors := make([]error, 0) + for ; broker != nil && !pastDeadline(0); broker = client.anyBroker() { allowAutoTopicCreation := client.conf.Metadata.AllowAutoTopicCreation if len(topics) > 0 { DebugLogger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) @@ -892,9 +995,16 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, } else if client.conf.Version.IsAtLeast(V0_10_0_0) { req.Version = 1 } + + t := atomic.LoadInt64(&client.updateMetaDataMs) + if !atomic.CompareAndSwapInt64(&client.updateMetaDataMs, t, time.Now().UnixNano()/int64(time.Millisecond)) { + return nil + } + response, err := broker.GetMetadata(req) - switch err := err.(type) { - case nil: + var kerror KError + var packetEncodingError PacketEncodingError + if err == nil { allKnownMetaData := len(topics) == 0 // valid response, use it shouldRetry, err := client.updateMetadata(response, allKnownMetaData) @@ -903,19 +1013,17 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, return retry(err) // note: err can be nil } return err - - case PacketEncodingError: + } else if errors.As(err, &packetEncodingError) { // didn't even send, return the error return err - - case KError: + } else if errors.As(err, &kerror) { // if SASL auth error return as this _should_ be a non retryable err for all brokers - if err == ErrSASLAuthenticationFailed { + if errors.Is(err, ErrSASLAuthenticationFailed) { Logger.Println("client/metadata failed SASL authentication") return err } - if err == ErrTopicAuthorizationFailed { + if errors.Is(err, ErrTopicAuthorizationFailed) { Logger.Println("client is not authorized to access this topic. The topics were: ", topics) return err } @@ -923,23 +1031,24 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err) _ = broker.Close() client.deregisterBroker(broker) - - default: + } else { // some other error, remove that broker and try again Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err) + brokerErrors = append(brokerErrors, err) _ = broker.Close() client.deregisterBroker(broker) } } + error := Wrap(ErrOutOfBrokers, brokerErrors...) 
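// Editor's illustrative sketch, not part of the vendored patch: because per-broker
// errors are now collected and wrapped around ErrOutOfBrokers, callers can inspect
// metadata failures with the standard errors package instead of comparing error
// values directly. The broker address is a placeholder.
package main

import (
	"errors"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	if err := client.RefreshMetadata(); err != nil {
		if errors.Is(err, sarama.ErrOutOfBrokers) {
			// every broker failed; the wrapped error still carries the per-broker causes
			log.Printf("no broker could serve metadata: %v", err)
			return
		}
		log.Printf("metadata refresh failed: %v", err)
	}
}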
if broker != nil { Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr) - return retry(ErrOutOfBrokers) + return retry(error) } Logger.Println("client/metadata no available broker to send metadata request to") client.resurrectDeadBrokers() - return retry(ErrOutOfBrokers) + return retry(error) } // if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable @@ -996,7 +1105,7 @@ func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bo client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions)) for _, partition := range topic.Partitions { client.metadata[topic.Name][partition.ID] = partition - if partition.Err == ErrLeaderNotAvailable { + if errors.Is(partition.Err, ErrLeaderNotAvailable) { retry = true } } @@ -1019,6 +1128,15 @@ func (client *client) cachedCoordinator(consumerGroup string) *Broker { return nil } +func (client *client) cachedTransactionCoordinator(transactionID string) *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + if coordinatorID, ok := client.transactionCoordinators[transactionID]; ok { + return client.brokers[coordinatorID] + } + return nil +} + func (client *client) cachedController() *Broker { client.lock.RLock() defer client.lock.RUnlock() @@ -1035,45 +1153,49 @@ func (client *client) computeBackoff(attemptsRemaining int) time.Duration { return client.conf.Metadata.Retry.Backoff } -func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) { +func (client *client) findCoordinator(coordinatorKey string, coordinatorType CoordinatorType, attemptsRemaining int) (*FindCoordinatorResponse, error) { retry := func(err error) (*FindCoordinatorResponse, error) { if attemptsRemaining > 0 { backoff := client.computeBackoff(attemptsRemaining) Logger.Printf("client/coordinator retrying after %dms... 
(%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) time.Sleep(backoff) - return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1) + return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining-1) } return nil, err } - for broker := client.any(); broker != nil; broker = client.any() { - DebugLogger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr()) + brokerErrors := make([]error, 0) + for broker := client.anyBroker(); broker != nil; broker = client.anyBroker() { + DebugLogger.Printf("client/coordinator requesting coordinator for %s from %s\n", coordinatorKey, broker.Addr()) request := new(FindCoordinatorRequest) - request.CoordinatorKey = consumerGroup - request.CoordinatorType = CoordinatorGroup + request.CoordinatorKey = coordinatorKey + request.CoordinatorType = coordinatorType + + if client.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 1 + } response, err := broker.FindCoordinator(request) if err != nil { Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err) - switch err.(type) { - case PacketEncodingError: + var packetEncodingError PacketEncodingError + if errors.As(err, &packetEncodingError) { return nil, err - default: + } else { _ = broker.Close() + brokerErrors = append(brokerErrors, err) client.deregisterBroker(broker) continue } } - switch response.Err { - case ErrNoError: - DebugLogger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr()) + if errors.Is(response.Err, ErrNoError) { + DebugLogger.Printf("client/coordinator coordinator for %s is #%d (%s)\n", coordinatorKey, response.Coordinator.ID(), response.Coordinator.Addr()) return response, nil - - case ErrConsumerCoordinatorNotAvailable: - Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup) + } else if errors.Is(response.Err, ErrConsumerCoordinatorNotAvailable) { + Logger.Printf("client/coordinator coordinator for %s is not available\n", coordinatorKey) // This is very ugly, but this scenario will only happen once per cluster. // The __consumer_offsets topic only has to be created one time. @@ -1082,20 +1204,25 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n") time.Sleep(2 * time.Second) } + if coordinatorType == CoordinatorTransaction { + if _, err := client.Leader("__transaction_state", 0); err != nil { + Logger.Printf("client/coordinator the __transaction_state topic is not initialized completely yet. 
Waiting 2 seconds...\n") + time.Sleep(2 * time.Second) + } + } return retry(ErrConsumerCoordinatorNotAvailable) - case ErrGroupAuthorizationFailed: - Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup) + } else if errors.Is(response.Err, ErrGroupAuthorizationFailed) { + Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", coordinatorKey) return retry(ErrGroupAuthorizationFailed) - - default: + } else { return nil, response.Err } } Logger.Println("client/coordinator no available broker to send consumer metadata request to") client.resurrectDeadBrokers() - return retry(ErrOutOfBrokers) + return retry(Wrap(ErrOutOfBrokers, brokerErrors...)) } // nopCloserClient embeds an existing Client, but disables diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go index 12cd7c3d5104..504007a49b14 100644 --- a/vendor/github.com/Shopify/sarama/compress.go +++ b/vendor/github.com/Shopify/sarama/compress.go @@ -7,7 +7,7 @@ import ( "sync" snappy "github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4" + "github.com/pierrec/lz4/v4" ) var ( @@ -187,7 +187,7 @@ func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) { } return buf.Bytes(), nil case CompressionZSTD: - return zstdCompress(nil, data) + return zstdCompress(ZstdEncoderParams{level}, nil, data) default: return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)} } diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go index 3f86f1fb9c5d..b07034434cb8 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/Shopify/sarama/config.go @@ -38,6 +38,9 @@ type Config struct { Net struct { // How many outstanding requests a connection is allowed to have before // sending on it blocks (default 5). + // Throughput can improve but message ordering is not guaranteed if Producer.Idempotent is disabled, see: + // https://kafka.apache.org/protocol#protocol_network + // https://kafka.apache.org/28/documentation.html#producerconfigs_max.in.flight.requests.per.connection MaxOpenRequests int // All three of the below configurations are similar to the @@ -185,6 +188,28 @@ type Config struct { // If enabled, the producer will ensure that exactly one copy of each message is // written. Idempotent bool + // Transaction specify + Transaction struct { + // Used in transactions to identify an instance of a producer through restarts + ID string + // Amount of time a transaction can remain unresolved (neither committed nor aborted) + // default is 1 min + Timeout time.Duration + + Retry struct { + // The total number of times to retry sending a message (default 50). + // Similar to the `message.send.max.retries` setting of the JVM producer. + Max int + // How long to wait for the cluster to settle between retries + // (default 10ms). Similar to the `retry.backoff.ms` setting of the + // JVM producer. + Backoff time.Duration + // Called to compute backoff time dynamically. Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries, maxRetries int) time.Duration + } + } // Return specifies what channels will be populated. If they are set to true, // you must read from the respective channels to prevent deadlock. 
If, @@ -270,7 +295,16 @@ type Config struct { } Rebalance struct { // Strategy for allocating topic partitions to members (default BalanceStrategyRange) + // Deprecated: Strategy exists for historical compatibility + // and should not be used. Please use GroupStrategies. Strategy BalanceStrategy + + // GroupStrategies is the priority-ordered list of client-side consumer group + // balancing strategies that will be offered to the coordinator. The first + // strategy that all group members support will be chosen by the leader. + // default: [BalanceStrategyRange] + GroupStrategies []BalanceStrategy + // The maximum allowed time for each worker to join the group once a rebalance has begun. // This is basically a limit on the amount of time needed for all tasks to flush any pending // data and commit offsets. If the timeout is exceeded, then the worker will be removed from @@ -293,6 +327,17 @@ type Config struct { // coordinator for the group. UserData []byte } + + // support KIP-345 + InstanceId string + + // If true, consumer offsets will be automatically reset to configured Initial value + // if the fetched consumer offset is out of range of available offsets. Out of range + // can happen if the data has been deleted from the server, or during situations of + // under-replication where a replica does not have all the data yet. It can be + // dangerous to reset the offset automatically, particularly in the latter case. Defaults + // to true to maintain existing behavior. + ResetInvalidOffsets bool } Retry struct { @@ -477,10 +522,14 @@ func NewConfig() *Config { c.Producer.Return.Errors = true c.Producer.CompressionLevel = CompressionLevelDefault + c.Producer.Transaction.Timeout = 1 * time.Minute + c.Producer.Transaction.Retry.Max = 50 + c.Producer.Transaction.Retry.Backoff = 100 * time.Millisecond + c.Consumer.Fetch.Min = 1 c.Consumer.Fetch.Default = 1024 * 1024 c.Consumer.Retry.Backoff = 2 * time.Second - c.Consumer.MaxWaitTime = 250 * time.Millisecond + c.Consumer.MaxWaitTime = 500 * time.Millisecond c.Consumer.MaxProcessingTime = 100 * time.Millisecond c.Consumer.Return.Errors = false c.Consumer.Offsets.AutoCommit.Enable = true @@ -490,10 +539,11 @@ func NewConfig() *Config { c.Consumer.Group.Session.Timeout = 10 * time.Second c.Consumer.Group.Heartbeat.Interval = 3 * time.Second - c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange + c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{BalanceStrategyRange} c.Consumer.Group.Rebalance.Timeout = 60 * time.Second c.Consumer.Group.Rebalance.Retry.Max = 4 c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second + c.Consumer.Group.ResetInvalidOffsets = true c.ClientID = defaultClientID c.ChannelBufferSize = 256 @@ -506,6 +556,8 @@ func NewConfig() *Config { // Validate checks a Config instance. It will return a // ConfigurationError if the specified values don't make sense. 
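// Editor's illustrative sketch, not part of the vendored patch: exercising the new
// Config fields introduced above. All values are examples only; Validate() enforces
// the constraints added in this change (a transactional id requires Idempotent, and
// Consumer.Group.InstanceId requires Version >= V2_3_0_0).
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_3_0_0

	// Transactional / idempotent producer settings.
	cfg.Producer.Idempotent = true
	cfg.Producer.Transaction.ID = "example-txn-id"
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Net.MaxOpenRequests = 1 // idempotence limits in-flight requests

	// Priority-ordered balancing strategies replace the deprecated Strategy field.
	cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{
		sarama.BalanceStrategySticky,
		sarama.BalanceStrategyRange,
	}

	// KIP-345 static membership and out-of-range offset handling.
	cfg.Consumer.Group.InstanceId = "example-instance-1"
	cfg.Consumer.Group.ResetInvalidOffsets = true

	if err := cfg.Validate(); err != nil {
		log.Fatal(err)
	}
}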
+// +//nolint:gocyclo // This function's cyclomatic complexity has go beyond 100 func (c *Config) Validate() error { // some configuration values should be warned on but not fail completely, do those first if !c.Net.TLS.Enable && c.Net.TLS.Config != nil { @@ -700,6 +752,10 @@ func (c *Config) Validate() error { } } + if c.Producer.Transaction.ID != "" && !c.Producer.Idempotent { + return ConfigurationError("Transactional producer requires Idempotent to be true") + } + // validate the Consumer values switch { case c.Consumer.Fetch.Min <= 0: @@ -728,6 +784,10 @@ func (c *Config) Validate() error { Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" + " and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored") } + if c.Consumer.Group.Rebalance.Strategy != nil { + Logger.Println("Deprecation warning: Consumer.Group.Rebalance.Strategy exists for historical compatibility" + + " and should not be used. Please use Consumer.Group.Rebalance.GroupStrategies") + } // validate IsolationLevel if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) { @@ -742,8 +802,8 @@ func (c *Config) Validate() error { return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms") case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout: return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout") - case c.Consumer.Group.Rebalance.Strategy == nil: - return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty") + case c.Consumer.Group.Rebalance.Strategy == nil && len(c.Consumer.Group.Rebalance.GroupStrategies) == 0: + return ConfigurationError("Consumer.Group.Rebalance.GroupStrategies or Consumer.Group.Rebalance.Strategy must not be empty") case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond: return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms") case c.Consumer.Group.Rebalance.Retry.Max < 0: @@ -752,6 +812,21 @@ func (c *Config) Validate() error { return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0") } + for _, strategy := range c.Consumer.Group.Rebalance.GroupStrategies { + if strategy == nil { + return ConfigurationError("elements in Consumer.Group.Rebalance.Strategies must not be empty") + } + } + + if c.Consumer.Group.InstanceId != "" { + if !c.Version.IsAtLeast(V2_3_0_0) { + return ConfigurationError("Consumer.Group.InstanceId need Version >= 2.3") + } + if err := validateGroupInstanceId(c.Consumer.Group.InstanceId); err != nil { + return err + } + } + // validate misc shared values switch { case c.ChannelBufferSize < 0: @@ -775,3 +850,23 @@ func (c *Config) getDialer() proxy.Dialer { } } } + +const MAX_GROUP_INSTANCE_ID_LENGTH = 249 + +var GROUP_INSTANCE_ID_REGEXP = regexp.MustCompile(`^[0-9a-zA-Z\._\-]+$`) + +func validateGroupInstanceId(id string) error { + if id == "" { + return ConfigurationError("Group instance id must be non-empty string") + } + if id == "." || id == ".." { + return ConfigurationError(`Group instance id cannot be "." 
or ".."`) + } + if len(id) > MAX_GROUP_INSTANCE_ID_LENGTH { + return ConfigurationError(fmt.Sprintf(`Group instance id cannot be longer than %v, characters: %s`, MAX_GROUP_INSTANCE_ID_LENGTH, id)) + } + if !GROUP_INSTANCE_ID_REGEXP.MatchString(id) { + return ConfigurationError(fmt.Sprintf(`Group instance id %s is illegal, it contains a character other than, '.', '_' and '-'`, id)) + } + return nil +} diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go index 2d76e8a1e8ee..d635dfe81dc4 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/Shopify/sarama/consumer.go @@ -74,13 +74,37 @@ type Consumer interface { // Close shuts down the consumer. It must be called after all child // PartitionConsumers have already been closed. Close() error + + // Pause suspends fetching from the requested partitions. Future calls to the broker will not return any + // records from these partitions until they have been resumed using Resume()/ResumeAll(). + // Note that this method does not affect partition subscription. + // In particular, it does not cause a group rebalance when automatic assignment is used. + Pause(topicPartitions map[string][]int32) + + // Resume resumes specified partitions which have been paused with Pause()/PauseAll(). + // New calls to the broker will return records from these partitions if there are any to be fetched. + Resume(topicPartitions map[string][]int32) + + // Pause suspends fetching from all partitions. Future calls to the broker will not return any + // records from these partitions until they have been resumed using Resume()/ResumeAll(). + // Note that this method does not affect partition subscription. + // In particular, it does not cause a group rebalance when automatic assignment is used. + PauseAll() + + // Resume resumes all partitions which have been paused with Pause()/PauseAll(). + // New calls to the broker will return records from these partitions if there are any to be fetched. 
+ ResumeAll() } +// max time to wait for more partition subscriptions +const partitionConsumersBatchTimeout = 100 * time.Millisecond + type consumer struct { conf *Config children map[string]map[int32]*partitionConsumer brokerConsumers map[*Broker]*brokerConsumer client Client + metricRegistry metrics.Registry lock sync.Mutex } @@ -113,12 +137,14 @@ func newConsumer(client Client) (Consumer, error) { conf: client.Config(), children: make(map[string]map[int32]*partitionConsumer), brokerConsumers: make(map[*Broker]*brokerConsumer), + metricRegistry: newCleanupRegistry(client.Config().MetricRegistry), } return c, nil } func (c *consumer) Close() error { + c.metricRegistry.UnregisterAll() return c.client.Close() } @@ -132,16 +158,17 @@ func (c *consumer) Partitions(topic string) ([]int32, error) { func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { child := &partitionConsumer{ - consumer: c, - conf: c.conf, - topic: topic, - partition: partition, - messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), - errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), - feeder: make(chan *FetchResponse, 1), - trigger: make(chan none, 1), - dying: make(chan none), - fetchSize: c.conf.Consumer.Fetch.Default, + consumer: c, + conf: c.conf, + topic: topic, + partition: partition, + messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), + errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), + feeder: make(chan *FetchResponse, 1), + preferredReadReplica: invalidPreferredReplicaID, + trigger: make(chan none, 1), + dying: make(chan none), + fetchSize: c.conf.Consumer.Fetch.Default, } if err := child.chooseStartingOffset(offset); err != nil { @@ -244,6 +271,62 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { delete(c.brokerConsumers, brokerWorker.broker) } +// Pause implements Consumer. +func (c *consumer) Pause(topicPartitions map[string][]int32) { + c.lock.Lock() + defer c.lock.Unlock() + + for topic, partitions := range topicPartitions { + for _, partition := range partitions { + if topicConsumers, ok := c.children[topic]; ok { + if partitionConsumer, ok := topicConsumers[partition]; ok { + partitionConsumer.Pause() + } + } + } + } +} + +// Resume implements Consumer. +func (c *consumer) Resume(topicPartitions map[string][]int32) { + c.lock.Lock() + defer c.lock.Unlock() + + for topic, partitions := range topicPartitions { + for _, partition := range partitions { + if topicConsumers, ok := c.children[topic]; ok { + if partitionConsumer, ok := topicConsumers[partition]; ok { + partitionConsumer.Resume() + } + } + } + } +} + +// PauseAll implements Consumer. +func (c *consumer) PauseAll() { + c.lock.Lock() + defer c.lock.Unlock() + + for _, partitions := range c.children { + for _, partitionConsumer := range partitions { + partitionConsumer.Pause() + } + } +} + +// ResumeAll implements Consumer. +func (c *consumer) ResumeAll() { + c.lock.Lock() + defer c.lock.Unlock() + + for _, partitions := range c.children { + for _, partitionConsumer := range partitions { + partitionConsumer.Resume() + } + } +} + // PartitionConsumer // PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or @@ -291,6 +374,20 @@ type PartitionConsumer interface { // i.e. the offset that will be used for the next message that will be produced. // You can use this to determine how far behind the processing is. 
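// Editor's illustrative sketch, not part of the vendored patch: pausing and resuming
// fetching with the Consumer-level methods implemented above. Topic, partition and
// broker address are placeholders.
package main

import (
	"log"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("example-topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	// Stop fetching from partition 0 of example-topic without dropping the
	// subscription or triggering a rebalance, then pick it up again later.
	consumer.Pause(map[string][]int32{"example-topic": {0}})
	time.Sleep(time.Second) // placeholder for real backpressure handling
	consumer.Resume(map[string][]int32{"example-topic": {0}})

	for msg := range pc.Messages() {
		log.Printf("offset %d: %s", msg.Offset, string(msg.Value))
	}
}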
HighWaterMarkOffset() int64 + + // Pause suspends fetching from this partition. Future calls to the broker will not return + // any records from these partition until it have been resumed using Resume(). + // Note that this method does not affect partition subscription. + // In particular, it does not cause a group rebalance when automatic assignment is used. + Pause() + + // Resume resumes this partition which have been paused with Pause(). + // New calls to the broker will return records from these partitions if there are any to be fetched. + // If the partition was not previously paused, this method is a no-op. + Resume() + + // IsPaused indicates if this partition consumer is paused or not + IsPaused() bool } type partitionConsumer struct { @@ -313,6 +410,8 @@ type partitionConsumer struct { fetchSize int32 offset int64 retries int32 + + paused int32 } var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing @@ -350,7 +449,6 @@ func (child *partitionConsumer) dispatcher() { child.broker = nil } - Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition) if err := child.dispatch(); err != nil { child.sendError(err) child.trigger <- none{} @@ -371,6 +469,14 @@ func (child *partitionConsumer) preferredBroker() (*Broker, error) { if err == nil { return broker, nil } + Logger.Printf( + "consumer/%s/%d failed to find active broker for preferred read replica %d - will fallback to leader", + child.topic, child.partition, child.preferredReadReplica) + + // if we couldn't find it, discard the replica preference and trigger a + // metadata refresh whilst falling back to consuming from the leader again + child.preferredReadReplica = invalidPreferredReplicaID + _ = child.consumer.client.RefreshMetadata(child.topic) } // if preferred replica cannot be found fallback to leader @@ -399,6 +505,9 @@ func (child *partitionConsumer) chooseStartingOffset(offset int64) error { if err != nil { return err } + + child.highWaterMarkOffset = newestOffset + oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) if err != nil { return err @@ -572,13 +681,9 @@ func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMes } func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { - var ( - metricRegistry = child.conf.MetricRegistry - consumerBatchSizeMetric metrics.Histogram - ) - - if metricRegistry != nil { - consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry) + var consumerBatchSizeMetric metrics.Histogram + if child.consumer != nil && child.consumer.metricRegistry != nil { + consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", child.consumer.metricRegistry) } // If request was throttled and empty we log and return without error @@ -594,7 +699,7 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu return nil, ErrIncompleteResponse } - if block.Err != ErrNoError { + if !errors.Is(block.Err, ErrNoError) { return nil, block.Err } @@ -603,9 +708,13 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu return nil, err } - consumerBatchSizeMetric.Update(int64(nRecs)) + if consumerBatchSizeMetric != nil { + consumerBatchSizeMetric.Update(int64(nRecs)) + } - child.preferredReadReplica = block.PreferredReadReplica + if block.PreferredReadReplica != invalidPreferredReplicaID { + child.preferredReadReplica = block.PreferredReadReplica + } if nRecs == 
0 { partialTrailingMessage, err := block.isPartial() @@ -629,6 +738,10 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu child.fetchSize = child.conf.Consumer.Fetch.Max } } + } else if block.LastRecordsBatchOffset != nil && *block.LastRecordsBatchOffset < block.HighWaterMarkOffset { + // check last record offset to avoid stuck if high watermark was not reached + Logger.Printf("consumer/broker/%d received batch with zero records but high watermark was not reached, topic %s, partition %d, offset %d\n", child.broker.broker.ID(), child.topic, child.partition, *block.LastRecordsBatchOffset) + child.offset = *block.LastRecordsBatchOffset + 1 } return nil, nil @@ -720,13 +833,27 @@ func (child *partitionConsumer) interceptors(msg *ConsumerMessage) { } } +// Pause implements PartitionConsumer. +func (child *partitionConsumer) Pause() { + atomic.StoreInt32(&child.paused, 1) +} + +// Resume implements PartitionConsumer. +func (child *partitionConsumer) Resume() { + atomic.StoreInt32(&child.paused, 0) +} + +// IsPaused implements PartitionConsumer. +func (child *partitionConsumer) IsPaused() bool { + return atomic.LoadInt32(&child.paused) == 1 +} + type brokerConsumer struct { consumer *consumer broker *Broker input chan *partitionConsumer newSubscriptions chan []*partitionConsumer subscriptions map[*partitionConsumer]none - wait chan none acks sync.WaitGroup refs int } @@ -737,7 +864,6 @@ func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { broker: broker, input: make(chan *partitionConsumer), newSubscriptions: make(chan []*partitionConsumer), - wait: make(chan none), subscriptions: make(map[*partitionConsumer]none), refs: 0, } @@ -751,54 +877,56 @@ func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give -// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available, -// so the main goroutine can block waiting for work if it has none. +// it nil if no new subscriptions are available. 
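// Editor's illustrative sketch, not part of the vendored patch: the same pause
// mechanism is also exposed per partition on PartitionConsumer, together with
// IsPaused. Topic, partition and broker address are placeholders.
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("example-topic", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	pc.Pause() // fetches for this partition stop until Resume is called
	log.Printf("paused: %v", pc.IsPaused())
	pc.Resume()
	log.Printf("paused: %v", pc.IsPaused())
}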
func (bc *brokerConsumer) subscriptionManager() { - var buffer []*partitionConsumer + defer close(bc.newSubscriptions) for { - if len(buffer) > 0 { - select { - case event, ok := <-bc.input: - if !ok { - goto done - } - buffer = append(buffer, event) - case bc.newSubscriptions <- buffer: - buffer = nil - case bc.wait <- none{}: + var partitionConsumers []*partitionConsumer + + // Check for any partition consumer asking to subscribe if there aren't + // any, trigger the network request (to fetch Kafka messages) by sending "nil" to the + // newSubscriptions channel + select { + case pc, ok := <-bc.input: + if !ok { + return } - } else { + partitionConsumers = append(partitionConsumers, pc) + case bc.newSubscriptions <- nil: + continue + } + + // drain input of any further incoming subscriptions + timer := time.NewTimer(partitionConsumersBatchTimeout) + for batchComplete := false; !batchComplete; { select { - case event, ok := <-bc.input: - if !ok { - goto done - } - buffer = append(buffer, event) - case bc.newSubscriptions <- nil: + case pc := <-bc.input: + partitionConsumers = append(partitionConsumers, pc) + case <-timer.C: + batchComplete = true } } - } + timer.Stop() + + Logger.Printf( + "consumer/broker/%d accumulated %d new subscriptions\n", + bc.broker.ID(), len(partitionConsumers)) -done: - close(bc.wait) - if len(buffer) > 0 { - bc.newSubscriptions <- buffer + bc.newSubscriptions <- partitionConsumers } - close(bc.newSubscriptions) } // subscriptionConsumer ensures we will get nil right away if no new subscriptions is available +// this is a the main loop that fetches Kafka messages func (bc *brokerConsumer) subscriptionConsumer() { - <-bc.wait // wait for our first piece of work - for newSubscriptions := range bc.newSubscriptions { bc.updateSubscriptions(newSubscriptions) if len(bc.subscriptions) == 0 { // We're about to be shut down or we're about to receive more subscriptions. - // Either way, the signal just hasn't propagated to our goroutine yet. - <-bc.wait + // Take a small nap to avoid burning the CPU. + time.Sleep(partitionConsumersBatchTimeout) continue } @@ -809,8 +937,24 @@ func (bc *brokerConsumer) subscriptionConsumer() { return } + // if there isn't response, it means that not fetch was made + // so we don't need to handle any response + if response == nil { + continue + } + bc.acks.Add(len(bc.subscriptions)) for child := range bc.subscriptions { + if _, ok := response.Blocks[child.topic]; !ok { + bc.acks.Done() + continue + } + + if _, ok := response.Blocks[child.topic][child.partition]; !ok { + bc.acks.Done() + continue + } + child.feeder <- response } bc.acks.Wait() @@ -846,6 +990,9 @@ func (bc *brokerConsumer) handleResponses() { if preferredBroker, err := child.preferredBroker(); err == nil { if bc.broker.ID() != preferredBroker.ID() { // not an error but needs redispatching to consume from preferred replica + Logger.Printf( + "consumer/broker/%d abandoned in favor of preferred replica broker/%d\n", + bc.broker.ID(), preferredBroker.ID()) child.trigger <- none{} delete(bc.subscriptions, child) } @@ -854,27 +1001,26 @@ func (bc *brokerConsumer) handleResponses() { } // Discard any replica preference. 
- child.preferredReadReplica = -1 + child.preferredReadReplica = invalidPreferredReplicaID - switch result { - case errTimedOut: + if errors.Is(result, errTimedOut) { Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", bc.broker.ID(), child.topic, child.partition) delete(bc.subscriptions, child) - case ErrOffsetOutOfRange: + } else if errors.Is(result, ErrOffsetOutOfRange) { // there's no point in retrying this it will just fail the same way again // shut it down and force the user to choose what to do child.sendError(result) Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result) close(child.trigger) delete(bc.subscriptions, child) - case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable: + } else if errors.Is(result, ErrUnknownTopicOrPartition) || errors.Is(result, ErrNotLeaderForPartition) || errors.Is(result, ErrLeaderNotAvailable) || errors.Is(result, ErrReplicaNotAvailable) { // not an error, but does need redispatching Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", bc.broker.ID(), child.topic, child.partition, result) child.trigger <- none{} delete(bc.subscriptions, child) - default: + } else { // dunno, tell the user and try redispatching child.sendError(result) Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", @@ -896,7 +1042,8 @@ func (bc *brokerConsumer) abort(err error) { for newSubscriptions := range bc.newSubscriptions { if len(newSubscriptions) == 0 { - <-bc.wait + // Take a small nap to avoid burning the CPU. + time.Sleep(partitionConsumersBatchTimeout) continue } for _, child := range newSubscriptions { @@ -906,6 +1053,8 @@ func (bc *brokerConsumer) abort(err error) { } } +// fetchResponse can be nil if no fetch is made, it can occur when +// all partitions are paused func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { request := &FetchRequest{ MinBytes: bc.consumer.conf.Consumer.Fetch.Min, @@ -942,7 +1091,14 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { } for child := range bc.subscriptions { - request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) + if !child.IsPaused() { + request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) + } + } + + // avoid to fetch when there is no block + if len(request.blocks) == 0 { + return nil, nil } return bc.broker.Fetch(request) diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go index b603d1705aba..4f0fd66606fc 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group.go +++ b/vendor/github.com/Shopify/sarama/consumer_group.go @@ -54,22 +54,46 @@ type ConsumerGroup interface { // Close stops the ConsumerGroup and detaches any running sessions. It is required to call // this function before the object passes out of scope, as it will otherwise leak memory. Close() error + + // Pause suspends fetching from the requested partitions. Future calls to the broker will not return any + // records from these partitions until they have been resumed using Resume()/ResumeAll(). + // Note that this method does not affect partition subscription. + // In particular, it does not cause a group rebalance when automatic assignment is used. + Pause(partitions map[string][]int32) + + // Resume resumes specified partitions which have been paused with Pause()/PauseAll(). 
+ // New calls to the broker will return records from these partitions if there are any to be fetched. + Resume(partitions map[string][]int32) + + // Pause suspends fetching from all partitions. Future calls to the broker will not return any + // records from these partitions until they have been resumed using Resume()/ResumeAll(). + // Note that this method does not affect partition subscription. + // In particular, it does not cause a group rebalance when automatic assignment is used. + PauseAll() + + // Resume resumes all partitions which have been paused with Pause()/PauseAll(). + // New calls to the broker will return records from these partitions if there are any to be fetched. + ResumeAll() } type consumerGroup struct { client Client - config *Config - consumer Consumer - groupID string - memberID string - errors chan error + config *Config + consumer Consumer + groupID string + groupInstanceId *string + memberID string + errors chan error - lock sync.Mutex - closed chan none - closeOnce sync.Once + lock sync.Mutex + errorsLock sync.RWMutex + closed chan none + closeOnce sync.Once userData []byte + + metricRegistry metrics.Registry } // NewConsumerGroup creates a new consumer group the given broker addresses and configuration. @@ -107,15 +131,20 @@ func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) { return nil, err } - return &consumerGroup{ - client: client, - consumer: consumer, - config: config, - groupID: groupID, - errors: make(chan error, config.ChannelBufferSize), - closed: make(chan none), - userData: config.Consumer.Group.Member.UserData, - }, nil + cg := &consumerGroup{ + client: client, + consumer: consumer, + config: config, + groupID: groupID, + errors: make(chan error, config.ChannelBufferSize), + closed: make(chan none), + userData: config.Consumer.Group.Member.UserData, + metricRegistry: newCleanupRegistry(config.MetricRegistry), + } + if client.Config().Consumer.Group.InstanceId != "" && config.Version.IsAtLeast(V2_3_0_0) { + cg.groupInstanceId = &client.Config().Consumer.Group.InstanceId + } + return cg, nil } // Errors implements ConsumerGroup. @@ -131,10 +160,13 @@ func (c *consumerGroup) Close() (err error) { err = e } - // drain errors go func() { + c.errorsLock.Lock() + defer c.errorsLock.Unlock() close(c.errors) }() + + // drain errors for e := range c.errors { err = e } @@ -142,6 +174,8 @@ func (c *consumerGroup) Close() (err error) { if e := c.client.Close(); e != nil { err = e } + + c.metricRegistry.UnregisterAll() }) return } @@ -170,7 +204,7 @@ func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler Co // Init session sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max) - if err == ErrClosedClient { + if errors.Is(err, ErrClosedClient) { return ErrClosedConsumerGroup } else if err != nil { return err @@ -188,6 +222,26 @@ func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler Co return sess.release(true) } +// Pause implements ConsumerGroup. +func (c *consumerGroup) Pause(partitions map[string][]int32) { + c.consumer.Pause(partitions) +} + +// Resume implements ConsumerGroup. +func (c *consumerGroup) Resume(partitions map[string][]int32) { + c.consumer.Resume(partitions) +} + +// PauseAll implements ConsumerGroup. +func (c *consumerGroup) PauseAll() { + c.consumer.PauseAll() +} + +// ResumeAll implements ConsumerGroup. 
+func (c *consumerGroup) ResumeAll() { + c.consumer.ResumeAll() +} + func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) { select { case <-c.closed: @@ -216,7 +270,7 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler } var ( - metricRegistry = c.config.MetricRegistry + metricRegistry = c.metricRegistry consumerGroupJoinTotal metrics.Counter consumerGroupJoinFailed metrics.Counter consumerGroupSyncTotal metrics.Counter @@ -242,7 +296,7 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler } return nil, err } - if join.Err != ErrNoError { + if !errors.Is(join.Err, ErrNoError) { if consumerGroupJoinFailed != nil { consumerGroupJoinFailed.Inc(1) } @@ -250,41 +304,58 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler switch join.Err { case ErrNoError: c.memberID = join.MemberId - case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately + case ErrUnknownMemberId, ErrIllegalGeneration: + // reset member ID and retry immediately c.memberID = "" return c.newSession(ctx, topics, handler, retries) - case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh + case ErrNotCoordinatorForConsumer, ErrRebalanceInProgress, ErrOffsetsLoadInProgress: + // retry after backoff if retries <= 0 { return nil, join.Err } - return c.retryNewSession(ctx, topics, handler, retries, true) - case ErrRebalanceInProgress: // retry after backoff - if retries <= 0 { - return nil, join.Err + case ErrMemberIdRequired: + // from JoinGroupRequest v4, if client start with empty member id, + // it need to get member id from response and send another join request to join group + c.memberID = join.MemberId + return c.retryNewSession(ctx, topics, handler, retries+1 /*keep retry time*/, false) + case ErrFencedInstancedId: + if c.groupInstanceId != nil { + Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *c.groupInstanceId) } - - return c.retryNewSession(ctx, topics, handler, retries, false) + return nil, join.Err default: return nil, join.Err } + var strategy BalanceStrategy + var ok bool + if strategy = c.config.Consumer.Group.Rebalance.Strategy; strategy == nil { + strategy, ok = c.findStrategy(join.GroupProtocol, c.config.Consumer.Group.Rebalance.GroupStrategies) + if !ok { + // this case shouldn't happen in practice, since the leader will choose the protocol + // that all the members support + return nil, fmt.Errorf("unable to find selected strategy: %s", join.GroupProtocol) + } + } + // Prepare distribution plan if we joined as the leader var plan BalanceStrategyPlan + var members map[string]ConsumerGroupMemberMetadata if join.LeaderId == join.MemberId { - members, err := join.GetMembers() + members, err = join.GetMembers() if err != nil { return nil, err } - plan, err = c.balance(members) + plan, err = c.balance(strategy, members) if err != nil { return nil, err } } // Sync consumer group - groupRequest, err := c.syncGroupRequest(coordinator, plan, join.GenerationId) + syncGroupResponse, err := c.syncGroupRequest(coordinator, members, plan, join.GenerationId, strategy) if consumerGroupSyncTotal != nil { consumerGroupSyncTotal.Inc(1) } @@ -295,37 +366,37 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler } return nil, err } - if groupRequest.Err != ErrNoError { + if !errors.Is(syncGroupResponse.Err, 
ErrNoError) { if consumerGroupSyncFailed != nil { consumerGroupSyncFailed.Inc(1) } } - switch groupRequest.Err { + switch syncGroupResponse.Err { case ErrNoError: - case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately + case ErrUnknownMemberId, ErrIllegalGeneration: + // reset member ID and retry immediately c.memberID = "" return c.newSession(ctx, topics, handler, retries) - case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh + case ErrNotCoordinatorForConsumer, ErrRebalanceInProgress, ErrOffsetsLoadInProgress: + // retry after backoff if retries <= 0 { - return nil, groupRequest.Err + return nil, syncGroupResponse.Err } - return c.retryNewSession(ctx, topics, handler, retries, true) - case ErrRebalanceInProgress: // retry after backoff - if retries <= 0 { - return nil, groupRequest.Err + case ErrFencedInstancedId: + if c.groupInstanceId != nil { + Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *c.groupInstanceId) } - - return c.retryNewSession(ctx, topics, handler, retries, false) + return nil, syncGroupResponse.Err default: - return nil, groupRequest.Err + return nil, syncGroupResponse.Err } // Retrieve and sort claims var claims map[string][]int32 - if len(groupRequest.MemberAssignment) > 0 { - members, err := groupRequest.GetMemberAssignment() + if len(syncGroupResponse.MemberAssignment) > 0 { + members, err := syncGroupResponse.GetMemberAssignment() if err != nil { return nil, err } @@ -359,26 +430,61 @@ func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) ( req.Version = 1 req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond) } + if c.groupInstanceId != nil { + req.Version = 5 + req.GroupInstanceId = c.groupInstanceId + } meta := &ConsumerGroupMemberMetadata{ Topics: topics, UserData: c.userData, } - strategy := c.config.Consumer.Group.Rebalance.Strategy - if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil { - return nil, err + var strategy BalanceStrategy + if strategy = c.config.Consumer.Group.Rebalance.Strategy; strategy != nil { + if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil { + return nil, err + } + } else { + for _, strategy = range c.config.Consumer.Group.Rebalance.GroupStrategies { + if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil { + return nil, err + } + } } return coordinator.JoinGroup(req) } -func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) { +// findStrategy returns the BalanceStrategy with the specified protocolName +// from the slice provided. 
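// Editor's illustrative sketch, not part of the vendored patch: consuming through a
// consumer group with the new priority-ordered GroupStrategies negotiation, where the
// leader picks the first strategy every member supports. Group id, topic and broker
// address are placeholders.
package main

import (
	"context"
	"log"

	"github.com/Shopify/sarama"
)

// exampleHandler is a minimal ConsumerGroupHandler.
type exampleHandler struct{}

func (exampleHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (exampleHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (exampleHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		log.Printf("%s/%d offset %d", msg.Topic, msg.Partition, msg.Offset)
		sess.MarkMessage(msg, "")
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_3_0_0
	cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{
		sarama.BalanceStrategySticky,
		sarama.BalanceStrategyRange,
	}

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	ctx := context.Background()
	for {
		// Consume returns after every rebalance, so it is called in a loop.
		if err := group.Consume(ctx, []string{"example-topic"}, exampleHandler{}); err != nil {
			log.Fatal(err)
		}
	}
}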
+func (c *consumerGroup) findStrategy(name string, groupStrategies []BalanceStrategy) (BalanceStrategy, bool) { + for _, strategy := range groupStrategies { + if strategy.Name() == name { + return strategy, true + } + } + return nil, false +} + +func (c *consumerGroup) syncGroupRequest( + coordinator *Broker, + members map[string]ConsumerGroupMemberMetadata, + plan BalanceStrategyPlan, + generationID int32, + strategy BalanceStrategy, +) (*SyncGroupResponse, error) { req := &SyncGroupRequest{ GroupId: c.groupID, MemberId: c.memberID, GenerationId: generationID, } - strategy := c.config.Consumer.Group.Rebalance.Strategy + + if c.config.Version.IsAtLeast(V2_3_0_0) { + req.Version = 3 + } + if c.groupInstanceId != nil { + req.GroupInstanceId = c.groupInstanceId + } for memberID, topics := range plan { assignment := &ConsumerGroupMemberAssignment{Topics: topics} userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID) @@ -389,7 +495,15 @@ func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrate if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil { return nil, err } + delete(members, memberID) + } + // add empty assignments for any remaining members + for memberID := range members { + if err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{}); err != nil { + return nil, err + } } + return coordinator.SyncGroup(req) } @@ -399,11 +513,15 @@ func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, g MemberId: memberID, GenerationId: generationID, } + if c.groupInstanceId != nil { + req.Version = 3 + req.GroupInstanceId = c.groupInstanceId + } return coordinator.Heartbeat(req) } -func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) { +func (c *consumerGroup) balance(strategy BalanceStrategy, members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) { topics := make(map[string][]int32) for _, meta := range members { for _, topic := range meta.Topics { @@ -419,7 +537,6 @@ func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) topics[topic] = partitions } - strategy := c.config.Consumer.Group.Rebalance.Strategy return strategy.Plan(members, topics) } @@ -436,29 +553,37 @@ func (c *consumerGroup) leave() error { return err } - resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{ - GroupId: c.groupID, - MemberId: c.memberID, - }) - if err != nil { - _ = coordinator.Close() - return err - } + // KIP-345 if groupInstanceId is set, don not leave group when consumer closed. 
+ // Since we do not discover ApiVersion for brokers, LeaveGroupRequest still use the old version request for now + if c.groupInstanceId == nil { + resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + }) + if err != nil { + _ = coordinator.Close() + return err + } - // Unset memberID - c.memberID = "" + // Unset memberID + c.memberID = "" - // Check response - switch resp.Err { - case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError: - return nil - default: - return resp.Err + // Check response + switch resp.Err { + case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError: + return nil + default: + return resp.Err + } + } else { + c.memberID = "" } + return nil } func (c *consumerGroup) handleError(err error, topic string, partition int32) { - if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 { + var consumerError *ConsumerError + if ok := errors.As(err, &consumerError); !ok && topic != "" && partition > -1 { err = &ConsumerError{ Topic: topic, Partition: partition, @@ -471,6 +596,8 @@ func (c *consumerGroup) handleError(err error, topic string, partition int32) { return } + c.errorsLock.RLock() + defer c.errorsLock.RUnlock() select { case <-c.closed: // consumer is closed @@ -486,6 +613,9 @@ func (c *consumerGroup) handleError(err error, topic string, partition int32) { } func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) { + if c.config.Metadata.RefreshFrequency == time.Duration(0) { + return + } pause := time.NewTicker(c.config.Metadata.RefreshFrequency) defer session.cancel() defer pause.Stop() @@ -507,7 +637,9 @@ func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *cons select { case <-pause.C: case <-session.ctx.Done(): - Logger.Printf("loop check partition number coroutine will exit, topics %s", topics) + Logger.Printf( + "consumergroup/%s loop check partition number coroutine will exit, topics %s\n", + c.groupID, topics) // if session closed by other, should be exited return case <-c.closed: @@ -520,7 +652,9 @@ func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int topicToPartitionNum := make(map[string]int, len(topics)) for _, topic := range topics { if partitionNum, err := c.client.Partitions(topic); err != nil { - Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err) + Logger.Printf( + "consumergroup/%s topic %s get partition number failed due to '%v'\n", + c.groupID, topic, err) return nil, err } else { topicToPartitionNum[topic] = len(partitionNum) @@ -593,15 +727,15 @@ type consumerGroupSession struct { } func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) { + // init context + ctx, cancel := context.WithCancel(ctx) + // init offset manager - offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client) + offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client, cancel) if err != nil { return nil, err } - // init context - ctx, cancel := context.WithCancel(ctx) - // init session sess := &consumerGroupSession{ parent: parent, @@ -766,12 +900,21 @@ func (s *consumerGroupSession) release(withCleanup bool) (err error) { <-s.hbDead }) + Logger.Printf( + "consumergroup/session/%s/%d released\n", + s.MemberID(), s.GenerationID()) + return } func (s 
*consumerGroupSession) heartbeatLoop() { defer close(s.hbDead) defer s.cancel() // trigger the end of the session on exit + defer func() { + Logger.Printf( + "consumergroup/session/%s/%d heartbeat loop stopped\n", + s.MemberID(), s.GenerationID()) + }() pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval) defer pause.Stop() @@ -813,7 +956,16 @@ func (s *consumerGroupSession) heartbeatLoop() { switch resp.Err { case ErrNoError: retries = s.parent.config.Metadata.Retry.Max - case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration: + case ErrRebalanceInProgress: + retries = s.parent.config.Metadata.Retry.Max + s.cancel() + case ErrUnknownMemberId, ErrIllegalGeneration: + return + case ErrFencedInstancedId: + if s.parent.groupInstanceId != nil { + Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *s.parent.groupInstanceId) + } + s.parent.handleError(resp.Err, "", -1) return default: s.parent.handleError(resp.Err, "", -1) @@ -883,7 +1035,8 @@ type consumerGroupClaim struct { func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) { pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset) - if err == ErrOffsetOutOfRange { + + if errors.Is(err, ErrOffsetOutOfRange) && sess.parent.config.Consumer.Group.ResetInvalidOffsets { offset = sess.parent.config.Consumer.Offsets.Initial pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset) } diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go index 21b11e944fe5..3b8ca36f60ea 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group_members.go +++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go @@ -1,10 +1,14 @@ package sarama +import "errors" + // ConsumerGroupMemberMetadata holds the metadata for consumer group +// https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolSubscription.json type ConsumerGroupMemberMetadata struct { - Version int16 - Topics []string - UserData []byte + Version int16 + Topics []string + UserData []byte + OwnedPartitions []*OwnedPartition } func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { @@ -33,11 +37,50 @@ func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { if m.UserData, err = pd.getBytes(); err != nil { return } + if m.Version >= 1 { + n, err := pd.getArrayLength() + if err != nil { + // permit missing data here in case of misbehaving 3rd party + // clients who incorrectly marked the member metadata as V1 in + // their JoinGroup request + if errors.Is(err, ErrInsufficientData) { + return nil + } + return err + } + if n == 0 { + return nil + } + m.OwnedPartitions = make([]*OwnedPartition, n) + for i := 0; i < n; i++ { + m.OwnedPartitions[i] = &OwnedPartition{} + if err := m.OwnedPartitions[i].decode(pd); err != nil { + return err + } + } + } + + return nil +} + +type OwnedPartition struct { + Topic string + Partitions []int32 +} + +func (m *OwnedPartition) decode(pd packetDecoder) (err error) { + if m.Topic, err = pd.getString(); err != nil { + return err + } + if m.Partitions, err = pd.getInt32Array(); err != nil { + return err + } return nil } // ConsumerGroupMemberAssignment holds the member assignment for a consume group +// https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolAssignment.json type 
ConsumerGroupMemberAssignment struct { Version int16 Topics map[string][]int32 diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go index 12ce78857bc5..235787f133ec 100644 --- a/vendor/github.com/Shopify/sarama/create_partitions_response.go +++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go @@ -84,6 +84,10 @@ func (t *TopicPartitionError) Error() string { return text } +func (t *TopicPartitionError) Unwrap() error { + return t.Err +} + func (t *TopicPartitionError) encode(pe packetEncoder) error { pe.putInt16(int16(t.Err)) diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go index 7e1448a6692d..6b940bff065a 100644 --- a/vendor/github.com/Shopify/sarama/create_topics_response.go +++ b/vendor/github.com/Shopify/sarama/create_topics_response.go @@ -98,6 +98,10 @@ func (t *TopicError) Error() string { return text } +func (t *TopicError) Unwrap() error { + return t.Err +} + func (t *TopicError) encode(pe packetEncoder, version int16) error { pe.putInt16(int16(t.Err)) diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go index 5565e36cfa29..aa7fb749868b 100644 --- a/vendor/github.com/Shopify/sarama/decompress.go +++ b/vendor/github.com/Shopify/sarama/decompress.go @@ -8,7 +8,7 @@ import ( "sync" snappy "github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4" + "github.com/pierrec/lz4/v4" ) var ( @@ -54,7 +54,7 @@ func decompress(cc CompressionCodec, data []byte) ([]byte, error) { return io.ReadAll(reader) case CompressionZSTD: - return zstdDecompress(nil, data) + return zstdDecompress(ZstdDecoderParams{}, nil, data) default: return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} } diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_response.go b/vendor/github.com/Shopify/sarama/delete_offsets_response.go index 00c3eaf906ed..d59ae0f8c1c7 100644 --- a/vendor/github.com/Shopify/sarama/delete_offsets_response.go +++ b/vendor/github.com/Shopify/sarama/delete_offsets_response.go @@ -5,10 +5,10 @@ import ( ) type DeleteOffsetsResponse struct { - //The top-level error code, or 0 if there was no error. + // The top-level error code, or 0 if there was no error. ErrorCode KError ThrottleTime time.Duration - //The responses for each partition of the topics. + // The responses for each partition of the topics. 
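With Unwrap now defined on TopicError (and TopicPartitionError), callers can match the underlying KError via errors.Is instead of inspecting struct fields. A small, purely illustrative sketch:

package main

import (
	"errors"
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// A CreateTopics/CreatePartitions call can surface these wrapper errors;
	// thanks to Unwrap, errors.Is sees through to the KError inside.
	var err error = &sarama.TopicError{Err: sarama.ErrTopicAlreadyExists}
	if errors.Is(err, sarama.ErrTopicAlreadyExists) {
		fmt.Println("topic already exists; treating as success")
	}
}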
Errors map[string]map[int32]KError } diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go index f8962da58fc0..f81f69ac4b8f 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_request.go +++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go @@ -1,16 +1,33 @@ package sarama type DescribeGroupsRequest struct { - Groups []string + Version int16 + Groups []string + IncludeAuthorizedOperations bool } func (r *DescribeGroupsRequest) encode(pe packetEncoder) error { - return pe.putStringArray(r.Groups) + if err := pe.putStringArray(r.Groups); err != nil { + return err + } + if r.Version >= 3 { + pe.putBool(r.IncludeAuthorizedOperations) + } + return nil } func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version r.Groups, err = pd.getStringArray() - return + if err != nil { + return err + } + if r.Version >= 3 { + if r.IncludeAuthorizedOperations, err = pd.getBool(); err != nil { + return err + } + } + return nil } func (r *DescribeGroupsRequest) key() int16 { @@ -18,7 +35,7 @@ func (r *DescribeGroupsRequest) key() int16 { } func (r *DescribeGroupsRequest) version() int16 { - return 0 + return r.Version } func (r *DescribeGroupsRequest) headerVersion() int16 { @@ -26,6 +43,10 @@ func (r *DescribeGroupsRequest) headerVersion() int16 { } func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1, 2, 3, 4: + return V2_3_0_0 + } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go index bc242e4217dd..09052e4310c9 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go @@ -1,16 +1,26 @@ package sarama type DescribeGroupsResponse struct { + // Version defines the protocol version to use for encode and decode + Version int16 + // ThrottleTimeMs contains the duration in milliseconds for which the + // request was throttled due to a quota violation, or zero if the request + // did not violate any quota. + ThrottleTimeMs int32 + // Groups contains each described group. 
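A usage sketch for the versioned request above: issuing DescribeGroups at v3 against the group coordinator so the response also carries the authorized-operations bitfield. The broker and group name are assumptions for illustration; in practice the ClusterAdmin helper normally drives this call.

package example

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// describeAtV3 assumes broker is an already-connected group coordinator.
func describeAtV3(broker *sarama.Broker, group string) error {
	req := &sarama.DescribeGroupsRequest{
		Version:                     3, // requires brokers >= 2.3.0, per requiredVersion above
		Groups:                      []string{group},
		IncludeAuthorizedOperations: true, // only encoded for Version >= 3
	}
	resp, err := broker.DescribeGroups(req)
	if err != nil {
		return err
	}
	for _, g := range resp.Groups {
		fmt.Printf("group %s state=%s members=%d\n", g.GroupId, g.State, len(g.Members))
	}
	return nil
}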
Groups []*GroupDescription } -func (r *DescribeGroupsResponse) encode(pe packetEncoder) error { +func (r *DescribeGroupsResponse) encode(pe packetEncoder) (err error) { + if r.Version >= 1 { + pe.putInt32(r.ThrottleTimeMs) + } if err := pe.putArrayLength(len(r.Groups)); err != nil { return err } - for _, groupDescription := range r.Groups { - if err := groupDescription.encode(pe); err != nil { + for _, block := range r.Groups { + if err := block.encode(pe, r.Version); err != nil { return err } } @@ -19,18 +29,24 @@ func (r *DescribeGroupsResponse) encode(pe packetEncoder) error { } func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) { - n, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Groups = make([]*GroupDescription, n) - for i := 0; i < n; i++ { - r.Groups[i] = new(GroupDescription) - if err := r.Groups[i].decode(pd); err != nil { + r.Version = version + if r.Version >= 1 { + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { return err } } + if numGroups, err := pd.getArrayLength(); err != nil { + return err + } else if numGroups > 0 { + r.Groups = make([]*GroupDescription, numGroups) + for i := 0; i < numGroups; i++ { + block := &GroupDescription{} + if err := block.decode(pd, r.Version); err != nil { + return err + } + r.Groups[i] = block + } + } return nil } @@ -40,7 +56,7 @@ func (r *DescribeGroupsResponse) key() int16 { } func (r *DescribeGroupsResponse) version() int16 { - return 0 + return r.Version } func (r *DescribeGroupsResponse) headerVersion() int16 { @@ -48,20 +64,39 @@ func (r *DescribeGroupsResponse) headerVersion() int16 { } func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1, 2, 3, 4: + return V2_3_0_0 + } return V0_9_0_0 } +// GroupDescription contains each described group. type GroupDescription struct { - Err KError - GroupId string - State string + // Version defines the protocol version to use for encode and decode + Version int16 + // Err contains the describe error as the KError type. + Err KError + // ErrorCode contains the describe error, or 0 if there was no error. + ErrorCode int16 + // GroupId contains the group ID string. + GroupId string + // State contains the group state string, or the empty string. + State string + // ProtocolType contains the group protocol type, or the empty string. ProtocolType string - Protocol string - Members map[string]*GroupMemberDescription + // Protocol contains the group protocol data, or the empty string. + Protocol string + // Members contains the group members. + Members map[string]*GroupMemberDescription + // AuthorizedOperations contains a 32-bit bitfield to represent authorized + // operations for this group. 
+ AuthorizedOperations int32 } -func (gd *GroupDescription) encode(pe packetEncoder) error { - pe.putInt16(int16(gd.Err)) +func (gd *GroupDescription) encode(pe packetEncoder, version int16) (err error) { + gd.Version = version + pe.putInt16(gd.ErrorCode) if err := pe.putString(gd.GroupId); err != nil { return err @@ -80,56 +115,55 @@ func (gd *GroupDescription) encode(pe packetEncoder) error { return err } - for memberId, groupMemberDescription := range gd.Members { - if err := pe.putString(memberId); err != nil { - return err - } - if err := groupMemberDescription.encode(pe); err != nil { + for _, block := range gd.Members { + if err := block.encode(pe, gd.Version); err != nil { return err } } + if gd.Version >= 3 { + pe.putInt32(gd.AuthorizedOperations) + } + return nil } -func (gd *GroupDescription) decode(pd packetDecoder) (err error) { - kerr, err := pd.getInt16() - if err != nil { +func (gd *GroupDescription) decode(pd packetDecoder, version int16) (err error) { + gd.Version = version + if gd.ErrorCode, err = pd.getInt16(); err != nil { return err } - gd.Err = KError(kerr) + gd.Err = KError(gd.ErrorCode) if gd.GroupId, err = pd.getString(); err != nil { - return + return err } if gd.State, err = pd.getString(); err != nil { - return + return err } if gd.ProtocolType, err = pd.getString(); err != nil { - return + return err } if gd.Protocol, err = pd.getString(); err != nil { - return - } - - n, err := pd.getArrayLength() - if err != nil { return err } - if n == 0 { - return nil - } - gd.Members = make(map[string]*GroupMemberDescription) - for i := 0; i < n; i++ { - memberId, err := pd.getString() - if err != nil { - return err + if numMembers, err := pd.getArrayLength(); err != nil { + return err + } else if numMembers > 0 { + gd.Members = make(map[string]*GroupMemberDescription, numMembers) + for i := 0; i < numMembers; i++ { + block := &GroupMemberDescription{} + if err := block.decode(pd, gd.Version); err != nil { + return err + } + gd.Members[block.MemberId] = block } + } - gd.Members[memberId] = new(GroupMemberDescription) - if err := gd.Members[memberId].decode(pd); err != nil { + if gd.Version >= 3 { + if gd.AuthorizedOperations, err = pd.getInt32(); err != nil { return err } } @@ -137,14 +171,38 @@ func (gd *GroupDescription) decode(pd packetDecoder) (err error) { return nil } +// GroupMemberDescription contains the group members. type GroupMemberDescription struct { - ClientId string - ClientHost string - MemberMetadata []byte + // Version defines the protocol version to use for encode and decode + Version int16 + // MemberId contains the member ID assigned by the group coordinator. + MemberId string + // GroupInstanceId contains the unique identifier of the consumer instance + // provided by end user. + GroupInstanceId *string + // ClientId contains the client ID used in the member's latest join group + // request. + ClientId string + // ClientHost contains the client host. + ClientHost string + // MemberMetadata contains the metadata corresponding to the current group + // protocol in use. + MemberMetadata []byte + // MemberAssignment contains the current assignment provided by the group + // leader. 
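The decoded member map is keyed by MemberId, as shown in the GroupDescription decode above. A hedged sketch of walking it through the higher-level admin API; the broker address and group name are placeholders:

package example

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func listGroupMembers() error {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_3_0_0 // assume brokers >= 2.3.0

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		return err
	}
	defer admin.Close()

	groups, err := admin.DescribeConsumerGroups([]string{"example-group"})
	if err != nil {
		return err
	}
	for _, g := range groups {
		for id, m := range g.Members {
			fmt.Printf("member %s client=%s host=%s\n", id, m.ClientId, m.ClientHost)
		}
	}
	return nil
}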
MemberAssignment []byte } -func (gmd *GroupMemberDescription) encode(pe packetEncoder) error { +func (gmd *GroupMemberDescription) encode(pe packetEncoder, version int16) (err error) { + gmd.Version = version + if err := pe.putString(gmd.MemberId); err != nil { + return err + } + if gmd.Version >= 4 { + if err := pe.putNullableString(gmd.GroupInstanceId); err != nil { + return err + } + } if err := pe.putString(gmd.ClientId); err != nil { return err } @@ -161,31 +219,46 @@ func (gmd *GroupMemberDescription) encode(pe packetEncoder) error { return nil } -func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) { +func (gmd *GroupMemberDescription) decode(pd packetDecoder, version int16) (err error) { + gmd.Version = version + if gmd.MemberId, err = pd.getString(); err != nil { + return err + } + if gmd.Version >= 4 { + if gmd.GroupInstanceId, err = pd.getNullableString(); err != nil { + return err + } + } if gmd.ClientId, err = pd.getString(); err != nil { - return + return err } if gmd.ClientHost, err = pd.getString(); err != nil { - return + return err } if gmd.MemberMetadata, err = pd.getBytes(); err != nil { - return + return err } if gmd.MemberAssignment, err = pd.getBytes(); err != nil { - return + return err } return nil } func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + if len(gmd.MemberAssignment) == 0 { + return nil, nil + } assignment := new(ConsumerGroupMemberAssignment) - err := decode(gmd.MemberAssignment, assignment) + err := decode(gmd.MemberAssignment, assignment, nil) return assignment, err } func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) { + if len(gmd.MemberMetadata) == 0 { + return nil, nil + } metadata := new(ConsumerGroupMemberMetadata) - err := decode(gmd.MemberMetadata, metadata) + err := decode(gmd.MemberMetadata, metadata, nil) return metadata, err } diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml index 05448a884fb3..e2acb38bb9c7 100644 --- a/vendor/github.com/Shopify/sarama/dev.yml +++ b/vendor/github.com/Shopify/sarama/dev.yml @@ -2,7 +2,7 @@ name: sarama up: - go: - version: '1.17.1' + version: '1.17.6' commands: test: diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/Shopify/sarama/docker-compose.yml index 744ed3cf8762..5d3f03894260 100644 --- a/vendor/github.com/Shopify/sarama/docker-compose.yml +++ b/vendor/github.com/Shopify/sarama/docker-compose.yml @@ -1,128 +1,150 @@ version: '3.7' services: zookeeper-1: - image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' + image: 'docker.io/library/zookeeper:3.6.3' restart: always environment: - ZOOKEEPER_SERVER_ID: '1' - ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' - ZOOKEEPER_CLIENT_PORT: '2181' - ZOOKEEPER_PEER_PORT: '2888' - ZOOKEEPER_LEADER_PORT: '3888' - ZOOKEEPER_INIT_LIMIT: '10' - ZOOKEEPER_SYNC_LIMIT: '5' - ZOOKEEPER_MAX_CLIENT_CONNS: '0' + ZOO_MY_ID: '1' + ZOO_SERVERS: 'server.1=zookeeper-1:2888:3888 server.2=zookeeper-2:2888:3888 server.3=zookeeper-3:2888:3888' + ZOO_CFG_EXTRA: 'clientPort=2181 peerPort=2888 leaderPort=3888' + ZOO_INIT_LIMIT: '10' + ZOO_SYNC_LIMIT: '5' + ZOO_MAX_CLIENT_CNXNS: '0' + ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-2: - image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' + image: 'docker.io/library/zookeeper:3.6.3' restart: always environment: - ZOOKEEPER_SERVER_ID: '2' - ZOOKEEPER_SERVERS: 
'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' - ZOOKEEPER_CLIENT_PORT: '2181' - ZOOKEEPER_PEER_PORT: '2888' - ZOOKEEPER_LEADER_PORT: '3888' - ZOOKEEPER_INIT_LIMIT: '10' - ZOOKEEPER_SYNC_LIMIT: '5' - ZOOKEEPER_MAX_CLIENT_CONNS: '0' + ZOO_MY_ID: '2' + ZOO_SERVERS: 'server.1=zookeeper-1:2888:3888 server.2=zookeeper-2:2888:3888 server.3=zookeeper-3:2888:3888' + ZOO_CFG_EXTRA: 'clientPort=2181 peerPort=2888 leaderPort=3888' + ZOO_INIT_LIMIT: '10' + ZOO_SYNC_LIMIT: '5' + ZOO_MAX_CLIENT_CNXNS: '0' + ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-3: - image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' + image: 'docker.io/library/zookeeper:3.6.3' restart: always environment: - ZOOKEEPER_SERVER_ID: '3' - ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' - ZOOKEEPER_CLIENT_PORT: '2181' - ZOOKEEPER_PEER_PORT: '2888' - ZOOKEEPER_LEADER_PORT: '3888' - ZOOKEEPER_INIT_LIMIT: '10' - ZOOKEEPER_SYNC_LIMIT: '5' - ZOOKEEPER_MAX_CLIENT_CONNS: '0' + ZOO_MY_ID: '3' + ZOO_SERVERS: 'server.1=zookeeper-1:2888:3888 server.2=zookeeper-2:2888:3888 server.3=zookeeper-3:2888:3888' + ZOO_CFG_EXTRA: 'clientPort=2181 peerPort=2888 leaderPort=3888' + ZOO_INIT_LIMIT: '10' + ZOO_SYNC_LIMIT: '5' + ZOO_MAX_CLIENT_CNXNS: '0' + ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' kafka-1: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' + image: 'sarama/fv-kafka' + build: + context: . + dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' - KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' - KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' - KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' - KAFKA_DEFAULT_REPLICATION_FACTOR: '2' - KAFKA_BROKER_ID: '1' - KAFKA_BROKER_RACK: '1' - KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' - KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' - KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' - KAFKA_DELETE_TOPIC_ENABLE: 'true' - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_VERSION: ${KAFKA_VERSION:-3.1.2} + KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' + KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' + KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' + KAFKA_CFG_BROKER_ID: '1' + KAFKA_CFG_BROKER_RACK: '1' + KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' + KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' + KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' + KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-2: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' + image: 'sarama/fv-kafka' + build: + context: . 
+ dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' - KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' - KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' - KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' - KAFKA_DEFAULT_REPLICATION_FACTOR: '2' - KAFKA_BROKER_ID: '2' - KAFKA_BROKER_RACK: '2' - KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' - KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' - KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' - KAFKA_DELETE_TOPIC_ENABLE: 'true' - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_VERSION: ${KAFKA_VERSION:-3.1.2} + KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' + KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' + KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' + KAFKA_CFG_BROKER_ID: '2' + KAFKA_CFG_BROKER_RACK: '2' + KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' + KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' + KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' + KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-3: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' + image: 'sarama/fv-kafka' + build: + context: . 
+ dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' - KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' - KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' - KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' - KAFKA_DEFAULT_REPLICATION_FACTOR: '2' - KAFKA_BROKER_ID: '3' - KAFKA_BROKER_RACK: '3' - KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' - KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' - KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' - KAFKA_DELETE_TOPIC_ENABLE: 'true' - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_VERSION: ${KAFKA_VERSION:-3.1.2} + KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' + KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' + KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' + KAFKA_CFG_BROKER_ID: '3' + KAFKA_CFG_BROKER_RACK: '3' + KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' + KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' + KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' + KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-4: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' + image: 'sarama/fv-kafka' + build: + context: . 
+ dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' - KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' - KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' - KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' - KAFKA_DEFAULT_REPLICATION_FACTOR: '2' - KAFKA_BROKER_ID: '4' - KAFKA_BROKER_RACK: '4' - KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' - KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' - KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' - KAFKA_DELETE_TOPIC_ENABLE: 'true' - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_VERSION: ${KAFKA_VERSION:-3.1.2} + KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' + KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' + KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' + KAFKA_CFG_BROKER_ID: '4' + KAFKA_CFG_BROKER_RACK: '4' + KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' + KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' + KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' + KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-5: - image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.2.0}' + image: 'sarama/fv-kafka' + build: + context: . 
+ dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' - KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' - KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' - KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' - KAFKA_DEFAULT_REPLICATION_FACTOR: '2' - KAFKA_BROKER_ID: '5' - KAFKA_BROKER_RACK: '5' - KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' - KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' - KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' - KAFKA_DELETE_TOPIC_ENABLE: 'true' - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_VERSION: ${KAFKA_VERSION:-3.1.2} + KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' + KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' + KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' + KAFKA_CFG_BROKER_ID: '5' + KAFKA_CFG_BROKER_RACK: '5' + KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000' + KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000' + KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' + KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' toxiproxy: - image: 'ghcr.io/shopify/toxiproxy:2.1.5' + image: 'ghcr.io/shopify/toxiproxy:2.4.0' ports: # The tests themselves actually start the proxies on these ports - '29091:29091' diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go index dab54f88cc78..4ee76c6d725d 100644 --- a/vendor/github.com/Shopify/sarama/encoder_decoder.go +++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go @@ -57,12 +57,15 @@ type versionedDecoder interface { // decode takes bytes and a decoder and fills the fields of the decoder from the bytes, // interpreted using Kafka's encoding rules. 
-func decode(buf []byte, in decoder) error { +func decode(buf []byte, in decoder, metricRegistry metrics.Registry) error { if buf == nil { return nil } - helper := realDecoder{raw: buf} + helper := realDecoder{ + raw: buf, + registry: metricRegistry, + } err := in.decode(&helper) if err != nil { return err @@ -75,19 +78,24 @@ func decode(buf []byte, in decoder) error { return nil } -func versionedDecode(buf []byte, in versionedDecoder, version int16) error { +func versionedDecode(buf []byte, in versionedDecoder, version int16, metricRegistry metrics.Registry) error { if buf == nil { return nil } - helper := realDecoder{raw: buf} + helper := realDecoder{ + raw: buf, + registry: metricRegistry, + } err := in.decode(&helper, version) if err != nil { return err } if helper.off != len(buf) { - return PacketDecodingError{"invalid length"} + return PacketDecodingError{ + Info: fmt.Sprintf("invalid length (off=%d, len=%d)", helper.off, len(buf)), + } } return nil diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go index 763976726ccb..dd2a045048b4 100644 --- a/vendor/github.com/Shopify/sarama/end_txn_response.go +++ b/vendor/github.com/Shopify/sarama/end_txn_response.go @@ -32,7 +32,7 @@ func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) { } func (e *EndTxnResponse) key() int16 { - return 25 + return 26 } func (e *EndTxnResponse) version() int16 { diff --git a/vendor/github.com/Shopify/sarama/entrypoint.sh b/vendor/github.com/Shopify/sarama/entrypoint.sh new file mode 100644 index 000000000000..cbcbcfc58898 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/entrypoint.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +KAFKA_VERSION="${KAFKA_VERSION:-3.1.2}" +KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}" + +if [ ! -d "${KAFKA_HOME}" ]; then + echo 'Error: KAFKA_VERSION '$KAFKA_VERSION' not available in this image at '$KAFKA_HOME + exit 1 +fi + +cd "${KAFKA_HOME}" || exit 1 + +# discard all empty/commented lines +sed -e '/^#/d' -e '/^$/d' -i"" config/server.properties + +# emulate kafka_configure_from_environment_variables from bitnami/bitnami-docker-kafka +for var in "${!KAFKA_CFG_@}"; do + key="$(echo "$var" | sed -e 's/^KAFKA_CFG_//g' -e 's/_/\./g' -e 's/.*/\L&/')" + sed -e '/^'$key'/d' -i"" config/server.properties + value="${!var}" + echo "$key=$value" >>config/server.properties +done + +sort config/server.properties + +exec bin/kafka-server-start.sh config/server.properties diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go index 0fca0a30eabd..27977f166290 100644 --- a/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/Shopify/sarama/errors.go @@ -3,11 +3,14 @@ package sarama import ( "errors" "fmt" + "strings" + + "github.com/hashicorp/go-multierror" ) // ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored // or otherwise failed to respond. -var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)") +var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to") // ErrBrokerNotFound is the error returned when there's no broker found for the requested ID. 
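The new entrypoint.sh above turns every KAFKA_CFG_* environment variable into a server.properties entry by stripping the prefix, swapping underscores for dots, and lowercasing. A rough Go rendering of that same name mapping, for illustration only:

package example

import "strings"

// propertyKey mirrors the sed pipeline in entrypoint.sh, e.g.
// KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE -> auto.create.topics.enable
func propertyKey(envName string) string {
	key := strings.TrimPrefix(envName, "KAFKA_CFG_")
	key = strings.ReplaceAll(key, "_", ".")
	return strings.ToLower(key)
}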
var ErrBrokerNotFound = errors.New("kafka: broker for ID is not found") @@ -55,6 +58,87 @@ var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to updat // ErrUnknownScramMechanism is returned when user tries to AlterUserScramCredentials with unknown SCRAM mechanism var ErrUnknownScramMechanism = errors.New("kafka: unknown SCRAM mechanism provided") +// ErrReassignPartitions is returned when altering partition assignments for a topic fails +var ErrReassignPartitions = errors.New("failed to reassign partitions for topic") + +// ErrDeleteRecords is the type of error returned when fail to delete the required records +var ErrDeleteRecords = errors.New("kafka server: failed to delete records") + +// ErrCreateACLs is the type of error returned when ACL creation failed +var ErrCreateACLs = errors.New("kafka server: failed to create one or more ACL rules") + +// ErrAddPartitionsToTxn is returned when AddPartitionsToTxn failed multiple times +var ErrAddPartitionsToTxn = errors.New("transaction manager: failed to send partitions to transaction") + +// ErrTxnOffsetCommit is returned when TxnOffsetCommit failed multiple times +var ErrTxnOffsetCommit = errors.New("transaction manager: failed to send offsets to transaction") + +// ErrTransactionNotReady when transaction status is invalid for the current action. +var ErrTransactionNotReady = errors.New("transaction manager: transaction is not ready") + +// ErrNonTransactedProducer when calling BeginTxn, CommitTxn or AbortTxn on a non transactional producer. +var ErrNonTransactedProducer = errors.New("transaction manager: you need to add TransactionalID to producer") + +// ErrTransitionNotAllowed when txnmgr state transiion is not valid. +var ErrTransitionNotAllowed = errors.New("transaction manager: invalid transition attempted") + +// ErrCannotTransitionNilError when transition is attempted with an nil error. +var ErrCannotTransitionNilError = errors.New("transaction manager: cannot transition with a nil error") + +// ErrTxnUnableToParseResponse when response is nil +var ErrTxnUnableToParseResponse = errors.New("transaction manager: unable to parse response") + +// MultiErrorFormat specifies the formatter applied to format multierrors. The +// default implementation is a consensed version of the hashicorp/go-multierror +// default one +var MultiErrorFormat multierror.ErrorFormatFunc = func(es []error) string { + if len(es) == 1 { + return es[0].Error() + } + + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d errors occurred:\n\t%s\n", + len(es), strings.Join(points, "\n\t")) +} + +type sentinelError struct { + sentinel error + wrapped error +} + +func (err sentinelError) Error() string { + if err.wrapped != nil { + return fmt.Sprintf("%s: %v", err.sentinel, err.wrapped) + } else { + return fmt.Sprintf("%s", err.sentinel) + } +} + +func (err sentinelError) Is(target error) bool { + return errors.Is(err.sentinel, target) || errors.Is(err.wrapped, target) +} + +func (err sentinelError) Unwrap() error { + return err.wrapped +} + +func Wrap(sentinel error, wrapped ...error) sentinelError { + return sentinelError{sentinel: sentinel, wrapped: multiError(wrapped...)} +} + +func multiError(wrapped ...error) error { + merr := multierror.Append(nil, wrapped...) + if MultiErrorFormat != nil { + merr.ErrorFormat = MultiErrorFormat + } + return merr.ErrorOrNil() +} + // PacketEncodingError is returned from a failure while encoding a Kafka packet. 
This can happen, for example, // if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. type PacketEncodingError struct { @@ -87,44 +171,6 @@ func (err ConfigurationError) Error() string { // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes type KError int16 -// MultiError is used to contain multi error. -type MultiError struct { - Errors *[]error -} - -func (mErr MultiError) Error() string { - errString := "" - for _, err := range *mErr.Errors { - errString += err.Error() + "," - } - return errString -} - -func (mErr MultiError) PrettyError() string { - errString := "" - for _, err := range *mErr.Errors { - errString += err.Error() + "\n" - } - return errString -} - -// ErrDeleteRecords is the type of error returned when fail to delete the required records -type ErrDeleteRecords struct { - MultiError -} - -func (err ErrDeleteRecords) Error() string { - return "kafka server: failed to delete records " + err.MultiError.Error() -} - -type ErrReassignPartitions struct { - MultiError -} - -func (err ErrReassignPartitions) Error() string { - return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError()) -} - // Numeric error codes returned by the Kafka server. const ( ErrNoError KError = 0 @@ -217,6 +263,8 @@ const ( ErrGroupSubscribedToTopic KError = 86 ErrInvalidRecord KError = 87 ErrUnstableOffsetCommit KError = 88 + ErrThrottlingQuotaExceeded KError = 89 + ErrProducerFenced KError = 90 ) func (err KError) Error() string { @@ -226,159 +274,159 @@ func (err KError) Error() string { case ErrNoError: return "kafka server: Not an error, why are you printing me?" case ErrUnknown: - return "kafka server: Unexpected (unknown?) server error." + return "kafka server: Unexpected (unknown?) server error" case ErrOffsetOutOfRange: - return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition." + return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition" case ErrInvalidMessage: - return "kafka server: Message contents does not match its CRC." + return "kafka server: Message contents does not match its CRC" case ErrUnknownTopicOrPartition: - return "kafka server: Request was for a topic or partition that does not exist on this broker." + return "kafka server: Request was for a topic or partition that does not exist on this broker" case ErrInvalidMessageSize: - return "kafka server: The message has a negative size." + return "kafka server: The message has a negative size" case ErrLeaderNotAvailable: - return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes." + return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes" case ErrNotLeaderForPartition: - return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date." + return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date" case ErrRequestTimedOut: - return "kafka server: Request exceeded the user-specified time limit in the request." 
+ return "kafka server: Request exceeded the user-specified time limit in the request" case ErrBrokerNotAvailable: return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!" case ErrReplicaNotAvailable: - return "kafka server: Replica information not available, one or more brokers are down." + return "kafka server: Replica information not available, one or more brokers are down" case ErrMessageSizeTooLarge: - return "kafka server: Message was too large, server rejected it to avoid allocation error." + return "kafka server: Message was too large, server rejected it to avoid allocation error" case ErrStaleControllerEpochCode: - return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)." + return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)" case ErrOffsetMetadataTooLarge: - return "kafka server: Specified a string larger than the configured maximum for offset metadata." + return "kafka server: Specified a string larger than the configured maximum for offset metadata" case ErrNetworkException: - return "kafka server: The server disconnected before a response was received." + return "kafka server: The server disconnected before a response was received" case ErrOffsetsLoadInProgress: - return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition." + return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition" case ErrConsumerCoordinatorNotAvailable: - return "kafka server: Offset's topic has not yet been created." + return "kafka server: Offset's topic has not yet been created" case ErrNotCoordinatorForConsumer: - return "kafka server: Request was for a consumer group that is not coordinated by this broker." + return "kafka server: Request was for a consumer group that is not coordinated by this broker" case ErrInvalidTopic: - return "kafka server: The request attempted to perform an operation on an invalid topic." + return "kafka server: The request attempted to perform an operation on an invalid topic" case ErrMessageSetSizeTooLarge: - return "kafka server: The request included message batch larger than the configured segment size on the server." + return "kafka server: The request included message batch larger than the configured segment size on the server" case ErrNotEnoughReplicas: - return "kafka server: Messages are rejected since there are fewer in-sync replicas than required." + return "kafka server: Messages are rejected since there are fewer in-sync replicas than required" case ErrNotEnoughReplicasAfterAppend: - return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required." + return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required" case ErrInvalidRequiredAcks: - return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)." + return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)" case ErrIllegalGeneration: - return "kafka server: The provided generation id is not the current generation." + return "kafka server: The provided generation id is not the current generation" case ErrInconsistentGroupProtocol: - return "kafka server: The provider group protocol type is incompatible with the other members." 
+ return "kafka server: The provider group protocol type is incompatible with the other members" case ErrInvalidGroupId: - return "kafka server: The provided group id was empty." + return "kafka server: The provided group id was empty" case ErrUnknownMemberId: - return "kafka server: The provided member is not known in the current generation." + return "kafka server: The provided member is not known in the current generation" case ErrInvalidSessionTimeout: - return "kafka server: The provided session timeout is outside the allowed range." + return "kafka server: The provided session timeout is outside the allowed range" case ErrRebalanceInProgress: - return "kafka server: A rebalance for the group is in progress. Please re-join the group." + return "kafka server: A rebalance for the group is in progress. Please re-join the group" case ErrInvalidCommitOffsetSize: - return "kafka server: The provided commit metadata was too large." + return "kafka server: The provided commit metadata was too large" case ErrTopicAuthorizationFailed: - return "kafka server: The client is not authorized to access this topic." + return "kafka server: The client is not authorized to access this topic" case ErrGroupAuthorizationFailed: - return "kafka server: The client is not authorized to access this group." + return "kafka server: The client is not authorized to access this group" case ErrClusterAuthorizationFailed: - return "kafka server: The client is not authorized to send this request type." + return "kafka server: The client is not authorized to send this request type" case ErrInvalidTimestamp: - return "kafka server: The timestamp of the message is out of acceptable range." + return "kafka server: The timestamp of the message is out of acceptable range" case ErrUnsupportedSASLMechanism: - return "kafka server: The broker does not support the requested SASL mechanism." + return "kafka server: The broker does not support the requested SASL mechanism" case ErrIllegalSASLState: - return "kafka server: Request is not valid given the current SASL state." + return "kafka server: Request is not valid given the current SASL state" case ErrUnsupportedVersion: - return "kafka server: The version of API is not supported." + return "kafka server: The version of API is not supported" case ErrTopicAlreadyExists: - return "kafka server: Topic with this name already exists." + return "kafka server: Topic with this name already exists" case ErrInvalidPartitions: - return "kafka server: Number of partitions is invalid." + return "kafka server: Number of partitions is invalid" case ErrInvalidReplicationFactor: - return "kafka server: Replication-factor is invalid." + return "kafka server: Replication-factor is invalid" case ErrInvalidReplicaAssignment: - return "kafka server: Replica assignment is invalid." + return "kafka server: Replica assignment is invalid" case ErrInvalidConfig: - return "kafka server: Configuration is invalid." + return "kafka server: Configuration is invalid" case ErrNotController: - return "kafka server: This is not the correct controller for this cluster." + return "kafka server: This is not the correct controller for this cluster" case ErrInvalidRequest: - return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details." 
+ return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details" case ErrUnsupportedForMessageFormat: - return "kafka server: The requested operation is not supported by the message format version." + return "kafka server: The requested operation is not supported by the message format version" case ErrPolicyViolation: - return "kafka server: Request parameters do not satisfy the configured policy." + return "kafka server: Request parameters do not satisfy the configured policy" case ErrOutOfOrderSequenceNumber: - return "kafka server: The broker received an out of order sequence number." + return "kafka server: The broker received an out of order sequence number" case ErrDuplicateSequenceNumber: - return "kafka server: The broker received a duplicate sequence number." + return "kafka server: The broker received a duplicate sequence number" case ErrInvalidProducerEpoch: - return "kafka server: Producer attempted an operation with an old epoch." + return "kafka server: Producer attempted an operation with an old epoch" case ErrInvalidTxnState: - return "kafka server: The producer attempted a transactional operation in an invalid state." + return "kafka server: The producer attempted a transactional operation in an invalid state" case ErrInvalidProducerIDMapping: - return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id." + return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id" case ErrInvalidTransactionTimeout: - return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)." + return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)" case ErrConcurrentTransactions: - return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing." + return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing" case ErrTransactionCoordinatorFenced: - return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer." + return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer" case ErrTransactionalIDAuthorizationFailed: - return "kafka server: Transactional ID authorization failed." + return "kafka server: Transactional ID authorization failed" case ErrSecurityDisabled: - return "kafka server: Security features are disabled." + return "kafka server: Security features are disabled" case ErrOperationNotAttempted: - return "kafka server: The broker did not attempt to execute this operation." + return "kafka server: The broker did not attempt to execute this operation" case ErrKafkaStorageError: - return "kafka server: Disk error when trying to access log file on the disk." + return "kafka server: Disk error when trying to access log file on the disk" case ErrLogDirNotFound: - return "kafka server: The specified log directory is not found in the broker config." 
+ return "kafka server: The specified log directory is not found in the broker config" case ErrSASLAuthenticationFailed: - return "kafka server: SASL Authentication failed." + return "kafka server: SASL Authentication failed" case ErrUnknownProducerID: - return "kafka server: The broker could not locate the producer metadata associated with the Producer ID." + return "kafka server: The broker could not locate the producer metadata associated with the Producer ID" case ErrReassignmentInProgress: - return "kafka server: A partition reassignment is in progress." + return "kafka server: A partition reassignment is in progress" case ErrDelegationTokenAuthDisabled: - return "kafka server: Delegation Token feature is not enabled." + return "kafka server: Delegation Token feature is not enabled" case ErrDelegationTokenNotFound: - return "kafka server: Delegation Token is not found on server." + return "kafka server: Delegation Token is not found on server" case ErrDelegationTokenOwnerMismatch: - return "kafka server: Specified Principal is not valid Owner/Renewer." + return "kafka server: Specified Principal is not valid Owner/Renewer" case ErrDelegationTokenRequestNotAllowed: - return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels." + return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels" case ErrDelegationTokenAuthorizationFailed: - return "kafka server: Delegation Token authorization failed." + return "kafka server: Delegation Token authorization failed" case ErrDelegationTokenExpired: - return "kafka server: Delegation Token is expired." + return "kafka server: Delegation Token is expired" case ErrInvalidPrincipalType: - return "kafka server: Supplied principalType is not supported." + return "kafka server: Supplied principalType is not supported" case ErrNonEmptyGroup: - return "kafka server: The group is not empty." + return "kafka server: The group is not empty" case ErrGroupIDNotFound: - return "kafka server: The group id does not exist." + return "kafka server: The group id does not exist" case ErrFetchSessionIDNotFound: - return "kafka server: The fetch session ID was not found." + return "kafka server: The fetch session ID was not found" case ErrInvalidFetchSessionEpoch: - return "kafka server: The fetch session epoch is invalid." + return "kafka server: The fetch session epoch is invalid" case ErrListenerNotFound: - return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed." + return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed" case ErrTopicDeletionDisabled: - return "kafka server: Topic deletion is disabled." + return "kafka server: Topic deletion is disabled" case ErrFencedLeaderEpoch: - return "kafka server: The leader epoch in the request is older than the epoch on the broker." + return "kafka server: The leader epoch in the request is older than the epoch on the broker" case ErrUnknownLeaderEpoch: - return "kafka server: The leader epoch in the request is newer than the epoch on the broker." + return "kafka server: The leader epoch in the request is newer than the epoch on the broker" case ErrUnsupportedCompressionType: - return "kafka server: The requesting client does not support the compression type of given partition." 
+ return "kafka server: The requesting client does not support the compression type of given partition" case ErrStaleBrokerEpoch: return "kafka server: Broker epoch has changed" case ErrOffsetNotAvailable: @@ -388,21 +436,21 @@ func (err KError) Error() string { case ErrPreferredLeaderNotAvailable: return "kafka server: The preferred leader was not available" case ErrGroupMaxSizeReached: - return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members." + return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members" case ErrFencedInstancedId: - return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id." + return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id" case ErrEligibleLeadersNotAvailable: - return "kafka server: Eligible topic partition leaders are not available." + return "kafka server: Eligible topic partition leaders are not available" case ErrElectionNotNeeded: - return "kafka server: Leader election not needed for topic partition." + return "kafka server: Leader election not needed for topic partition" case ErrNoReassignmentInProgress: - return "kafka server: No partition reassignment is in progress." + return "kafka server: No partition reassignment is in progress" case ErrGroupSubscribedToTopic: - return "kafka server: Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it." + return "kafka server: Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it" case ErrInvalidRecord: - return "kafka server: This record has failed the validation on broker and hence will be rejected." + return "kafka server: This record has failed the validation on broker and hence will be rejected" case ErrUnstableOffsetCommit: - return "kafka server: There are unstable offsets that need to be cleared." + return "kafka server: There are unstable offsets that need to be cleared" } return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go index f893aeff7d52..63190a2b0df0 100644 --- a/vendor/github.com/Shopify/sarama/fetch_request.go +++ b/vendor/github.com/Shopify/sarama/fetch_request.go @@ -1,11 +1,18 @@ package sarama type fetchRequestBlock struct { - Version int16 + Version int16 + // currentLeaderEpoch contains the current leader epoch of the partition. currentLeaderEpoch int32 - fetchOffset int64 - logStartOffset int64 - maxBytes int32 + // fetchOffset contains the message offset. + fetchOffset int64 + // logStartOffset contains the earliest available offset of the follower + // replica. The field is only used when the request is sent by the + // follower. + logStartOffset int64 + // maxBytes contains the maximum bytes to fetch from this partition. See + // KIP-74 for cases where this limit may not be honored. + maxBytes int32 } func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error { @@ -46,16 +53,38 @@ func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) // https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. 
The KIP is at // https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes type FetchRequest struct { - MaxWaitTime int32 - MinBytes int32 - MaxBytes int32 - Version int16 - Isolation IsolationLevel - SessionID int32 + // Version defines the protocol version to use for encode and decode + Version int16 + // ReplicaID contains the broker ID of the follower, of -1 if this request + // is from a consumer. + // ReplicaID int32 + // MaxWaitTime contains the maximum time in milliseconds to wait for the response. + MaxWaitTime int32 + // MinBytes contains the minimum bytes to accumulate in the response. + MinBytes int32 + // MaxBytes contains the maximum bytes to fetch. See KIP-74 for cases + // where this limit may not be honored. + MaxBytes int32 + // Isolation contains a This setting controls the visibility of + // transactional records. Using READ_UNCOMMITTED (isolation_level = 0) + // makes all records visible. With READ_COMMITTED (isolation_level = 1), + // non-transactional and COMMITTED transactional records are visible. To be + // more concrete, READ_COMMITTED returns all data from offsets smaller than + // the current LSO (last stable offset), and enables the inclusion of the + // list of aborted transactions in the result, which allows consumers to + // discard ABORTED transactional records + Isolation IsolationLevel + // SessionID contains the fetch session ID. + SessionID int32 + // SessionEpoch contains the epoch of the partition leader as known to the + // follower replica or a consumer. SessionEpoch int32 - blocks map[string]map[int32]*fetchRequestBlock - forgotten map[string][]int32 - RackID string + // blocks contains the topics to fetch. + blocks map[string]map[int32]*fetchRequestBlock + // forgotten contains in an incremental fetch request, the partitions to remove. + forgotten map[string][]int32 + // RackID contains a Rack ID of the consumer making this request + RackID string } type IsolationLevel int8 @@ -66,7 +95,9 @@ const ( ) func (r *FetchRequest) encode(pe packetEncoder) (err error) { - pe.putInt32(-1) // replica ID is always -1 for clients + metricRegistry := pe.metricRegistry() + + pe.putInt32(-1) // ReplicaID is always -1 for clients pe.putInt32(r.MaxWaitTime) pe.putInt32(r.MinBytes) if r.Version >= 3 { @@ -99,6 +130,7 @@ func (r *FetchRequest) encode(pe packetEncoder) (err error) { return err } } + getOrRegisterTopicMeter("consumer-fetch-rate", topic, metricRegistry).Mark(1) } if r.Version >= 7 { err = pe.putArrayLength(len(r.forgotten)) diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go index 54b88284ad98..c8ad6046ab55 100644 --- a/vendor/github.com/Shopify/sarama/fetch_response.go +++ b/vendor/github.com/Shopify/sarama/fetch_response.go @@ -1,12 +1,19 @@ package sarama import ( + "errors" "sort" "time" + + "github.com/rcrowley/go-metrics" ) +const invalidPreferredReplicaID = -1 + type AbortedTransaction struct { - ProducerID int64 + // ProducerID contains the producer id associated with the aborted transaction. + ProducerID int64 + // FirstOffset contains the first offset in the aborted transaction. FirstOffset int64 } @@ -30,18 +37,37 @@ func (t *AbortedTransaction) encode(pe packetEncoder) (err error) { } type FetchResponseBlock struct { - Err KError - HighWaterMarkOffset int64 - LastStableOffset int64 - LogStartOffset int64 - AbortedTransactions []*AbortedTransaction + // Err contains the error code, or 0 if there was no fetch error. 
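Applications rarely build FetchRequests by hand; the isolation level documented above is normally driven from the client configuration. A hedged sketch, with the version and isolation choices picked purely for illustration:

package example

import "github.com/Shopify/sarama"

func newReadCommittedConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0                      // transactional records need 0.11+
	cfg.Consumer.IsolationLevel = sarama.ReadCommitted  // fetch only committed transactional records
	return cfg
}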
+ Err KError + // HighWaterMarkOffset contains the current high water mark. + HighWaterMarkOffset int64 + // LastStableOffset contains the last stable offset (or LSO) of the + // partition. This is the last offset such that the state of all + // transactional records prior to this offset has been decided (ABORTED or + // COMMITTED). + LastStableOffset int64 + LastRecordsBatchOffset *int64 + // LogStartOffset contains the current log start offset. + LogStartOffset int64 + // AbortedTransactions contains the aborted transactions. + AbortedTransactions []*AbortedTransaction + // PreferredReadReplica contains the preferred read replica for the + // consumer to use on its next fetch request. PreferredReadReplica int32 - Records *Records // deprecated: use FetchResponseBlock.RecordsSet - RecordsSet []*Records - Partial bool + // RecordsSet contains the record data. + RecordsSet []*Records + + Partial bool + Records *Records // deprecated: use FetchResponseBlock.RecordsSet } func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { + metricRegistry := pd.metricRegistry() + var sizeMetric metrics.Histogram + if metricRegistry != nil { + sizeMetric = getOrRegisterHistogram("consumer-fetch-response-size", metricRegistry) + } + tmp, err := pd.getInt16() if err != nil { return err @@ -97,6 +123,9 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) if err != nil { return err } + if sizeMetric != nil { + sizeMetric.Update(int64(recordsSize)) + } recordsDecoder, err := pd.getSubset(int(recordsSize)) if err != nil { @@ -109,7 +138,7 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) records := &Records{} if err := records.decode(recordsDecoder); err != nil { // If we have at least one decoded records, this is not an error - if err == ErrInsufficientData { + if errors.Is(err, ErrInsufficientData) { if len(b.RecordsSet) == 0 { b.Partial = true } @@ -118,6 +147,11 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) return err } + b.LastRecordsBatchOffset, err = records.recordsOffset() + if err != nil { + return err + } + partial, err := records.isPartial() if err != nil { return err @@ -224,11 +258,19 @@ func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction { } type FetchResponse struct { - Blocks map[string]map[int32]*FetchResponseBlock - ThrottleTime time.Duration - ErrorCode int16 - SessionID int32 - Version int16 + // Version defines the protocol version to use for encode and decode + Version int16 + // ThrottleTime contains the duration in milliseconds for which the request + // was throttled due to a quota violation, or zero if the request did not + // violate any quota. + ThrottleTime time.Duration + // ErrorCode contains the top level response error code. + ErrorCode int16 + // SessionID contains the fetch session ID, or 0 if this is not part of a fetch session. + SessionID int32 + // Blocks contains the response topics.
+ Blocks map[string]map[int32]*FetchResponseBlock + LogAppendTime bool Timestamp time.Time } diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go index e9d9af191102..511910e712e1 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_request.go +++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go @@ -1,9 +1,11 @@ package sarama type HeartbeatRequest struct { - GroupId string - GenerationId int32 - MemberId string + Version int16 + GroupId string + GenerationId int32 + MemberId string + GroupInstanceId *string } func (r *HeartbeatRequest) encode(pe packetEncoder) error { @@ -17,10 +19,17 @@ func (r *HeartbeatRequest) encode(pe packetEncoder) error { return err } + if r.Version >= 3 { + if err := pe.putNullableString(r.GroupInstanceId); err != nil { + return err + } + } + return nil } func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version if r.GroupId, err = pd.getString(); err != nil { return } @@ -30,6 +39,11 @@ func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { if r.MemberId, err = pd.getString(); err != nil { return } + if r.Version >= 3 { + if r.GroupInstanceId, err = pd.getNullableString(); err != nil { + return + } + } return nil } @@ -39,7 +53,7 @@ func (r *HeartbeatRequest) key() int16 { } func (r *HeartbeatRequest) version() int16 { - return 0 + return r.Version } func (r *HeartbeatRequest) headerVersion() int16 { @@ -47,5 +61,9 @@ func (r *HeartbeatRequest) headerVersion() int16 { } func (r *HeartbeatRequest) requiredVersion() KafkaVersion { + switch { + case r.Version >= 3: + return V2_3_0_0 + } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go index 577ab72e5742..95ef97f47a47 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_response.go +++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go @@ -1,15 +1,27 @@ package sarama type HeartbeatResponse struct { - Err KError + Version int16 + ThrottleTime int32 + Err KError } func (r *HeartbeatResponse) encode(pe packetEncoder) error { + if r.Version >= 1 { + pe.putInt32(r.ThrottleTime) + } pe.putInt16(int16(r.Err)) return nil } func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { + var err error + r.Version = version + if r.Version >= 1 { + if r.ThrottleTime, err = pd.getInt32(); err != nil { + return err + } + } kerr, err := pd.getInt16() if err != nil { return err @@ -24,7 +36,7 @@ func (r *HeartbeatResponse) key() int16 { } func (r *HeartbeatResponse) version() int16 { - return 0 + return r.Version } func (r *HeartbeatResponse) headerVersion() int16 { @@ -32,5 +44,9 @@ func (r *HeartbeatResponse) headerVersion() int16 { } func (r *HeartbeatResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1, 2, 3: + return V2_3_0_0 + } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go index 689444397d6f..33ce5fa41c89 100644 --- a/vendor/github.com/Shopify/sarama/init_producer_id_request.go +++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go @@ -3,22 +3,45 @@ package sarama import "time" type InitProducerIDRequest struct { + Version int16 TransactionalID *string TransactionTimeout time.Duration + ProducerID int64 + ProducerEpoch int16 } func (i *InitProducerIDRequest) encode(pe packetEncoder) error { - if err := 
pe.putNullableString(i.TransactionalID); err != nil { - return err + if i.Version < 2 { + if err := pe.putNullableString(i.TransactionalID); err != nil { + return err + } + } else { + if err := pe.putNullableCompactString(i.TransactionalID); err != nil { + return err + } } pe.putInt32(int32(i.TransactionTimeout / time.Millisecond)) + if i.Version >= 3 { + pe.putInt64(i.ProducerID) + pe.putInt16(i.ProducerEpoch) + } + if i.Version >= 2 { + pe.putEmptyTaggedFieldArray() + } return nil } func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) { - if i.TransactionalID, err = pd.getNullableString(); err != nil { - return err + i.Version = version + if i.Version < 2 { + if i.TransactionalID, err = pd.getNullableString(); err != nil { + return err + } + } else { + if i.TransactionalID, err = pd.getCompactNullableString(); err != nil { + return err + } } timeout, err := pd.getInt32() @@ -26,6 +49,21 @@ func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err err return err } i.TransactionTimeout = time.Duration(timeout) * time.Millisecond + if i.Version >= 3 { + if i.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if i.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + } + + if i.Version >= 2 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } return nil } @@ -35,13 +73,30 @@ func (i *InitProducerIDRequest) key() int16 { } func (i *InitProducerIDRequest) version() int16 { - return 0 + return i.Version } func (i *InitProducerIDRequest) headerVersion() int16 { + if i.Version >= 2 { + return 2 + } + return 1 } func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch i.Version { + case 2: + // Added tagged fields + return V2_4_0_0 + case 3: + // Added ProducerID/Epoch + return V2_5_0_0 + case 0: + fallthrough + case 1: + fallthrough + default: + return V0_11_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go index 3e1242bf6224..006070189983 100644 --- a/vendor/github.com/Shopify/sarama/init_producer_id_response.go +++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go @@ -5,6 +5,7 @@ import "time" type InitProducerIDResponse struct { ThrottleTime time.Duration Err KError + Version int16 ProducerID int64 ProducerEpoch int16 } @@ -15,10 +16,15 @@ func (i *InitProducerIDResponse) encode(pe packetEncoder) error { pe.putInt64(i.ProducerID) pe.putInt16(i.ProducerEpoch) + if i.Version >= 2 { + pe.putEmptyTaggedFieldArray() + } + return nil } func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) { + i.Version = version throttleTime, err := pd.getInt32() if err != nil { return err @@ -39,6 +45,12 @@ func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err er return err } + if i.Version >= 2 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return nil } @@ -47,13 +59,27 @@ func (i *InitProducerIDResponse) key() int16 { } func (i *InitProducerIDResponse) version() int16 { - return 0 + return i.Version } func (i *InitProducerIDResponse) headerVersion() int16 { + if i.Version >= 2 { + return 1 + } return 0 } func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch i.Version { + case 2: + fallthrough + case 3: + return V2_4_0_0 + case 0: + fallthrough + case 1: + fallthrough + default: + return V0_11_0_0 + } } diff --git 
a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go index 3734e82e4068..432338cd5976 100644 --- a/vendor/github.com/Shopify/sarama/join_group_request.go +++ b/vendor/github.com/Shopify/sarama/join_group_request.go @@ -30,6 +30,7 @@ type JoinGroupRequest struct { SessionTimeout int32 RebalanceTimeout int32 MemberId string + GroupInstanceId *string ProtocolType string GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols OrderedGroupProtocols []*GroupProtocol @@ -46,6 +47,11 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error { if err := pe.putString(r.MemberId); err != nil { return err } + if r.Version >= 5 { + if err := pe.putNullableString(r.GroupInstanceId); err != nil { + return err + } + } if err := pe.putString(r.ProtocolType); err != nil { return err } @@ -101,6 +107,12 @@ func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { return } + if version >= 5 { + if r.GroupInstanceId, err = pd.getNullableString(); err != nil { + return + } + } + if r.ProtocolType, err = pd.getString(); err != nil { return } @@ -140,7 +152,9 @@ func (r *JoinGroupRequest) headerVersion() int16 { func (r *JoinGroupRequest) requiredVersion() KafkaVersion { switch r.Version { - case 2: + case 4, 5: + return V2_3_0_0 + case 2, 3: return V0_11_0_0 case 1: return V0_10_1_0 diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go index 54b0a45c28ea..d8aa1f0023d4 100644 --- a/vendor/github.com/Shopify/sarama/join_group_response.go +++ b/vendor/github.com/Shopify/sarama/join_group_response.go @@ -8,17 +8,23 @@ type JoinGroupResponse struct { GroupProtocol string LeaderId string MemberId string - Members map[string][]byte + Members []GroupMember +} + +type GroupMember struct { + MemberId string + GroupInstanceId *string + Metadata []byte } func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members)) - for id, bin := range r.Members { + for _, member := range r.Members { meta := new(ConsumerGroupMemberMetadata) - if err := decode(bin, meta); err != nil { + if err := decode(member.Metadata, meta, nil); err != nil { return nil, err } - members[id] = *meta + members[member.MemberId] = *meta } return members, nil } @@ -44,12 +50,16 @@ func (r *JoinGroupResponse) encode(pe packetEncoder) error { return err } - for memberId, memberMetadata := range r.Members { - if err := pe.putString(memberId); err != nil { + for _, member := range r.Members { + if err := pe.putString(member.MemberId); err != nil { return err } - - if err := pe.putBytes(memberMetadata); err != nil { + if r.Version >= 5 { + if err := pe.putNullableString(member.GroupInstanceId); err != nil { + return err + } + } + if err := pe.putBytes(member.Metadata); err != nil { return err } } @@ -97,19 +107,27 @@ func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) return nil } - r.Members = make(map[string][]byte) + r.Members = make([]GroupMember, n) for i := 0; i < n; i++ { memberId, err := pd.getString() if err != nil { return err } + var groupInstanceId *string = nil + if r.Version >= 5 { + groupInstanceId, err = pd.getNullableString() + if err != nil { + return err + } + } + memberMetadata, err := pd.getBytes() if err != nil { return err } - r.Members[memberId] = memberMetadata + r.Members[i] = GroupMember{MemberId: memberId, GroupInstanceId: 
groupInstanceId, Metadata: memberMetadata} } return nil @@ -129,6 +147,8 @@ func (r *JoinGroupResponse) headerVersion() int16 { func (r *JoinGroupResponse) requiredVersion() KafkaVersion { switch r.Version { + case 3, 4, 5: + return V2_3_0_0 case 2: return V0_11_0_0 case 1: diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go index d7789b68dbe2..741b7290a8df 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_request.go +++ b/vendor/github.com/Shopify/sarama/leave_group_request.go @@ -1,27 +1,69 @@ package sarama +type MemberIdentity struct { + MemberId string + GroupInstanceId *string +} + type LeaveGroupRequest struct { + Version int16 GroupId string - MemberId string + MemberId string // Removed in Version 3 + Members []MemberIdentity // Added in Version 3 } func (r *LeaveGroupRequest) encode(pe packetEncoder) error { if err := pe.putString(r.GroupId); err != nil { return err } - if err := pe.putString(r.MemberId); err != nil { - return err + if r.Version < 3 { + if err := pe.putString(r.MemberId); err != nil { + return err + } + } + if r.Version >= 3 { + if err := pe.putArrayLength(len(r.Members)); err != nil { + return err + } + for _, member := range r.Members { + if err := pe.putString(member.MemberId); err != nil { + return err + } + if err := pe.putNullableString(member.GroupInstanceId); err != nil { + return err + } + } } return nil } func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version if r.GroupId, err = pd.getString(); err != nil { return } - if r.MemberId, err = pd.getString(); err != nil { - return + if r.Version < 3 { + if r.MemberId, err = pd.getString(); err != nil { + return + } + } + if r.Version >= 3 { + memberCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.Members = make([]MemberIdentity, memberCount) + for i := 0; i < memberCount; i++ { + memberIdentity := MemberIdentity{} + if memberIdentity.MemberId, err = pd.getString(); err != nil { + return err + } + if memberIdentity.GroupInstanceId, err = pd.getNullableString(); err != nil { + return err + } + r.Members[i] = memberIdentity + } } return nil @@ -32,7 +74,7 @@ func (r *LeaveGroupRequest) key() int16 { } func (r *LeaveGroupRequest) version() int16 { - return 0 + return r.Version } func (r *LeaveGroupRequest) headerVersion() int16 { @@ -40,5 +82,9 @@ func (r *LeaveGroupRequest) headerVersion() int16 { } func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1, 2, 3: + return V2_3_0_0 + } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go index 25f8d5eb36b4..18ed357e830b 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_response.go +++ b/vendor/github.com/Shopify/sarama/leave_group_response.go @@ -1,21 +1,73 @@ package sarama +type MemberResponse struct { + MemberId string + GroupInstanceId *string + Err KError +} type LeaveGroupResponse struct { - Err KError + Version int16 + ThrottleTime int32 + Err KError + Members []MemberResponse } func (r *LeaveGroupResponse) encode(pe packetEncoder) error { + if r.Version >= 1 { + pe.putInt32(r.ThrottleTime) + } pe.putInt16(int16(r.Err)) + if r.Version >= 3 { + if err := pe.putArrayLength(len(r.Members)); err != nil { + return err + } + for _, member := range r.Members { + if err := pe.putString(member.MemberId); err != nil { + return err + } + if err := 
pe.putNullableString(member.GroupInstanceId); err != nil { + return err + } + pe.putInt16(int16(member.Err)) + } + } return nil } func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.Version >= 1 { + if r.ThrottleTime, err = pd.getInt32(); err != nil { + return err + } + } kerr, err := pd.getInt16() if err != nil { return err } r.Err = KError(kerr) + if r.Version >= 3 { + membersLen, err := pd.getArrayLength() + if err != nil { + return err + } + r.Members = make([]MemberResponse, membersLen) + for i := 0; i < len(r.Members); i++ { + if r.Members[i].MemberId, err = pd.getString(); err != nil { + return err + } + if r.Members[i].GroupInstanceId, err = pd.getNullableString(); err != nil { + return err + } + if memberErr, err := pd.getInt16(); err != nil { + return err + } else { + r.Members[i].Err = KError(memberErr) + } + } + } + return nil } @@ -24,7 +76,7 @@ func (r *LeaveGroupResponse) key() int16 { } func (r *LeaveGroupResponse) version() int16 { - return 0 + return r.Version } func (r *LeaveGroupResponse) headerVersion() int16 { @@ -32,5 +84,9 @@ func (r *LeaveGroupResponse) headerVersion() int16 { } func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1, 2, 3: + return V2_3_0_0 + } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go index fd0d1d90b7fa..c6f35a3f5e54 100644 --- a/vendor/github.com/Shopify/sarama/message.go +++ b/vendor/github.com/Shopify/sarama/message.go @@ -42,6 +42,28 @@ func (cc CompressionCodec) String() string { }[int(cc)] } +// UnmarshalText returns a CompressionCodec from its string representation. +func (cc *CompressionCodec) UnmarshalText(text []byte) error { + codecs := map[string]CompressionCodec{ + "none": CompressionNone, + "gzip": CompressionGZIP, + "snappy": CompressionSnappy, + "lz4": CompressionLZ4, + "zstd": CompressionZSTD, + } + codec, ok := codecs[string(text)] + if !ok { + return fmt.Errorf("cannot parse %q as a compression codec", string(text)) + } + *cc = codec + return nil +} + +// MarshalText transforms a CompressionCodec into its string representation. +func (cc CompressionCodec) MarshalText() ([]byte, error) { + return []byte(cc.String()), nil +} + // Message is a kafka message type type Message struct { Codec CompressionCodec // codec used to compress the message contents diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go index 6523ec2f74d5..5bc211294426 100644 --- a/vendor/github.com/Shopify/sarama/message_set.go +++ b/vendor/github.com/Shopify/sarama/message_set.go @@ -1,5 +1,7 @@ package sarama +import "errors" + type MessageBlock struct { Offset int64 Msg *Message @@ -70,7 +72,7 @@ func (ms *MessageSet) decode(pd packetDecoder) (err error) { for pd.remaining() > 0 { magic, err := magicValue(pd) if err != nil { - if err == ErrInsufficientData { + if errors.Is(err, ErrInsufficientData) { ms.PartialTrailingMessage = true return nil } @@ -83,10 +85,9 @@ func (ms *MessageSet) decode(pd packetDecoder) (err error) { msb := new(MessageBlock) err = msb.decode(pd) - switch err { - case nil: + if err == nil { ms.Messages = append(ms.Messages, msb) - case ErrInsufficientData: + } else if errors.Is(err, ErrInsufficientData) { // As an optimization the server is allowed to return a partial message at the // end of the message set. Clients should handle this case. So we just ignore such things. 
if msb.Offset == -1 { @@ -96,7 +97,7 @@ func (ms *MessageSet) decode(pd packetDecoder) (err error) { ms.PartialTrailingMessage = true } return nil - default: + } else { return err } } diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go index 90e5a87f4972..7b7705f2e3bf 100644 --- a/vendor/github.com/Shopify/sarama/metrics.go +++ b/vendor/github.com/Shopify/sarama/metrics.go @@ -3,6 +3,7 @@ package sarama import ( "fmt" "strings" + "sync" "github.com/rcrowley/go-metrics" ) @@ -41,3 +42,79 @@ func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metr func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram { return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r) } + +// cleanupRegistry is an implementation of metrics.Registry that allows +// to unregister from the parent registry only those metrics +// that have been registered in cleanupRegistry +type cleanupRegistry struct { + parent metrics.Registry + metrics map[string]struct{} + mutex sync.RWMutex +} + +func newCleanupRegistry(parent metrics.Registry) metrics.Registry { + return &cleanupRegistry{ + parent: parent, + metrics: map[string]struct{}{}, + } +} + +func (r *cleanupRegistry) Each(fn func(string, interface{})) { + r.mutex.RLock() + defer r.mutex.RUnlock() + wrappedFn := func(name string, iface interface{}) { + if _, ok := r.metrics[name]; ok { + fn(name, iface) + } + } + r.parent.Each(wrappedFn) +} + +func (r *cleanupRegistry) Get(name string) interface{} { + r.mutex.RLock() + defer r.mutex.RUnlock() + if _, ok := r.metrics[name]; ok { + return r.parent.Get(name) + } + return nil +} + +func (r *cleanupRegistry) GetOrRegister(name string, metric interface{}) interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + r.metrics[name] = struct{}{} + return r.parent.GetOrRegister(name, metric) +} + +func (r *cleanupRegistry) Register(name string, metric interface{}) error { + r.mutex.Lock() + defer r.mutex.Unlock() + r.metrics[name] = struct{}{} + return r.parent.Register(name, metric) +} + +func (r *cleanupRegistry) RunHealthchecks() { + r.parent.RunHealthchecks() +} + +func (r *cleanupRegistry) GetAll() map[string]map[string]interface{} { + return r.parent.GetAll() +} + +func (r *cleanupRegistry) Unregister(name string) { + r.mutex.Lock() + defer r.mutex.Unlock() + if _, ok := r.metrics[name]; ok { + delete(r.metrics, name) + r.parent.Unregister(name) + } +} + +func (r *cleanupRegistry) UnregisterAll() { + r.mutex.Lock() + defer r.mutex.Unlock() + for name := range r.metrics { + delete(r.metrics, name) + r.parent.Unregister(name) + } +} diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go index 9166f6efbca9..628c3cb90c7f 100644 --- a/vendor/github.com/Shopify/sarama/mockbroker.go +++ b/vendor/github.com/Shopify/sarama/mockbroker.go @@ -3,6 +3,7 @@ package sarama import ( "bytes" "encoding/binary" + "errors" "fmt" "io" "net" @@ -359,9 +360,10 @@ func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) func (b *MockBroker) serverError(err error) { isConnectionClosedError := false - if _, ok := err.(*net.OpError); ok { + opError := &net.OpError{} + if errors.As(err, &opError) { isConnectionClosedError = true - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { isConnectionClosedError = true } else if err.Error() == "use of closed network connection" { isConnectionClosedError = true diff --git 
a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go index a5e94fb39f07..15b4367f998b 100644 --- a/vendor/github.com/Shopify/sarama/mockresponses.go +++ b/vendor/github.com/Shopify/sarama/mockresponses.go @@ -3,6 +3,7 @@ package sarama import ( "fmt" "strings" + "sync" ) // TestReporter has methods matching go's testing.T to avoid importing @@ -203,7 +204,6 @@ func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader type MockOffsetResponse struct { offsets map[string]map[int32]map[int64]int64 t TestReporter - version int16 } func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { @@ -213,11 +213,6 @@ func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { } } -func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse { - mor.version = version - return mor -} - func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { partitions := mor.offsets[topic] if partitions == nil { @@ -235,7 +230,7 @@ func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, of func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { offsetRequest := reqBody.(*OffsetRequest) - offsetResponse := &OffsetResponse{Version: mor.version} + offsetResponse := &OffsetResponse{Version: offsetRequest.Version} for topic, partitions := range offsetRequest.blocks { for partition, block := range partitions { offset := mor.getOffset(topic, partition, block.time) @@ -261,41 +256,56 @@ func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int return offset } +// mockMessage is a message that used to be mocked for `FetchResponse` +type mockMessage struct { + key Encoder + msg Encoder +} + +func newMockMessage(key, msg Encoder) *mockMessage { + return &mockMessage{ + key: key, + msg: msg, + } +} + // MockFetchResponse is a `FetchResponse` builder. 
type MockFetchResponse struct { - messages map[string]map[int32]map[int64]Encoder + messages map[string]map[int32]map[int64]*mockMessage + messagesLock *sync.RWMutex highWaterMarks map[string]map[int32]int64 t TestReporter batchSize int - version int16 } func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { return &MockFetchResponse{ - messages: make(map[string]map[int32]map[int64]Encoder), + messages: make(map[string]map[int32]map[int64]*mockMessage), + messagesLock: &sync.RWMutex{}, highWaterMarks: make(map[string]map[int32]int64), t: t, batchSize: batchSize, } } -func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse { - mfr.version = version - return mfr +func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { + return mfr.SetMessageWithKey(topic, partition, offset, nil, msg) } -func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { +func (mfr *MockFetchResponse) SetMessageWithKey(topic string, partition int32, offset int64, key, msg Encoder) *MockFetchResponse { + mfr.messagesLock.Lock() + defer mfr.messagesLock.Unlock() partitions := mfr.messages[topic] if partitions == nil { - partitions = make(map[int32]map[int64]Encoder) + partitions = make(map[int32]map[int64]*mockMessage) mfr.messages[topic] = partitions } messages := partitions[partition] if messages == nil { - messages = make(map[int64]Encoder) + messages = make(map[int64]*mockMessage) partitions[partition] = messages } - messages[offset] = msg + messages[offset] = newMockMessage(key, msg) return mfr } @@ -312,7 +322,7 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, of func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { fetchRequest := reqBody.(*FetchRequest) res := &FetchResponse{ - Version: mfr.version, + Version: fetchRequest.Version, } for topic, partitions := range fetchRequest.blocks { for partition, block := range partitions { @@ -322,7 +332,7 @@ func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { for i := 0; i < mfr.batchSize && offset < maxOffset; { msg := mfr.getMessage(topic, partition, offset) if msg != nil { - res.AddMessage(topic, partition, nil, msg, offset) + res.AddMessage(topic, partition, msg.key, msg.msg, offset) i++ } offset++ @@ -338,7 +348,9 @@ func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { return res } -func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder { +func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) *mockMessage { + mfr.messagesLock.RLock() + defer mfr.messagesLock.RUnlock() partitions := mfr.messages[topic] if partitions == nil { return nil @@ -351,6 +363,8 @@ func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset i } func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int { + mfr.messagesLock.RLock() + defer mfr.messagesLock.RUnlock() partitions := mfr.messages[topic] if partitions == nil { return 0 @@ -445,6 +459,7 @@ func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*FindCoordinatorRequest) res := &FindCoordinatorResponse{} + res.Version = req.Version var v interface{} switch req.CoordinatorType { case CoordinatorGroup: @@ -926,6 +941,51 @@ func 
(mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) e return res } +type MockIncrementalAlterConfigsResponse struct { + t TestReporter +} + +func NewMockIncrementalAlterConfigsResponse(t TestReporter) *MockIncrementalAlterConfigsResponse { + return &MockIncrementalAlterConfigsResponse{t: t} +} + +func (mr *MockIncrementalAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*IncrementalAlterConfigsRequest) + res := &IncrementalAlterConfigsResponse{} + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorMsg: "", + }) + } + return res +} + +type MockIncrementalAlterConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockIncrementalAlterConfigsResponseWithErrorCode(t TestReporter) *MockIncrementalAlterConfigsResponseWithErrorCode { + return &MockIncrementalAlterConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockIncrementalAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*IncrementalAlterConfigsRequest) + res := &IncrementalAlterConfigsResponse{} + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) + } + return res +} + type MockCreateAclsResponse struct { t TestReporter } @@ -944,6 +1004,24 @@ func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeade return res } +type MockCreateAclsResponseError struct { + t TestReporter +} + +func NewMockCreateAclsResponseWithError(t TestReporter) *MockCreateAclsResponseError { + return &MockCreateAclsResponseError{t: t} +} + +func (mr *MockCreateAclsResponseError) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*CreateAclsRequest) + res := &CreateAclsResponse{} + + for range req.AclCreations { + res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrInvalidRequest}) + } + return res +} + type MockListAclsResponse struct { t TestReporter } @@ -985,9 +1063,10 @@ func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader } type MockSaslAuthenticateResponse struct { - t TestReporter - kerror KError - saslAuthBytes []byte + t TestReporter + kerror KError + saslAuthBytes []byte + sessionLifetimeMs int64 } func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse { @@ -995,9 +1074,12 @@ func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateRespon } func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*SaslAuthenticateRequest) res := &SaslAuthenticateResponse{} + res.Version = req.Version res.Err = msar.kerror res.SaslAuthBytes = msar.saslAuthBytes + res.SessionLifetimeMs = msar.sessionLifetimeMs return res } @@ -1011,6 +1093,11 @@ func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *Mo return msar } +func (msar *MockSaslAuthenticateResponse) SetSessionLifetimeMs(sessionLifetimeMs int64) *MockSaslAuthenticateResponse { + msar.sessionLifetimeMs = sessionLifetimeMs + return msar +} + type MockDeleteAclsResponse struct { t TestReporter } @@ -1120,13 +1207,13 @@ type MockJoinGroupResponse struct { GroupProtocol string LeaderId string MemberId string - Members map[string][]byte + Members []GroupMember } func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse { return 
&MockJoinGroupResponse{ t: t, - Members: make(map[string][]byte), + Members: make([]GroupMember, 0), } } @@ -1180,7 +1267,7 @@ func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMe if err != nil { panic(fmt.Sprintf("error encoding member metadata: %v", err)) } - m.Members[id] = bin + m.Members = append(m.Members, GroupMember{MemberId: id, Metadata: bin}) return m } @@ -1302,31 +1389,38 @@ func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithH } type MockApiVersionsResponse struct { - t TestReporter + t TestReporter + apiKeys []ApiVersionsResponseKey } func NewMockApiVersionsResponse(t TestReporter) *MockApiVersionsResponse { - return &MockApiVersionsResponse{t: t} -} - -func (mr *MockApiVersionsResponse) For(reqBody versionedDecoder) encoderWithHeader { - req := reqBody.(*ApiVersionsRequest) - res := &ApiVersionsResponse{ - Version: req.Version, - ApiKeys: []ApiVersionsResponseKey{ + return &MockApiVersionsResponse{ + t: t, + apiKeys: []ApiVersionsResponseKey{ { - Version: req.Version, ApiKey: 0, MinVersion: 5, MaxVersion: 8, }, { - Version: req.Version, ApiKey: 1, MinVersion: 7, MaxVersion: 11, }, }, } +} + +func (m *MockApiVersionsResponse) SetApiKeys(apiKeys []ApiVersionsResponseKey) *MockApiVersionsResponse { + m.apiKeys = apiKeys + return m +} + +func (m *MockApiVersionsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ApiVersionsRequest) + res := &ApiVersionsResponse{ + Version: req.Version, + ApiKeys: m.apiKeys, + } return res } diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go index 9931cade512d..5dd88220d97f 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_request.go +++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go @@ -13,9 +13,10 @@ const ReceiveTime int64 = -1 const GroupGenerationUndefined = -1 type offsetCommitRequestBlock struct { - offset int64 - timestamp int64 - metadata string + offset int64 + timestamp int64 + committedLeaderEpoch int32 + metadata string } func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { @@ -25,6 +26,9 @@ func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error } else if b.timestamp != 0 { Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored") } + if version >= 6 { + pe.putInt32(b.committedLeaderEpoch) + } return pe.putString(b.metadata) } @@ -38,15 +42,22 @@ func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err return err } } + if version >= 6 { + if b.committedLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } + b.metadata, err = pd.getString() return err } type OffsetCommitRequest struct { ConsumerGroup string - ConsumerGroupGeneration int32 // v1 or later - ConsumerID string // v1 or later - RetentionTime int64 // v2 or later + ConsumerGroupGeneration int32 // v1 or later + ConsumerID string // v1 or later + GroupInstanceId *string // v7 or later + RetentionTime int64 // v2 or later // Version can be: // - 0 (kafka 0.8.1 and later) @@ -54,12 +65,14 @@ type OffsetCommitRequest struct { // - 2 (kafka 0.9.0 and later) // - 3 (kafka 0.11.0 and later) // - 4 (kafka 2.0.0 and later) + // - 5&6 (kafka 2.1.0 and later) + // - 7 (kafka 2.3.0 and later) Version int16 blocks map[string]map[int32]*offsetCommitRequestBlock } func (r *OffsetCommitRequest) encode(pe packetEncoder) error { - if r.Version < 0 || r.Version > 4 { + if 
r.Version < 0 || r.Version > 7 { return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"} } @@ -81,12 +94,19 @@ func (r *OffsetCommitRequest) encode(pe packetEncoder) error { } } - if r.Version >= 2 { + // Version 5 removes RetentionTime, which is now controlled only by a broker configuration. + if r.Version >= 2 && r.Version <= 4 { pe.putInt64(r.RetentionTime) } else if r.RetentionTime != 0 { Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored") } + if r.Version >= 7 { + if err := pe.putNullableString(r.GroupInstanceId); err != nil { + return err + } + } + if err := pe.putArrayLength(len(r.blocks)); err != nil { return err } @@ -123,12 +143,19 @@ func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error } } - if r.Version >= 2 { + // Version 5 removes RetentionTime, which is now controlled only by a broker configuration. + if r.Version >= 2 && r.Version <= 4 { if r.RetentionTime, err = pd.getInt64(); err != nil { return err } } + if r.Version >= 7 { + if r.GroupInstanceId, err = pd.getNullableString(); err != nil { + return err + } + } + topicCount, err := pd.getArrayLength() if err != nil { return err @@ -184,12 +211,16 @@ func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { return V0_11_0_0 case 4: return V2_0_0_0 + case 5, 6: + return V2_1_0_0 + case 7: + return V2_3_0_0 default: return MinVersion } } -func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { +func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) } @@ -198,7 +229,7 @@ func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset i r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) } - r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata} + r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, leaderEpoch, metadata} } func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) { diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go index 342260ef5995..4bed269aa5be 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_response.go +++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go @@ -108,6 +108,8 @@ func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { return V0_11_0_0 case 4: return V2_0_0_0 + case 5, 6, 7: + return V2_3_0_0 default: return MinVersion } diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go index 4f480a08b905..1ea15ff93933 100644 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ b/vendor/github.com/Shopify/sarama/offset_manager.go @@ -26,13 +26,15 @@ type OffsetManager interface { } type offsetManager struct { - client Client - conf *Config - group string - ticker *time.Ticker + client Client + conf *Config + group string + ticker *time.Ticker + sessionCanceler func() - memberID string - generation int32 + memberID string + groupInstanceId *string + generation int32 broker *Broker brokerLock sync.RWMutex @@ -48,10 +50,10 @@ type offsetManager struct { // NewOffsetManagerFromClient creates a new OffsetManager from the given client. 
// It is still necessary to call Close() on the underlying client when finished with the partition manager. func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) { - return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client) + return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client, nil) } -func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) { +func newOffsetManagerFromClient(group, memberID string, generation int32, client Client, sessionCanceler func()) (*offsetManager, error) { // Check that we are not dealing with a closed Client before processing any other arguments if client.Closed() { return nil, ErrClosedClient @@ -59,10 +61,11 @@ func newOffsetManagerFromClient(group, memberID string, generation int32, client conf := client.Config() om := &offsetManager{ - client: client, - conf: conf, - group: group, - poms: make(map[string]map[int32]*partitionOffsetManager), + client: client, + conf: conf, + group: group, + poms: make(map[string]map[int32]*partitionOffsetManager), + sessionCanceler: sessionCanceler, memberID: memberID, generation: generation, @@ -70,6 +73,9 @@ func newOffsetManagerFromClient(group, memberID string, generation int32, client closing: make(chan none), closed: make(chan none), } + if conf.Consumer.Group.InstanceId != "" { + om.groupInstanceId = &conf.Consumer.Group.InstanceId + } if conf.Consumer.Offsets.AutoCommit.Enable { om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval) go withRecover(om.mainLoop) @@ -138,11 +144,11 @@ func (om *offsetManager) computeBackoff(retries int) time.Duration { } } -func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) { +func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, int32, string, error) { broker, err := om.coordinator() if err != nil { if retries <= 0 { - return 0, "", err + return 0, 0, "", err } return om.fetchInitialOffset(topic, partition, retries-1) } @@ -155,7 +161,7 @@ func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retri resp, err := broker.FetchOffset(req) if err != nil { if retries <= 0 { - return 0, "", err + return 0, 0, "", err } om.releaseCoordinator(broker) return om.fetchInitialOffset(topic, partition, retries-1) @@ -163,31 +169,31 @@ func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retri block := resp.GetBlock(topic, partition) if block == nil { - return 0, "", ErrIncompleteResponse + return 0, 0, "", ErrIncompleteResponse } switch block.Err { case ErrNoError: - return block.Offset, block.Metadata, nil + return block.Offset, block.LeaderEpoch, block.Metadata, nil case ErrNotCoordinatorForConsumer: if retries <= 0 { - return 0, "", block.Err + return 0, 0, "", block.Err } om.releaseCoordinator(broker) return om.fetchInitialOffset(topic, partition, retries-1) case ErrOffsetsLoadInProgress: if retries <= 0 { - return 0, "", block.Err + return 0, 0, "", block.Err } backoff := om.computeBackoff(retries) select { case <-om.closing: - return 0, "", block.Err + return 0, 0, "", block.Err case <-time.After(backoff): } return om.fetchInitialOffset(topic, partition, retries-1) default: - return 0, "", block.Err + return 0, 0, "", block.Err } } @@ -298,12 +304,17 @@ func (om *offsetManager) constructRequest() *OffsetCommitRequest { for _, pom := range topicManagers { pom.lock.Lock() if pom.dirty { - r.AddBlock(pom.topic, 
pom.partition, pom.offset, perPartitionTimestamp, pom.metadata) + r.AddBlock(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, perPartitionTimestamp, pom.metadata) } pom.lock.Unlock() } } + if om.groupInstanceId != nil { + r.Version = 7 + r.GroupInstanceId = om.groupInstanceId + } + if len(r.blocks) > 0 { return r } @@ -346,6 +357,10 @@ func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest pom.handleError(err) case ErrOffsetsLoadInProgress: // nothing wrong but we didn't commit, we'll get it next time round + case ErrFencedInstancedId: + pom.handleError(err) + // TODO close the whole consumer for instacne fenced.... + om.tryCancelSession() case ErrUnknownTopicOrPartition: // let the user know *and* try redispatching - if topic-auto-create is // enabled, redispatching should trigger a metadata req and create the @@ -420,6 +435,12 @@ func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffset return nil } +func (om *offsetManager) tryCancelSession() { + if om.sessionCanceler != nil { + om.sessionCanceler() + } +} + // Partition Offset Manager // PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close() @@ -476,9 +497,10 @@ type PartitionOffsetManager interface { } type partitionOffsetManager struct { - parent *offsetManager - topic string - partition int32 + parent *offsetManager + topic string + partition int32 + leaderEpoch int32 lock sync.Mutex offset int64 @@ -491,18 +513,19 @@ type partitionOffsetManager struct { } func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) { - offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max) + offset, leaderEpoch, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max) if err != nil { return nil, err } return &partitionOffsetManager{ - parent: om, - topic: topic, - partition: partition, - errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), - offset: offset, - metadata: metadata, + parent: om, + topic: topic, + partition: partition, + leaderEpoch: leaderEpoch, + errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), + offset: offset, + metadata: metadata, }, nil } diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go index 69349efe2ba0..ffe84664c59e 100644 --- a/vendor/github.com/Shopify/sarama/offset_response.go +++ b/vendor/github.com/Shopify/sarama/offset_response.go @@ -125,7 +125,6 @@ func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponse 105 99 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 1 1] - */ func (r *OffsetResponse) encode(pe packetEncoder) (err error) { if r.Version >= 2 { diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go index 08b433223409..b8cae5350a93 100644 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ b/vendor/github.com/Shopify/sarama/packet_decoder.go @@ -1,5 +1,7 @@ package sarama +import "github.com/rcrowley/go-metrics" + // PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. // Types implementing Decoder only need to worry about calling methods like GetString, // not about how a string is represented in Kafka. 
@@ -40,6 +42,9 @@ type packetDecoder interface { // Stacks, see PushDecoder push(in pushDecoder) error pop() error + + // To record metrics when provided + metricRegistry() metrics.Registry } // PushDecoder is the interface for decoding fields like CRCs and lengths where the validity diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go index 9c70f8180069..8d6980479ebe 100644 --- a/vendor/github.com/Shopify/sarama/produce_set.go +++ b/vendor/github.com/Shopify/sarama/produce_set.go @@ -137,6 +137,9 @@ func (ps *produceSet) buildRequest() *ProduceRequest { } if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { req.Version = 3 + if ps.parent.IsTransactional() { + req.TransactionalID = &ps.parent.conf.Producer.Transaction.ID + } } if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) { @@ -160,6 +163,10 @@ func (ps *produceSet) buildRequest() *ProduceRequest { record.OffsetDelta = int64(i) } } + + // Set the batch as transactional when a transactionalID is set + rb.IsTransactional = ps.parent.IsTransactional() + req.AddBatch(topic, partition, rb) continue } @@ -181,7 +188,7 @@ func (ps *produceSet) buildRequest() *ProduceRequest { msg.Offset = int64(i) } } - payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry) + payload, err := encode(set.recordsToSend.MsgSet, ps.parent.metricsRegistry) if err != nil { Logger.Println(err) // if this happens, it's basically our fault. panic(err) @@ -235,11 +242,11 @@ func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { switch { // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. - case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)): + case ps.bufferBytes+msg.ByteSize(version) >= int(MaxRequestSize-(10*1024)): return true // Would we overflow the size-limit of a message-batch for this partition? case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && - ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: + ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.ByteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: return true // Would we overflow simply in number of messages? 
case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go index bab6cb486f27..7e37641f9d47 100644 --- a/vendor/github.com/Shopify/sarama/real_decoder.go +++ b/vendor/github.com/Shopify/sarama/real_decoder.go @@ -3,22 +3,24 @@ package sarama import ( "encoding/binary" "math" + + "github.com/rcrowley/go-metrics" ) var ( - errInvalidArrayLength = PacketDecodingError{"invalid array length"} - errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} - errInvalidStringLength = PacketDecodingError{"invalid string length"} - errVarintOverflow = PacketDecodingError{"varint overflow"} - errUVarintOverflow = PacketDecodingError{"uvarint overflow"} - errInvalidBool = PacketDecodingError{"invalid bool"} - errUnsupportedTaggedFields = PacketDecodingError{"non-empty tagged fields are not supported yet"} + errInvalidArrayLength = PacketDecodingError{"invalid array length"} + errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} + errInvalidStringLength = PacketDecodingError{"invalid string length"} + errVarintOverflow = PacketDecodingError{"varint overflow"} + errUVarintOverflow = PacketDecodingError{"uvarint overflow"} + errInvalidBool = PacketDecodingError{"invalid bool"} ) type realDecoder struct { - raw []byte - off int - stack []pushDecoder + raw []byte + off int + stack []pushDecoder + registry metrics.Registry } // primitives @@ -149,8 +151,21 @@ func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) { return 0, err } - if tagCount != 0 { - return 0, errUnsupportedTaggedFields + // skip over any tagged fields without deserializing them + // as we don't currently support doing anything with them + for i := uint64(0); i < tagCount; i++ { + // fetch and ignore tag identifier + _, err := rd.getUVarint() + if err != nil { + return 0, err + } + length, err := rd.getUVarint() + if err != nil { + return 0, err + } + if _, err := rd.getRawBytes(int(length)); err != nil { + return 0, err + } } return 0, nil @@ -447,3 +462,7 @@ func (rd *realDecoder) pop() error { return in.check(rd.off, rd.raw) } + +func (rd *realDecoder) metricRegistry() metrics.Registry { + return rd.registry +} diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go index c653763eca83..d382ca488721 100644 --- a/vendor/github.com/Shopify/sarama/record_batch.go +++ b/vendor/github.com/Shopify/sarama/record_batch.go @@ -1,6 +1,7 @@ package sarama import ( + "errors" "fmt" "time" ) @@ -167,7 +168,7 @@ func (b *RecordBatch) decode(pd packetDecoder) (err error) { bufSize := int(batchLen) - recordBatchOverhead recBuffer, err := pd.getRawBytes(bufSize) if err != nil { - if err == ErrInsufficientData { + if errors.Is(err, ErrInsufficientData) { b.PartialTrailingRecord = true b.Records = nil return nil @@ -185,8 +186,8 @@ func (b *RecordBatch) decode(pd packetDecoder) (err error) { } b.recordsLen = len(recBuffer) - err = decode(recBuffer, recordsArray(b.Records)) - if err == ErrInsufficientData { + err = decode(recBuffer, recordsArray(b.Records), nil) + if errors.Is(err, ErrInsufficientData) { b.PartialTrailingRecord = true b.Records = nil return nil diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go index f4c5e95f1de3..d0c45d7f3641 100644 --- a/vendor/github.com/Shopify/sarama/records.go +++ 
b/vendor/github.com/Shopify/sarama/records.go @@ -183,12 +183,27 @@ func (r *Records) isOverflow() (bool, error) { return false, fmt.Errorf("unknown records type: %v", r.recordsType) } +func (r *Records) recordsOffset() (*int64, error) { + switch r.recordsType { + case unknownRecords: + return nil, nil + case legacyRecords: + return nil, nil + case defaultRecords: + if r.RecordBatch == nil { + return nil, nil + } + return &r.RecordBatch.FirstOffset, nil + } + return nil, fmt.Errorf("unknown records type: %v", r.recordsType) +} + func magicValue(pd packetDecoder) (int8, error) { return pd.peekInt8(magicOffset) } func (r *Records) getControlRecord() (ControlRecord, error) { - if r.RecordBatch == nil || len(r.RecordBatch.Records) <= 0 { + if r.RecordBatch == nil || len(r.RecordBatch.Records) == 0 { return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty") } diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go index ce90eb8c46e2..5fec9776f275 100644 --- a/vendor/github.com/Shopify/sarama/request.go +++ b/vendor/github.com/Shopify/sarama/request.go @@ -109,7 +109,7 @@ func decodeRequest(r io.Reader) (*request, int, error) { bytesRead += len(encodedReq) req := &request{} - if err := decode(encodedReq, req); err != nil { + if err := decode(encodedReq, req, nil); err != nil { return nil, bytesRead, err } @@ -155,7 +155,7 @@ func allocateBody(key, version int16) protocolBody { case 21: return &DeleteRecordsRequest{} case 22: - return &InitProducerIDRequest{} + return &InitProducerIDRequest{Version: version} case 24: return &AddPartitionsToTxnRequest{} case 25: diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go index fbcef0bfbea3..4ced93c13567 100644 --- a/vendor/github.com/Shopify/sarama/response_header.go +++ b/vendor/github.com/Shopify/sarama/response_header.go @@ -2,11 +2,6 @@ package sarama import "fmt" -const ( - responseLengthSize = 4 - correlationIDSize = 4 -) - type responseHeader struct { length int32 correlationID int32 diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go index f6e55a395da5..cb773f171f72 100644 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/Shopify/sarama/sarama.go @@ -68,12 +68,15 @@ Consumer related metrics: | Name | Type | Description | +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ | consumer-batch-size | histogram | Distribution of the number of messages in a batch | + | consumer-fetch-rate | meter | Fetch requests/second sent to all brokers | + | consumer-fetch-rate-for-broker- | meter | Fetch requests/second sent to a given broker | + | consumer-fetch-rate-for-topic- | meter | Fetch requests/second sent for a given topic | + | consumer-fetch-response-size | histogram | Distribution of the fetch response size in bytes | | consumer-group-join-total- | counter | Total count of consumer group join attempts | | consumer-group-join-failed- | counter | Total count of consumer group join failures | | consumer-group-sync-total- | counter | Total count of consumer group sync attempts | | consumer-group-sync-failed- | counter | Total count of consumer group sync failures | +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ - */ package sarama @@ -116,13 +119,13 @@ type 
StdLogger interface { type debugLogger struct{} func (d *debugLogger) Print(v ...interface{}) { - Logger.Print(v) + Logger.Print(v...) } func (d *debugLogger) Printf(format string, v ...interface{}) { - Logger.Printf(format, v) + Logger.Printf(format, v...) } func (d *debugLogger) Println(v ...interface{}) { - Logger.Println(v) + Logger.Println(v...) } // DebugLogger is the instance of a StdLogger that Sarama writes more verbose diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go index 90504df6f522..5bb0988ea5f7 100644 --- a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go @@ -1,6 +1,8 @@ package sarama type SaslAuthenticateRequest struct { + // Version defines the protocol version to use for encode and decode + Version int16 SaslAuthBytes []byte } @@ -12,6 +14,7 @@ func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error { } func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version r.SaslAuthBytes, err = pd.getBytes() return err } @@ -21,7 +24,7 @@ func (r *SaslAuthenticateRequest) key() int16 { } func (r *SaslAuthenticateRequest) version() int16 { - return 0 + return r.Version } func (r *SaslAuthenticateRequest) headerVersion() int16 { @@ -29,5 +32,10 @@ func (r *SaslAuthenticateRequest) headerVersion() int16 { } func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion { - return V1_0_0_0 + switch r.Version { + case 1: + return V2_2_0_0 + default: + return V1_0_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go index 3ef57b5afad0..37c8e45dae1a 100644 --- a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go @@ -1,9 +1,12 @@ package sarama type SaslAuthenticateResponse struct { - Err KError - ErrorMessage *string - SaslAuthBytes []byte + // Version defines the protocol version to use for encode and decode + Version int16 + Err KError + ErrorMessage *string + SaslAuthBytes []byte + SessionLifetimeMs int64 } func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error { @@ -11,10 +14,17 @@ func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error { if err := pe.putNullableString(r.ErrorMessage); err != nil { return err } - return pe.putBytes(r.SaslAuthBytes) + if err := pe.putBytes(r.SaslAuthBytes); err != nil { + return err + } + if r.Version > 0 { + pe.putInt64(r.SessionLifetimeMs) + } + return nil } func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error { + r.Version = version kerr, err := pd.getInt16() if err != nil { return err @@ -26,7 +36,13 @@ func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error return err } - r.SaslAuthBytes, err = pd.getBytes() + if r.SaslAuthBytes, err = pd.getBytes(); err != nil { + return err + } + + if version > 0 { + r.SessionLifetimeMs, err = pd.getInt64() + } return err } @@ -36,7 +52,7 @@ func (r *SaslAuthenticateResponse) key() int16 { } func (r *SaslAuthenticateResponse) version() int16 { - return 0 + return r.Version } func (r *SaslAuthenticateResponse) headerVersion() int16 { @@ -44,5 +60,10 @@ func (r *SaslAuthenticateResponse) headerVersion() int16 { } func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion { - return V1_0_0_0 + switch r.Version { + case 1: + return 
V2_2_0_0 + default: + return V1_0_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go index ac6ecb13e045..33ed3baccbf3 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_request.go +++ b/vendor/github.com/Shopify/sarama/sync_group_request.go @@ -1,31 +1,73 @@ package sarama -type SyncGroupRequest struct { - GroupId string - GenerationId int32 - MemberId string - GroupAssignments map[string][]byte +type SyncGroupRequestAssignment struct { + // MemberId contains the ID of the member to assign. + MemberId string + // Assignment contains the member assignment. + Assignment []byte } -func (r *SyncGroupRequest) encode(pe packetEncoder) error { - if err := pe.putString(r.GroupId); err != nil { +func (a *SyncGroupRequestAssignment) encode(pe packetEncoder, version int16) (err error) { + if err := pe.putString(a.MemberId); err != nil { return err } - pe.putInt32(r.GenerationId) + if err := pe.putBytes(a.Assignment); err != nil { + return err + } + + return nil +} + +func (a *SyncGroupRequestAssignment) decode(pd packetDecoder, version int16) (err error) { + if a.MemberId, err = pd.getString(); err != nil { + return err + } - if err := pe.putString(r.MemberId); err != nil { + if a.Assignment, err = pd.getBytes(); err != nil { return err } - if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil { + return nil +} + +type SyncGroupRequest struct { + // Version defines the protocol version to use for encode and decode + Version int16 + // GroupId contains the unique group identifier. + GroupId string + // GenerationId contains the generation of the group. + GenerationId int32 + // MemberId contains the member ID assigned by the group. + MemberId string + // GroupInstanceId contains the unique identifier of the consumer instance provided by end user. + GroupInstanceId *string + // GroupAssignments contains each assignment. 
+ GroupAssignments []SyncGroupRequestAssignment +} + +func (s *SyncGroupRequest) encode(pe packetEncoder) (err error) { + if err := pe.putString(s.GroupId); err != nil { + return err + } + + pe.putInt32(s.GenerationId) + + if err := pe.putString(s.MemberId); err != nil { return err } - for memberId, memberAssignment := range r.GroupAssignments { - if err := pe.putString(memberId); err != nil { + + if s.Version >= 3 { + if err := pe.putNullableString(s.GroupInstanceId); err != nil { return err } - if err := pe.putBytes(memberAssignment); err != nil { + } + + if err := pe.putArrayLength(len(s.GroupAssignments)); err != nil { + return err + } + for _, block := range s.GroupAssignments { + if err := block.encode(pe, s.Version); err != nil { return err } } @@ -33,37 +75,37 @@ func (r *SyncGroupRequest) encode(pe packetEncoder) error { return nil } -func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { - if r.GroupId, err = pd.getString(); err != nil { - return - } - if r.GenerationId, err = pd.getInt32(); err != nil { - return - } - if r.MemberId, err = pd.getString(); err != nil { - return +func (s *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { + s.Version = version + if s.GroupId, err = pd.getString(); err != nil { + return err } - n, err := pd.getArrayLength() - if err != nil { + if s.GenerationId, err = pd.getInt32(); err != nil { return err } - if n == 0 { - return nil + + if s.MemberId, err = pd.getString(); err != nil { + return err } - r.GroupAssignments = make(map[string][]byte) - for i := 0; i < n; i++ { - memberId, err := pd.getString() - if err != nil { - return err - } - memberAssignment, err := pd.getBytes() - if err != nil { + if s.Version >= 3 { + if s.GroupInstanceId, err = pd.getNullableString(); err != nil { return err } + } - r.GroupAssignments[memberId] = memberAssignment + if numAssignments, err := pd.getArrayLength(); err != nil { + return err + } else if numAssignments > 0 { + s.GroupAssignments = make([]SyncGroupRequestAssignment, numAssignments) + for i := 0; i < numAssignments; i++ { + var block SyncGroupRequestAssignment + if err := block.decode(pd, s.Version); err != nil { + return err + } + s.GroupAssignments[i] = block + } } return nil @@ -74,7 +116,7 @@ func (r *SyncGroupRequest) key() int16 { } func (r *SyncGroupRequest) version() int16 { - return 0 + return r.Version } func (r *SyncGroupRequest) headerVersion() int16 { @@ -82,18 +124,24 @@ func (r *SyncGroupRequest) headerVersion() int16 { } func (r *SyncGroupRequest) requiredVersion() KafkaVersion { + switch { + case r.Version >= 3: + return V2_3_0_0 + } return V0_9_0_0 } func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { - if r.GroupAssignments == nil { - r.GroupAssignments = make(map[string][]byte) - } - - r.GroupAssignments[memberId] = memberAssignment + r.GroupAssignments = append(r.GroupAssignments, SyncGroupRequestAssignment{ + MemberId: memberId, + Assignment: memberAssignment, + }) } -func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error { +func (r *SyncGroupRequest) AddGroupAssignmentMember( + memberId string, + memberAssignment *ConsumerGroupMemberAssignment, +) error { bin, err := encode(memberAssignment, nil) if err != nil { return err diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go index af019c42f97b..41b63b3d0362 100644 --- 
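The hunk above replaces SyncGroupRequest's `GroupAssignments map[string][]byte` with a `[]SyncGroupRequestAssignment` slice and keeps `AddGroupAssignment` as the helper that appends to it. A minimal sketch of how calling code populates the new shape (the group, member and assignment values are placeholders, not taken from this patch):

```go
package example

import "github.com/Shopify/sarama"

func buildSyncGroupRequest() *sarama.SyncGroupRequest {
	req := &sarama.SyncGroupRequest{
		Version:      3, // v3 adds GroupInstanceId and, per requiredVersion above, needs Kafka >= 2.3
		GroupId:      "example-group",
		GenerationId: 1,
		MemberId:     "member-1",
	}
	// Previously: req.GroupAssignments["member-1"] = assignmentBytes
	// Now the helper appends a SyncGroupRequestAssignment for us.
	req.AddGroupAssignment("member-1", []byte("raw-assignment-bytes"))
	return req
}
```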
a/vendor/github.com/Shopify/sarama/sync_group_response.go +++ b/vendor/github.com/Shopify/sarama/sync_group_response.go @@ -1,22 +1,39 @@ package sarama type SyncGroupResponse struct { - Err KError + // Version defines the protocol version to use for encode and decode + Version int16 + // ThrottleTimeMs contains the duration in milliseconds for which the + // request was throttled due to a quota violation, or zero if the request + // did not violate any quota. + ThrottleTime int32 + // Err contains the error code, or 0 if there was no error. + Err KError + // MemberAssignment contains the member assignment. MemberAssignment []byte } func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { assignment := new(ConsumerGroupMemberAssignment) - err := decode(r.MemberAssignment, assignment) + err := decode(r.MemberAssignment, assignment, nil) return assignment, err } func (r *SyncGroupResponse) encode(pe packetEncoder) error { + if r.Version >= 1 { + pe.putInt32(r.ThrottleTime) + } pe.putInt16(int16(r.Err)) return pe.putBytes(r.MemberAssignment) } func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.Version >= 1 { + if r.ThrottleTime, err = pd.getInt32(); err != nil { + return err + } + } kerr, err := pd.getInt16() if err != nil { return err @@ -33,7 +50,7 @@ func (r *SyncGroupResponse) key() int16 { } func (r *SyncGroupResponse) version() int16 { - return 0 + return r.Version } func (r *SyncGroupResponse) headerVersion() int16 { @@ -41,5 +58,9 @@ func (r *SyncGroupResponse) headerVersion() int16 { } func (r *SyncGroupResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1, 2, 3: + return V2_3_0_0 + } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go index eedece6b4a97..8765ac3368ef 100644 --- a/vendor/github.com/Shopify/sarama/sync_producer.go +++ b/vendor/github.com/Shopify/sarama/sync_producer.go @@ -29,6 +29,27 @@ type SyncProducer interface { // object passes out of scope, as it may otherwise leak memory. // You must call this before calling Close on the underlying client. Close() error + + // TxnStatus return current producer transaction status. + TxnStatus() ProducerTxnStatusFlag + + // IsTransactional return true when current producer is is transactional. + IsTransactional() bool + + // BeginTxn mark current transaction as ready. + BeginTxn() error + + // CommitTxn commit current transaction. + CommitTxn() error + + // AbortTxn abort current transaction. + AbortTxn() error + + // AddOffsetsToTxn add associated offsets to current transaction. + AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error + + // AddMessageToTxn add message offsets to current transaction. 
+ AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error } type syncProducer struct { @@ -146,3 +167,31 @@ func (sp *syncProducer) Close() error { sp.wg.Wait() return nil } + +func (sp *syncProducer) IsTransactional() bool { + return sp.producer.IsTransactional() +} + +func (sp *syncProducer) BeginTxn() error { + return sp.producer.BeginTxn() +} + +func (sp *syncProducer) CommitTxn() error { + return sp.producer.CommitTxn() +} + +func (sp *syncProducer) AbortTxn() error { + return sp.producer.AbortTxn() +} + +func (sp *syncProducer) AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error { + return sp.producer.AddOffsetsToTxn(offsets, groupId) +} + +func (sp *syncProducer) AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error { + return sp.producer.AddMessageToTxn(msg, groupId, metadata) +} + +func (p *syncProducer) TxnStatus() ProducerTxnStatusFlag { + return p.producer.TxnStatus() +} diff --git a/vendor/github.com/Shopify/sarama/transaction_manager.go b/vendor/github.com/Shopify/sarama/transaction_manager.go new file mode 100644 index 000000000000..e18abecd385e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/transaction_manager.go @@ -0,0 +1,887 @@ +package sarama + +import ( + "errors" + "fmt" + "strings" + "sync" + "time" +) + +// ProducerTxnStatusFlag mark current transaction status. +type ProducerTxnStatusFlag int16 + +const ( + // ProducerTxnFlagUninitialized when txnmgr is created + ProducerTxnFlagUninitialized ProducerTxnStatusFlag = 1 << iota + // ProducerTxnFlagInitializing when txnmgr is initilizing + ProducerTxnFlagInitializing + // ProducerTxnFlagReady when is ready to receive transaction + ProducerTxnFlagReady + // ProducerTxnFlagInTransaction when transaction is started + ProducerTxnFlagInTransaction + // ProducerTxnFlagEndTransaction when transaction will be committed + ProducerTxnFlagEndTransaction + // ProducerTxnFlagInError whan having abortable or fatal error + ProducerTxnFlagInError + // ProducerTxnFlagCommittingTransaction when committing txn + ProducerTxnFlagCommittingTransaction + // ProducerTxnFlagAbortingTransaction when committing txn + ProducerTxnFlagAbortingTransaction + // ProducerTxnFlagAbortableError when producer encounter an abortable error + // Must call AbortTxn in this case. + ProducerTxnFlagAbortableError + // ProducerTxnFlagFatalError when producer encounter an fatal error + // Must Close an recreate it. 
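The SyncProducer methods added above (TxnStatus, IsTransactional, BeginTxn, CommitTxn, AbortTxn, AddOffsetsToTxn, AddMessageToTxn) delegate to the async producer, which in turn drives the transactionManager introduced in this file. A minimal, hedged sketch of a transactional produce; the broker address, topic and transactional ID are placeholders, and the idempotence/acks settings follow the requirements implied by the transaction manager code rather than an official example:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_5_0_0 // allows epoch bumping on recoverable errors
	cfg.Producer.Return.Successes = true
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Idempotent = true
	cfg.Net.MaxOpenRequests = 1
	cfg.Producer.Transaction.ID = "example-txn" // placeholder transactional ID

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	if err := producer.BeginTxn(); err != nil {
		log.Fatal(err)
	}
	if _, _, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	}); err != nil {
		// On failure the transaction must be aborted before the producer is reused.
		_ = producer.AbortTxn()
		log.Fatal(err)
	}
	if err := producer.CommitTxn(); err != nil {
		log.Fatal(err)
	}
}
```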
+ ProducerTxnFlagFatalError +) + +func (s ProducerTxnStatusFlag) String() string { + status := make([]string, 0) + if s&ProducerTxnFlagUninitialized != 0 { + status = append(status, "ProducerTxnStateUninitialized") + } + if s&ProducerTxnFlagInitializing != 0 { + status = append(status, "ProducerTxnStateInitializing") + } + if s&ProducerTxnFlagReady != 0 { + status = append(status, "ProducerTxnStateReady") + } + if s&ProducerTxnFlagInTransaction != 0 { + status = append(status, "ProducerTxnStateInTransaction") + } + if s&ProducerTxnFlagEndTransaction != 0 { + status = append(status, "ProducerTxnStateEndTransaction") + } + if s&ProducerTxnFlagInError != 0 { + status = append(status, "ProducerTxnStateInError") + } + if s&ProducerTxnFlagCommittingTransaction != 0 { + status = append(status, "ProducerTxnStateCommittingTransaction") + } + if s&ProducerTxnFlagAbortingTransaction != 0 { + status = append(status, "ProducerTxnStateAbortingTransaction") + } + if s&ProducerTxnFlagAbortableError != 0 { + status = append(status, "ProducerTxnStateAbortableError") + } + if s&ProducerTxnFlagFatalError != 0 { + status = append(status, "ProducerTxnStateFatalError") + } + return strings.Join(status, "|") +} + +// transactionManager keeps the state necessary to ensure idempotent production +type transactionManager struct { + producerID int64 + producerEpoch int16 + sequenceNumbers map[string]int32 + mutex sync.Mutex + transactionalID string + transactionTimeout time.Duration + client Client + + // when kafka cluster is at least 2.5.0. + // used to recover when producer failed. + coordinatorSupportsBumpingEpoch bool + + // When producer need to bump it's epoch. + epochBumpRequired bool + // Record last seen error. + lastError error + + // Ensure that status is never accessed with a race-condition. + statusLock sync.RWMutex + status ProducerTxnStatusFlag + + // Ensure that only one goroutine will update partitions in current transaction. + partitionInTxnLock sync.Mutex + pendingPartitionsInCurrentTxn topicPartitionSet + partitionsInCurrentTxn topicPartitionSet + + // Offsets to add to transaction. + offsetsInCurrentTxn map[string]topicPartitionOffsets +} + +const ( + noProducerID = -1 + noProducerEpoch = -1 + + // see publishTxnPartitions comment. + addPartitionsRetryBackoff = 20 * time.Millisecond +) + +// txnmngr allowed transitions. 
+var producerTxnTransitions = map[ProducerTxnStatusFlag][]ProducerTxnStatusFlag{ + ProducerTxnFlagUninitialized: { + ProducerTxnFlagReady, + ProducerTxnFlagInError, + }, + // When we need are initilizing + ProducerTxnFlagInitializing: { + ProducerTxnFlagInitializing, + ProducerTxnFlagReady, + ProducerTxnFlagInError, + }, + // When we have initilized transactional producer + ProducerTxnFlagReady: { + ProducerTxnFlagInTransaction, + }, + // When beginTxn has been called + ProducerTxnFlagInTransaction: { + // When calling commit or abort + ProducerTxnFlagEndTransaction, + // When got an error + ProducerTxnFlagInError, + }, + ProducerTxnFlagEndTransaction: { + // When epoch bump + ProducerTxnFlagInitializing, + // When commit is good + ProducerTxnFlagReady, + // When got an error + ProducerTxnFlagInError, + }, + // Need to abort transaction + ProducerTxnFlagAbortableError: { + // Call AbortTxn + ProducerTxnFlagAbortingTransaction, + // When got an error + ProducerTxnFlagInError, + }, + // Need to close producer + ProducerTxnFlagFatalError: { + ProducerTxnFlagFatalError, + }, +} + +type topicPartition struct { + topic string + partition int32 +} + +// to ensure that we don't do a full scan every time a partition or an offset is added. +type topicPartitionSet map[topicPartition]struct{} +type topicPartitionOffsets map[topicPartition]*PartitionOffsetMetadata + +func (s topicPartitionSet) mapToRequest() map[string][]int32 { + result := make(map[string][]int32, len(s)) + for tp := range s { + result[tp.topic] = append(result[tp.topic], tp.partition) + } + return result +} + +func (s topicPartitionOffsets) mapToRequest() map[string][]*PartitionOffsetMetadata { + result := make(map[string][]*PartitionOffsetMetadata, len(s)) + for tp, offset := range s { + result[tp.topic] = append(result[tp.topic], offset) + } + return result +} + +// Return true if current transition is allowed. +func (t *transactionManager) isTransitionValid(target ProducerTxnStatusFlag) bool { + for status, allowedTransitions := range producerTxnTransitions { + if status&t.status != 0 { + for _, allowedTransition := range allowedTransitions { + if allowedTransition&target != 0 { + return true + } + } + } + } + return false +} + +// Get current transaction status. +func (t *transactionManager) currentTxnStatus() ProducerTxnStatusFlag { + t.statusLock.RLock() + defer t.statusLock.RUnlock() + + return t.status +} + +// Try to transition to a valid status and return an error otherwise. 
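Because the ProducerTxnStatusFlag values above are bit flags (1 << iota), an error state is stored as a combination such as ProducerTxnFlagInError|ProducerTxnFlagAbortableError, and both isTransitionValid and application code test individual bits with &. A small standalone illustration, not part of the vendored file:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	status := sarama.ProducerTxnFlagInError | sarama.ProducerTxnFlagAbortableError

	if status&sarama.ProducerTxnFlagAbortableError != 0 {
		// Abortable: the application is expected to call AbortTxn and retry.
		fmt.Println("abortable:", status)
	}
	if status&sarama.ProducerTxnFlagFatalError != 0 {
		// Fatal: the producer has to be closed and recreated.
		fmt.Println("fatal:", status)
	}
	// String() joins the names of every set bit, e.g.
	// "ProducerTxnStateInError|ProducerTxnStateAbortableError".
}
```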
+func (t *transactionManager) transitionTo(target ProducerTxnStatusFlag, err error) error { + t.statusLock.Lock() + defer t.statusLock.Unlock() + + if !t.isTransitionValid(target) { + return ErrTransitionNotAllowed + } + + if target&ProducerTxnFlagInError != 0 { + if err == nil { + return ErrCannotTransitionNilError + } + t.lastError = err + } else { + t.lastError = nil + } + + DebugLogger.Printf("txnmgr/transition [%s] transition from %s to %s\n", t.transactionalID, t.status, target) + + t.status = target + return err +} + +func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) { + key := fmt.Sprintf("%s-%d", topic, partition) + t.mutex.Lock() + defer t.mutex.Unlock() + sequence := t.sequenceNumbers[key] + t.sequenceNumbers[key] = sequence + 1 + return sequence, t.producerEpoch +} + +func (t *transactionManager) bumpEpoch() { + t.mutex.Lock() + defer t.mutex.Unlock() + t.producerEpoch++ + for k := range t.sequenceNumbers { + t.sequenceNumbers[k] = 0 + } +} + +func (t *transactionManager) getProducerID() (int64, int16) { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.producerID, t.producerEpoch +} + +// Compute retry backoff considered current attempts. +func (t *transactionManager) computeBackoff(attemptsRemaining int) time.Duration { + if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil { + maxRetries := t.client.Config().Producer.Transaction.Retry.Max + retries := maxRetries - attemptsRemaining + return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries) + } + return t.client.Config().Producer.Transaction.Retry.Backoff +} + +// return true is txnmngr is transactinal. +func (t *transactionManager) isTransactional() bool { + return t.transactionalID != "" +} + +// add specified offsets to current transaction. +func (t *transactionManager) addOffsetsToTxn(offsetsToAdd map[string][]*PartitionOffsetMetadata, groupId string) error { + t.mutex.Lock() + defer t.mutex.Unlock() + + if t.currentTxnStatus()&ProducerTxnFlagInTransaction == 0 { + return ErrTransactionNotReady + } + + if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 { + return t.lastError + } + + if _, ok := t.offsetsInCurrentTxn[groupId]; !ok { + t.offsetsInCurrentTxn[groupId] = topicPartitionOffsets{} + } + + for topic, offsets := range offsetsToAdd { + for _, offset := range offsets { + tp := topicPartition{topic: topic, partition: offset.Partition} + t.offsetsInCurrentTxn[groupId][tp] = offset + } + } + return nil +} + +// send txnmgnr save offsets to transaction coordinator. +func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, groupId string) (topicPartitionOffsets, error) { + // First AddOffsetsToTxn + attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max + exec := func(run func() (bool, error), err error) error { + for attemptsRemaining >= 0 { + var retry bool + retry, err = run() + if !retry { + return err + } + backoff := t.computeBackoff(attemptsRemaining) + Logger.Printf("txnmgr/add-offset-to-txn [%s] retrying after %dms... 
(%d attempts remaining) (%s)\n", + t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err) + time.Sleep(backoff) + attemptsRemaining-- + } + return err + } + lastError := exec(func() (bool, error) { + coordinator, err := t.client.TransactionCoordinator(t.transactionalID) + if err != nil { + return true, err + } + response, err := coordinator.AddOffsetsToTxn(&AddOffsetsToTxnRequest{ + TransactionalID: t.transactionalID, + ProducerEpoch: t.producerEpoch, + ProducerID: t.producerID, + GroupID: groupId, + }) + if err != nil { + // If an error occurred try to refresh current transaction coordinator. + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + return true, err + } + if response == nil { + // If no response is returned just retry. + return true, ErrTxnUnableToParseResponse + } + if response.Err == ErrNoError { + DebugLogger.Printf("txnmgr/add-offset-to-txn [%s] successful add-offset-to-txn with group %s %+v\n", + t.transactionalID, groupId, response) + // If no error, just exit. + return false, nil + } + switch response.Err { + case ErrConsumerCoordinatorNotAvailable: + fallthrough + case ErrNotCoordinatorForConsumer: + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + fallthrough + case ErrOffsetsLoadInProgress: + fallthrough + case ErrConcurrentTransactions: + // Retry + case ErrUnknownProducerID: + fallthrough + case ErrInvalidProducerIDMapping: + return false, t.abortableErrorIfPossible(response.Err) + case ErrGroupAuthorizationFailed: + return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err) + default: + // Others are fatal + return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err) + } + return true, response.Err + }, nil) + + if lastError != nil { + return offsets, lastError + } + + resultOffsets := offsets + // Then TxnOffsetCommit + // note the result is not completed until the TxnOffsetCommit returns + attemptsRemaining = t.client.Config().Producer.Transaction.Retry.Max + execTxnOffsetCommit := func(run func() (topicPartitionOffsets, bool, error), err error) (topicPartitionOffsets, error) { + var r topicPartitionOffsets + for attemptsRemaining >= 0 { + var retry bool + r, retry, err = run() + if !retry { + return r, err + } + backoff := t.computeBackoff(attemptsRemaining) + Logger.Printf("txnmgr/txn-offset-commit [%s] retrying after %dms... 
(%d attempts remaining) (%s)\n", + t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err) + time.Sleep(backoff) + attemptsRemaining-- + } + return r, err + } + return execTxnOffsetCommit(func() (topicPartitionOffsets, bool, error) { + consumerGroupCoordinator, err := t.client.Coordinator(groupId) + if err != nil { + return resultOffsets, true, err + } + responses, err := consumerGroupCoordinator.TxnOffsetCommit(&TxnOffsetCommitRequest{ + TransactionalID: t.transactionalID, + ProducerEpoch: t.producerEpoch, + ProducerID: t.producerID, + GroupID: groupId, + Topics: offsets.mapToRequest(), + }) + if err != nil { + _ = consumerGroupCoordinator.Close() + _ = t.client.RefreshCoordinator(groupId) + return resultOffsets, true, err + } + + if responses == nil { + return resultOffsets, true, ErrTxnUnableToParseResponse + } + + var responseErrors []error + failedTxn := topicPartitionOffsets{} + for topic, partitionErrors := range responses.Topics { + for _, partitionError := range partitionErrors { + switch partitionError.Err { + case ErrNoError: + continue + // If the topic is unknown or the coordinator is loading, retry with the current coordinator + case ErrRequestTimedOut: + fallthrough + case ErrConsumerCoordinatorNotAvailable: + fallthrough + case ErrNotCoordinatorForConsumer: + _ = consumerGroupCoordinator.Close() + _ = t.client.RefreshCoordinator(groupId) + fallthrough + case ErrUnknownTopicOrPartition: + fallthrough + case ErrOffsetsLoadInProgress: + // Do nothing just retry + case ErrIllegalGeneration: + fallthrough + case ErrUnknownMemberId: + fallthrough + case ErrFencedInstancedId: + fallthrough + case ErrGroupAuthorizationFailed: + return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, partitionError.Err) + default: + // Others are fatal + return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, partitionError.Err) + } + tp := topicPartition{topic: topic, partition: partitionError.Partition} + failedTxn[tp] = offsets[tp] + responseErrors = append(responseErrors, partitionError.Err) + } + } + + resultOffsets = failedTxn + + if len(resultOffsets) == 0 { + DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s %+v\n", + t.transactionalID, groupId) + return resultOffsets, false, nil + } + return resultOffsets, true, Wrap(ErrTxnOffsetCommit, responseErrors...) 
+ }, nil) +} + +func (t *transactionManager) initProducerId() (int64, int16, error) { + isEpochBump := false + + req := &InitProducerIDRequest{} + if t.isTransactional() { + req.TransactionalID = &t.transactionalID + req.TransactionTimeout = t.transactionTimeout + } + + if t.client.Config().Version.IsAtLeast(V2_5_0_0) { + req.Version = 3 + isEpochBump = t.producerID != noProducerID && t.producerEpoch != noProducerEpoch + t.coordinatorSupportsBumpingEpoch = true + req.ProducerID = t.producerID + req.ProducerEpoch = t.producerEpoch + } else if t.client.Config().Version.IsAtLeast(V2_4_0_0) { + req.Version = 2 + } + + if isEpochBump { + err := t.transitionTo(ProducerTxnFlagInitializing, nil) + if err != nil { + return -1, -1, err + } + DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId for the first time in order to acquire a producer ID\n", + t.transactionalID) + } else { + DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId with current producer ID %d and epoch %d in order to bump the epoch\n", + t.transactionalID, t.producerID, t.producerEpoch) + } + + attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max + exec := func(run func() (int64, int16, bool, error), err error) (int64, int16, error) { + pid := int64(-1) + pepoch := int16(-1) + for attemptsRemaining >= 0 { + var retry bool + pid, pepoch, retry, err = run() + if !retry { + return pid, pepoch, err + } + backoff := t.computeBackoff(attemptsRemaining) + Logger.Printf("txnmgr/init-producer-id [%s] retrying after %dms... (%d attempts remaining) (%s)\n", + t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err) + time.Sleep(backoff) + attemptsRemaining-- + } + return -1, -1, err + } + return exec(func() (int64, int16, bool, error) { + var err error + var coordinator *Broker + if t.isTransactional() { + coordinator, err = t.client.TransactionCoordinator(t.transactionalID) + } else { + coordinator = t.client.LeastLoadedBroker() + } + if err != nil { + return -1, -1, true, err + } + response, err := coordinator.InitProducerID(req) + if err != nil { + if t.isTransactional() { + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + } + return -1, -1, true, err + } + if response == nil { + return -1, -1, true, ErrTxnUnableToParseResponse + } + if response.Err == ErrNoError { + if isEpochBump { + t.sequenceNumbers = make(map[string]int32) + } + err := t.transitionTo(ProducerTxnFlagReady, nil) + if err != nil { + return -1, -1, true, err + } + DebugLogger.Printf("txnmgr/init-producer-id [%s] successful init producer id %+v\n", + t.transactionalID, response) + return response.ProducerID, response.ProducerEpoch, false, nil + } + switch response.Err { + case ErrConsumerCoordinatorNotAvailable: + fallthrough + case ErrNotCoordinatorForConsumer: + if t.isTransactional() { + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + } + // Fatal errors + default: + return -1, -1, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err) + } + return -1, -1, true, response.Err + }, nil) +} + +// if kafka cluster is at least 2.5.0 mark txnmngr to bump epoch else mark it as fatal. 
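The retry loops above (AddOffsetsToTxn, TxnOffsetCommit, InitProducerID and, further down, EndTxn and AddPartitionsToTxn) all take their delay from computeBackoff, which prefers Producer.Transaction.Retry.BackoffFunc when one is set. A hedged sketch of plugging in an exponential backoff; the field names follow the call sites visible in this file rather than separate documentation:

```go
package example

import (
	"time"

	"github.com/Shopify/sarama"
)

func transactionalRetryConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Producer.Transaction.Retry.Max = 5
	cfg.Producer.Transaction.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
		// 100ms, 200ms, 400ms, ...; the retry loop itself stops after Retry.Max attempts.
		return time.Duration(100<<retries) * time.Millisecond
	}
	return cfg
}
```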
+func (t *transactionManager) abortableErrorIfPossible(err error) error { + if t.coordinatorSupportsBumpingEpoch { + t.epochBumpRequired = true + return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, err) + } + return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, err) +} + +// End current transaction. +func (t *transactionManager) completeTransaction() error { + if t.epochBumpRequired { + err := t.transitionTo(ProducerTxnFlagInitializing, nil) + if err != nil { + return err + } + } else { + err := t.transitionTo(ProducerTxnFlagReady, nil) + if err != nil { + return err + } + } + + t.lastError = nil + t.epochBumpRequired = false + t.partitionsInCurrentTxn = topicPartitionSet{} + t.pendingPartitionsInCurrentTxn = topicPartitionSet{} + t.offsetsInCurrentTxn = map[string]topicPartitionOffsets{} + + return nil +} + +// send EndTxn request with commit flag. (true when committing false otherwise) +func (t *transactionManager) endTxn(commit bool) error { + attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max + exec := func(run func() (bool, error), err error) error { + for attemptsRemaining >= 0 { + var retry bool + retry, err = run() + if !retry { + return err + } + backoff := t.computeBackoff(attemptsRemaining) + Logger.Printf("txnmgr/endtxn [%s] retrying after %dms... (%d attempts remaining) (%s)\n", + t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err) + time.Sleep(backoff) + attemptsRemaining-- + } + return err + } + return exec(func() (bool, error) { + coordinator, err := t.client.TransactionCoordinator(t.transactionalID) + if err != nil { + return true, err + } + response, err := coordinator.EndTxn(&EndTxnRequest{ + TransactionalID: t.transactionalID, + ProducerEpoch: t.producerEpoch, + ProducerID: t.producerID, + TransactionResult: commit, + }) + if err != nil { + // Always retry on network error + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + return true, err + } + if response == nil { + return true, ErrTxnUnableToParseResponse + } + if response.Err == ErrNoError { + DebugLogger.Printf("txnmgr/endtxn [%s] successful to end txn %+v\n", + t.transactionalID, response) + return false, t.completeTransaction() + } + switch response.Err { + // Need to refresh coordinator + case ErrConsumerCoordinatorNotAvailable: + fallthrough + case ErrNotCoordinatorForConsumer: + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + fallthrough + case ErrOffsetsLoadInProgress: + fallthrough + case ErrConcurrentTransactions: + // Just retry + case ErrUnknownProducerID: + fallthrough + case ErrInvalidProducerIDMapping: + return false, t.abortableErrorIfPossible(response.Err) + // Fatal errors + default: + return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err) + } + return true, response.Err + }, nil) +} + +// We will try to publish associated offsets for each groups +// then send endtxn request to mark transaction as finished. +func (t *transactionManager) finishTransaction(commit bool) error { + t.mutex.Lock() + defer t.mutex.Unlock() + + // Ensure no error when committing or abording + if commit && t.currentTxnStatus()&ProducerTxnFlagInError != 0 { + return t.lastError + } else if !commit && t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 { + return t.lastError + } + + // if no records has been sent don't do anything. 
+ if len(t.partitionsInCurrentTxn) == 0 { + return t.completeTransaction() + } + + epochBump := t.epochBumpRequired + // If we're aborting the transaction, so there should be no need to add offsets. + if commit && len(t.offsetsInCurrentTxn) > 0 { + for group, offsets := range t.offsetsInCurrentTxn { + newOffsets, err := t.publishOffsetsToTxn(offsets, group) + if err != nil { + t.offsetsInCurrentTxn[group] = newOffsets + return err + } + delete(t.offsetsInCurrentTxn, group) + } + } + + if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 { + return t.lastError + } + + if !errors.Is(t.lastError, ErrInvalidProducerIDMapping) { + err := t.endTxn(commit) + if err != nil { + return err + } + if !epochBump { + return nil + } + } + // reset pid and epoch if needed. + return t.initializeTransactions() +} + +// called before sending any transactional record +// won't do anything if current topic-partition is already added to transaction. +func (t *transactionManager) maybeAddPartitionToCurrentTxn(topic string, partition int32) { + if t.currentTxnStatus()&ProducerTxnFlagInError != 0 { + return + } + + tp := topicPartition{topic: topic, partition: partition} + + t.partitionInTxnLock.Lock() + defer t.partitionInTxnLock.Unlock() + if _, ok := t.partitionsInCurrentTxn[tp]; ok { + // partition is already added + return + } + + t.pendingPartitionsInCurrentTxn[tp] = struct{}{} +} + +// Makes a request to kafka to add a list of partitions ot the current transaction. +func (t *transactionManager) publishTxnPartitions() error { + t.partitionInTxnLock.Lock() + defer t.partitionInTxnLock.Unlock() + + if t.currentTxnStatus()&ProducerTxnFlagInError != 0 { + return t.lastError + } + + if len(t.pendingPartitionsInCurrentTxn) == 0 { + return nil + } + + // Remove the partitions from the pending set regardless of the result. We use the presence + // of partitions in the pending set to know when it is not safe to send batches. However, if + // the partitions failed to be added and we enter an error state, we expect the batches to be + // aborted anyway. In this case, we must be able to continue sending the batches which are in + // retry for partitions that were successfully added. + removeAllPartitionsOnFatalOrAbortedError := func() { + t.pendingPartitionsInCurrentTxn = topicPartitionSet{} + } + + // We only want to reduce the backoff when retrying the first AddPartition which errored out due to a + // CONCURRENT_TRANSACTIONS error since this means that the previous transaction is still completing and + // we don't want to wait too long before trying to start the new one. + // + // This is only a temporary fix, the long term solution is being tracked in + // https://issues.apache.org/jira/browse/KAFKA-5482 + retryBackoff := t.client.Config().Producer.Transaction.Retry.Backoff + computeBackoff := func(attemptsRemaining int) time.Duration { + if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil { + maxRetries := t.client.Config().Producer.Transaction.Retry.Max + retries := maxRetries - attemptsRemaining + return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries) + } + return retryBackoff + } + attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max + + exec := func(run func() (bool, error), err error) error { + for attemptsRemaining >= 0 { + var retry bool + retry, err = run() + if !retry { + return err + } + backoff := computeBackoff(attemptsRemaining) + Logger.Printf("txnmgr/add-partition-to-txn retrying after %dms... 
(%d attempts remaining) (%s)\n", backoff/time.Millisecond, attemptsRemaining, err) + time.Sleep(backoff) + attemptsRemaining-- + } + return err + } + return exec(func() (bool, error) { + coordinator, err := t.client.TransactionCoordinator(t.transactionalID) + if err != nil { + return true, err + } + addPartResponse, err := coordinator.AddPartitionsToTxn(&AddPartitionsToTxnRequest{ + TransactionalID: t.transactionalID, + ProducerID: t.producerID, + ProducerEpoch: t.producerEpoch, + TopicPartitions: t.pendingPartitionsInCurrentTxn.mapToRequest(), + }) + + if err != nil { + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + return true, err + } + + if addPartResponse == nil { + return true, ErrTxnUnableToParseResponse + } + + // remove from the list partitions that have been successfully updated + var responseErrors []error + for topic, results := range addPartResponse.Errors { + for _, response := range results { + tp := topicPartition{topic: topic, partition: response.Partition} + switch response.Err { + case ErrNoError: + // Mark partition as added to transaction + t.partitionsInCurrentTxn[tp] = struct{}{} + delete(t.pendingPartitionsInCurrentTxn, tp) + continue + case ErrConsumerCoordinatorNotAvailable: + fallthrough + case ErrNotCoordinatorForConsumer: + _ = coordinator.Close() + _ = t.client.RefreshTransactionCoordinator(t.transactionalID) + fallthrough + case ErrUnknownTopicOrPartition: + fallthrough + case ErrOffsetsLoadInProgress: + // Retry topicPartition + case ErrConcurrentTransactions: + if len(t.partitionsInCurrentTxn) == 0 && retryBackoff > addPartitionsRetryBackoff { + retryBackoff = addPartitionsRetryBackoff + } + case ErrOperationNotAttempted: + fallthrough + case ErrTopicAuthorizationFailed: + removeAllPartitionsOnFatalOrAbortedError() + return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err) + case ErrUnknownProducerID: + fallthrough + case ErrInvalidProducerIDMapping: + removeAllPartitionsOnFatalOrAbortedError() + return false, t.abortableErrorIfPossible(response.Err) + // Fatal errors + default: + removeAllPartitionsOnFatalOrAbortedError() + return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err) + } + responseErrors = append(responseErrors, response.Err) + } + } + + // handle end + if len(t.pendingPartitionsInCurrentTxn) == 0 { + DebugLogger.Printf("txnmgr/add-partition-to-txn [%s] successful to add partitions txn %+v\n", + t.transactionalID, addPartResponse) + return false, nil + } + return true, Wrap(ErrAddPartitionsToTxn, responseErrors...) + }, nil) +} + +// Build a new transaction manager sharing producer client. 
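publishTxnPartitions and publishOffsetsToTxn above are the plumbing behind SyncProducer.AddMessageToTxn. The fragment below is a hedged sketch: it assumes it runs inside a sarama.ConsumerGroupHandler's ConsumeClaim with a transactional SyncProducer named producer in scope, and the topic and group names are placeholders. It shows the consume-transform-produce pattern this enables:

```go
// Inside ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim):
for msg := range claim.Messages() {
	if err := producer.BeginTxn(); err != nil {
		return err
	}
	if _, _, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "output-topic",
		Value: sarama.ByteEncoder(msg.Value),
	}); err != nil {
		_ = producer.AbortTxn()
		return err
	}
	// Attach the consumed offset to the same transaction so that the produced
	// record and the input offset commit land (or fail) together.
	if err := producer.AddMessageToTxn(msg, "example-group", nil); err != nil {
		_ = producer.AbortTxn()
		return err
	}
	if err := producer.CommitTxn(); err != nil {
		return err
	}
}
```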
+func newTransactionManager(conf *Config, client Client) (*transactionManager, error) { + txnmgr := &transactionManager{ + producerID: noProducerID, + producerEpoch: noProducerEpoch, + client: client, + pendingPartitionsInCurrentTxn: topicPartitionSet{}, + partitionsInCurrentTxn: topicPartitionSet{}, + offsetsInCurrentTxn: make(map[string]topicPartitionOffsets), + status: ProducerTxnFlagUninitialized, + } + + if conf.Producer.Idempotent { + txnmgr.transactionalID = conf.Producer.Transaction.ID + txnmgr.transactionTimeout = conf.Producer.Transaction.Timeout + txnmgr.sequenceNumbers = make(map[string]int32) + txnmgr.mutex = sync.Mutex{} + + var err error + txnmgr.producerID, txnmgr.producerEpoch, err = txnmgr.initProducerId() + if err != nil { + return nil, err + } + Logger.Printf("txnmgr/init-producer-id [%s] obtained a ProducerId: %d and ProducerEpoch: %d\n", + txnmgr.transactionalID, txnmgr.producerID, txnmgr.producerEpoch) + } + + return txnmgr, nil +} + +// re-init producer-id and producer-epoch if needed. +func (t *transactionManager) initializeTransactions() (err error) { + t.producerID, t.producerEpoch, err = t.initProducerId() + return +} diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go index 4ff973033459..74c3089200d2 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -122,8 +122,9 @@ func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion { // IsAtLeast return true if and only if the version it is called on is // greater than or equal to the version passed in: -// V1.IsAtLeast(V2) // false -// V2.IsAtLeast(V1) // true +// +// V1.IsAtLeast(V2) // false +// V2.IsAtLeast(V1) // true func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool { for i := range v.version { if v.version[i] > other.version[i] { @@ -173,11 +174,23 @@ var ( V2_6_0_0 = newKafkaVersion(2, 6, 0, 0) V2_6_1_0 = newKafkaVersion(2, 6, 1, 0) V2_6_2_0 = newKafkaVersion(2, 6, 2, 0) + V2_6_3_0 = newKafkaVersion(2, 6, 3, 0) V2_7_0_0 = newKafkaVersion(2, 7, 0, 0) V2_7_1_0 = newKafkaVersion(2, 7, 1, 0) + V2_7_2_0 = newKafkaVersion(2, 7, 2, 0) V2_8_0_0 = newKafkaVersion(2, 8, 0, 0) V2_8_1_0 = newKafkaVersion(2, 8, 1, 0) + V2_8_2_0 = newKafkaVersion(2, 8, 2, 0) V3_0_0_0 = newKafkaVersion(3, 0, 0, 0) + V3_0_1_0 = newKafkaVersion(3, 0, 1, 0) + V3_0_2_0 = newKafkaVersion(3, 0, 2, 0) + V3_1_0_0 = newKafkaVersion(3, 1, 0, 0) + V3_1_1_0 = newKafkaVersion(3, 1, 1, 0) + V3_1_2_0 = newKafkaVersion(3, 1, 2, 0) + V3_2_0_0 = newKafkaVersion(3, 2, 0, 0) + V3_2_1_0 = newKafkaVersion(3, 2, 1, 0) + V3_2_2_0 = newKafkaVersion(3, 2, 2, 0) + V3_2_3_0 = newKafkaVersion(3, 2, 3, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -220,11 +233,41 @@ var ( V2_7_1_0, V2_8_0_0, V2_8_1_0, + V2_8_2_0, V3_0_0_0, + V3_0_1_0, + V3_0_2_0, + V3_1_0_0, + V3_1_1_0, + V3_1_2_0, + V3_2_0_0, + V3_2_1_0, + V3_2_2_0, + V3_2_3_0, } MinVersion = V0_8_2_0 - MaxVersion = V3_0_0_0 + MaxVersion = V3_2_3_0 DefaultVersion = V1_0_0_0 + + // reduced set of versions to matrix test + fvtRangeVersions = []KafkaVersion{ + V0_8_2_2, + V0_10_2_2, + V1_0_2_0, + V1_1_1_0, + V2_0_1_0, + V2_1_1_0, + V2_2_2_0, + V2_3_1_0, + V2_4_1_0, + V2_5_1_0, + V2_6_2_0, + V2_7_1_0, + V2_8_2_0, + V3_0_2_0, + V3_1_2_0, + V3_2_3_0, + } ) // ParseKafkaVersion parses and returns kafka version or error from a string diff --git a/vendor/github.com/Shopify/sarama/version.go b/vendor/github.com/Shopify/sarama/version.go index 590329ef84ec..d3b9d53ff944 100644 --- 
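The utils.go hunk above extends the known-version table through V3_2_3_0 and bumps MaxVersion to match. A small sketch of opting a client into one of the newly recognised versions (the version string and usage are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	v, err := sarama.ParseKafkaVersion("3.2.3")
	if err != nil {
		log.Fatal(err)
	}
	cfg := sarama.NewConfig()
	cfg.Version = v

	fmt.Println(v.IsAtLeast(sarama.V2_5_0_0)) // true
	fmt.Println(sarama.MaxVersion)            // 3.2.3 after this change
}
```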
a/vendor/github.com/Shopify/sarama/version.go +++ b/vendor/github.com/Shopify/sarama/version.go @@ -1,20 +1,27 @@ package sarama -import "runtime/debug" +import ( + "runtime/debug" + "sync" +) -var v string +var ( + v string + vOnce sync.Once +) func version() string { - if v == "" { + vOnce.Do(func() { bi, ok := debug.ReadBuildInfo() if ok { v = bi.Main.Version - } else { + } + if v == "" || v == "(devel)" { // if we can't read a go module version then they're using a git // clone or vendored module so all we can do is report "dev" for - // the version + // the version to make a valid ApiVersions request v = "dev" } - } + }) return v } diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go index e23bfc4772f7..80507e14e45c 100644 --- a/vendor/github.com/Shopify/sarama/zstd.go +++ b/vendor/github.com/Shopify/sarama/zstd.go @@ -1,18 +1,50 @@ package sarama import ( + "sync" + "github.com/klauspost/compress/zstd" ) -var ( - zstdDec, _ = zstd.NewReader(nil) - zstdEnc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true)) -) +type ZstdEncoderParams struct { + Level int +} +type ZstdDecoderParams struct { +} + +var zstdEncMap, zstdDecMap sync.Map + +func getEncoder(params ZstdEncoderParams) *zstd.Encoder { + if ret, ok := zstdEncMap.Load(params); ok { + return ret.(*zstd.Encoder) + } + // It's possible to race and create multiple new writers. + // Only one will survive GC after use. + encoderLevel := zstd.SpeedDefault + if params.Level != CompressionLevelDefault { + encoderLevel = zstd.EncoderLevelFromZstd(params.Level) + } + zstdEnc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true), + zstd.WithEncoderLevel(encoderLevel)) + zstdEncMap.Store(params, zstdEnc) + return zstdEnc +} + +func getDecoder(params ZstdDecoderParams) *zstd.Decoder { + if ret, ok := zstdDecMap.Load(params); ok { + return ret.(*zstd.Decoder) + } + // It's possible to race and create multiple new readers. + // Only one will survive GC after use. + zstdDec, _ := zstd.NewReader(nil) + zstdDecMap.Store(params, zstdDec) + return zstdDec +} -func zstdDecompress(dst, src []byte) ([]byte, error) { - return zstdDec.DecodeAll(src, dst) +func zstdDecompress(params ZstdDecoderParams, dst, src []byte) ([]byte, error) { + return getDecoder(params).DecodeAll(src, dst) } -func zstdCompress(dst, src []byte) ([]byte, error) { - return zstdEnc.EncodeAll(src, dst), nil +func zstdCompress(params ZstdEncoderParams, dst, src []byte) ([]byte, error) { + return getEncoder(params).EncodeAll(src, dst), nil } diff --git a/vendor/github.com/armon/go-metrics/prometheus/prometheus.go b/vendor/github.com/armon/go-metrics/prometheus/prometheus.go index 4a7f51618d06..5a8282fc898f 100644 --- a/vendor/github.com/armon/go-metrics/prometheus/prometheus.go +++ b/vendor/github.com/armon/go-metrics/prometheus/prometheus.go @@ -20,6 +20,7 @@ var ( // PrometheusSink. DefaultPrometheusOpts = PrometheusOpts{ Expiration: 60 * time.Second, + Name: "default_prometheus_sink", } ) @@ -48,6 +49,7 @@ type PrometheusOpts struct { GaugeDefinitions []GaugeDefinition SummaryDefinitions []SummaryDefinition CounterDefinitions []CounterDefinition + Name string } type PrometheusSink struct { @@ -57,6 +59,7 @@ type PrometheusSink struct { counters sync.Map expiration time.Duration help map[string]string + name string } // GaugeDefinition can be provided to PrometheusOpts to declare a constant gauge that is not deleted on expiry. 
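The go-metrics change that starts above gives PrometheusOpts a Name, which the following hunks use both as a sink identifier in NewPrometheusSinkFrom (with a fallback when empty) and as the descriptor emitted from Describe, replacing the hard-coded "Dummy" gauge. A hedged sketch of wiring a named sink into go-metrics; the service and sink names are made up:

```go
package main

import (
	"log"
	"time"

	metrics "github.com/armon/go-metrics"
	metricsprom "github.com/armon/go-metrics/prometheus"
)

func main() {
	sink, err := metricsprom.NewPrometheusSinkFrom(metricsprom.PrometheusOpts{
		Name:       "my_service_sink", // an empty Name falls back to "default_prometheus_sink"
		Expiration: 60 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := metrics.NewGlobal(metrics.DefaultConfig("my-service"), sink); err != nil {
		log.Fatal(err)
	}
	metrics.IncrCounter([]string{"example", "started"}, 1)
}
```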
@@ -106,12 +109,17 @@ func NewPrometheusSink() (*PrometheusSink, error) { // NewPrometheusSinkFrom creates a new PrometheusSink using the passed options. func NewPrometheusSinkFrom(opts PrometheusOpts) (*PrometheusSink, error) { + name := opts.Name + if name == "" { + name = "default_prometheus_sink" + } sink := &PrometheusSink{ gauges: sync.Map{}, summaries: sync.Map{}, counters: sync.Map{}, expiration: opts.Expiration, help: make(map[string]string), + name: name, } initGauges(&sink.gauges, opts.GaugeDefinitions, sink.help) @@ -126,11 +134,15 @@ func NewPrometheusSinkFrom(opts PrometheusOpts) (*PrometheusSink, error) { return sink, reg.Register(sink) } -// Describe is needed to meet the Collector interface. +// Describe sends a Collector.Describe value from the descriptor created around PrometheusSink.Name +// Note that we cannot describe all the metrics (gauges, counters, summaries) in the sink as +// metrics can be added at any point during the lifecycle of the sink, which does not respect +// the idempotency aspect of the Collector.Describe() interface func (p *PrometheusSink) Describe(c chan<- *prometheus.Desc) { - // We must emit some description otherwise an error is returned. This - // description isn't shown to the user! - prometheus.NewGauge(prometheus.GaugeOpts{Name: "Dummy", Help: "Dummy"}).Describe(c) + // dummy value to be able to register and unregister "empty" sinks + // Note this is not actually retained in the PrometheusSink so this has no side effects + // on the caller's sink. So it shouldn't show up to any of its consumers. + prometheus.NewGauge(prometheus.GaugeOpts{Name: p.name, Help: p.name}).Describe(c) } // Collect meets the collection interface and allows us to enforce our expiration @@ -398,6 +410,7 @@ func NewPrometheusPushSink(address string, pushInterval time.Duration, name stri summaries: sync.Map{}, counters: sync.Map{}, expiration: 60 * time.Second, + name: "default_prometheus_sink", } pusher := push.New(address, name).Collector(promSink) diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 792b4a60b346..8bf0e5b78153 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -3,8 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,19 +47,20 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. 
-| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 000000000000..94b9c443987c --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index 15c835d5417c..a9e0d45c9dcc 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,19 +16,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -50,10 +42,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. 
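For reference, the xxhash API listed in the README hunk above is used as in this minimal sketch (the input strings are arbitrary); on amd64 and arm64 the assembly further down is used unless the purego build tag is set:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, world")))

	// Streaming via Digest, which implements hash.Hash64.
	d := xxhash.New()
	_, _ = d.WriteString("hello, ")
	_, _ = d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64()) // same value as the one-shot call above
}
```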
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index be8db5bf7960..3e8b132579ec 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. 
- MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. 
- MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 000000000000..7e3145a22186 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
+ ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go similarity index 73% rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index ad14b807f4d9..9216e0a40c1a 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -1,3 +1,5 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a821603e5..26df13bba4b7 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7a31f2..e86f1b5fd8e4 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the safe 
implementations of otherwise unsafe-using code. diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 376e0ca2e497..1c1638fd88a1 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -11,7 +12,7 @@ import ( // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE deleted file mode 100644 index 8915f02773f5..000000000000 --- a/vendor/github.com/containerd/containerd/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -Docker -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go deleted file mode 100644 index 05a35228ca42..000000000000 --- a/vendor/github.com/containerd/containerd/errdefs/errors.go +++ /dev/null @@ -1,93 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package errdefs defines the common errors used throughout containerd -// packages. -// -// Use with errors.Wrap and error.Wrapf to add context to an error. -// -// To detect an error class, use the IsXXX functions to tell whether an error -// is of a certain type. -// -// The functions ToGRPC and FromGRPC can be used to map server-side and -// client-side errors to the correct types. -package errdefs - -import ( - "context" - - "github.com/pkg/errors" -) - -// Definitions of common error types used throughout containerd. All containerd -// errors returned by most packages will map into one of these errors classes. -// Packages should return errors of these types when they want to instruct a -// client to take a particular action. -// -// For the most part, we just try to provide local grpc errors. Most conditions -// map very well to those defined by grpc. -var ( - ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. 
- ErrInvalidArgument = errors.New("invalid argument") - ErrNotFound = errors.New("not found") - ErrAlreadyExists = errors.New("already exists") - ErrFailedPrecondition = errors.New("failed precondition") - ErrUnavailable = errors.New("unavailable") - ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented -) - -// IsInvalidArgument returns true if the error is due to an invalid argument -func IsInvalidArgument(err error) bool { - return errors.Is(err, ErrInvalidArgument) -} - -// IsNotFound returns true if the error is due to a missing object -func IsNotFound(err error) bool { - return errors.Is(err, ErrNotFound) -} - -// IsAlreadyExists returns true if the error is due to an already existing -// metadata item -func IsAlreadyExists(err error) bool { - return errors.Is(err, ErrAlreadyExists) -} - -// IsFailedPrecondition returns true if an operation could not proceed to the -// lack of a particular condition -func IsFailedPrecondition(err error) bool { - return errors.Is(err, ErrFailedPrecondition) -} - -// IsUnavailable returns true if the error is due to a resource being unavailable -func IsUnavailable(err error) bool { - return errors.Is(err, ErrUnavailable) -} - -// IsNotImplemented returns true if the error is due to not being implemented -func IsNotImplemented(err error) bool { - return errors.Is(err, ErrNotImplemented) -} - -// IsCanceled returns true if the error is due to `context.Canceled`. -func IsCanceled(err error) bool { - return errors.Is(err, context.Canceled) -} - -// IsDeadlineExceeded returns true if the error is due to -// `context.DeadlineExceeded`. -func IsDeadlineExceeded(err error) bool { - return errors.Is(err, context.DeadlineExceeded) -} diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go deleted file mode 100644 index 209f63bd0fc0..000000000000 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package errdefs - -import ( - "context" - "strings" - - "github.com/pkg/errors" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// ToGRPC will attempt to map the backend containerd error into a grpc error, -// using the original error message as a description. -// -// Further information may be extracted from certain errors depending on their -// type. -// -// If the error is unmapped, the original error will be returned to be handled -// by the regular grpc error handling stack. 
-func ToGRPC(err error) error { - if err == nil { - return nil - } - - if isGRPCError(err) { - // error has already been mapped to grpc - return err - } - - switch { - case IsInvalidArgument(err): - return status.Errorf(codes.InvalidArgument, err.Error()) - case IsNotFound(err): - return status.Errorf(codes.NotFound, err.Error()) - case IsAlreadyExists(err): - return status.Errorf(codes.AlreadyExists, err.Error()) - case IsFailedPrecondition(err): - return status.Errorf(codes.FailedPrecondition, err.Error()) - case IsUnavailable(err): - return status.Errorf(codes.Unavailable, err.Error()) - case IsNotImplemented(err): - return status.Errorf(codes.Unimplemented, err.Error()) - case IsCanceled(err): - return status.Errorf(codes.Canceled, err.Error()) - case IsDeadlineExceeded(err): - return status.Errorf(codes.DeadlineExceeded, err.Error()) - } - - return err -} - -// ToGRPCf maps the error to grpc error codes, assembling the formatting string -// and combining it with the target error string. -// -// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...)) -func ToGRPCf(err error, format string, args ...interface{}) error { - return ToGRPC(errors.Wrapf(err, format, args...)) -} - -// FromGRPC returns the underlying error from a grpc service based on the grpc error code -func FromGRPC(err error) error { - if err == nil { - return nil - } - - var cls error // divide these into error classes, becomes the cause - - switch code(err) { - case codes.InvalidArgument: - cls = ErrInvalidArgument - case codes.AlreadyExists: - cls = ErrAlreadyExists - case codes.NotFound: - cls = ErrNotFound - case codes.Unavailable: - cls = ErrUnavailable - case codes.FailedPrecondition: - cls = ErrFailedPrecondition - case codes.Unimplemented: - cls = ErrNotImplemented - case codes.Canceled: - cls = context.Canceled - case codes.DeadlineExceeded: - cls = context.DeadlineExceeded - default: - cls = ErrUnknown - } - - msg := rebaseMessage(cls, err) - if msg != "" { - err = errors.Wrap(cls, msg) - } else { - err = errors.WithStack(cls) - } - - return err -} - -// rebaseMessage removes the repeats for an error at the end of an error -// string. This will happen when taking an error over grpc then remapping it. -// -// Effectively, we just remove the string of cls from the end of err if it -// appears there. -func rebaseMessage(cls error, err error) string { - desc := errDesc(err) - clss := cls.Error() - if desc == clss { - return "" - } - - return strings.TrimSuffix(desc, ": "+clss) -} - -func isGRPCError(err error) bool { - _, ok := status.FromError(err) - return ok -} - -func code(err error) codes.Code { - if s, ok := status.FromError(err); ok { - return s.Code() - } - return codes.Unknown -} - -func errDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go deleted file mode 100644 index 37b6a7d1c8a6..000000000000 --- a/vendor/github.com/containerd/containerd/log/context.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package log - -import ( - "context" - - "github.com/sirupsen/logrus" -) - -var ( - // G is an alias for GetLogger. - // - // We may want to define this locally to a package to get package tagged log - // messages. - G = GetLogger - - // L is an alias for the standard logger. - L = logrus.NewEntry(logrus.StandardLogger()) -) - -type ( - loggerKey struct{} -) - -const ( - // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to - // ensure the formatted time is always the same number of characters. - RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - - // TextFormat represents the text logging format - TextFormat = "text" - - // JSONFormat represents the JSON logging format - JSONFormat = "json" -) - -// WithLogger returns a new context with the provided logger. Use in -// combination with logger.WithField(s) for great effect. -func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { - return context.WithValue(ctx, loggerKey{}, logger) -} - -// GetLogger retrieves the current logger from the context. If no logger is -// available, the default logger is returned. -func GetLogger(ctx context.Context) *logrus.Entry { - logger := ctx.Value(loggerKey{}) - - if logger == nil { - return L - } - - return logger.(*logrus.Entry) -} diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go deleted file mode 100644 index c7657e1869be..000000000000 --- a/vendor/github.com/containerd/containerd/platforms/compare.go +++ /dev/null @@ -1,193 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "strconv" - "strings" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// MatchComparer is able to match and compare platforms to -// filter and sort platforms. -type MatchComparer interface { - Matcher - - Less(specs.Platform, specs.Platform) bool -} - -// platformVector returns an (ordered) vector of appropriate specs.Platform -// objects to try matching for the given platform object (see platforms.Only). 
-func platformVector(platform specs.Platform) []specs.Platform { - vector := []specs.Platform{platform} - - switch platform.Architecture { - case "amd64": - vector = append(vector, specs.Platform{ - Architecture: "386", - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: platform.Variant, - }) - case "arm": - if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { - for armVersion--; armVersion >= 5; armVersion-- { - vector = append(vector, specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v" + strconv.Itoa(armVersion), - }) - } - } - case "arm64": - variant := platform.Variant - if variant == "" { - variant = "v8" - } - vector = append(vector, platformVector(specs.Platform{ - Architecture: "arm", - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: variant, - })...) - } - - return vector -} - -// Only returns a match comparer for a single platform -// using default resolution logic for the platform. -// -// For arm/v8, will also match arm/v7, arm/v6 and arm/v5 -// For arm/v7, will also match arm/v6 and arm/v5 -// For arm/v6, will also match arm/v5 -// For amd64, will also match 386 -func Only(platform specs.Platform) MatchComparer { - return Ordered(platformVector(Normalize(platform))...) -} - -// OnlyStrict returns a match comparer for a single platform. -// -// Unlike Only, OnlyStrict does not match sub platforms. -// So, "arm/vN" will not match "arm/vM" where M < N, -// and "amd64" will not also match "386". -// -// OnlyStrict matches non-canonical forms. -// So, "arm64" matches "arm/64/v8". -func OnlyStrict(platform specs.Platform) MatchComparer { - return Ordered(Normalize(platform)) -} - -// Ordered returns a platform MatchComparer which matches any of the platforms -// but orders them in order they are provided. -func Ordered(platforms ...specs.Platform) MatchComparer { - matchers := make([]Matcher, len(platforms)) - for i := range platforms { - matchers[i] = NewMatcher(platforms[i]) - } - return orderedPlatformComparer{ - matchers: matchers, - } -} - -// Any returns a platform MatchComparer which matches any of the platforms -// with no preference for ordering. -func Any(platforms ...specs.Platform) MatchComparer { - matchers := make([]Matcher, len(platforms)) - for i := range platforms { - matchers[i] = NewMatcher(platforms[i]) - } - return anyPlatformComparer{ - matchers: matchers, - } -} - -// All is a platform MatchComparer which matches all platforms -// with preference for ordering. 
-var All MatchComparer = allPlatformComparer{} - -type orderedPlatformComparer struct { - matchers []Matcher -} - -func (c orderedPlatformComparer) Match(platform specs.Platform) bool { - for _, m := range c.matchers { - if m.Match(platform) { - return true - } - } - return false -} - -func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool { - for _, m := range c.matchers { - p1m := m.Match(p1) - p2m := m.Match(p2) - if p1m && !p2m { - return true - } - if p1m || p2m { - return false - } - } - return false -} - -type anyPlatformComparer struct { - matchers []Matcher -} - -func (c anyPlatformComparer) Match(platform specs.Platform) bool { - for _, m := range c.matchers { - if m.Match(platform) { - return true - } - } - return false -} - -func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool { - var p1m, p2m bool - for _, m := range c.matchers { - if !p1m && m.Match(p1) { - p1m = true - } - if !p2m && m.Match(p2) { - p2m = true - } - if p1m && p2m { - return false - } - } - // If one matches, and the other does, sort match first - return p1m && !p2m -} - -type allPlatformComparer struct{} - -func (allPlatformComparer) Match(specs.Platform) bool { - return true -} - -func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool { - return false -} diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go deleted file mode 100644 index 4a7177e31384..000000000000 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "bufio" - "os" - "runtime" - "strings" - "sync" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - "github.com/pkg/errors" -) - -// Present the ARM instruction set architecture, eg: v7, v8 -// Don't use this value directly; call cpuVariant() instead. -var cpuVariantValue string - -var cpuVariantOnce sync.Once - -func cpuVariant() string { - cpuVariantOnce.Do(func() { - if isArmArch(runtime.GOARCH) { - cpuVariantValue = getCPUVariant() - } - }) - return cpuVariantValue -} - -// For Linux, the kernel has already detected the ABI, ISA and Features. -// So we don't need to access the ARM registers to detect platform information -// by ourselves. We can just parse these information from /proc/cpuinfo -func getCPUInfo(pattern string) (info string, err error) { - if !isLinuxOS(runtime.GOOS) { - return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS) - } - - cpuinfo, err := os.Open("/proc/cpuinfo") - if err != nil { - return "", err - } - defer cpuinfo.Close() - - // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse - // the first core is enough. 
- scanner := bufio.NewScanner(cpuinfo) - for scanner.Scan() { - newline := scanner.Text() - list := strings.Split(newline, ":") - - if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { - return strings.TrimSpace(list[1]), nil - } - } - - // Check whether the scanner encountered errors - err = scanner.Err() - if err != nil { - return "", err - } - - return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) -} - -func getCPUVariant() string { - if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { - // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use - // runtime.GOARCH to determine the variants - var variant string - switch runtime.GOARCH { - case "arm64": - variant = "v8" - case "arm": - variant = "v7" - default: - variant = "unknown" - } - - return variant - } - - variant, err := getCPUInfo("Cpu architecture") - if err != nil { - log.L.WithError(err).Error("failure getting variant") - return "" - } - - // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") - // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 - if runtime.GOARCH == "arm" && variant == "7" { - model, err := getCPUInfo("model name") - if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { - variant = "6" - } - } - - switch strings.ToLower(variant) { - case "8", "aarch64": - variant = "v8" - case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": - variant = "v7" - case "6", "6tej": - variant = "v6" - case "5", "5t", "5te", "5tej": - variant = "v5" - case "4", "4t": - variant = "v4" - case "3": - variant = "v3" - default: - variant = "unknown" - } - - return variant -} diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go deleted file mode 100644 index 6ede94061eb8..000000000000 --- a/vendor/github.com/containerd/containerd/platforms/database.go +++ /dev/null @@ -1,114 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - "strings" -) - -// isLinuxOS returns true if the operating system is Linux. -// -// The OS value should be normalized before calling this function. -func isLinuxOS(os string) bool { - return os == "linux" -} - -// These function are generated from https://golang.org/src/go/build/syslist.go. -// -// We use switch statements because they are slightly faster than map lookups -// and use a little less memory. - -// isKnownOS returns true if we know about the operating system. -// -// The OS value should be normalized before calling this function. -func isKnownOS(os string) bool { - switch os { - case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": - return true - } - return false -} - -// isArmArch returns true if the architecture is ARM. 
-// -// The arch value should be normalized before being passed to this function. -func isArmArch(arch string) bool { - switch arch { - case "arm", "arm64": - return true - } - return false -} - -// isKnownArch returns true if we know about the architecture. -// -// The arch value should be normalized before being passed to this function. -func isKnownArch(arch string) bool { - switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": - return true - } - return false -} - -func normalizeOS(os string) string { - if os == "" { - return runtime.GOOS - } - os = strings.ToLower(os) - - switch os { - case "macos": - os = "darwin" - } - return os -} - -// normalizeArch normalizes the architecture. -func normalizeArch(arch, variant string) (string, string) { - arch, variant = strings.ToLower(arch), strings.ToLower(variant) - switch arch { - case "i386": - arch = "386" - variant = "" - case "x86_64", "x86-64": - arch = "amd64" - variant = "" - case "aarch64", "arm64": - arch = "arm64" - switch variant { - case "8", "v8": - variant = "" - } - case "armhf": - arch = "arm" - variant = "v7" - case "armel": - arch = "arm" - variant = "v6" - case "arm": - switch variant { - case "", "7": - variant = "v7" - case "5", "6", "8": - variant = "v" + variant - } - } - - return arch, variant -} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go deleted file mode 100644 index cb77fbc9f7b9..000000000000 --- a/vendor/github.com/containerd/containerd/platforms/defaults.go +++ /dev/null @@ -1,43 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// DefaultString returns the default string specifier for the platform. -func DefaultString() string { - return Format(DefaultSpec()) -} - -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - -// DefaultStrict returns strict form of Default. -func DefaultStrict() MatchComparer { - return OnlyStrict(DefaultSpec()) -} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go deleted file mode 100644 index e8a7d5ffa0d6..000000000000 --- a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -// Default returns the default matcher for the platform. -func Default() MatchComparer { - return Only(DefaultSpec()) -} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go deleted file mode 100644 index 0c380e3b7c11..000000000000 --- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ /dev/null @@ -1,81 +0,0 @@ -// +build windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "fmt" - "runtime" - "strconv" - "strings" - - imagespec "github.com/opencontainers/image-spec/specs-go/v1" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/sys/windows" -) - -type matchComparer struct { - defaults Matcher - osVersionPrefix string -} - -// Match matches platform with the same windows major, minor -// and build version. -func (m matchComparer) Match(p imagespec.Platform) bool { - if m.defaults.Match(p) { - // TODO(windows): Figure out whether OSVersion is deprecated. - return strings.HasPrefix(p.OSVersion, m.osVersionPrefix) - } - return false -} - -// Less sorts matched platforms in front of other platforms. -// For matched platforms, it puts platforms with larger revision -// number in front. -func (m matchComparer) Less(p1, p2 imagespec.Platform) bool { - m1, m2 := m.Match(p1), m.Match(p2) - if m1 && m2 { - r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) - return r1 > r2 - } - return m1 && !m2 -} - -func revision(v string) int { - parts := strings.Split(v, ".") - if len(parts) < 4 { - return 0 - } - r, err := strconv.Atoi(parts[3]) - if err != nil { - return 0 - } - return r -} - -// Default returns the current platform's default platform specification. -func Default() MatchComparer { - major, minor, build := windows.RtlGetNtVersionNumbers() - return matchComparer{ - defaults: Ordered(DefaultSpec(), specs.Platform{ - OS: "linux", - Architecture: runtime.GOARCH, - }), - osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build), - } -} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go deleted file mode 100644 index 088bdea05086..000000000000 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ /dev/null @@ -1,278 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package platforms provides a toolkit for normalizing, matching and -// specifying container platforms. -// -// Centered around OCI platform specifications, we define a string-based -// specifier syntax that can be used for user input. With a specifier, users -// only need to specify the parts of the platform that are relevant to their -// context, providing an operating system or architecture or both. -// -// How do I use this package? -// -// The vast majority of use cases should simply use the match function with -// user input. The first step is to parse a specifier into a matcher: -// -// m, err := Parse("linux") -// if err != nil { ... } -// -// Once you have a matcher, use it to match against the platform declared by a -// component, typically from an image or runtime. Since extracting an images -// platform is a little more involved, we'll use an example against the -// platform default: -// -// if ok := m.Match(Default()); !ok { /* doesn't match */ } -// -// This can be composed in loops for resolving runtimes or used as a filter for -// fetch and select images. -// -// More details of the specifier syntax and platform spec follow. -// -// Declaring Platform Support -// -// Components that have strict platform requirements should use the OCI -// platform specification to declare their support. Typically, this will be -// images and runtimes that should make these declaring which platform they -// support specifically. This looks roughly as follows: -// -// type Platform struct { -// Architecture string -// OS string -// Variant string -// } -// -// Most images and runtimes should at least set Architecture and OS, according -// to their GOARCH and GOOS values, respectively (follow the OCI image -// specification when in doubt). ARM should set variant under certain -// discussions, which are outlined below. -// -// Platform Specifiers -// -// While the OCI platform specifications provide a tool for components to -// specify structured information, user input typically doesn't need the full -// context and much can be inferred. To solve this problem, we introduced -// "specifiers". A specifier has the format -// `||/[/]`. The user can provide either the -// operating system or the architecture or both. -// -// An example of a common specifier is `linux/amd64`. If the host has a default -// of runtime that matches this, the user can simply provide the component that -// matters. For example, if a image provides amd64 and arm64 support, the -// operating system, `linux` can be inferred, so they only have to provide -// `arm64` or `amd64`. Similar behavior is implemented for operating systems, -// where the architecture may be known but a runtime may support images from -// different operating systems. -// -// Normalization -// -// Because not all users are familiar with the way the Go runtime represents -// platforms, several normalizations have been provided to make this package -// easier to user. 
-// -// The following are performed for architectures: -// -// Value Normalized -// aarch64 arm64 -// armhf arm -// armel arm/v6 -// i386 386 -// x86_64 amd64 -// x86-64 amd64 -// -// We also normalize the operating system `macos` to `darwin`. -// -// ARM Support -// -// To qualify ARM architecture, the Variant field is used to qualify the arm -// version. The most common arm version, v7, is represented without the variant -// unless it is explicitly provided. This is treated as equivalent to armhf. A -// previous architecture, armel, will be normalized to arm/v6. -// -// While these normalizations are provided, their support on arm platforms has -// not yet been fully implemented and tested. -package platforms - -import ( - "regexp" - "runtime" - "strconv" - "strings" - - "github.com/containerd/containerd/errdefs" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -var ( - specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) -) - -// Matcher matches platforms specifications, provided by an image or runtime. -type Matcher interface { - Match(platform specs.Platform) bool -} - -// NewMatcher returns a simple matcher based on the provided platform -// specification. The returned matcher only looks for equality based on os, -// architecture and variant. -// -// One may implement their own matcher if this doesn't provide the required -// functionality. -// -// Applications should opt to use `Match` over directly parsing specifiers. -func NewMatcher(platform specs.Platform) Matcher { - return &matcher{ - Platform: Normalize(platform), - } -} - -type matcher struct { - specs.Platform -} - -func (m *matcher) Match(platform specs.Platform) bool { - normalized := Normalize(platform) - return m.OS == normalized.OS && - m.Architecture == normalized.Architecture && - m.Variant == normalized.Variant -} - -func (m *matcher) String() string { - return Format(m.Platform) -} - -// Parse parses the platform specifier syntax into a platform declaration. -// -// Platform specifiers are in the format `||/[/]`. -// The minimum required information for a platform specifier is the operating -// system or architecture. If there is only a single string (no slashes), the -// value will be matched against the known set of operating systems, then fall -// back to the known set of architectures. The missing component will be -// inferred based on the local environment. -func Parse(specifier string) (specs.Platform, error) { - if strings.Contains(specifier, "*") { - // TODO(stevvooe): need to work out exact wildcard handling - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier) - } - - parts := strings.Split(specifier, "/") - - for _, part := range parts { - if !specifierRe.MatchString(part) { - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String()) - } - } - - var p specs.Platform - switch len(parts) { - case 1: - // in this case, we will test that the value might be an OS, then look - // it up. If it is not known, we'll treat it as an architecture. Since - // we have very little information about the platform here, we are - // going to be a little more strict if we don't know about the argument - // value. 
- p.OS = normalizeOS(parts[0]) - if isKnownOS(p.OS) { - // picks a default architecture - p.Architecture = runtime.GOARCH - if p.Architecture == "arm" && cpuVariant() != "v7" { - p.Variant = cpuVariant() - } - - return p, nil - } - - p.Architecture, p.Variant = normalizeArch(parts[0], "") - if p.Architecture == "arm" && p.Variant == "v7" { - p.Variant = "" - } - if isKnownArch(p.Architecture) { - p.OS = runtime.GOOS - return p, nil - } - - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier) - case 2: - // In this case, we treat as a regular os/arch pair. We don't care - // about whether or not we know of the platform. - p.OS = normalizeOS(parts[0]) - p.Architecture, p.Variant = normalizeArch(parts[1], "") - if p.Architecture == "arm" && p.Variant == "v7" { - p.Variant = "" - } - - return p, nil - case 3: - // we have a fully specified variant, this is rare - p.OS = normalizeOS(parts[0]) - p.Architecture, p.Variant = normalizeArch(parts[1], parts[2]) - if p.Architecture == "arm64" && p.Variant == "" { - p.Variant = "v8" - } - - return p, nil - } - - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier) -} - -// MustParse is like Parses but panics if the specifier cannot be parsed. -// Simplifies initialization of global variables. -func MustParse(specifier string) specs.Platform { - p, err := Parse(specifier) - if err != nil { - panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error()) - } - return p -} - -// Format returns a string specifier from the provided platform specification. -func Format(platform specs.Platform) string { - if platform.OS == "" { - return "unknown" - } - - return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant) -} - -func joinNotEmpty(s ...string) string { - var ss []string - for _, s := range s { - if s == "" { - continue - } - - ss = append(ss, s) - } - - return strings.Join(ss, "/") -} - -// Normalize validates and translate the platform to the canonical value. -// -// For example, if "Aarch64" is encountered, we change it to "arm64" or if -// "x86_64" is encountered, it becomes "amd64". 
-func Normalize(platform specs.Platform) specs.Platform { - platform.OS = normalizeOS(platform.OS) - platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) - - // these fields are deprecated, remove them - platform.OSFeatures = nil - platform.OSVersion = "" - - return platform -} diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go index 76cf4852c769..eb9fb7ff2d8c 100644 --- a/vendor/github.com/coreos/go-semver/semver/semver.go +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -85,7 +85,7 @@ func (v *Version) Set(version string) error { return fmt.Errorf("failed to validate metadata: %v", err) } - parsed := make([]int64, 3, 3) + parsed := make([]int64, 3) for i, v := range dotParts[:3] { val, err := strconv.ParseInt(v, 10, 64) diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go index 2d71fc5e9ffd..b3dfb7a6d7e1 100644 --- a/vendor/github.com/docker/distribution/reference/normalize.go +++ b/vendor/github.com/docker/distribution/reference/normalize.go @@ -56,6 +56,35 @@ func ParseNormalizedNamed(s string) (Named, error) { return named, nil } +// ParseDockerRef normalizes the image reference following the docker convention. This is added +// mainly for backward compatibility. +// The reference returned can only be either tagged or digested. For reference contains both tag +// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ +// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. +func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, ok := named.(NamedTagged); ok { + if canonical, ok := named.(Canonical); ok { + // The reference is both tagged and digested, only + // return digested. + newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + newCanonical, err := WithDigest(newNamed, canonical.Digest()) + if err != nil { + return nil, err + } + return newCanonical, nil + } + } + return TagNameOnly(named), nil +} + // splitDockerDomain splits a repository name to domain and remotename string. // If no valid domain is found, the default domain is used. Repository name // needs to be already validated before. 
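Note on the ParseDockerRef addition in the hunk above: a minimal, hypothetical usage sketch, assuming the vendored package is imported as github.com/docker/distribution/reference; the expected outputs follow the function's own doc comment.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// A reference carrying both a tag and a digest is reduced to the digested form.
	ref, err := reference.ParseDockerRef(
		"docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String())
	// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa

	// A bare name is normalized and, having no digest, gets the default tag.
	tagged, err := reference.ParseDockerRef("busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(tagged.String())
	// docker.io/library/busybox:latest
}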
diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index 2f66cca87a3c..8c0c23b2fe1b 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -205,7 +205,7 @@ func Parse(s string) (Reference, error) { var repo repository nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { + if len(nameMatch) == 3 { repo.domain = nameMatch[1] repo.path = nameMatch[2] } else { diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go deleted file mode 100644 index 6d9bb4b62afb..000000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ /dev/null @@ -1,267 +0,0 @@ -package errcode - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ErrorCoder is the base interface for ErrorCode and Error allowing -// users of each to just call ErrorCode to get the real ID of each -type ErrorCoder interface { - ErrorCode() ErrorCode -} - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - -var _ error = ErrorCode(0) - -// ErrorCode just returns itself -func (ec ErrorCode) ErrorCode() ErrorCode { - return ec -} - -// Error returns the ID/Value -func (ec ErrorCode) Error() string { - // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. - return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) -} - -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returned the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// WithMessage creates a new Error struct based on the passed-in info and -// overrides the Message property. -func (ec ErrorCode) WithMessage(message string) Error { - return Error{ - Code: ec, - Message: message, - } -} - -// WithDetail creates a new Error struct based on the passed-in info and -// set the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithDetail(detail) -} - -// WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithArgs(args...) -} - -// Error provides a wrapper around ErrorCode with extra Details provided. 
-type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` - - // TODO(duglin): See if we need an "args" property so we can do the - // variable substitution right before showing the message to the user -} - -var _ error = Error{} - -// ErrorCode returns the ID/Value of this Error -func (e Error) ErrorCode() ErrorCode { - return e.Code -} - -// Error returns a human readable representation of the error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) -} - -// WithDetail will return a new Error, based on the current one, but with -// some Detail info added -func (e Error) WithDetail(detail interface{}) Error { - return Error{ - Code: e.Code, - Message: e.Message, - Detail: detail, - } -} - -// WithArgs uses the passed-in list of interface{} as the substitution -// variables in the Error's Message string, but returns a new Error -func (e Error) WithArgs(args ...interface{}) Error { - return Error{ - Code: e.Code, - Message: fmt.Sprintf(e.Code.Message(), args...), - Detail: e.Detail, - } -} - -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often captilized with - // underscores, to identify the error code. This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable decription of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the errors purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCode provides the http status code that is associated with - // this error condition. - HTTPStatusCode int -} - -// ParseErrorCode returns the value by the string error code. -// `ErrorCodeUnknown` will be returned if the error is not known. -func ParseErrorCode(value string) ErrorCode { - ed, ok := idToDescriptors[value] - if ok { - return ed.Code - } - - return ErrorCodeUnknown -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors []error - -var _ error = Errors{} - -func (errs Errors) Error() string { - switch len(errs) { - case 0: - return "" - case 1: - return errs[0].Error() - default: - msg := "errors:\n" - for _, err := range errs { - msg += err.Error() + "\n" - } - return msg - } -} - -// Len returns the current number of errors. 
-func (errs Errors) Len() int { - return len(errs) -} - -// MarshalJSON converts slice of error, ErrorCode or Error into a -// slice of Error - then serializes -func (errs Errors) MarshalJSON() ([]byte, error) { - var tmpErrs struct { - Errors []Error `json:"errors,omitempty"` - } - - for _, daErr := range errs { - var err Error - - switch daErr.(type) { - case ErrorCode: - err = daErr.(ErrorCode).WithDetail(nil) - case Error: - err = daErr.(Error) - default: - err = ErrorCodeUnknown.WithDetail(daErr) - - } - - // If the Error struct was setup and they forgot to set the - // Message field (meaning its "") then grab it from the ErrCode - msg := err.Message - if msg == "" { - msg = err.Code.Message() - } - - tmpErrs.Errors = append(tmpErrs.Errors, Error{ - Code: err.Code, - Message: msg, - Detail: err.Detail, - }) - } - - return json.Marshal(tmpErrs) -} - -// UnmarshalJSON deserializes []Error and then converts it into slice of -// Error or ErrorCode -func (errs *Errors) UnmarshalJSON(data []byte) error { - var tmpErrs struct { - Errors []Error - } - - if err := json.Unmarshal(data, &tmpErrs); err != nil { - return err - } - - var newErrs Errors - for _, daErr := range tmpErrs.Errors { - // If Message is empty or exactly matches the Code's message string - // then just use the Code, no need for a full Error struct - if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { - // Error's w/o details get converted to ErrorCode - newErrs = append(newErrs, daErr.Code) - } else { - // Error's w/ details are untouched - newErrs = append(newErrs, Error{ - Code: daErr.Code, - Message: daErr.Message, - Detail: daErr.Detail, - }) - } - } - - *errs = newErrs - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go deleted file mode 100644 index d77e70473e7b..000000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go +++ /dev/null @@ -1,40 +0,0 @@ -package errcode - -import ( - "encoding/json" - "net/http" -) - -// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err -// and sets the content-type header to 'application/json'. It will handle -// ErrorCoder and Errors, and if necessary will create an envelope. -func ServeJSON(w http.ResponseWriter, err error) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - var sc int - - switch errs := err.(type) { - case Errors: - if len(errs) < 1 { - break - } - - if err, ok := errs[0].(ErrorCoder); ok { - sc = err.ErrorCode().Descriptor().HTTPStatusCode - } - case ErrorCoder: - sc = errs.ErrorCode().Descriptor().HTTPStatusCode - err = Errors{err} // create an envelope. - default: - // We just have an unhandled error type, so just place in an envelope - // and move along. 
- err = Errors{err} - } - - if sc == 0 { - sc = http.StatusInternalServerError - } - - w.WriteHeader(sc) - - return json.NewEncoder(w).Encode(err) -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go deleted file mode 100644 index d1e8826c6d7d..000000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/register.go +++ /dev/null @@ -1,138 +0,0 @@ -package errcode - -import ( - "fmt" - "net/http" - "sort" - "sync" -) - -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -var ( - // ErrorCodeUnknown is a generic error that can be used as a last - // resort if there is no situation-specific error message that can be used - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - HTTPStatusCode: http.StatusMethodNotAllowed, - }) - - // ErrorCodeUnauthorized is returned if a request requires - // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "authentication required", - Description: `The access controller was unable to authenticate - the client. Often this will be accompanied by a - Www-Authenticate HTTP response header indicating how to - authenticate.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - - // ErrorCodeDenied is returned if a client does not have sufficient - // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ - Value: "DENIED", - Message: "requested access to the resource is denied", - Description: `The access controller denied access for the - operation on a resource.`, - HTTPStatusCode: http.StatusForbidden, - }) - - // ErrorCodeUnavailable provides a common error to report unavailability - // of a service or endpoint. - ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ - Value: "UNAVAILABLE", - Message: "service unavailable", - Description: "Returned when a service is not available", - HTTPStatusCode: http.StatusServiceUnavailable, - }) - - // ErrorCodeTooManyRequests is returned if a client attempts too many - // times to contact a service endpoint. 
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ - Value: "TOOMANYREQUESTS", - Message: "too many requests", - Description: `Returned when a client attempts to contact a - service too many times`, - HTTPStatusCode: http.StatusTooManyRequests, - }) -) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - - descriptor.Code = ErrorCode(nextCode) - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return descriptor.Code -} - -type byValue []ErrorDescriptor - -func (a byValue) Len() int { return len(a) } -func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - desc := groupToDescriptors[name] - sort.Sort(byValue(desc)) - return desc -} - -// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are -// registered, irrespective of what group they're in -func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go index 504b0c90d7bb..19fc63d6589a 100644 --- a/vendor/github.com/docker/docker/api/common_unix.go +++ b/vendor/github.com/docker/docker/api/common_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package api // import "github.com/docker/docker/api" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index bada4a8e3c87..0bbe7470016d 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -3347,7 +3347,7 @@ definitions: Limits: description: "Define resources limits." $ref: "#/definitions/Limit" - Reservation: + Reservations: description: "Define resources reservation." $ref: "#/definitions/ResourceObject" RestartPolicy: @@ -5755,7 +5755,6 @@ paths: property1: "string" property2: "string" IpcMode: "" - LxcConf: [] Memory: 0 MemorySwap: 0 MemoryReservation: 0 @@ -8607,12 +8606,20 @@ paths: if `tty` was specified as part of creating and starting the exec instance. 
operationId: "ExecResize" responses: - 201: + 200: description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go index cf6fdf44026c..24c4fa8d9002 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package container // import "github.com/docker/docker/api/types/container" diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index 9d0f0dcbf0ba..5846f888fea2 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -1,3 +1,4 @@ +//go:build linux || freebsd || openbsd || netbsd || darwin || solaris || illumos || dragonfly // +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly package client // import "github.com/docker/docker/client" diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index b1d5fea5bd50..c5079ee539e9 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -4,8 +4,8 @@ import ( "context" "encoding/json" "net/url" + "path" - "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/versions" @@ -16,7 +16,6 @@ type configWrapper struct { *container.Config HostConfig *container.HostConfig NetworkingConfig *network.NetworkingConfig - Platform *specs.Platform } // ContainerCreate creates a new container based in the given configuration. @@ -38,8 +37,8 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config } query := url.Values{} - if platform != nil { - query.Set("platform", platforms.Format(*platform)) + if p := formatPlatform(platform); p != "" { + query.Set("platform", p) } if containerName != "" { @@ -61,3 +60,15 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config err = json.NewDecoder(serverResp.body).Decode(&response) return response, err } + +// formatPlatform returns a formatted string representing platform (e.g. linux/arm/v7). +// +// Similar to containerd's platforms.Format(), but does allow components to be +// omitted (e.g. 
pass "architecture" only, without "os": +// https://github.com/containerd/containerd/blob/v1.5.2/platforms/platforms.go#L243-L263 +func formatPlatform(platform *specs.Platform) string { + if platform == nil { + return "" + } + return path.Join(platform.OS, platform.Architecture, platform.Variant) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/file_unix.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/file_unix.go index e7b60952962e..1d2553c2800e 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/file_unix.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/file_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package loggerutils diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go new file mode 100644 index 000000000000..755a483d7a10 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go @@ -0,0 +1,211 @@ +package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils" + +import ( + "io" + "os" + "time" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/filenotify" + "github.com/fsnotify/fsnotify" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var errRetry = errors.New("retry") +var errDone = errors.New("done") + +type follow struct { + file *os.File + dec Decoder + fileWatcher filenotify.FileWatcher + logWatcher *logger.LogWatcher + notifyRotate, notifyEvict chan interface{} + oldSize int64 + retries int +} + +func (fl *follow) handleRotate() error { + name := fl.file.Name() + + fl.file.Close() + fl.fileWatcher.Remove(name) + + // retry when the file doesn't exist + var err error + for retries := 0; retries <= 5; retries++ { + f, err := open(name) + if err == nil || !os.IsNotExist(err) { + fl.file = f + break + } + } + if err != nil { + return err + } + if err := fl.fileWatcher.Add(name); err != nil { + return err + } + fl.dec.Reset(fl.file) + return nil +} + +func (fl *follow) handleMustClose(evictErr error) { + fl.file.Close() + fl.dec.Close() + fl.logWatcher.Err <- errors.Wrap(evictErr, "log reader evicted due to errors") + logrus.WithField("file", fl.file.Name()).Error("Log reader notified that it must re-open log file, some log data may not be streamed to the client.") +} + +func (fl *follow) waitRead() error { + select { + case e := <-fl.notifyEvict: + if e != nil { + err := e.(error) + fl.handleMustClose(err) + } + return errDone + case e := <-fl.fileWatcher.Events(): + switch e.Op { + case fsnotify.Write: + fl.dec.Reset(fl.file) + return nil + case fsnotify.Rename, fsnotify.Remove: + select { + case <-fl.notifyRotate: + case <-fl.logWatcher.WatchProducerGone(): + return errDone + case <-fl.logWatcher.WatchConsumerGone(): + return errDone + } + if err := fl.handleRotate(); err != nil { + return err + } + return nil + } + return errRetry + case err := <-fl.fileWatcher.Errors(): + logrus.Debugf("logger got error watching file: %v", err) + // Something happened, let's try and stay alive and create a new watcher + if fl.retries <= 5 { + fl.fileWatcher.Close() + fl.fileWatcher, err = watchFile(fl.file.Name()) + if err != nil { + return err + } + fl.retries++ + return errRetry + } + return err + case <-fl.logWatcher.WatchProducerGone(): + return errDone + case <-fl.logWatcher.WatchConsumerGone(): + return errDone + } +} + +func (fl *follow) handleDecodeErr(err error) error { + if !errors.Is(err, io.EOF) { + 
return err + } + + // Handle special case (#39235): max-file=1 and file was truncated + st, stErr := fl.file.Stat() + if stErr == nil { + size := st.Size() + defer func() { fl.oldSize = size }() + if size < fl.oldSize { // truncated + fl.file.Seek(0, 0) + fl.dec.Reset(fl.file) + return nil + } + } else { + logrus.WithError(stErr).Warn("logger: stat error") + } + + for { + err := fl.waitRead() + if err == nil { + break + } + if err == errRetry { + continue + } + return err + } + return nil +} + +func (fl *follow) mainLoop(since, until time.Time) { + for { + select { + case err := <-fl.notifyEvict: + if err != nil { + fl.handleMustClose(err.(error)) + } + return + default: + } + msg, err := fl.dec.Decode() + if err != nil { + if err := fl.handleDecodeErr(err); err != nil { + if err == errDone { + return + } + // we got an unrecoverable error, so return + fl.logWatcher.Err <- err + return + } + // ready to try again + continue + } + + fl.retries = 0 // reset retries since we've succeeded + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + if !until.IsZero() && msg.Timestamp.After(until) { + return + } + // send the message, unless the consumer is gone + select { + case e := <-fl.notifyEvict: + if e != nil { + err := e.(error) + logrus.WithError(err).Debug("Reader evicted while sending log message") + fl.logWatcher.Err <- err + } + return + case fl.logWatcher.Msg <- msg: + case <-fl.logWatcher.WatchConsumerGone(): + return + } + } +} + +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate, notifyEvict chan interface{}, dec Decoder, since, until time.Time) { + dec.Reset(f) + + name := f.Name() + fileWatcher, err := watchFile(name) + if err != nil { + logWatcher.Err <- err + return + } + defer func() { + f.Close() + dec.Close() + fileWatcher.Close() + }() + + fl := &follow{ + file: f, + oldSize: -1, + logWatcher: logWatcher, + fileWatcher: fileWatcher, + notifyRotate: notifyRotate, + notifyEvict: notifyEvict, + dec: dec, + } + fl.mainLoop(since, until) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go index 6b42d9dd30d7..d4f3f5e149b5 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go @@ -17,7 +17,6 @@ import ( "github.com/docker/docker/pkg/filenotify" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/pubsub" - "github.com/fsnotify/fsnotify" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -608,179 +607,6 @@ func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, ge } } -func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate, notifyEvict chan interface{}, dec Decoder, since, until time.Time) { - dec.Reset(f) - - name := f.Name() - fileWatcher, err := watchFile(name) - if err != nil { - logWatcher.Err <- err - return - } - defer func() { - f.Close() - dec.Close() - fileWatcher.Close() - }() - - var retries int - handleRotate := func() error { - f.Close() - fileWatcher.Remove(name) - - // retry when the file doesn't exist - for retries := 0; retries <= 5; retries++ { - f, err = open(name) - if err == nil || !os.IsNotExist(err) { - break - } - } - if err != nil { - return err - } - if err := fileWatcher.Add(name); err != nil { - return err - } - dec.Reset(f) - return nil - } - - errRetry := errors.New("retry") - errDone := errors.New("done") - - handleMustClose := func(evictErr error) { - 
f.Close() - dec.Close() - logWatcher.Err <- errors.Wrap(err, "log reader evicted due to errors") - logrus.WithField("file", f.Name()).Error("Log reader notified that it must re-open log file, some log data may not be streamed to the client.") - } - - waitRead := func() error { - select { - case e := <-notifyEvict: - if e != nil { - err := e.(error) - handleMustClose(err) - } - return errDone - case e := <-fileWatcher.Events(): - switch e.Op { - case fsnotify.Write: - dec.Reset(f) - return nil - case fsnotify.Rename, fsnotify.Remove: - select { - case <-notifyRotate: - case <-logWatcher.WatchProducerGone(): - return errDone - case <-logWatcher.WatchConsumerGone(): - return errDone - } - if err := handleRotate(); err != nil { - return err - } - return nil - } - return errRetry - case err := <-fileWatcher.Errors(): - logrus.Debugf("logger got error watching file: %v", err) - // Something happened, let's try and stay alive and create a new watcher - if retries <= 5 { - fileWatcher.Close() - fileWatcher, err = watchFile(name) - if err != nil { - return err - } - retries++ - return errRetry - } - return err - case <-logWatcher.WatchProducerGone(): - return errDone - case <-logWatcher.WatchConsumerGone(): - return errDone - } - } - - oldSize := int64(-1) - handleDecodeErr := func(err error) error { - if !errors.Is(err, io.EOF) { - return err - } - - // Handle special case (#39235): max-file=1 and file was truncated - st, stErr := f.Stat() - if stErr == nil { - size := st.Size() - defer func() { oldSize = size }() - if size < oldSize { // truncated - f.Seek(0, 0) - return nil - } - } else { - logrus.WithError(stErr).Warn("logger: stat error") - } - - for { - err := waitRead() - if err == nil { - break - } - if err == errRetry { - continue - } - return err - } - return nil - } - - // main loop - for { - select { - case err := <-notifyEvict: - if err != nil { - handleMustClose(err.(error)) - } - return - default: - } - msg, err := dec.Decode() - if err != nil { - if err := handleDecodeErr(err); err != nil { - if err == errDone { - return - } - // we got an unrecoverable error, so return - logWatcher.Err <- err - return - } - // ready to try again - continue - } - - retries = 0 // reset retries since we've succeeded - if !since.IsZero() && msg.Timestamp.Before(since) { - continue - } - if !until.IsZero() && msg.Timestamp.After(until) { - return - } - // send the message, unless the consumer is gone - select { - case e := <-notifyEvict: - if e != nil { - err := e.(error) - logrus.WithError(err).Debug("Reader evicted while sending log message") - logWatcher.Err <- err - } - return - case logWatcher.Msg <- msg: - case <-logWatcher.WatchConsumerGone(): - return - } - } -} - func watchFile(name string) (filenotify.FileWatcher, error) { var fileWatcher filenotify.FileWatcher diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go b/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go index e9a16af9b1f3..a59fda860a59 100644 --- a/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go +++ b/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go @@ -1,3 +1,4 @@ +//go:build linux || freebsd // +build linux freebsd package logger // import "github.com/docker/docker/daemon/logger" diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go index 2ad47cc07798..fbbeba0c2170 100644 --- a/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go +++ 
b/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !freebsd // +build !linux,!freebsd package logger // import "github.com/docker/docker/daemon/logger" diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go index 07552f1cc1dd..5afe486779db 100644 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -1,78 +1,11 @@ package errdefs // import "github.com/docker/docker/errdefs" import ( - "fmt" "net/http" - containerderrors "github.com/containerd/containerd/errdefs" - "github.com/docker/distribution/registry/api/errcode" "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) -// GetHTTPErrorStatusCode retrieves status code from error message. -func GetHTTPErrorStatusCode(err error) int { - if err == nil { - logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") - return http.StatusInternalServerError - } - - var statusCode int - - // Stop right there - // Are you sure you should be adding a new error class here? Do one of the existing ones work? - - // Note that the below functions are already checking the error causal chain for matches. - switch { - case IsNotFound(err): - statusCode = http.StatusNotFound - case IsInvalidParameter(err): - statusCode = http.StatusBadRequest - case IsConflict(err): - statusCode = http.StatusConflict - case IsUnauthorized(err): - statusCode = http.StatusUnauthorized - case IsUnavailable(err): - statusCode = http.StatusServiceUnavailable - case IsForbidden(err): - statusCode = http.StatusForbidden - case IsNotModified(err): - statusCode = http.StatusNotModified - case IsNotImplemented(err): - statusCode = http.StatusNotImplemented - case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err): - statusCode = http.StatusInternalServerError - default: - statusCode = statusCodeFromGRPCError(err) - if statusCode != http.StatusInternalServerError { - return statusCode - } - statusCode = statusCodeFromContainerdError(err) - if statusCode != http.StatusInternalServerError { - return statusCode - } - statusCode = statusCodeFromDistributionError(err) - if statusCode != http.StatusInternalServerError { - return statusCode - } - if e, ok := err.(causer); ok { - return GetHTTPErrorStatusCode(e.Cause()) - } - - logrus.WithFields(logrus.Fields{ - "module": "api", - "error_type": fmt.Sprintf("%T", err), - }).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err) - } - - if statusCode == 0 { - statusCode = http.StatusInternalServerError - } - - return statusCode -} - // FromStatusCode creates an errdef error, based on the provided HTTP status-code func FromStatusCode(err error, statusCode int) error { if err == nil { @@ -100,10 +33,10 @@ func FromStatusCode(err error, statusCode int) error { err = System(err) } default: - logrus.WithFields(logrus.Fields{ + logrus.WithError(err).WithFields(logrus.Fields{ "module": "api", - "status_code": fmt.Sprintf("%d", statusCode), - }).Debugf("FIXME: Got an status-code for which error does not match any expected type!!!: %d", statusCode) + "status_code": statusCode, + }).Debug("FIXME: Got an status-code for which error does not match any expected type!!!") switch { case statusCode >= 200 && statusCode < 400: @@ -118,74 +51,3 @@ func FromStatusCode(err error, statusCode int) error { } return err } - -// 
statusCodeFromGRPCError returns status code according to gRPC error -func statusCodeFromGRPCError(err error) int { - switch status.Code(err) { - case codes.InvalidArgument: // code 3 - return http.StatusBadRequest - case codes.NotFound: // code 5 - return http.StatusNotFound - case codes.AlreadyExists: // code 6 - return http.StatusConflict - case codes.PermissionDenied: // code 7 - return http.StatusForbidden - case codes.FailedPrecondition: // code 9 - return http.StatusBadRequest - case codes.Unauthenticated: // code 16 - return http.StatusUnauthorized - case codes.OutOfRange: // code 11 - return http.StatusBadRequest - case codes.Unimplemented: // code 12 - return http.StatusNotImplemented - case codes.Unavailable: // code 14 - return http.StatusServiceUnavailable - default: - // codes.Canceled(1) - // codes.Unknown(2) - // codes.DeadlineExceeded(4) - // codes.ResourceExhausted(8) - // codes.Aborted(10) - // codes.Internal(13) - // codes.DataLoss(15) - return http.StatusInternalServerError - } -} - -// statusCodeFromDistributionError returns status code according to registry errcode -// code is loosely based on errcode.ServeJSON() in docker/distribution -func statusCodeFromDistributionError(err error) int { - switch errs := err.(type) { - case errcode.Errors: - if len(errs) < 1 { - return http.StatusInternalServerError - } - if _, ok := errs[0].(errcode.ErrorCoder); ok { - return statusCodeFromDistributionError(errs[0]) - } - case errcode.ErrorCoder: - return errs.ErrorCode().Descriptor().HTTPStatusCode - } - return http.StatusInternalServerError -} - -// statusCodeFromContainerdError returns status code for containerd errors when -// consumed directly (not through gRPC) -func statusCodeFromContainerdError(err error) int { - switch { - case containerderrors.IsInvalidArgument(err): - return http.StatusBadRequest - case containerderrors.IsNotFound(err): - return http.StatusNotFound - case containerderrors.IsAlreadyExists(err): - return http.StatusConflict - case containerderrors.IsFailedPrecondition(err): - return http.StatusPreconditionFailed - case containerderrors.IsUnavailable(err): - return http.StatusServiceUnavailable - case containerderrors.IsNotImplemented(err): - return http.StatusNotImplemented - default: - return http.StatusInternalServerError - } -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go index dc894f91314e..4e67ec2f53fd 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package ioutils // import "github.com/docker/docker/pkg/ioutils" diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go index 58058f282868..d645da8ce421 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package plugins // import "github.com/docker/docker/pkg/plugins" diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go index cdfbe9345827..23e9d5715a58 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package plugins // import 
"github.com/docker/docker/pkg/plugins" diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go index 85f6ab07155b..c245a89513f8 100644 --- a/vendor/github.com/docker/go-units/size.go +++ b/vendor/github.com/docker/go-units/size.go @@ -2,7 +2,6 @@ package units import ( "fmt" - "regexp" "strconv" "strings" ) @@ -26,16 +25,17 @@ const ( PiB = 1024 * TiB ) -type unitMap map[string]int64 +type unitMap map[byte]int64 var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) + decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} + binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} ) -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +var ( + decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +) func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { i := 0 @@ -89,20 +89,66 @@ func RAMInBytes(size string) (int64, error) { // Parses the human-readable size string into the amount it represents. func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 4 { + // TODO: rewrite to use strings.Cut if there's a space + // once Go < 1.18 is deprecated. + sep := strings.LastIndexAny(sizeStr, "01234567890. ") + if sep == -1 { + // There should be at least a digit. return -1, fmt.Errorf("invalid size: '%s'", sizeStr) } + var num, sfx string + if sizeStr[sep] != ' ' { + num = sizeStr[:sep+1] + sfx = sizeStr[sep+1:] + } else { + // Omit the space separator. + num = sizeStr[:sep] + sfx = sizeStr[sep+1:] + } - size, err := strconv.ParseFloat(matches[1], 64) + size, err := strconv.ParseFloat(num, 64) if err != nil { return -1, err } + // Backward compatibility: reject negative sizes. + if size < 0 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + if len(sfx) == 0 { + return int64(size), nil + } - unitPrefix := strings.ToLower(matches[3]) - if mul, ok := uMap[unitPrefix]; ok { + // Process the suffix. + + if len(sfx) > 3 { // Too long. + goto badSuffix + } + sfx = strings.ToLower(sfx) + // Trivial case: b suffix. + if sfx[0] == 'b' { + if len(sfx) > 1 { // no extra characters allowed after b. + goto badSuffix + } + return int64(size), nil + } + // A suffix from the map. + if mul, ok := uMap[sfx[0]]; ok { size *= float64(mul) + } else { + goto badSuffix + } + + // The suffix may have extra "b" or "ib" (e.g. KiB or MB). 
+ switch { + case len(sfx) == 2 && sfx[1] != 'b': + goto badSuffix + case len(sfx) == 3 && sfx[1:] != "ib": + goto badSuffix } return int64(size), nil + +badSuffix: + return -1, fmt.Errorf("invalid suffix: '%s'", sfx) } diff --git a/vendor/github.com/go-kit/log/README.md b/vendor/github.com/go-kit/log/README.md index a0931951df8d..8067794657cb 100644 --- a/vendor/github.com/go-kit/log/README.md +++ b/vendor/github.com/go-kit/log/README.md @@ -1,5 +1,10 @@ # package log +[![Go Reference](https://pkg.go.dev/badge/github.com/go-kit/log.svg)](https://pkg.go.dev/github.com/go-kit/log) +[![Go Report Card](https://goreportcard.com/badge/go-kit/log)](https://goreportcard.com/report/go-kit/log) +[![GitHub Actions](https://github.com/go-kit/log/actions/workflows/test.yml/badge.svg)](https://github.com/go-kit/log/actions/workflows/test.yml) +[![Coverage Status](https://coveralls.io/repos/github/go-kit/log/badge.svg?branch=main)](https://coveralls.io/github/go-kit/log?branch=main) + `package log` provides a minimal interface for structured logging in services. It may be wrapped to encode conventions, enforce type-safety, provide leveled logging, and so on. It can be used for both typical application log events, diff --git a/vendor/github.com/go-kit/log/json_logger.go b/vendor/github.com/go-kit/log/json_logger.go index 0cedbf82478e..d0faed4f0981 100644 --- a/vendor/github.com/go-kit/log/json_logger.go +++ b/vendor/github.com/go-kit/log/json_logger.go @@ -68,7 +68,7 @@ func safeString(str fmt.Stringer) (s string) { if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { s = "NULL" } else { - panic(panicVal) + s = fmt.Sprintf("PANIC in String method: %v", panicVal) } } }() @@ -82,7 +82,7 @@ func safeError(err error) (s interface{}) { if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { s = nil } else { - panic(panicVal) + s = fmt.Sprintf("PANIC in Error method: %v", panicVal) } } }() diff --git a/vendor/github.com/go-kit/log/level/doc.go b/vendor/github.com/go-kit/log/level/doc.go index 505d307b11b7..fd681dcf922a 100644 --- a/vendor/github.com/go-kit/log/level/doc.go +++ b/vendor/github.com/go-kit/log/level/doc.go @@ -7,6 +7,17 @@ // logger = level.NewFilter(logger, level.AllowInfo()) // <-- // logger = log.With(logger, "ts", log.DefaultTimestampUTC) // +// It's also possible to configure log level from a string. For instance from +// a flag, environment variable or configuration file. +// +// fs := flag.NewFlagSet("myprogram") +// lvl := fs.String("log", "info", "debug, info, warn, error") +// +// var logger log.Logger +// logger = log.NewLogfmtLogger(os.Stderr) +// logger = level.NewFilter(logger, level.Allow(level.ParseDefault(*lvl, level.InfoValue()))) // <-- +// logger = log.With(logger, "ts", log.DefaultTimestampUTC) +// // Then, at the callsites, use one of the level.Debug, Info, Warn, or Error // helper methods to emit leveled log events. // diff --git a/vendor/github.com/go-kit/log/level/level.go b/vendor/github.com/go-kit/log/level/level.go index c94756c6bea0..c641d9855243 100644 --- a/vendor/github.com/go-kit/log/level/level.go +++ b/vendor/github.com/go-kit/log/level/level.go @@ -1,6 +1,14 @@ package level -import "github.com/go-kit/log" +import ( + "errors" + "strings" + + "github.com/go-kit/log" +) + +// ErrInvalidLevelString is returned whenever an invalid string is passed to Parse. +var ErrInvalidLevelString = errors.New("invalid level string") // Error returns a logger that includes a Key/ErrorValue pair. 
func Error(logger log.Logger) log.Logger { @@ -66,6 +74,22 @@ func (l *logger) Log(keyvals ...interface{}) error { // Option sets a parameter for the leveled logger. type Option func(*logger) +// Allow the provided log level to pass. +func Allow(v Value) Option { + switch v { + case debugValue: + return AllowDebug() + case infoValue: + return AllowInfo() + case warnValue: + return AllowWarn() + case errorValue: + return AllowError() + default: + return AllowNone() + } +} + // AllowAll is an alias for AllowDebug. func AllowAll() Option { return AllowDebug() @@ -100,6 +124,33 @@ func allowed(allowed level) Option { return func(l *logger) { l.allowed = allowed } } +// Parse a string to its corresponding level value. Valid strings are "debug", +// "info", "warn", and "error". Strings are normalized via strings.TrimSpace and +// strings.ToLower. +func Parse(level string) (Value, error) { + switch strings.TrimSpace(strings.ToLower(level)) { + case debugValue.name: + return debugValue, nil + case infoValue.name: + return infoValue, nil + case warnValue.name: + return warnValue, nil + case errorValue.name: + return errorValue, nil + default: + return nil, ErrInvalidLevelString + } +} + +// ParseDefault calls Parse and returns the default Value on error. +func ParseDefault(level string, def Value) Value { + v, err := Parse(level) + if err != nil { + return def + } + return v +} + // ErrNotAllowed sets the error to return from Log when it squelches a log // event disallowed by the configured Allow[Level] option. By default, // ErrNotAllowed is nil; in this case the log event is squelched with no diff --git a/vendor/github.com/go-kit/log/staticcheck.conf b/vendor/github.com/go-kit/log/staticcheck.conf new file mode 100644 index 000000000000..528438b97d21 --- /dev/null +++ b/vendor/github.com/go-kit/log/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all"] diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index ad825f5f0ae9..ab5931181317 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -105,14 +105,18 @@ with higher verbosity means more (and less important) logs will be generated. 
There are implementations for the following logging libraries: - **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) +- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) - **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) +- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) - **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) - **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) +- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) +- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) ## FAQ diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index b23ab9679a84..7accdb0c4003 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -351,15 +351,15 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s if v, ok := value.(logr.Marshaler); ok { // Replace the value with what the type wants to get logged. // That then gets handled below via reflection. - value = v.MarshalLog() + value = invokeMarshaler(v) } // Handle types that want to format themselves. switch v := value.(type) { case fmt.Stringer: - value = v.String() + value = invokeStringer(v) case error: - value = v.Error() + value = invokeError(v) } // Handling the most common types without reflect is a small perf win. @@ -408,8 +408,9 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s if i > 0 { buf.WriteByte(',') } + k, _ := v[i].(string) // sanitize() above means no need to check success // arbitrary keys might need escaping - buf.WriteString(prettyString(v[i].(string))) + buf.WriteString(prettyString(k)) buf.WriteByte(':') buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) } @@ -596,6 +597,33 @@ func isEmpty(v reflect.Value) bool { return false } +func invokeMarshaler(m logr.Marshaler) (ret interface{}) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("", r) + } + }() + return m.MarshalLog() +} + +func invokeStringer(s fmt.Stringer) (ret string) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("", r) + } + }() + return s.String() +} + +func invokeError(e error) (ret string) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("", r) + } + }() + return e.Error() +} + // Caller represents the original call site for a log line, after considering // logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and // Line fields will always be provided, while the Func field is optional. 
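The funcr.go hunk above wraps calls into user-supplied logr.Marshaler, fmt.Stringer, and error values in recover guards, so a panicking implementation is reported as a placeholder value instead of taking down the logging path. A minimal, self-contained sketch of that defer/recover pattern, using illustrative names (`brokenStringer`, `safeStringify`) that are not part of the vendored code:

```go
package main

import "fmt"

// brokenStringer panics inside String(), as a buggy or nil-receiver
// implementation might in real code.
type brokenStringer struct{}

func (brokenStringer) String() string { panic("boom") }

// safeStringify calls s.String() but converts a panic into a placeholder
// string, mirroring the guard the funcr change adds around Stringer,
// error, and Marshaler invocations.
func safeStringify(s fmt.Stringer) (ret string) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %v>", r)
		}
	}()
	return s.String()
}

func main() {
	// Prints the placeholder instead of crashing.
	fmt.Println(safeStringify(brokenStringer{}))
}
```

The patched helpers apply the same shape to MarshalLog() and Error(); only the return type and the placeholder text differ.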
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index c05482a20319..c3b56b3d2c5e 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -115,6 +115,15 @@ limitations under the License. // may be any Go value, but how the value is formatted is determined by the // LogSink implementation. // +// Logger instances are meant to be passed around by value. Code that receives +// such a value can call its methods without having to check whether the +// instance is ready for use. +// +// Calling methods with the null logger (Logger{}) as instance will crash +// because it has no LogSink. Therefore this null logger should never be passed +// around. For cases where passing a logger is optional, a pointer to Logger +// should be used. +// // Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go new file mode 100644 index 000000000000..8956c30884d9 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -0,0 +1,63 @@ +package internal + +import ( + "net/url" + "regexp" + "strings" +) + +const ( + defaultHttpPort = ":80" + defaultHttpsPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) + +// NormalizeURL will normalize the specified URL +// This was added to replace a previous call to the no longer maintained purell library: +// The call that was used looked like the following: +// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) +// +// To explain all that was included in the call above, purell.FlagsSafe was really just the following: +// - FlagLowercaseScheme +// - FlagLowercaseHost +// - FlagRemoveDefaultPort +// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +func NormalizeURL(u *url.URL) { + lowercaseScheme(u) + lowercaseHost(u) + removeDefaultPort(u) + removeDuplicateSlashes(u) +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { + return "" + } + return val + }) + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go index 3bc0a6e26f8b..cfdef03e5d94 100644 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -30,8 +30,8 @@ import ( "net/url" "strings" - "github.com/PuerkitoBio/purell" "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/jsonreference/internal" ) const ( @@ -114,7 +114,9 @@ func (r *Ref) parse(jsonReferenceString string) error { return err } - r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) + 
internal.NormalizeURL(parsed) + + r.referenceURL = parsed refURL := r.referenceURL if refURL.Scheme != "" && refURL.Host != "" { diff --git a/vendor/github.com/go-openapi/swag/.gitattributes b/vendor/github.com/go-openapi/swag/.gitattributes new file mode 100644 index 000000000000..49ad52766abb --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.gitattributes @@ -0,0 +1,2 @@ +# gofmt always uses LF, whereas Git uses CRLF on Windows. +*.go text eol=lf diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 842ac1c095c4..bf503e400016 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -39,3 +39,16 @@ linters: - paralleltest - thelper - ifshort + - gomoddirectives + - cyclop + - forcetypeassert + - ireturn + - tagliatelle + - varnamelen + - goimports + - tenv + - golint + - exhaustruct + - nilnil + - nonamedreturns + - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/.travis.yml b/vendor/github.com/go-openapi/swag/.travis.yml deleted file mode 100644 index fc25a887286c..000000000000 --- a/vendor/github.com/go-openapi/swag/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -arch: -- amd64 -jobs: - include: - # include arch ppc, but only for latest go version - skip testing for race - - go: 1.x - arch: ppc64le - install: ~ - script: - - go test -v - - #- go: 1.x - # arch: arm - # install: ~ - # script: - # - go test -v - - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -language: go -notifications: - slack: - secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go index 8d2c8c5014e2..55094cb74c4d 100644 --- a/vendor/github.com/go-openapi/swag/doc.go +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -17,16 +17,15 @@ Package swag contains a bunch of helper functions for go-openapi and go-swagger You may also use it standalone for your projects. 
- * convert between value and pointers for builtin types - * convert from string to builtin types (wraps strconv) - * fast json concatenation - * search in path - * load from file or http - * name mangling - + - convert between value and pointers for builtin types + - convert from string to builtin types (wraps strconv) + - fast json concatenation + - search in path + - load from file or http + - name mangling This repo has only few dependencies outside of the standard library: - * YAML utilities depend on gopkg.in/yaml.v2 + - YAML utilities depend on gopkg.in/yaml.v2 */ package swag diff --git a/vendor/github.com/go-openapi/swag/file.go b/vendor/github.com/go-openapi/swag/file.go new file mode 100644 index 000000000000..16accc55f82e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/file.go @@ -0,0 +1,33 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "mime/multipart" + +// File represents an uploaded file. +type File struct { + Data multipart.File + Header *multipart.FileHeader +} + +// Read bytes from the file +func (f *File) Read(p []byte) (n int, err error) { + return f.Data.Read(p) +} + +// Close the file +func (f *File) Close() error { + return f.Data.Close() +} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 9a60409725e4..00038c3773c9 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -16,10 +16,11 @@ package swag import ( "fmt" - "io/ioutil" + "io" "log" "net/http" "net/url" + "os" "path/filepath" "runtime" "strings" @@ -40,13 +41,13 @@ var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(timeout))(path) + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) } // LoadStrategy returns a loader function for a given path or uri @@ -86,7 +87,7 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func( func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { return func(path string) ([]byte, error) { client := &http.Client{Timeout: timeout} - req, err := http.NewRequest("GET", path, nil) // nolint: noctx + req, err := http.NewRequest(http.MethodGet, path, nil) //nolint:noctx if err != nil { return nil, err } @@ -115,6 +116,6 @@ func loadHTTPBytes(timeout time.Duration) func(path string) 
([]byte, error) { return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } } diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go index c2e686d31337..f5228b82c0f8 100644 --- a/vendor/github.com/go-openapi/swag/post_go18.go +++ b/vendor/github.com/go-openapi/swag/post_go18.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.8 // +build go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go index eb2f2d8bc784..7c7da9c08804 100644 --- a/vendor/github.com/go-openapi/swag/post_go19.go +++ b/vendor/github.com/go-openapi/swag/post_go19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.9 // +build go1.9 package swag diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go index 6607f339357b..2757d9b95f82 100644 --- a/vendor/github.com/go-openapi/swag/pre_go18.go +++ b/vendor/github.com/go-openapi/swag/pre_go18.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.8 // +build !go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go index 4bae187d1e43..0565db377bef 100644 --- a/vendor/github.com/go-openapi/swag/pre_go19.go +++ b/vendor/github.com/go-openapi/swag/pre_go19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package swag diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index 193702f2cec0..f78ab684a0ae 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -99,10 +99,11 @@ const ( ) // JoinByFormat joins a string array by a known format (e.g. 
swagger's collectionFormat attribute): -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) func JoinByFormat(data []string, format string) []string { if len(data) == 0 { return data @@ -124,11 +125,11 @@ func JoinByFormat(data []string, format string) []string { } // SplitByFormat splits a string by a known format: -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) // +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) func SplitByFormat(data, format string) []string { if data == "" { return nil diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index ec96914405b7..34e5923df073 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -22,7 +22,7 @@ import ( "github.com/mailru/easyjson/jlexer" "github.com/mailru/easyjson/jwriter" - yaml "gopkg.in/yaml.v2" + yaml "gopkg.in/yaml.v3" ) // YAMLMatcher matches yaml @@ -43,16 +43,122 @@ func YAMLToJSON(data interface{}) (json.RawMessage, error) { // BytesToYAMLDoc converts a byte slice into a YAML document func BytesToYAMLDoc(data []byte) (interface{}, error) { - var canary map[interface{}]interface{} // validate this is an object and not a different type - if err := yaml.Unmarshal(data, &canary); err != nil { + var document yaml.Node // preserve order that is present in the document + if err := yaml.Unmarshal(data, &document); err != nil { return nil, err } + if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { + return nil, fmt.Errorf("only YAML documents that are objects are supported") + } + return &document, nil +} - var document yaml.MapSlice // preserve order that is present in the document - if err := yaml.Unmarshal(data, &document); err != nil { - return nil, err +func yamlNode(root *yaml.Node) (interface{}, error) { + switch root.Kind { + case yaml.DocumentNode: + return yamlDocument(root) + case yaml.SequenceNode: + return yamlSequence(root) + case yaml.MappingNode: + return yamlMapping(root) + case yaml.ScalarNode: + return yamlScalar(root) + case yaml.AliasNode: + return yamlNode(root.Alias) + default: + return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind) + } +} + +func yamlDocument(node *yaml.Node) (interface{}, error) { + if len(node.Content) != 1 { + return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content)) + } + return yamlNode(node.Content[0]) +} + +func yamlMapping(node *yaml.Node) (interface{}, error) { + m := make(JSONMapSlice, len(node.Content)/2) + + var j int + for i := 0; i < len(node.Content); i += 2 { + var nmi JSONMapItem + k, err := yamlStringScalarC(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML map key: %w", err) + } + nmi.Key = k + v, err := yamlNode(node.Content[i+1]) + if err != nil { + return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err) + } + nmi.Value = v + m[j] = nmi + j++ + } + return m, nil +} + +func yamlSequence(node *yaml.Node) (interface{}, error) { + s := make([]interface{}, 0) + + for i := 0; i < len(node.Content); i++ { + + v, err := 
yamlNode(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err) + } + s = append(s, v) + } + return s, nil +} + +const ( // See https://yaml.org/type/ + yamlStringScalar = "tag:yaml.org,2002:str" + yamlIntScalar = "tag:yaml.org,2002:int" + yamlBoolScalar = "tag:yaml.org,2002:bool" + yamlFloatScalar = "tag:yaml.org,2002:float" +) + +func yamlScalar(node *yaml.Node) (interface{}, error) { + switch node.LongTag() { + case yamlStringScalar: + return node.Value, nil + case yamlBoolScalar: + b, err := strconv.ParseBool(node.Value) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err) + } + return b, nil + case yamlIntScalar: + i, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w", node.Value, err) + } + return i, nil + case yamlFloatScalar: + f, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err) + } + return f, nil + case "tag:yaml.org,2002:null": + return nil, nil + default: + return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) + } +} + +func yamlStringScalarC(node *yaml.Node) (string, error) { + if node.Kind != yaml.ScalarNode { + return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind) + } + switch node.LongTag() { + case yamlStringScalar, yamlIntScalar, yamlFloatScalar: + return node.Value, nil + default: + return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag()) } - return document, nil } // JSONMapSlice represent a JSON object, with the order of keys maintained @@ -105,6 +211,113 @@ func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { *s = result } +func (s JSONMapSlice) MarshalYAML() (interface{}, error) { + var n yaml.Node + n.Kind = yaml.DocumentNode + var nodes []*yaml.Node + for _, item := range s { + nn, err := json2yaml(item.Value) + if err != nil { + return nil, err + } + ns := []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: item.Key, + }, + nn, + } + nodes = append(nodes, ns...) 
+ } + + n.Content = []*yaml.Node{ + { + Kind: yaml.MappingNode, + Content: nodes, + }, + } + + return yaml.Marshal(&n) +} + +func json2yaml(item interface{}) (*yaml.Node, error) { + switch val := item.(type) { + case JSONMapSlice: + var n yaml.Node + n.Kind = yaml.MappingNode + for i := range val { + childNode, err := json2yaml(&val[i].Value) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val[i].Key, + }, childNode) + } + return &n, nil + case map[string]interface{}: + var n yaml.Node + n.Kind = yaml.MappingNode + for k, v := range val { + childNode, err := json2yaml(v) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: k, + }, childNode) + } + return &n, nil + case []interface{}: + var n yaml.Node + n.Kind = yaml.SequenceNode + for i := range val { + childNode, err := json2yaml(val[i]) + if err != nil { + return nil, err + } + n.Content = append(n.Content, childNode) + } + return &n, nil + case string: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val, + }, nil + case float64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlFloatScalar, + Value: strconv.FormatFloat(val, 'f', -1, 64), + }, nil + case int64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatInt(val, 10), + }, nil + case uint64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatUint(val, 10), + }, nil + case bool: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlBoolScalar, + Value: strconv.FormatBool(val), + }, nil + } + return nil, nil +} + // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice type JSONMapItem struct { Key string @@ -173,23 +386,10 @@ func transformData(input interface{}) (out interface{}, err error) { } switch in := input.(type) { - case yaml.MapSlice: - - o := make(JSONMapSlice, len(in)) - for i, mi := range in { - var nmi JSONMapItem - if nmi.Key, err = format(mi.Key); err != nil { - return nil, err - } - - v, ert := transformData(mi.Value) - if ert != nil { - return nil, ert - } - nmi.Value = v - o[i] = nmi - } - return o, nil + case yaml.Node: + return yamlNode(&in) + case *yaml.Node: + return yamlNode(in) case map[interface{}]interface{}: o := make(JSONMapSlice, 0, len(in)) for ke, va := range in { diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml index 53ce42a818e5..de514554a9cc 100644 --- a/vendor/github.com/go-redis/redis/v8/.golangci.yml +++ b/vendor/github.com/go-redis/redis/v8/.golangci.yml @@ -2,26 +2,3 @@ run: concurrency: 8 deadline: 5m tests: false -linters: - enable-all: true - disable: - - funlen - - gochecknoglobals - - gochecknoinits - - gocognit - - goconst - - godox - - gosec - - maligned - - wsl - - gomnd - - goerr113 - - exhaustive - - nestif - - nlreturn - - exhaustivestruct - - wrapcheck - - errorlint - - cyclop - - forcetypeassert - - forbidigo diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md index c575c56803c6..195e51933866 100644 --- a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md +++ b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md @@ -1,3 +1,31 @@ +## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17) + + +### Bug Fixes + +* add missing Expire methods to Cmdable 
([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a)) +* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c)) +* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475)) +* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2)) +* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32)) +* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4)) +* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc)) +* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f)) +* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2)) + + +### Features + +* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8)) +* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e)) +* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7)) +* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e)) +* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b)) +* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417)) +* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d)) + + + ## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04) diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md index 9c272413aac4..f3b6a018cb54 100644 --- a/vendor/github.com/go-redis/redis/v8/README.md +++ b/vendor/github.com/go-redis/redis/v8/README.md @@ -1,19 +1,16 @@ -

-  [image banner: "All-in-one tool to optimize performance and monitor errors & logs"]

- -# Redis client for Golang +# Redis client for Go ![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg) [![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) [![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/) -[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj) -- To ask questions, join [Discord](https://discord.gg/rWtp5Aj) or use - [Discussions](https://github.com/go-redis/redis/discussions). -- [Newsletter](https://blog.uptrace.dev/pages/newsletter.html) to get latest updates. +go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). +Uptrace is an open source and blazingly fast **distributed tracing** backend powered by +OpenTelemetry and ClickHouse. Give it a star as well! + +## Resources + +- [Discussions](https://github.com/go-redis/redis/discussions) - [Documentation](https://redis.uptrace.dev) - [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) - [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples) @@ -22,15 +19,14 @@ Other projects you may like: - [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite. -- [treemux](https://github.com/vmihailenco/treemux) - high-speed, flexible, tree-based HTTP router - for Go. +- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go. ## Ecosystem -- [Redis Mock](https://github.com/go-redis/redismock). -- [Distributed Locks](https://github.com/bsm/redislock). -- [Redis Cache](https://github.com/go-redis/cache). -- [Rate limiting](https://github.com/go-redis/redis_rate). +- [Redis Mock](https://github.com/go-redis/redismock) +- [Distributed Locks](https://github.com/bsm/redislock) +- [Redis Cache](https://github.com/go-redis/cache) +- [Rate limiting](https://github.com/go-redis/redis_rate) ## Features @@ -39,16 +35,16 @@ Other projects you may like: [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support. - [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub). - [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline). -- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-Pipeline) and - [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline). +- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and + [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline). - [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script). - [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options). - [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient). - [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient). -- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient--ManualSetup) +- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup) without using cluster mode and Redis Sentinel. - [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing). 
-- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#ex-package--Instrumentation). +- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation). ## Installation @@ -72,6 +68,7 @@ go get github.com/go-redis/redis/v8 import ( "context" "github.com/go-redis/redis/v8" + "fmt" ) var ctx = context.Background() diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go index 0079b5966f6c..4bb12a85be43 100644 --- a/vendor/github.com/go-redis/redis/v8/command.go +++ b/vendor/github.com/go-redis/redis/v8/command.go @@ -20,7 +20,7 @@ type Cmder interface { String() string stringArg(int) string firstKeyPos() int8 - setFirstKeyPos(int8) + SetFirstKeyPos(int8) readTimeout() *time.Duration readReply(rd *proto.Reader) error @@ -151,15 +151,21 @@ func (cmd *baseCmd) stringArg(pos int) string { if pos < 0 || pos >= len(cmd.args) { return "" } - s, _ := cmd.args[pos].(string) - return s + arg := cmd.args[pos] + switch v := arg.(type) { + case string: + return v + default: + // TODO: consider using appendArg + return fmt.Sprint(v) + } } func (cmd *baseCmd) firstKeyPos() int8 { return cmd.keyPos } -func (cmd *baseCmd) setFirstKeyPos(keyPos int8) { +func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) { cmd.keyPos = keyPos } diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go index c76242d7bff1..bbfe089df166 100644 --- a/vendor/github.com/go-redis/redis/v8/commands.go +++ b/vendor/github.com/go-redis/redis/v8/commands.go @@ -96,6 +96,10 @@ type Cmdable interface { Exists(ctx context.Context, keys ...string) *IntCmd Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd Keys(ctx context.Context, pattern string) *StringSliceCmd Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd Move(ctx context.Context, key string, db int) *BoolCmd @@ -139,6 +143,7 @@ type Cmdable interface { SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd StrLen(ctx context.Context, key string) *IntCmd + Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd GetBit(ctx context.Context, key string, offset int64) *IntCmd SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd @@ -431,6 +436,7 @@ func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd { cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond)) + cmd.setReadTimeout(timeout) _ = c(ctx, cmd) return cmd } @@ -525,7 +531,37 @@ func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd { } func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { - cmd := NewBoolCmd(ctx, "expire", key, formatSec(ctx, expiration)) + return c.expire(ctx, key, expiration, "") +} + +func (c cmdable) ExpireNX(ctx context.Context, key string, expiration 
time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "NX") +} + +func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "XX") +} + +func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "GT") +} + +func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "LT") +} + +func (c cmdable) expire( + ctx context.Context, key string, expiration time.Duration, mode string, +) *BoolCmd { + args := make([]interface{}, 3, 4) + args[0] = "expire" + args[1] = key + args[2] = formatSec(ctx, expiration) + if mode != "" { + args = append(args, mode) + } + + cmd := NewBoolCmd(ctx, args...) _ = c(ctx, cmd) return cmd } @@ -990,6 +1026,16 @@ func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd { return cmd } +func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd { + args := []interface{}{"copy", sourceKey, destKey, "DB", db} + if replace { + args = append(args, "REPLACE") + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + //------------------------------------------------------------------------------ func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd { @@ -1778,7 +1824,7 @@ type XReadArgs struct { } func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { - args := make([]interface{}, 0, 5+len(a.Streams)) + args := make([]interface{}, 0, 6+len(a.Streams)) args = append(args, "xread") keyPos := int8(1) @@ -1802,7 +1848,7 @@ func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { if a.Block >= 0 { cmd.setReadTimeout(a.Block) } - cmd.setFirstKeyPos(keyPos) + cmd.SetFirstKeyPos(keyPos) _ = c(ctx, cmd) return cmd } @@ -1860,7 +1906,7 @@ type XReadGroupArgs struct { } func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd { - args := make([]interface{}, 0, 8+len(a.Streams)) + args := make([]interface{}, 0, 10+len(a.Streams)) args = append(args, "xreadgroup", "group", a.Group, a.Consumer) keyPos := int8(4) @@ -1886,7 +1932,7 @@ func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSlic if a.Block >= 0 { cmd.setReadTimeout(a.Block) } - cmd.setFirstKeyPos(keyPos) + cmd.SetFirstKeyPos(keyPos) _ = c(ctx, cmd) return cmd } @@ -1957,7 +2003,7 @@ func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAuto } func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} { - args := make([]interface{}, 0, 9) + args := make([]interface{}, 0, 8) args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start) if a.Count > 0 { args = append(args, "count", a.Count) @@ -1989,7 +2035,7 @@ func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCm } func xClaimArgs(a *XClaimArgs) []interface{} { - args := make([]interface{}, 0, 4+len(a.Messages)) + args := make([]interface{}, 0, 5+len(a.Messages)) args = append(args, "xclaim", a.Stream, @@ -2362,7 +2408,7 @@ func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZSt args = append(args, "zinterstore", destination, len(store.Keys)) args = store.appendArgs(args) cmd := NewIntCmd(ctx, args...) 
- cmd.setFirstKeyPos(3) + cmd.SetFirstKeyPos(3) _ = c(ctx, cmd) return cmd } @@ -2372,7 +2418,7 @@ func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd { args = append(args, "zinter", len(store.Keys)) args = store.appendArgs(args) cmd := NewStringSliceCmd(ctx, args...) - cmd.setFirstKeyPos(2) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } @@ -2383,7 +2429,7 @@ func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd args = store.appendArgs(args) args = append(args, "withscores") cmd := NewZSliceCmd(ctx, args...) - cmd.setFirstKeyPos(2) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } @@ -2710,7 +2756,7 @@ func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd { args = append(args, "zunion", len(store.Keys)) args = store.appendArgs(args) cmd := NewStringSliceCmd(ctx, args...) - cmd.setFirstKeyPos(2) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } @@ -2721,7 +2767,7 @@ func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd args = store.appendArgs(args) args = append(args, "withscores") cmd := NewZSliceCmd(ctx, args...) - cmd.setFirstKeyPos(2) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } @@ -2731,7 +2777,7 @@ func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *I args = append(args, "zunionstore", dest, len(store.Keys)) args = store.appendArgs(args) cmd := NewIntCmd(ctx, args...) - cmd.setFirstKeyPos(3) + cmd.SetFirstKeyPos(3) _ = c(ctx, cmd) return cmd } @@ -2761,7 +2807,7 @@ func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd { } cmd := NewStringSliceCmd(ctx, args...) - cmd.setFirstKeyPos(2) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } @@ -2777,7 +2823,7 @@ func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd args[len(keys)+2] = "withscores" cmd := NewZSliceCmd(ctx, args...) - cmd.setFirstKeyPos(2) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } @@ -3053,7 +3099,7 @@ func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *I args = append(args, "SAMPLES", samples[0]) } cmd := NewIntCmd(ctx, args...) - cmd.setFirstKeyPos(2) + cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } @@ -3070,7 +3116,7 @@ func (c cmdable) Eval(ctx context.Context, script string, keys []string, args .. } cmdArgs = appendArgs(cmdArgs, args) cmd := NewCmd(ctx, cmdArgs...) - cmd.setFirstKeyPos(3) + cmd.SetFirstKeyPos(3) _ = c(ctx, cmd) return cmd } @@ -3085,7 +3131,7 @@ func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args . } cmdArgs = appendArgs(cmdArgs, args) cmd := NewCmd(ctx, cmdArgs...) 
- cmd.setFirstKeyPos(3) + cmd.SetFirstKeyPos(3) _ = c(ctx, cmd) return cmd } diff --git a/vendor/github.com/go-redis/redis/v8/error.go b/vendor/github.com/go-redis/redis/v8/error.go index 5025215b9e98..521594bbd0c9 100644 --- a/vendor/github.com/go-redis/redis/v8/error.go +++ b/vendor/github.com/go-redis/redis/v8/error.go @@ -134,7 +134,7 @@ func isMovedSameConnAddr(err error, addr string) bool { if !strings.HasPrefix(redisError, "MOVED ") { return false } - return strings.HasSuffix(redisError, addr) + return strings.HasSuffix(redisError, " "+addr) } //------------------------------------------------------------------------------ diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go index 7d7183c32dae..0e994765feed 100644 --- a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go +++ b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go @@ -10,6 +10,7 @@ import ( ) // Scan parses bytes `b` to `v` with appropriate type. +//nolint:gocyclo func Scan(b []byte, v interface{}) error { switch v := v.(type) { case nil: @@ -105,6 +106,13 @@ func Scan(b []byte, v interface{}) error { var err error *v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b)) return err + case *time.Duration: + n, err := util.ParseInt(b, 10, 64) + if err != nil { + return err + } + *v = time.Duration(n) + return nil case encoding.BinaryUnmarshaler: return v.UnmarshalBinary(b) default: diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go index 81b09b8e4f70..c4260981ed14 100644 --- a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go +++ b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go @@ -98,6 +98,8 @@ func (w *Writer) WriteArg(v interface{}) error { case time.Time: w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano) return w.bytes(w.numBuf) + case time.Duration: + return w.int(v.Nanoseconds()) case encoding.BinaryMarshaler: b, err := v.MarshalBinary() if err != nil { diff --git a/vendor/github.com/go-redis/redis/v8/package.json b/vendor/github.com/go-redis/redis/v8/package.json index dae7b9aab2c6..e4ea4bb074c4 100644 --- a/vendor/github.com/go-redis/redis/v8/package.json +++ b/vendor/github.com/go-redis/redis/v8/package.json @@ -1,6 +1,6 @@ { "name": "redis", - "version": "8.11.4", + "version": "8.11.5", "main": "index.js", "repository": "git@github.com:go-redis/redis.git", "author": "Vladimir Mihailenco ", diff --git a/vendor/github.com/go-redis/redis/v8/pipeline.go b/vendor/github.com/go-redis/redis/v8/pipeline.go index c6ec3409980f..31bab971e63f 100644 --- a/vendor/github.com/go-redis/redis/v8/pipeline.go +++ b/vendor/github.com/go-redis/redis/v8/pipeline.go @@ -24,6 +24,7 @@ type pipelineExecer func(context.Context, []Cmder) error // depends of your batch size and/or use TxPipeline. type Pipeliner interface { StatefulCmdable + Len() int Do(ctx context.Context, args ...interface{}) *Cmd Process(ctx context.Context, cmd Cmder) error Close() error @@ -53,6 +54,15 @@ func (c *Pipeline) init() { c.statefulCmdable = c.Process } +// Len returns the number of queued commands. +func (c *Pipeline) Len() int { + c.mu.Lock() + ln := len(c.cmds) + c.mu.Unlock() + return ln +} + +// Do queues the custom command for later execution. func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd { cmd := NewCmd(ctx, args...) 
_ = c.Process(ctx, cmd) diff --git a/vendor/github.com/go-redis/redis/v8/ring.go b/vendor/github.com/go-redis/redis/v8/ring.go index 11a20370a875..4df00fc857f5 100644 --- a/vendor/github.com/go-redis/redis/v8/ring.go +++ b/vendor/github.com/go-redis/redis/v8/ring.go @@ -576,7 +576,7 @@ func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo { } info := cmdsInfo[name] if info == nil { - internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name) + internal.Logger.Printf(ctx, "info for cmd=%s not found", name) } return info } diff --git a/vendor/github.com/go-redis/redis/v8/universal.go b/vendor/github.com/go-redis/redis/v8/universal.go index 1e962ab3bec0..c89b3e5d7431 100644 --- a/vendor/github.com/go-redis/redis/v8/universal.go +++ b/vendor/github.com/go-redis/redis/v8/universal.go @@ -25,6 +25,7 @@ type UniversalOptions struct { Username string Password string + SentinelUsername string SentinelPassword string MaxRetries int @@ -114,6 +115,7 @@ func (o *UniversalOptions) Failover() *FailoverOptions { DB: o.DB, Username: o.Username, Password: o.Password, + SentinelUsername: o.SentinelUsername, SentinelPassword: o.SentinelPassword, MaxRetries: o.MaxRetries, diff --git a/vendor/github.com/go-redis/redis/v8/version.go b/vendor/github.com/go-redis/redis/v8/version.go index ed479038755d..112c9a2da0e3 100644 --- a/vendor/github.com/go-redis/redis/v8/version.go +++ b/vendor/github.com/go-redis/redis/v8/version.go @@ -2,5 +2,5 @@ package redis // Version is the current release version. func Version() string { - return "8.11.4" + return "8.11.5" } diff --git a/vendor/github.com/go-stack/stack/.travis.yml b/vendor/github.com/go-stack/stack/.travis.yml deleted file mode 100644 index 5c5a2b516d39..000000000000 --- a/vendor/github.com/go-stack/stack/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -sudo: false -go: - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - tip - -before_install: - - go get github.com/mattn/goveralls - -script: - - goveralls -service=travis-ci diff --git a/vendor/github.com/go-stack/stack/LICENSE.md b/vendor/github.com/go-stack/stack/LICENSE.md deleted file mode 100644 index 2abf98ea835e..000000000000 --- a/vendor/github.com/go-stack/stack/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Chris Hines - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
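The go-redis hunks above introduce the Redis 7 EXPIRE variants (NX/XX/GT/LT), the COPY command, Pipeliner.Len, and time.Duration write/scan support. The following is an illustrative sketch only, not part of the vendored code: it assumes a Redis server on localhost:6379, and the key names are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Redis 7 EXPIRE variants added to Cmdable in this release.
	rdb.Set(ctx, "session:1", "payload", 0)
	rdb.ExpireNX(ctx, "session:1", time.Minute)   // set TTL only if none exists
	rdb.ExpireGT(ctx, "session:1", 2*time.Minute) // extend TTL only if greater

	// COPY command support (copy within DB 0, no REPLACE).
	rdb.Copy(ctx, "session:1", "session:1:backup", 0, false)

	// time.Duration values can now be written and scanned directly;
	// they are stored as an integer number of nanoseconds.
	rdb.Set(ctx, "timeout", time.Second, 0)
	var d time.Duration
	if err := rdb.Get(ctx, "timeout").Scan(&d); err == nil {
		fmt.Println("timeout:", d)
	}

	// Pipeliner gained Len(), reporting how many commands are queued.
	pipe := rdb.Pipeline()
	pipe.Incr(ctx, "hits")
	pipe.Expire(ctx, "hits", time.Hour)
	fmt.Println("queued:", pipe.Len())
	if _, err := pipe.Exec(ctx); err != nil {
		fmt.Println("pipeline error:", err)
	}
}
```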
diff --git a/vendor/github.com/go-stack/stack/README.md b/vendor/github.com/go-stack/stack/README.md deleted file mode 100644 index f11ccccaa430..000000000000 --- a/vendor/github.com/go-stack/stack/README.md +++ /dev/null @@ -1,38 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack) -[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack) -[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack) -[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master) - -# stack - -Package stack implements utilities to capture, manipulate, and format call -stacks. It provides a simpler API than package runtime. - -The implementation takes care of the minutia and special cases of interpreting -the program counter (pc) values returned by runtime.Callers. - -## Versioning - -Package stack publishes releases via [semver](http://semver.org/) compatible Git -tags prefixed with a single 'v'. The master branch always contains the latest -release. The develop branch contains unreleased commits. - -## Formatting - -Package stack's types implement fmt.Formatter, which provides a simple and -flexible way to declaratively configure formatting when used with logging or -error tracking packages. - -```go -func DoTheThing() { - c := stack.Caller(0) - log.Print(c) // "source.go:10" - log.Printf("%+v", c) // "pkg/path/source.go:10" - log.Printf("%n", c) // "DoTheThing" - - s := stack.Trace().TrimRuntime() - log.Print(s) // "[source.go:15 caller.go:42 main.go:14]" -} -``` - -See the docs for all of the supported formatting options. diff --git a/vendor/github.com/go-stack/stack/stack.go b/vendor/github.com/go-stack/stack/stack.go deleted file mode 100644 index ac3b93b14f48..000000000000 --- a/vendor/github.com/go-stack/stack/stack.go +++ /dev/null @@ -1,400 +0,0 @@ -// +build go1.7 - -// Package stack implements utilities to capture, manipulate, and format call -// stacks. It provides a simpler API than package runtime. -// -// The implementation takes care of the minutia and special cases of -// interpreting the program counter (pc) values returned by runtime.Callers. -// -// Package stack's types implement fmt.Formatter, which provides a simple and -// flexible way to declaratively configure formatting when used with logging -// or error tracking packages. -package stack - -import ( - "bytes" - "errors" - "fmt" - "io" - "runtime" - "strconv" - "strings" -) - -// Call records a single function invocation from a goroutine stack. -type Call struct { - frame runtime.Frame -} - -// Caller returns a Call from the stack of the current goroutine. The argument -// skip is the number of stack frames to ascend, with 0 identifying the -// calling function. -func Caller(skip int) Call { - // As of Go 1.9 we need room for up to three PC entries. - // - // 0. An entry for the stack frame prior to the target to check for - // special handling needed if that prior entry is runtime.sigpanic. - // 1. A possible second entry to hold metadata about skipped inlined - // functions. If inline functions were not skipped the target frame - // PC will be here. - // 2. A third entry for the target frame PC when the second entry - // is used for skipped inline functions. 
- var pcs [3]uintptr - n := runtime.Callers(skip+1, pcs[:]) - frames := runtime.CallersFrames(pcs[:n]) - frame, _ := frames.Next() - frame, _ = frames.Next() - - return Call{ - frame: frame, - } -} - -// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", c). -func (c Call) String() string { - return fmt.Sprint(c) -} - -// MarshalText implements encoding.TextMarshaler. It formats the Call the same -// as fmt.Sprintf("%v", c). -func (c Call) MarshalText() ([]byte, error) { - if c.frame == (runtime.Frame{}) { - return nil, ErrNoFunc - } - - buf := bytes.Buffer{} - fmt.Fprint(&buf, c) - return buf.Bytes(), nil -} - -// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely -// cause is a Call with the zero value. -var ErrNoFunc = errors.New("no call stack information") - -// Format implements fmt.Formatter with support for the following verbs. -// -// %s source file -// %d line number -// %n function name -// %k last segment of the package path -// %v equivalent to %s:%d -// -// It accepts the '+' and '#' flags for most of the verbs as follows. -// -// %+s path of source file relative to the compile time GOPATH, -// or the module path joined to the path of source file relative -// to module root -// %#s full path of source file -// %+n import path qualified function name -// %+k full package path -// %+v equivalent to %+s:%d -// %#v equivalent to %#s:%d -func (c Call) Format(s fmt.State, verb rune) { - if c.frame == (runtime.Frame{}) { - fmt.Fprintf(s, "%%!%c(NOFUNC)", verb) - return - } - - switch verb { - case 's', 'v': - file := c.frame.File - switch { - case s.Flag('#'): - // done - case s.Flag('+'): - file = pkgFilePath(&c.frame) - default: - const sep = "/" - if i := strings.LastIndex(file, sep); i != -1 { - file = file[i+len(sep):] - } - } - io.WriteString(s, file) - if verb == 'v' { - buf := [7]byte{':'} - s.Write(strconv.AppendInt(buf[:1], int64(c.frame.Line), 10)) - } - - case 'd': - buf := [6]byte{} - s.Write(strconv.AppendInt(buf[:0], int64(c.frame.Line), 10)) - - case 'k': - name := c.frame.Function - const pathSep = "/" - start, end := 0, len(name) - if i := strings.LastIndex(name, pathSep); i != -1 { - start = i + len(pathSep) - } - const pkgSep = "." - if i := strings.Index(name[start:], pkgSep); i != -1 { - end = start + i - } - if s.Flag('+') { - start = 0 - } - io.WriteString(s, name[start:end]) - - case 'n': - name := c.frame.Function - if !s.Flag('+') { - const pathSep = "/" - if i := strings.LastIndex(name, pathSep); i != -1 { - name = name[i+len(pathSep):] - } - const pkgSep = "." - if i := strings.Index(name, pkgSep); i != -1 { - name = name[i+len(pkgSep):] - } - } - io.WriteString(s, name) - } -} - -// Frame returns the call frame infomation for the Call. -func (c Call) Frame() runtime.Frame { - return c.frame -} - -// PC returns the program counter for this call frame; multiple frames may -// have the same PC value. -// -// Deprecated: Use Call.Frame instead. -func (c Call) PC() uintptr { - return c.frame.PC -} - -// CallStack records a sequence of function invocations from a goroutine -// stack. -type CallStack []Call - -// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", cs). -func (cs CallStack) String() string { - return fmt.Sprint(cs) -} - -var ( - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - spaceBytes = []byte(" ") -) - -// MarshalText implements encoding.TextMarshaler. It formats the CallStack the -// same as fmt.Sprintf("%v", cs). 
-func (cs CallStack) MarshalText() ([]byte, error) { - buf := bytes.Buffer{} - buf.Write(openBracketBytes) - for i, pc := range cs { - if i > 0 { - buf.Write(spaceBytes) - } - fmt.Fprint(&buf, pc) - } - buf.Write(closeBracketBytes) - return buf.Bytes(), nil -} - -// Format implements fmt.Formatter by printing the CallStack as square brackets -// ([, ]) surrounding a space separated list of Calls each formatted with the -// supplied verb and options. -func (cs CallStack) Format(s fmt.State, verb rune) { - s.Write(openBracketBytes) - for i, pc := range cs { - if i > 0 { - s.Write(spaceBytes) - } - pc.Format(s, verb) - } - s.Write(closeBracketBytes) -} - -// Trace returns a CallStack for the current goroutine with element 0 -// identifying the calling function. -func Trace() CallStack { - var pcs [512]uintptr - n := runtime.Callers(1, pcs[:]) - - frames := runtime.CallersFrames(pcs[:n]) - cs := make(CallStack, 0, n) - - // Skip extra frame retrieved just to make sure the runtime.sigpanic - // special case is handled. - frame, more := frames.Next() - - for more { - frame, more = frames.Next() - cs = append(cs, Call{frame: frame}) - } - - return cs -} - -// TrimBelow returns a slice of the CallStack with all entries below c -// removed. -func (cs CallStack) TrimBelow(c Call) CallStack { - for len(cs) > 0 && cs[0] != c { - cs = cs[1:] - } - return cs -} - -// TrimAbove returns a slice of the CallStack with all entries above c -// removed. -func (cs CallStack) TrimAbove(c Call) CallStack { - for len(cs) > 0 && cs[len(cs)-1] != c { - cs = cs[:len(cs)-1] - } - return cs -} - -// pkgIndex returns the index that results in file[index:] being the path of -// file relative to the compile time GOPATH, and file[:index] being the -// $GOPATH/src/ portion of file. funcName must be the name of a function in -// file as returned by runtime.Func.Name. -func pkgIndex(file, funcName string) int { - // As of Go 1.6.2 there is no direct way to know the compile time GOPATH - // at runtime, but we can infer the number of path segments in the GOPATH. - // We note that runtime.Func.Name() returns the function name qualified by - // the import path, which does not include the GOPATH. Thus we can trim - // segments from the beginning of the file path until the number of path - // separators remaining is one more than the number of path separators in - // the function name. For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // file[:idx] == /home/user/src/ - // file[idx:] == pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path separator - // than our desired result for file[idx:]. We count separators from the - // end of the file path until it finds two more than in the function name - // and then move one character forward to preserve the initial path - // segment without a leading separator. - const sep = "/" - i := len(file) - for n := strings.Count(funcName, sep) + 2; n > 0; n-- { - i = strings.LastIndex(file[:i], sep) - if i == -1 { - i = -len(sep) - break - } - } - // get back to 0 or trim the leading separator - return i + len(sep) -} - -// pkgFilePath returns the frame's filepath relative to the compile-time GOPATH, -// or its module path joined to its path relative to the module root. 
-// -// As of Go 1.11 there is no direct way to know the compile time GOPATH or -// module paths at runtime, but we can piece together the desired information -// from available information. We note that runtime.Frame.Function contains the -// function name qualified by the package path, which includes the module path -// but not the GOPATH. We can extract the package path from that and append the -// last segments of the file path to arrive at the desired package qualified -// file path. For example, given: -// -// GOPATH /home/user -// import path pkg/sub -// frame.File /home/user/src/pkg/sub/file.go -// frame.Function pkg/sub.Type.Method -// Desired return pkg/sub/file.go -// -// It appears that we simply need to trim ".Type.Method" from frame.Function and -// append "/" + path.Base(file). -// -// But there are other wrinkles. Although it is idiomatic to do so, the internal -// name of a package is not required to match the last segment of its import -// path. In addition, the introduction of modules in Go 1.11 allows working -// without a GOPATH. So we also must make these work right: -// -// GOPATH /home/user -// import path pkg/go-sub -// package name sub -// frame.File /home/user/src/pkg/go-sub/file.go -// frame.Function pkg/sub.Type.Method -// Desired return pkg/go-sub/file.go -// -// Module path pkg/v2 -// import path pkg/v2/go-sub -// package name sub -// frame.File /home/user/cloned-pkg/go-sub/file.go -// frame.Function pkg/v2/sub.Type.Method -// Desired return pkg/v2/go-sub/file.go -// -// We can handle all of these situations by using the package path extracted -// from frame.Function up to, but not including, the last segment as the prefix -// and the last two segments of frame.File as the suffix of the returned path. -// This preserves the existing behavior when working in a GOPATH without modules -// and a semantically equivalent behavior when used in module aware project. -func pkgFilePath(frame *runtime.Frame) string { - pre := pkgPrefix(frame.Function) - post := pathSuffix(frame.File) - if pre == "" { - return post - } - return pre + "/" + post -} - -// pkgPrefix returns the import path of the function's package with the final -// segment removed. -func pkgPrefix(funcName string) string { - const pathSep = "/" - end := strings.LastIndex(funcName, pathSep) - if end == -1 { - return "" - } - return funcName[:end] -} - -// pathSuffix returns the last two segments of path. -func pathSuffix(path string) string { - const pathSep = "/" - lastSep := strings.LastIndex(path, pathSep) - if lastSep == -1 { - return path - } - return path[strings.LastIndex(path[:lastSep], pathSep)+1:] -} - -var runtimePath string - -func init() { - var pcs [3]uintptr - runtime.Callers(0, pcs[:]) - frames := runtime.CallersFrames(pcs[:]) - frame, _ := frames.Next() - file := frame.File - - idx := pkgIndex(frame.File, frame.Function) - - runtimePath = file[:idx] - if runtime.GOOS == "windows" { - runtimePath = strings.ToLower(runtimePath) - } -} - -func inGoroot(c Call) bool { - file := c.frame.File - if len(file) == 0 || file[0] == '?' { - return true - } - if runtime.GOOS == "windows" { - file = strings.ToLower(file) - } - return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go") -} - -// TrimRuntime returns a slice of the CallStack with the topmost entries from -// the go runtime removed. It considers any calls originating from unknown -// files, files under GOROOT, or _testmain.go as part of the runtime. 
-func (cs CallStack) TrimRuntime() CallStack { - for len(cs) > 0 && inGoroot(cs[len(cs)-1]) { - cs = cs[:len(cs)-1] - } - return cs -} diff --git a/vendor/github.com/golang-jwt/jwt/v4/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md index 3072d24a9da6..f5d551ca8fd8 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/README.md +++ b/vendor/github.com/golang-jwt/jwt/v4/README.md @@ -36,9 +36,23 @@ The part in the middle is the interesting bit. It's called the Claims and conta This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. +## Installation Guidelines + +1. To install the jwt package, you first need to have [Go](https://go.dev/doc/install) installed, then you can use the command below to add `jwt-go` as a dependency in your Go program. + +```sh +go get -u github.com/golang-jwt/jwt/v4 +``` + +2. Import it in your code: + +```go +import "github.com/golang-jwt/jwt/v4" +``` + ## Examples -See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage: +See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage: * [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) * [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) @@ -46,9 +60,17 @@ See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) fo ## Extensions -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. +This library publishes all the necessary components for adding your own signing methods or key functions. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod` or provide a `jwt.Keyfunc`. + +A common use case would be integrating with different 3rd party signature providers, like key management services from various cloud providers or Hardware Security Modules (HSMs) or to implement additional standards. -Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go +| Extension | Purpose | Repo | +| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ | +| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go | +| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms | +| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc | + +*Disclaimer*: Unless otherwise specified, these integrations are maintained by third parties and should not be considered as a primary offer by any of the mentioned cloud providers ## Compliance @@ -112,3 +134,5 @@ This library uses descriptive error messages whenever possible. If you are not g Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt). 
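The README hunk above documents installation of github.com/golang-jwt/jwt/v4 and links to parsing and signing examples. Purely as an illustrative sketch (not part of the vendored files), the following shows one way to sign and validate an HMAC token with v4; the secret and claim values are placeholders, and the errors.Is check relies on the sentinel errors (e.g. ErrTokenExpired) introduced by the errors.go hunk later in this patch.

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	secret := []byte("replace-me") // placeholder signing key

	// Build and sign a token carrying registered claims.
	claims := jwt.RegisteredClaims{
		Subject:   "example-user",
		IssuedAt:  jwt.NewNumericDate(time.Now()),
		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
	}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Parse and validate the token again.
	token, err := jwt.ParseWithClaims(signed, &jwt.RegisteredClaims{}, func(t *jwt.Token) (interface{}, error) {
		return secret, nil
	})
	switch {
	case token != nil && token.Valid:
		fmt.Println("token ok for", token.Claims.(*jwt.RegisteredClaims).Subject)
	case errors.Is(err, jwt.ErrTokenExpired):
		// Sentinel errors work with errors.Is via ValidationError.Is (added below).
		fmt.Println("token expired")
	default:
		fmt.Println("parse failed:", err)
	}
}
```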
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. + +[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version of the JWT logo, which is distributed under the terms of the [MIT License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt). diff --git a/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md new file mode 100644 index 000000000000..b08402c3427f --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +As of February 2022 (and until this document is updated), the latest version `v4` is supported. + +## Reporting a Vulnerability + +If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try be explicit, describe steps to reproduce the security issue with code example(s). + +You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem. + +## Public Discussions + +Please avoid publicly discussing a potential security vulnerability. + +Let's take this offline and find a solution first, this limits the potential impact as much as possible. + +We appreciate your help! diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go index 41cc826563ba..9d95cad2bf27 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/claims.go +++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go @@ -56,17 +56,17 @@ func (c RegisteredClaims) Valid() error { // default value in Go, let's not fail the verification for them. if !c.VerifyExpiresAt(now, false) { delta := now.Sub(c.ExpiresAt.Time) - vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) vErr.Errors |= ValidationErrorExpired } if !c.VerifyIssuedAt(now, false) { - vErr.Inner = fmt.Errorf("token used before issued") + vErr.Inner = ErrTokenUsedBeforeIssued vErr.Errors |= ValidationErrorIssuedAt } if !c.VerifyNotBefore(now, false) { - vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Inner = ErrTokenNotValidYet vErr.Errors |= ValidationErrorNotValidYet } @@ -149,17 +149,17 @@ func (c StandardClaims) Valid() error { // default value in Go, let's not fail the verification for them. 
if !c.VerifyExpiresAt(now, false) { delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) - vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) vErr.Errors |= ValidationErrorExpired } if !c.VerifyIssuedAt(now, false) { - vErr.Inner = fmt.Errorf("token used before issued") + vErr.Inner = ErrTokenUsedBeforeIssued vErr.Errors |= ValidationErrorIssuedAt } if !c.VerifyNotBefore(now, false) { - vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Inner = ErrTokenNotValidYet vErr.Errors |= ValidationErrorNotValidYet } diff --git a/vendor/github.com/golang-jwt/jwt/v4/errors.go b/vendor/github.com/golang-jwt/jwt/v4/errors.go index b9d18e498e3f..10ac8835cc88 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/errors.go +++ b/vendor/github.com/golang-jwt/jwt/v4/errors.go @@ -9,6 +9,18 @@ var ( ErrInvalidKey = errors.New("key is invalid") ErrInvalidKeyType = errors.New("key is of invalid type") ErrHashUnavailable = errors.New("the requested hash function is unavailable") + + ErrTokenMalformed = errors.New("token is malformed") + ErrTokenUnverifiable = errors.New("token is unverifiable") + ErrTokenSignatureInvalid = errors.New("token signature is invalid") + + ErrTokenInvalidAudience = errors.New("token has invalid audience") + ErrTokenExpired = errors.New("token is expired") + ErrTokenUsedBeforeIssued = errors.New("token used before issued") + ErrTokenInvalidIssuer = errors.New("token has invalid issuer") + ErrTokenNotValidYet = errors.New("token is not valid yet") + ErrTokenInvalidId = errors.New("token has invalid id") + ErrTokenInvalidClaims = errors.New("token has invalid claims") ) // The errors that might occur when parsing and validating a token @@ -62,3 +74,39 @@ func (e *ValidationError) Unwrap() error { func (e *ValidationError) valid() bool { return e.Errors == 0 } + +// Is checks if this ValidationError is of the supplied error. We are first checking for the exact error message +// by comparing the inner error message. If that fails, we compare using the error flags. This way we can use +// custom error messages (mainly for backwards compatability) and still leverage errors.Is using the global error variables. 
+func (e *ValidationError) Is(err error) bool { + // Check, if our inner error is a direct match + if errors.Is(errors.Unwrap(e), err) { + return true + } + + // Otherwise, we need to match using our error flags + switch err { + case ErrTokenMalformed: + return e.Errors&ValidationErrorMalformed != 0 + case ErrTokenUnverifiable: + return e.Errors&ValidationErrorUnverifiable != 0 + case ErrTokenSignatureInvalid: + return e.Errors&ValidationErrorSignatureInvalid != 0 + case ErrTokenInvalidAudience: + return e.Errors&ValidationErrorAudience != 0 + case ErrTokenExpired: + return e.Errors&ValidationErrorExpired != 0 + case ErrTokenUsedBeforeIssued: + return e.Errors&ValidationErrorIssuedAt != 0 + case ErrTokenInvalidIssuer: + return e.Errors&ValidationErrorIssuer != 0 + case ErrTokenNotValidYet: + return e.Errors&ValidationErrorNotValidYet != 0 + case ErrTokenInvalidId: + return e.Errors&ValidationErrorId != 0 + case ErrTokenInvalidClaims: + return e.Errors&ValidationErrorClaimsInvalid != 0 + } + + return false +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go index e7da633b93c6..2700d64a0d09 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go +++ b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go @@ -126,16 +126,19 @@ func (m MapClaims) Valid() error { now := TimeFunc().Unix() if !m.VerifyExpiresAt(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenExpired vErr.Inner = errors.New("Token is expired") vErr.Errors |= ValidationErrorExpired } if !m.VerifyIssuedAt(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenUsedBeforeIssued vErr.Inner = errors.New("Token used before issued") vErr.Errors |= ValidationErrorIssuedAt } if !m.VerifyNotBefore(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenNotValidYet vErr.Inner = errors.New("Token is not valid yet") vErr.Errors |= ValidationErrorNotValidYet } diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser_option.go b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go index 0fede4f15c90..6ea6f9527de6 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/parser_option.go +++ b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go @@ -1,6 +1,6 @@ package jwt -// ParserOption is used to implement functional-style options that modify the behaviour of the parser. To add +// ParserOption is used to implement functional-style options that modify the behavior of the parser. To add // new options, just create a function (ideally beginning with With or Without) that returns an anonymous function that // takes a *Parser type as input and manipulates its configuration accordingly. 
type ParserOption func(*Parser) @@ -13,7 +13,7 @@ func WithValidMethods(methods []string) ParserOption { } } -// WithJSONNumber is an option to configure the underyling JSON parser with UseNumber +// WithJSONNumber is an option to configure the underlying JSON parser with UseNumber func WithJSONNumber() ParserOption { return func(p *Parser) { p.UseJSONNumber = true diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go index 5a8502feb34b..4fd6f9e610b0 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go @@ -1,3 +1,4 @@ +//go:build go1.4 // +build go1.4 package jwt diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go index 12344138bedc..3cb0f3f0e4c0 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/token.go +++ b/vendor/github.com/golang-jwt/jwt/v4/token.go @@ -7,7 +7,6 @@ import ( "time" ) - // DecodePaddingAllowed will switch the codec used for decoding JWTs respectively. Note that the JWS RFC7515 // states that the tokens will utilize a Base64url encoding with no padding. Unfortunately, some implementations // of JWT are producing non-standard tokens, and thus require support for decoding. Note that this is a global @@ -74,22 +73,19 @@ func (t *Token) SignedString(key interface{}) (string, error) { // the SignedString. func (t *Token) SigningString() (string, error) { var err error - parts := make([]string, 2) - for i := range parts { - var jsonValue []byte - if i == 0 { - if jsonValue, err = json.Marshal(t.Header); err != nil { - return "", err - } - } else { - if jsonValue, err = json.Marshal(t.Claims); err != nil { - return "", err - } - } + var jsonValue []byte - parts[i] = EncodeSegment(jsonValue) + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err } - return strings.Join(parts, "."), nil + header := EncodeSegment(jsonValue) + + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + claim := EncodeSegment(jsonValue) + + return strings.Join([]string{header, claim}, "."), nil } // Parse parses, validates, verifies the signature and returns the parsed token. diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go index 80b1b96948ec..ac8e140eb119 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/types.go +++ b/vendor/github.com/golang-jwt/jwt/v4/types.go @@ -49,9 +49,27 @@ func newNumericDateFromSeconds(f float64) *NumericDate { // MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch // represented in NumericDate to a byte array, using the precision specified in TimePrecision. func (date NumericDate) MarshalJSON() (b []byte, err error) { - f := float64(date.Truncate(TimePrecision).UnixNano()) / float64(time.Second) - - return []byte(strconv.FormatFloat(f, 'f', -1, 64)), nil + var prec int + if TimePrecision < time.Second { + prec = int(math.Log10(float64(time.Second) / float64(TimePrecision))) + } + truncatedDate := date.Truncate(TimePrecision) + + // For very large timestamps, UnixNano would overflow an int64, but this + // function requires nanosecond level precision, so we have to use the + // following technique to get round the issue: + // 1. Take the normal unix timestamp to form the whole number part of the + // output, + // 2. 
Take the result of the Nanosecond function, which retuns the offset + // within the second of the particular unix time instance, to form the + // decimal part of the output + // 3. Concatenate them to produce the final result + seconds := strconv.FormatInt(truncatedDate.Unix(), 10) + nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64) + + output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...) + + return output, nil } // UnmarshalJSON is an implementation of the json.RawMessage interface and deserializses a diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go new file mode 100644 index 000000000000..60e82caa9a2d --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -0,0 +1,524 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONUnmarshalV2 = false + +// UnmarshalNext unmarshals the next JSON object from d into m. +func UnmarshalNext(d *json.Decoder, m proto.Message) error { + return new(Unmarshaler).UnmarshalNext(d, m) +} + +// Unmarshal unmarshals a JSON object from r into m. +func Unmarshal(r io.Reader, m proto.Message) error { + return new(Unmarshaler).Unmarshal(r, m) +} + +// UnmarshalString unmarshals a JSON object from s into m. +func UnmarshalString(s string, m proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // AllowUnknownFields specifies whether to allow messages to contain + // unknown JSON fields, as opposed to failing to unmarshal. + AllowUnknownFields bool + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize the way +// they are unmarshaled from JSON. Messages that implement this should also +// implement JSONPBMarshaler so that the custom format can be produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Unmarshal unmarshals a JSON object from r into m. +func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { + return u.UnmarshalNext(json.NewDecoder(r), m) +} + +// UnmarshalNext unmarshals the next JSON object from d into m. +func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { + if m == nil { + return errors.New("invalid nil message") + } + + // Parse the next JSON object from the stream. 
+ raw := json.RawMessage{} + if err := d.Decode(&raw); err != nil { + return err + } + + // Check for custom unmarshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsu, ok := m.(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, raw) + } + + mr := proto.MessageReflect(m) + + // NOTE: For historical reasons, a top-level null is treated as a noop. + // This is incorrect, but kept for compatibility. + if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { + return nil + } + + if wrapJSONUnmarshalV2 { + // NOTE: If input message is non-empty, we need to preserve merge semantics + // of the old jsonpb implementation. These semantics are not supported by + // the protobuf JSON specification. + isEmpty := true + mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { + isEmpty = false // at least one iteration implies non-empty + return false + }) + if !isEmpty { + // Perform unmarshaling into a newly allocated, empty message. + mr = mr.New() + + // Use a defer to copy all unmarshaled fields into the original message. + dst := proto.MessageReflect(m) + defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + dst.Set(fd, v) + return true + }) + } + + // Unmarshal using the v2 JSON unmarshaler. + opts := protojson.UnmarshalOptions{ + DiscardUnknown: u.AllowUnknownFields, + } + if u.AnyResolver != nil { + opts.Resolver = anyResolver{u.AnyResolver} + } + return opts.Unmarshal(raw, mr.Interface()) + } else { + if err := u.unmarshalMessage(mr, raw); err != nil { + return err + } + return protoV2.CheckInitialized(mr.Interface()) + } +} + +func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { + md := m.Descriptor() + fds := md.Fields() + + if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, in) + } + + if string(in) == "null" && md.FullName() != "google.protobuf.Value" { + return nil + } + + switch wellKnownType(md.FullName()) { + case "Any": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + rawTypeURL, ok := jsonObject["@type"] + if !ok { + return errors.New("Any JSON doesn't have '@type'") + } + typeURL, err := unquoteString(string(rawTypeURL)) + if err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) + } + m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) + + var m2 protoreflect.Message + if u.AnyResolver != nil { + mi, err := u.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + if err == protoregistry.NotFound { + return fmt.Errorf("could not resolve Any message type: %v", typeURL) + } + return err + } + m2 = mt.New() + } + + if wellKnownType(m2.Descriptor().FullName()) != "" { + rawValue, ok := jsonObject["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + if err := u.unmarshalMessage(m2, rawValue); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } else { + delete(jsonObject, "@type") + rawJSON, err := json.Marshal(jsonObject) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + if err = u.unmarshalMessage(m2, rawJSON); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", 
typeURL, err) + } + } + + rawWire, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) + return nil + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + v, err := u.unmarshalValue(m.NewField(fd), in, fd) + if err != nil { + return err + } + m.Set(fd, v) + return nil + case "Duration": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + d, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + sec := d.Nanoseconds() / 1e9 + nsec := d.Nanoseconds() % 1e9 + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Timestamp": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + sec := t.Unix() + nsec := t.Nanosecond() + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Value": + switch { + case string(in) == "null": + m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) + case string(in) == "true": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) + case string(in) == "false": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) + case hasPrefixAndSuffix('"', in, '"'): + s, err := unquoteString(string(in)) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) + case hasPrefixAndSuffix('[', in, ']'): + v := m.Mutable(fds.ByNumber(6)) + return u.unmarshalMessage(v.Message(), in) + case hasPrefixAndSuffix('{', in, '}'): + v := m.Mutable(fds.ByNumber(5)) + return u.unmarshalMessage(v.Message(), in) + default: + f, err := strconv.ParseFloat(string(in), 0) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) + } + return nil + case "ListValue": + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + lv := m.Mutable(fds.ByNumber(1)).List() + for _, raw := range jsonArray { + ve := lv.NewElement() + if err := u.unmarshalMessage(ve.Message(), raw); err != nil { + return err + } + lv.Append(ve) + } + return nil + case "Struct": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + mv := m.Mutable(fds.ByNumber(1)).Map() + for key, raw := range jsonObject { + kv := protoreflect.ValueOf(key).MapKey() + vv := mv.NewValue() + if err := u.unmarshalMessage(vv.Message(), raw); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) + } + mv.Set(kv, vv) + } + return nil + } + + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + // Handle known fields. + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if fd.IsWeak() && fd.Message().IsPlaceholder() { + continue // weak reference is not linked in + } + + // Search for any raw JSON value associated with this field. 
+ var raw json.RawMessage + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + name = string(fd.JSONName()) + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + // Handle extension fields. + for name, raw := range jsonObject { + if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { + continue + } + + // Resolve the extension field by name. + xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(md) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + continue + } + delete(jsonObject, name) + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + if !u.AllowUnknownFields && len(jsonObject) > 0 { + for name := range jsonObject { + return fmt.Errorf("unknown field %q in %v", name, md.FullName()) + } + } + return nil +} + +func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if md := fd.Message(); md != nil { + return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated + } + return false +} + +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + +func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch { + case fd.IsList(): + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return v, err + } + lv := v.List() + for _, raw := range jsonArray { + ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) + if err != nil { + return v, err + } + lv.Append(ve) + } + return v, nil + case fd.IsMap(): + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return v, err + } + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + for key, raw := range jsonObject { + var kv protoreflect.MapKey + if kfd.Kind() == protoreflect.StringKind { + kv = protoreflect.ValueOf(key).MapKey() + } else { + v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) + if err != nil { + return v, err + } + kv = v.MapKey() + } + + vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) + if err != nil { + return v, err + } + mv.Set(kv, vv) + } + return v, nil + default: + return u.unmarshalSingularValue(v, in, fd) + } +} + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + 
`"Infinity"`: math.Inf(+1), + `"-Infinity"`: math.Inf(-1), +} + +func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + return unmarshalValue(in, new(bool)) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return unmarshalValue(trimQuote(in), new(int32)) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return unmarshalValue(trimQuote(in), new(int64)) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return unmarshalValue(trimQuote(in), new(uint32)) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return unmarshalValue(trimQuote(in), new(uint64)) + case protoreflect.FloatKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat32(float32(f)), nil + } + return unmarshalValue(trimQuote(in), new(float32)) + case protoreflect.DoubleKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat64(float64(f)), nil + } + return unmarshalValue(trimQuote(in), new(float64)) + case protoreflect.StringKind: + return unmarshalValue(in, new(string)) + case protoreflect.BytesKind: + return unmarshalValue(in, new([]byte)) + case protoreflect.EnumKind: + if hasPrefixAndSuffix('"', in, '"') { + vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) + if vd == nil { + return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) + } + return protoreflect.ValueOfEnum(vd.Number()), nil + } + return unmarshalValue(in, new(protoreflect.EnumNumber)) + case protoreflect.MessageKind, protoreflect.GroupKind: + err := u.unmarshalMessage(v.Message(), in) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } +} + +func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { + err := json.Unmarshal(in, v) + return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err +} + +func unquoteString(in string) (out string, err error) { + err = json.Unmarshal([]byte(in), &out) + return out, err +} + +func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { + if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { + return true + } + return false +} + +// trimQuote is like unquoteString but simply strips surrounding quotes. +// This is incorrect, but is behavior done by the legacy implementation. +func trimQuote(in []byte) []byte { + if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { + in = in[1 : len(in)-1] + } + return in +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go new file mode 100644 index 000000000000..685c80a62bc9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -0,0 +1,559 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONMarshalV2 = false + +// Marshaler is a configurable object for marshaling protocol buffer messages +// to the specified JSON representation. +type Marshaler struct { + // OrigName specifies whether to use the original protobuf name for fields. + OrigName bool + + // EnumsAsInts specifies whether to render enum values as integers, + // as opposed to string values. + EnumsAsInts bool + + // EmitDefaults specifies whether to render fields with zero values. + EmitDefaults bool + + // Indent controls whether the output is compact or not. + // If empty, the output is compact JSON. Otherwise, every JSON object + // entry and JSON array value will be on its own line. + // Each line will be preceded by repeated copies of Indent, where the + // number of copies is the current indentation depth. + Indent string + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should also +// implement JSONPBUnmarshaler so that the custom format can be parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// Marshal serializes a protobuf message as JSON into w. +func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { + b, err := jm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// MarshalToString serializes a protobuf message as JSON in string form. +func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { + b, err := jm.marshal(m) + if err != nil { + return "", err + } + return string(b), nil +} + +func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { + v := reflect.ValueOf(m) + if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, errors.New("Marshal called with nil") + } + + // Check for custom marshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsm, ok := m.(JSONPBMarshaler); ok { + return jsm.MarshalJSONPB(jm) + } + + if wrapJSONMarshalV2 { + opts := protojson.MarshalOptions{ + UseProtoNames: jm.OrigName, + UseEnumNumbers: jm.EnumsAsInts, + EmitUnpopulated: jm.EmitDefaults, + Indent: jm.Indent, + } + if jm.AnyResolver != nil { + opts.Resolver = anyResolver{jm.AnyResolver} + } + return opts.Marshal(proto.MessageReflect(m).Interface()) + } else { + // Check for unpopulated required fields first. + m2 := proto.MessageReflect(m) + if err := protoV2.CheckInitialized(m2.Interface()); err != nil { + return nil, err + } + + w := jsonWriter{Marshaler: jm} + err := w.marshalMessage(m2, "", "") + return w.buf, err + } +} + +type jsonWriter struct { + *Marshaler + buf []byte +} + +func (w *jsonWriter) write(s string) { + w.buf = append(w.buf, s...) 
+} + +func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { + if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(w.Marshaler) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + w.write(string(b)) + return nil + } + + md := m.Descriptor() + fds := md.Fields() + + // Handle well-known types. + const secondInNanos = int64(time.Second / time.Nanosecond) + switch wellKnownType(md.FullName()) { + case "Any": + return w.marshalAny(m, indent) + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + case "Duration": + const maxSecondsInDuration = 315576000000 + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns + } + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vs"`, x)) + return nil + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vZ"`, x)) + return nil + case "Value": + // JSON value; which is a null, number, string, bool, object, or array. + od := md.Oneofs().Get(0) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("nil Value") + } + return w.marshalValue(fd, m.Get(fd), indent) + case "Struct", "ListValue": + // JSON object or array. 
+ fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + + firstField := true + if typeURL != "" { + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue + } + } else { + i++ + } + + v := m.Get(fd) + + if !m.Has(fd) { + if !w.EmitDefaults || fd.ContainingOneof() != nil { + continue + } + if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { + v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars + } + } + + if !firstField { + w.writeComma() + } + if err := w.marshalField(fd, v, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if md.ExtensionRanges().Len() > 0 { + // Collect a sorted list of all extension descriptor and values. + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + if !firstField { + w.writeComma() + } + if err := w.marshalField(ext.desc, ext.val, indent); err != nil { + return err + } + firstField = false + } + } + + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) writeComma() { + if w.Indent != "" { + w.write(",\n") + } else { + w.write(",") + } +} + +func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." 
+ md := m.Descriptor() + typeURL := m.Get(md.Fields().ByNumber(1)).String() + rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() + + var m2 protoreflect.Message + if w.AnyResolver != nil { + mi, err := w.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + return err + } + m2 = mt.New() + } + + if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { + return err + } + + if wellKnownType(m2.Descriptor().FullName()) == "" { + return w.marshalMessage(m2, indent, typeURL) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + w.writeComma() + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + w.write(`"value": `) + } else { + w.write(`"value":`) + } + if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { + return err + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"@type":`) + if w.Indent != "" { + w.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + w.write(string(b)) + return nil +} + +// marshalField writes field description and value to the Writer. +func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"`) + switch { + case fd.IsExtension(): + // For message set, use the fname of the message as the extension name. + name := string(fd.FullName()) + if isMessageSet(fd.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + w.write("[" + name + "]") + case w.OrigName: + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + w.write(name) + default: + w.write(string(fd.JSONName())) + } + w.write(`":`) + if w.Indent != "" { + w.write(" ") + } + return w.marshalValue(fd, v, indent) +} + +func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case fd.IsList(): + w.write("[") + comma := "" + lv := v.List() + for i := 0; i < lv.Len(); i++ { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write("]") + return nil + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + + // Collect a sorted list of all map keys and values. 
+ type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + + w.write(`{`) + comma := "" + for _, entry := range entries { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + + s := fmt.Sprint(entry.key.Interface()) + b, err := json.Marshal(s) + if err != nil { + return err + } + w.write(string(b)) + + w.write(`:`) + if w.Indent != "" { + w.write(` `) + } + + if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write(`}`) + return nil + default: + return w.marshalSingularValue(fd, v, indent) + } +} + +func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case !v.IsValid(): + w.write("null") + return nil + case fd.Message() != nil: + return w.marshalMessage(v.Message(), indent+w.Indent, "") + case fd.Enum() != nil: + if fd.Enum().FullName() == "google.protobuf.NullValue" { + w.write("null") + return nil + } + + vd := fd.Enum().Values().ByNumber(v.Enum()) + if vd == nil || w.EnumsAsInts { + w.write(strconv.Itoa(int(v.Enum()))) + } else { + w.write(`"` + string(vd.Name()) + `"`) + } + return nil + default: + switch v.Interface().(type) { + case float32, float64: + switch { + case math.IsInf(v.Float(), +1): + w.write(`"Infinity"`) + return nil + case math.IsInf(v.Float(), -1): + w.write(`"-Infinity"`) + return nil + case math.IsNaN(v.Float()): + w.write(`"NaN"`) + return nil + } + case int64, uint64: + w.write(fmt.Sprintf(`"%d"`, v.Interface())) + return nil + } + + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + w.write(string(b)) + return nil + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go new file mode 100644 index 000000000000..480e2448de66 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/json.go @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonpb provides functionality to marshal and unmarshal between a +// protocol buffer message and JSON. It follows the specification at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// Do not rely on the default behavior of the standard encoding/json package +// when called on generated message types as it does not operate correctly. +// +// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" +// package instead. 
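The package comment above notes that jsonpb is deprecated in favor of protojson, but this legacy API is what the vendored code here still imports. For reference, a minimal round-trip sketch of the Marshaler and Unmarshaler types added in decode.go and encode.go; it is not part of this patch and assumes the google.protobuf.StringValue well-known type from github.com/golang/protobuf/ptypes/wrappers:

```go
// Minimal sketch (not part of this patch) of the legacy jsonpb API.
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// Marshal a proto message to its canonical JSON form.
	m := &jsonpb.Marshaler{OrigName: true}
	out, err := m.MarshalToString(&wrappers.StringValue{Value: "hello"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // "hello" (well-known wrappers marshal as their scalar value)

	// Unmarshal it back; AllowUnknownFields corresponds to protojson's DiscardUnknown.
	var got wrappers.StringValue
	u := &jsonpb.Unmarshaler{AllowUnknownFields: true}
	if err := u.Unmarshal(strings.NewReader(out), &got); err != nil {
		panic(err)
	}
	fmt.Println(got.Value) // hello
}
```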
+package jsonpb + +import ( + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// AnyResolver takes a type URL, present in an Any message, +// and resolves it into an instance of the associated message. +type AnyResolver interface { + Resolve(typeURL string) (proto.Message, error) +} + +type anyResolver struct{ AnyResolver } + +func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return r.FindMessageByURL(string(message)) +} + +func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + m, err := r.Resolve(url) + if err != nil { + return nil, err + } + return protoimpl.X.MessageTypeOf(m), nil +} + +func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +func wellKnownType(s protoreflect.FullName) string { + if s.Parent() == "google.protobuf" { + switch s.Name() { + case "Empty", "Any", + "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue", + "Duration", "Timestamp", + "NullValue", "Struct", "Value", "ListValue": + return string(s.Name()) + } + } + return "" +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 2a5446762c51..087320da7f0f 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -13,21 +13,21 @@ // // The primary features of cmp are: // -// • When the default behavior of equality does not suit the needs of the test, -// custom equality functions can override the equality operation. -// For example, an equality function may report floats as equal so long as they -// are within some tolerance of each other. +// - When the default behavior of equality does not suit the test's needs, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as +// they are within some tolerance of each other. // -// • Types that have an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation for the types -// that they define. +// - Types with an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation +// for the types that they define. // -// • If no custom equality functions are used and no Equal method is defined, -// equality is determined by recursively comparing the primitive kinds on both -// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported -// fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly -// compared using the Exporter option. 
+// - If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on +// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, +// unexported fields are not compared by default; they result in panics +// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) +// or explicitly compared using the Exporter option. package cmp import ( @@ -40,28 +40,30 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) +// TODO(≥go1.18): Use any instead of interface{}. + // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // -// • Let S be the set of all Ignore, Transformer, and Comparer options that -// remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is greater than one, -// then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform the current -// values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. -// Otherwise, evaluation proceeds to the next rule. +// - Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is non-zero, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform +// the current values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. // -// • If the values have an Equal method of the form "(T) Equal(T) bool" or -// "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and -// evaluation proceeds to the next rule. +// - If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. // -// • Lastly, try to compare x and y based on their basic kinds. -// Simple kinds like booleans, integers, floats, complex numbers, strings, and -// channels are compared using the equivalent of the == operator in Go. -// Functions are only equal if they are both nil, otherwise they are unequal. +// - Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, +// and channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. // // Structs are equal if recursively calling Equal on all fields report equal. // If a struct contains unexported fields, Equal panics unless an Ignore option @@ -142,7 +144,7 @@ func rootStep(x, y interface{}) PathStep { // so that they have the same parent type. 
var t reflect.Type if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { - t = reflect.TypeOf((*interface{})(nil)).Elem() + t = anyType if vx.IsValid() { vvx := reflect.New(t).Elem() vvx.Set(vx) @@ -637,7 +639,9 @@ type dynChecker struct{ curr, next int } // Next increments the state and reports whether a check should be performed. // // Checks occur every Nth function call, where N is a triangular number: +// // 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// // See https://en.wikipedia.org/wiki/Triangular_number // // This sequence ensures that the cost of checks drops significantly as diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index bc196b16cfaa..a248e5436d98 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -127,9 +127,9 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 // This function returns an edit-script, which is a sequence of operations // needed to convert one list into the other. The following invariants for // the edit-script are maintained: -// • eq == (es.Dist()==0) -// • nx == es.LenX() -// • ny == es.LenY() +// - eq == (es.Dist()==0) +// - nx == es.LenX() +// - ny == es.LenY() // // This algorithm is not guaranteed to be an optimal solution (i.e., one that // produces an edit-script with a minimal Levenshtein distance). This algorithm @@ -169,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A diagonal edge is equivalent to a matching symbol between both X and Y. // Invariants: - // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx - // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny // // In general: - // • fwdFrontier.X < revFrontier.X - // • fwdFrontier.Y < revFrontier.Y + // - fwdFrontier.X < revFrontier.X + // - fwdFrontier.Y < revFrontier.Y + // // Unless, it is time for the algorithm to terminate. fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} revPath := path{-1, point{nx, ny}, make(EditScript, 0)} @@ -195,19 +196,21 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // computing sub-optimal edit-scripts between two lists. // // The algorithm is approximately as follows: - // • Searching for differences switches back-and-forth between - // a search that starts at the beginning (the top-left corner), and - // a search that starts at the end (the bottom-right corner). The goal of - // the search is connect with the search from the opposite corner. - // • As we search, we build a path in a greedy manner, where the first - // match seen is added to the path (this is sub-optimal, but provides a - // decent result in practice). When matches are found, we try the next pair - // of symbols in the lists and follow all matches as far as possible. - // • When searching for matches, we search along a diagonal going through - // through the "frontier" point. If no matches are found, we advance the - // frontier towards the opposite corner. - // • This algorithm terminates when either the X coordinates or the - // Y coordinates of the forward and reverse frontier points ever intersect. 
+ // - Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). + // The goal of the search is connect with the search + // from the opposite corner. + // - As we search, we build a path in a greedy manner, + // where the first match seen is added to the path (this is sub-optimal, + // but provides a decent result in practice). When matches are found, + // we try the next pair of symbols in the lists and follow all matches + // as far as possible. + // - When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, + // we advance the frontier towards the opposite corner. + // - This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. // This algorithm is correct even if searching only in the forward direction // or in the reverse direction. We do both because it is commonly observed @@ -389,6 +392,7 @@ type point struct{ X, Y int } func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } // zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// // [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] func zigzag(x int) int { if x&1 != 0 { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go deleted file mode 100644 index 9147a2997311..000000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package value - -import ( - "math" - "reflect" -) - -// IsZero reports whether v is the zero value. -// This does not rely on Interface and so can be used on unexported fields. 
-func IsZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !IsZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !IsZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index e57b9eb5392d..1f9ca9c4892b 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -33,6 +33,7 @@ type Option interface { } // applicableOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Grouping: Options type applicableOption interface { @@ -43,6 +44,7 @@ type applicableOption interface { } // coreOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Filters: *pathFilter | *valuesFilter type coreOption interface { @@ -336,9 +338,9 @@ func (tr transformer) String() string { // both implement T. // // The equality function must be: -// • Symmetric: equal(x, y) == equal(y, x) -// • Deterministic: equal(x, y) == equal(x, y) -// • Pure: equal(x, y) does not modify x or y +// - Symmetric: equal(x, y) == equal(y, x) +// - Deterministic: equal(x, y) == equal(x, y) +// - Pure: equal(x, y) does not modify x or y func Comparer(f interface{}) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.Equal) || v.IsNil() { @@ -430,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option { } // Result represents the comparison result for a single node and -// is provided by cmp when calling Result (see Reporter). +// is provided by cmp when calling Report (see Reporter). type Result struct { _ [0]func() // Make Result incomparable flags resultFlags diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index c7100346323b..a0a588502ed6 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -41,13 +41,13 @@ type PathStep interface { // The type of each valid value is guaranteed to be identical to Type. // // In some cases, one or both may be invalid or have restrictions: - // • For StructField, both are not interface-able if the current field - // is unexported and the struct type is not explicitly permitted by - // an Exporter to traverse unexported fields. - // • For SliceIndex, one may be invalid if an element is missing from - // either the x or y slice. - // • For MapIndex, one may be invalid if an entry is missing from - // either the x or y map. 
+ // - For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // - For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // - For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. // // The provided values must not be mutated. Values() (vx, vy reflect.Value) @@ -94,6 +94,7 @@ func (pa Path) Index(i int) PathStep { // The simplified path only contains struct field accesses. // // For example: +// // MyMap.MySlices.MyField func (pa Path) String() string { var ss []string @@ -108,6 +109,7 @@ func (pa Path) String() string { // GoString returns the path to a specific node using Go syntax. // // For example: +// // (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField func (pa Path) GoString() string { var ssPre, ssPost []string @@ -159,7 +161,7 @@ func (ps pathStep) String() string { if ps.typ == nil { return "" } - s := ps.typ.String() + s := value.TypeString(ps.typ, false) if s == "" || strings.ContainsAny(s, "{}\n") { return "root" // Type too simple or complex to print } @@ -282,7 +284,7 @@ type typeAssertion struct { func (ta TypeAssertion) Type() reflect.Type { return ta.typ } func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } -func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } // Transform is a transformation from the parent type to the current type. type Transform struct{ *transform } diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 104bb30538bc..2050bf6b46b7 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -7,8 +7,6 @@ package cmp import ( "fmt" "reflect" - - "github.com/google/go-cmp/cmp/internal/value" ) // numContextRecords is the number of surrounding equal records to print. @@ -116,7 +114,10 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out } // For leaf nodes, format the value based on the reflect.Values alone. - if v.MaxDepth == 0 { + // As a special case, treat equal []byte as a leaf nodes. + isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType + isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 + if v.MaxDepth == 0 || isEqualBytes { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. 
@@ -245,11 +246,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, pt var isZero bool switch opts.DiffMode { case diffIdentical: - isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() case diffRemoved: - isZero = value.IsZero(r.Value.ValueX) + isZero = r.Value.ValueX.IsZero() case diffInserted: - isZero = value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueY.IsZero() } if isZero { continue diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 76c04fdbd6a7..2ab41fad3fb5 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -16,6 +16,13 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) +var ( + anyType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + byteType = reflect.TypeOf((*byte)(nil)).Elem() +) + type formatValueOptions struct { // AvoidStringer controls whether to avoid calling custom stringer // methods like error.Error or fmt.Stringer.String. @@ -184,7 +191,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) - if value.IsZero(vv) { + if vv.IsZero() { continue // Elide fields with zero values } if len(list) == maxLen { @@ -205,13 +212,13 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Check whether this is a []byte of text data. - if t.Elem() == reflect.TypeOf(byte(0)) { + if t.Elem() == byteType { b := v.Bytes() isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { out = opts.formatString("", string(b)) skipType = true - return opts.WithTypeMode(emitType).FormatType(t, out) + return opts.FormatType(t, out) } } @@ -282,7 +289,12 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } defer ptrs.Pop() - skipType = true // Let the underlying value print the type instead + // Skip the name only if this is an unnamed pointer type. + // Otherwise taking the address of a value does not reproduce + // the named pointer type. + if v.Type().Name() == "" { + skipType = true // Let the underlying value print the type instead + } out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) out = &textWrap{Prefix: "&", Value: out} @@ -293,7 +305,6 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. 
- skipType = true // Print the concrete type instead return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 68b5c1ae164d..23e444f62f36 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -104,7 +104,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() isString = true - case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + case t.Kind() == reflect.Slice && t.Elem() == byteType: sx, sy = string(vx.Bytes()), string(vy.Bytes()) isString = true case t.Kind() == reflect.Array: @@ -147,7 +147,10 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }) efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) - isPureLinedText = efficiencyLines < 4*efficiencyBytes + quotedLength := len(strconv.Quote(sx + sy)) + unquotedLength := len(sx) + len(sy) + escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) + isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 } } @@ -171,12 +174,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // differences in a string literal. This format is more readable, // but has edge-cases where differences are visually indistinguishable. // This format is avoided under the following conditions: - // • A line starts with `"""` - // • A line starts with "..." - // • A line contains non-printable characters - // • Adjacent different lines differ only by whitespace + // - A line starts with `"""` + // - A line starts with "..." + // - A line contains non-printable characters + // - Adjacent different lines differ only by whitespace // // For example: + // // """ // ... 
// 3 identical lines // foo @@ -231,7 +235,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} switch t.Kind() { case reflect.String: - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: @@ -326,12 +330,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Kind() { case reflect.String: out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf([]byte(nil)) { + if t != bytesType { out = opts.FormatType(t, out) } } @@ -446,7 +450,6 @@ func (opts formatOptions) formatDiffSlice( // {NumIdentical: 3}, // {NumInserted: 1}, // ] -// func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { var prevMode byte lastStats := func(mode byte) *diffStats { @@ -503,7 +506,6 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) // {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, // {NumIdentical: 63}, // ] -// func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { @@ -548,7 +550,6 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat // {NumRemoved: 9}, // {NumIdentical: 64}, // incremented by 10 // ] -// func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { var ix, iy int // indexes into sequence x and y for i, ds := range groups { diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 0fd46d7ffb6e..388fcf571208 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -393,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats { // String prints a humanly-readable summary of coalesced records. // // Example: +// // diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" func (s diffStats) String() string { var ss []string diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml index f8684d99fc4e..061d72ae079b 100644 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ b/vendor/github.com/google/gofuzz/.travis.yml @@ -1,13 +1,10 @@ language: go go: - - 1.4 - - 1.3 - - 1.2 - - tip - -install: - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + - 1.11.x + - 1.12.x + - 1.13.x + - master script: - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md index 51cf5cd1adae..97c1b34fd5e6 100644 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md @@ -1,7 +1,7 @@ # How to contribute # We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. +just a few small guidelines you need to follow. 
## Contributor License Agreement ## diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md index 386c2a457a8b..b503aae7d713 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/github.com/google/gofuzz/README.md @@ -68,4 +68,22 @@ f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. See more examples in ```example_test.go```. +You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing. +go-fuzz provides the user a byte-slice, which should be converted to different inputs +for the tested function. This library can help convert the byte slice. Consider for +example a fuzz test for a the function `mypackage.MyFunc` that takes an int arguments: +```go +// +build gofuzz +package mypackage + +import fuzz "github.com/google/gofuzz" + +func Fuzz(data []byte) int { + var i int + fuzz.NewFromGoFuzz(data).Fuzz(&i) + MyFunc(i) + return 0 +} +``` + Happy testing! diff --git a/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/vendor/github.com/google/gofuzz/bytesource/bytesource.go new file mode 100644 index 000000000000..5bb365949691 --- /dev/null +++ b/vendor/github.com/google/gofuzz/bytesource/bytesource.go @@ -0,0 +1,81 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package bytesource provides a rand.Source64 that is determined by a slice of bytes. +package bytesource + +import ( + "bytes" + "encoding/binary" + "io" + "math/rand" +) + +// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are +// generated from each 8 bytes in the slice, until the last bytes are consumed, from which a +// fallback pseudo random source is created in case more random numbers are required. +// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly. +type ByteSource struct { + *bytes.Reader + fallback rand.Source +} + +// New returns a new ByteSource from a given slice of bytes. +func New(input []byte) *ByteSource { + s := &ByteSource{ + Reader: bytes.NewReader(input), + fallback: rand.NewSource(0), + } + if len(input) > 0 { + s.fallback = rand.NewSource(int64(s.consumeUint64())) + } + return s +} + +func (s *ByteSource) Uint64() uint64 { + // Return from input if it was not exhausted. + if s.Len() > 0 { + return s.consumeUint64() + } + + // Input was exhausted, return random number from fallback (in this case fallback should not be + // nil). Try first having a Uint64 output (Should work in current rand implementation), + // otherwise return a conversion of Int63. + if s64, ok := s.fallback.(rand.Source64); ok { + return s64.Uint64() + } + return uint64(s.fallback.Int63()) +} + +func (s *ByteSource) Int63() int64 { + return int64(s.Uint64() >> 1) +} + +func (s *ByteSource) Seed(seed int64) { + s.fallback = rand.NewSource(seed) + s.Reader = bytes.NewReader(nil) +} + +// consumeUint64 reads 8 bytes from the input and convert them to a uint64. It assumes that the the +// bytes reader is not empty. 
+func (s *ByteSource) consumeUint64() uint64 { + var bytes [8]byte + _, err := s.Read(bytes[:]) + if err != nil && err != io.EOF { + panic("failed reading source") // Should not happen. + } + return binary.BigEndian.Uint64(bytes[:]) +} diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index da0a5f93800d..761520a8ceeb 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -22,6 +22,9 @@ import ( "reflect" "regexp" "time" + + "github.com/google/gofuzz/bytesource" + "strings" ) // fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. @@ -61,6 +64,34 @@ func NewWithSeed(seed int64) *Fuzzer { return f } +// NewFromGoFuzz is a helper function that enables using gofuzz (this +// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous +// fuzzing. Essentially, it enables translating the fuzzing bytes from +// go-fuzz to any Go object using this library. +// +// This implementation promises a constant translation from a given slice of +// bytes to the fuzzed objects. This promise will remain over future +// versions of Go and of this library. +// +// Note: the returned Fuzzer should not be shared between multiple goroutines, +// as its deterministic output will no longer be available. +// +// Example: use go-fuzz to test the function `MyFunc(int)` in the package +// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content: +// +// // +build gofuzz +// package mypacakge +// import fuzz "github.com/google/gofuzz" +// func Fuzz(data []byte) int { +// var i int +// fuzz.NewFromGoFuzz(data).Fuzz(&i) +// MyFunc(i) +// return 0 +// } +func NewFromGoFuzz(data []byte) *Fuzzer { + return New().RandSource(bytesource.New(data)) +} + // Funcs adds each entry in fuzzFuncs as a custom fuzzing function. // // Each entry in fuzzFuncs must be a function taking two parameters. @@ -141,7 +172,7 @@ func (f *Fuzzer) genElementCount() int { } func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() > f.nilChance + return f.r.Float64() >= f.nilChance } // MaxDepth sets the maximum number of recursive fuzz calls that will be made @@ -240,6 +271,7 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { fn(v, fc.fuzzer.r) return } + switch v.Kind() { case reflect.Map: if fc.fuzzer.genShouldFill() { @@ -450,10 +482,10 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ v.SetFloat(r.Float64()) }, reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex128(complex(r.Float32(), r.Float32()))) }, reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex(r.Float64(), r.Float64())) }, reflect.String: func(v reflect.Value, r *rand.Rand) { v.SetString(randString(r)) @@ -465,38 +497,105 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ // randBool returns true or false randomly. func randBool(r *rand.Rand) bool { - if r.Int()&1 == 1 { - return true - } - return false + return r.Int31()&(1<<30) == 0 +} + +type int63nPicker interface { + Int63n(int64) int64 } -type charRange struct { - first, last rune +// UnicodeRange describes a sequential range of unicode characters. +// Last must be numerically greater than First. +type UnicodeRange struct { + First, Last rune } +// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters. 
+// To be useful, each range must have at least one character (First <= Last) and +// there must be at least one range. +type UnicodeRanges []UnicodeRange + // choose returns a random unicode character from the given range, using the // given randomness source. -func (r *charRange) choose(rand *rand.Rand) rune { - count := int64(r.last - r.first) - return r.first + rune(rand.Int63n(count)) +func (ur UnicodeRange) choose(r int63nPicker) rune { + count := int64(ur.Last - ur.First + 1) + return ur.First + rune(r.Int63n(count)) +} + +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. +// Each character is selected from the range ur. If there are no characters +// in the range (cr.Last < cr.First), this will panic. +func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) { + ur.check() + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } } -var unicodeRanges = []charRange{ +// check is a function that used to check whether the first of ur(UnicodeRange) +// is greater than the last one. +func (ur UnicodeRange) check() { + if ur.Last < ur.First { + panic("The last encoding must be greater than the first one.") + } +} + +// randString of UnicodeRange makes a random string up to 20 characters long. +// Each character is selected form ur(UnicodeRange). +func (ur UnicodeRange) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur.choose(r)) + } + return sb.String() +} + +// defaultUnicodeRanges sets a default unicode range when user do not set +// CustomStringFuzzFunc() but wants fuzz string. +var defaultUnicodeRanges = UnicodeRanges{ {' ', '~'}, // ASCII characters {'\u00a0', '\u02af'}, // Multi-byte encoded characters {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) } +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. +// Each character is selected from one of the ranges of ur(UnicodeRanges). +// Each range has an equal probability of being chosen. If there are no ranges, +// or a selected range has no characters (.Last < .First), this will panic. +// Do not modify any of the ranges in ur after calling this function. +func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) { + // Check unicode ranges slice is empty. + if len(ur) == 0 { + panic("UnicodeRanges is empty.") + } + // if not empty, each range should be checked. + for i := range ur { + ur[i].check() + } + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } +} + +// randString of UnicodeRanges makes a random string up to 20 characters long. +// Each character is selected form one of the ranges of ur(UnicodeRanges), +// and each range has an equal probability of being chosen. +func (ur UnicodeRanges) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) + } + return sb.String() +} + // randString makes a random string up to 20 characters long. The returned string // may include a variety of (valid) UTF-8 encodings. func randString(r *rand.Rand) string { - n := r.Intn(20) - runes := make([]rune, n) - for i := range runes { - runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) - } - return string(runes) + return defaultUnicodeRanges.randString(r) } // randUint64 makes random 64 bit numbers. 
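The gofuzz hunks above introduce `UnicodeRange`/`UnicodeRanges` and their `CustomStringFuzzFunc` helpers for constraining the characters used in generated strings. As a minimal usage sketch (not part of this patch; the `main` package and the lowercase-ASCII range are illustrative assumptions), a consumer of the bumped dependency could register such a function via `Funcs`:

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

func main() {
	// Draw string characters only from 'a'..'z'; with several ranges,
	// each range would be chosen with equal probability.
	ranges := fuzz.UnicodeRanges{{First: 'a', Last: 'z'}}

	// Register the custom string generator; other kinds keep their defaults.
	f := fuzz.New().Funcs(ranges.CustomStringFuzzFunc())

	var s string
	f.Fuzz(&s) // s is a random string up to 20 characters long, lowercase ASCII only
	fmt.Println(s)
}
```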
diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go new file mode 100644 index 000000000000..d7fcbf286516 --- /dev/null +++ b/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. +// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index 60d26bb50c6a..a57207aeb6fd 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -12,6 +12,7 @@ import ( "fmt" "io" "strings" + "sync" ) // A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC @@ -33,7 +34,15 @@ const ( Future // Reserved for future definition. 
) -var rander = rand.Reader // random function +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) type invalidLengthError struct{ len int } @@ -41,6 +50,12 @@ func (err invalidLengthError) Error() string { return fmt.Sprintf("invalid UUID length: %d", err.len) } +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + // Parse decodes s into a UUID or returns an error. Both the standard UUID // forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the @@ -249,3 +264,31 @@ func SetRand(r io.Reader) { } rander = r } + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go index 86160fbd0725..7697802e4d16 100644 --- a/vendor/github.com/google/uuid/version4.go +++ b/vendor/github.com/google/uuid/version4.go @@ -27,6 +27,8 @@ func NewString() string { // The strength of the UUIDs is based on the strength of the crypto/rand // package. // +// Uses the randomness pool if it was enabled with EnableRandPool. +// // A note about uniqueness derived from the UUID Wikipedia entry: // // Randomly generated UUIDs have 122 random bits. One's annual risk of being @@ -35,7 +37,10 @@ func NewString() string { // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() (UUID, error) { - return NewRandomFromReader(rander) + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() } // NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. 
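The uuid hunks above add `EnableRandPool`/`DisableRandPool` and route `NewRandom` through a 256-byte batch pool when enabled. A usage sketch against the public `github.com/google/uuid` API (illustrative only, not code introduced by this bump) might look like:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Per the doc comments above, these calls are not thread-safe: enable the
	// pool before any goroutine can call New / NewRandom concurrently.
	uuid.EnableRandPool()
	defer uuid.DisableRandPool()

	for i := 0; i < 3; i++ {
		// Version 4 UUIDs are now served from the pool, which is refilled
		// from crypto/rand in 256-byte batches.
		fmt.Println(uuid.NewString())
	}
}
```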
@@ -49,3 +54,23 @@ func NewRandomFromReader(r io.Reader) (UUID, error) { uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 return uuid, nil } + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE b/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go new file mode 100644 index 000000000000..aecaff59ee8a --- /dev/null +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -0,0 +1,182 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). +// +// The signer binary is OS-specific, but exposes a standard set of APIs for the client to use. +package client + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/gob" + "fmt" + "io" + "io/ioutil" + "log" + "net/rpc" + "os" + "os/exec" + + "github.com/googleapis/enterprise-certificate-proxy/client/util" +) + +const signAPI = "EnterpriseCertSigner.Sign" +const certificateChainAPI = "EnterpriseCertSigner.CertificateChain" +const publicKeyAPI = "EnterpriseCertSigner.Public" + +// A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser. +type Connection struct { + io.ReadCloser + io.WriteCloser +} + +// Close closes c's underlying ReadCloser and WriteCloser. +func (c *Connection) Close() error { + rerr := c.ReadCloser.Close() + werr := c.WriteCloser.Close() + if rerr != nil { + return rerr + } + return werr +} + +// If ECP Logging is enabled return true +// Otherwise return false +func enableECPLogging() bool { + if os.Getenv("ENABLE_ENTERPRISE_CERTIFICATE_LOGS") != "" { + return true + } + + log.SetOutput(ioutil.Discard) + return false +} + +func init() { + gob.Register(crypto.SHA256) + gob.Register(&rsa.PSSOptions{}) +} + +// SignArgs contains arguments to a crypto Signer.Sign method. +type SignArgs struct { + Digest []byte // The content to sign. + Opts crypto.SignerOpts // Options for signing, such as Hash identifier. +} + +// Key implements credential.Credential by holding the executed signer subprocess. +type Key struct { + cmd *exec.Cmd // Pointer to the signer subprocess. + client *rpc.Client // Pointer to the rpc client that communicates with the signer subprocess. + publicKey crypto.PublicKey // Public key of loaded certificate. + chain [][]byte // Certificate chain of loaded certificate. 
+} + +// CertificateChain returns the credential as a raw X509 cert chain. This contains the public key. +func (k *Key) CertificateChain() [][]byte { + return k.chain +} + +// Close closes the RPC connection and kills the signer subprocess. +// Call this to free up resources when the Key object is no longer needed. +func (k *Key) Close() error { + if err := k.cmd.Process.Kill(); err != nil { + return fmt.Errorf("failed to kill signer process: %w", err) + } + // Wait for cmd to exit and release resources. Since the process is forcefully killed, this + // will return a non-nil error (varies by OS), which we will ignore. + k.cmd.Wait() + // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed. + // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault. + if err := k.client.Close(); err.Error() != "close |0: file already closed" { + return fmt.Errorf("failed to close RPC connection: %w", err) + } + return nil +} + +// Public returns the public key for this Key. +func (k *Key) Public() crypto.PublicKey { + return k.publicKey +} + +// Sign signs a message digest, using the specified signer options. +func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) { + if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() { + return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size()) + } + err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed) + return +} + +// Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate +// related operations, including signing messages with the private key. +// +// The signer binary path is read from the specified configFilePath, if provided. +// Otherwise, use the default config file path. +// +// The config file also specifies which certificate the signer should use. +func Cred(configFilePath string) (*Key, error) { + enableECPLogging() + if configFilePath == "" { + configFilePath = util.GetDefaultConfigFilePath() + } + enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) + if err != nil { + return nil, err + } + k := &Key{ + cmd: exec.Command(enterpriseCertSignerPath, configFilePath), + } + + // Redirect errors from subprocess to parent process. + k.cmd.Stderr = os.Stderr + + // RPC client will communicate with subprocess over stdin/stdout. 
+ kin, err := k.cmd.StdinPipe() + if err != nil { + return nil, err + } + kout, err := k.cmd.StdoutPipe() + if err != nil { + return nil, err + } + k.client = rpc.NewClient(&Connection{kout, kin}) + + if err := k.cmd.Start(); err != nil { + return nil, fmt.Errorf("starting enterprise cert signer subprocess: %w", err) + } + + if err := k.client.Call(certificateChainAPI, struct{}{}, &k.chain); err != nil { + return nil, fmt.Errorf("failed to retrieve certificate chain: %w", err) + } + + var publicKeyBytes []byte + if err := k.client.Call(publicKeyAPI, struct{}{}, &publicKeyBytes); err != nil { + return nil, fmt.Errorf("failed to retrieve public key: %w", err) + } + + publicKey, err := x509.ParsePKIXPublicKey(publicKeyBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse public key: %w", err) + } + + var ok bool + k.publicKey, ok = publicKey.(crypto.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid public key type: %T", publicKey) + } + + switch pub := k.publicKey.(type) { + case *rsa.PublicKey: + if pub.Size() < 256 { + return nil, fmt.Errorf("RSA modulus size is less than 2048 bits: %v", pub.Size()*8) + } + case *ecdsa.PublicKey: + default: + return nil, fmt.Errorf("unsupported public key type: %v", pub) + } + + return k, nil +} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go new file mode 100644 index 000000000000..ccef5278a309 --- /dev/null +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go @@ -0,0 +1,71 @@ +// Package util provides helper functions for the client. +package util + +import ( + "encoding/json" + "errors" + "io/ioutil" + "os" + "os/user" + "path/filepath" + "runtime" +) + +const configFileName = "certificate_config.json" + +// EnterpriseCertificateConfig contains parameters for initializing signer. +type EnterpriseCertificateConfig struct { + Libs Libs `json:"libs"` +} + +// Libs specifies the locations of helper libraries. +type Libs struct { + ECP string `json:"ecp"` +} + +// LoadSignerBinaryPath retrieves the path of the signer binary from the config file. +func LoadSignerBinaryPath(configFilePath string) (path string, err error) { + jsonFile, err := os.Open(configFilePath) + if err != nil { + return "", err + } + + byteValue, err := ioutil.ReadAll(jsonFile) + if err != nil { + return "", err + } + var config EnterpriseCertificateConfig + err = json.Unmarshal(byteValue, &config) + if err != nil { + return "", err + } + signerBinaryPath := config.Libs.ECP + if signerBinaryPath == "" { + return "", errors.New("signer binary path is missing") + } + return signerBinaryPath, nil +} + +func guessHomeDir() string { + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if v := os.Getenv("HOME"); v != "" { + return v + } + // Else, fall back to user.Current: + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} + +func getDefaultConfigFileDirectory() (directory string) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud") + } + return filepath.Join(guessHomeDir(), ".config/gcloud") +} + +// GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud. 
+func GetDefaultConfigFilePath() (path string) { + return filepath.Join(getDefaultConfigFileDirectory(), configFileName) +} diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json new file mode 100644 index 000000000000..d88960b7ef17 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + "v2": "2.7.0" +} diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md new file mode 100644 index 000000000000..b75170f2227c --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -0,0 +1,47 @@ +# Changelog + +## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) + + +### Features + +* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6)) +* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f)) + +## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) + + +### Features + +* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3)) + +## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04) + + +### Bug Fixes + +* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23)) + +## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04) + + +### Features + +* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb)) + +## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09) + + +### Features + +* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c)) + + +### Bug Fixes + +* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e)) + + +### Miscellaneous Chores + +* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719)) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index 869379da96f3..aa6be1304f17 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -28,10 +28,11 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Package apierror implements a wrapper error for parsing error details from -// API calls. Currently, only errors representing a gRPC status are supported. +// API calls. 
Both HTTP & gRPC status errors are supported. package apierror import ( + "errors" "fmt" "strings" @@ -40,6 +41,7 @@ import ( "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) // ErrDetails holds the google/rpc/error_details.proto messages. @@ -59,6 +61,30 @@ type ErrDetails struct { Unknown []interface{} } +// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages. +var ErrMessageNotFound = errors.New("message not found") + +// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the +// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type, +// the content of the message is copied to the provided message. +// +// ExtractProtoMessage will return ErrMessageNotFound if there are no message matching the +// protocol buffer type of the provided message. +func (e ErrDetails) ExtractProtoMessage(v proto.Message) error { + if v == nil { + return ErrMessageNotFound + } + for _, elem := range e.Unknown { + if elemProto, ok := elem.(proto.Message); ok { + if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() { + proto.Merge(v, elemProto) + return nil + } + } + } + return ErrMessageNotFound +} + func (e ErrDetails) String() string { var d strings.Builder if e.ErrorInfo != nil { @@ -207,29 +233,49 @@ func (a *APIError) Metadata() map[string]string { } -// FromError parses a Status error or a googleapi.Error and builds an APIError. -func FromError(err error) (*APIError, bool) { - if err == nil { - return nil, false - } - - ae := APIError{err: err} +// setDetailsFromError parses a Status error or a googleapi.Error +// and sets status and details or httpErr and details, respectively. +// It returns false if neither Status nor googleapi.Error can be parsed. +func (a *APIError) setDetailsFromError(err error) bool { st, isStatus := status.FromError(err) - herr, isHTTPErr := err.(*googleapi.Error) + var herr *googleapi.Error + isHTTPErr := errors.As(err, &herr) switch { case isStatus: - ae.status = st - ae.details = parseDetails(st.Details()) + a.status = st + a.details = parseDetails(st.Details()) case isHTTPErr: - ae.httpErr = herr - ae.details = parseHTTPDetails(herr) + a.httpErr = herr + a.details = parseHTTPDetails(herr) default: - return nil, false + return false } + return true +} - return &ae, true +// FromError parses a Status error or a googleapi.Error and builds an +// APIError, wrapping the provided error in the new APIError. It +// returns false if neither Status nor googleapi.Error can be parsed. +func FromError(err error) (*APIError, bool) { + return ParseError(err, true) +} +// ParseError parses a Status error or a googleapi.Error and builds an +// APIError. If wrap is true, it wraps the error in the new APIError. +// It returns false if neither Status nor googleapi.Error can be parsed. 
+func ParseError(err error, wrap bool) (*APIError, bool) { + if err == nil { + return nil, false + } + ae := APIError{} + if wrap { + ae = APIError{err: err} + } + if !ae.setDetailsFromError(err) { + return nil, false + } + return &ae, true } // parseDetails accepts a slice of interface{} that should be backed by some diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go new file mode 100644 index 000000000000..e4b03f161d82 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go @@ -0,0 +1,256 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.17.3 +// source: custom_error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Error code for `CustomError`. +type CustomError_CustomErrorCode int32 + +const ( + // Default error. + CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0 + // Too many foo. + CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1 + // Not enough foo. + CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2 + // Catastrophic error. + CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3 +) + +// Enum value maps for CustomError_CustomErrorCode. +var ( + CustomError_CustomErrorCode_name = map[int32]string{ + 0: "CUSTOM_ERROR_CODE_UNSPECIFIED", + 1: "TOO_MANY_FOO", + 2: "NOT_ENOUGH_FOO", + 3: "UNIVERSE_WAS_DESTROYED", + } + CustomError_CustomErrorCode_value = map[string]int32{ + "CUSTOM_ERROR_CODE_UNSPECIFIED": 0, + "TOO_MANY_FOO": 1, + "NOT_ENOUGH_FOO": 2, + "UNIVERSE_WAS_DESTROYED": 3, + } +) + +func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode { + p := new(CustomError_CustomErrorCode) + *p = x + return p +} + +func (x CustomError_CustomErrorCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor { + return file_custom_error_proto_enumTypes[0].Descriptor() +} + +func (CustomError_CustomErrorCode) Type() protoreflect.EnumType { + return &file_custom_error_proto_enumTypes[0] +} + +func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead. 
+func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0, 0} +} + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +type CustomError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error code specific to the custom API being invoked. + Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"` + // Name of the failed entity. + Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` + // Message that describes the error. + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *CustomError) Reset() { + *x = CustomError{} + if protoimpl.UnsafeEnabled { + mi := &file_custom_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomError) ProtoMessage() {} + +func (x *CustomError) ProtoReflect() protoreflect.Message { + mi := &file_custom_error_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomError.ProtoReflect.Descriptor instead. +func (*CustomError) Descriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0} +} + +func (x *CustomError) GetCode() CustomError_CustomErrorCode { + if x != nil { + return x.Code + } + return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED +} + +func (x *CustomError) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *CustomError) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_custom_error_proto protoreflect.FileDescriptor + +var file_custom_error_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 
0x4f, 0x4f, 0x5f, 0x4d, 0x41, + 0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f, + 0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, + 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53, + 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_custom_error_proto_rawDescOnce sync.Once + file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc +) + +func file_custom_error_proto_rawDescGZIP() []byte { + file_custom_error_proto_rawDescOnce.Do(func() { + file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData) + }) + return file_custom_error_proto_rawDescData +} + +var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_custom_error_proto_goTypes = []interface{}{ + (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode + (*CustomError)(nil), // 1: error.CustomError +} +var file_custom_error_proto_depIdxs = []int32{ + 0, // 0: error.CustomError.code:type_name -> error.CustomError.CustomErrorCode + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_custom_error_proto_init() } +func file_custom_error_proto_init() { + if File_custom_error_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_custom_error_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_custom_error_proto_goTypes, + DependencyIndexes: file_custom_error_proto_depIdxs, + EnumInfos: file_custom_error_proto_enumTypes, + MessageInfos: file_custom_error_proto_msgTypes, + }.Build() + File_custom_error_proto = out.File + file_custom_error_proto_rawDesc = nil + file_custom_error_proto_goTypes = nil + file_custom_error_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto new file mode 100644 index 000000000000..21678ae65c99 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto @@ -0,0 +1,50 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package error; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +message CustomError { + + // Error code for `CustomError`. + enum CustomErrorCode { + // Default error. + CUSTOM_ERROR_CODE_UNSPECIFIED = 0; + + // Too many foo. + TOO_MANY_FOO = 1; + + // Not enough foo. + NOT_ENOUGH_FOO = 2; + + // Catastrophic error. + UNIVERSE_WAS_DESTROYED = 3; + + } + + // Error code specific to the custom API being invoked. + CustomErrorCode code = 1; + + // Name of the failed entity. + string entity = 2; + + // Message that describes the error. + string error_message = 3; +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go index 27b34c06e281..7dd9b83739a9 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go @@ -14,9 +14,9 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.0 // protoc v3.15.8 -// source: error.proto +// source: apierror/internal/proto/error.proto package jsonerror @@ -55,7 +55,7 @@ type Error struct { func (x *Error) Reset() { *x = Error{} if protoimpl.UnsafeEnabled { - mi := &file_error_proto_msgTypes[0] + mi := &file_apierror_internal_proto_error_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -68,7 +68,7 @@ func (x *Error) String() string { func (*Error) ProtoMessage() {} func (x *Error) ProtoReflect() protoreflect.Message { - mi := &file_error_proto_msgTypes[0] + mi := &file_apierror_internal_proto_error_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -81,7 +81,7 @@ func (x *Error) ProtoReflect() protoreflect.Message { // Deprecated: Use Error.ProtoReflect.Descriptor instead. 
func (*Error) Descriptor() ([]byte, []int) { - return file_error_proto_rawDescGZIP(), []int{0} + return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0} } func (x *Error) GetError() *Error_Status { @@ -112,7 +112,7 @@ type Error_Status struct { func (x *Error_Status) Reset() { *x = Error_Status{} if protoimpl.UnsafeEnabled { - mi := &file_error_proto_msgTypes[1] + mi := &file_apierror_internal_proto_error_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -125,7 +125,7 @@ func (x *Error_Status) String() string { func (*Error_Status) ProtoMessage() {} func (x *Error_Status) ProtoReflect() protoreflect.Message { - mi := &file_error_proto_msgTypes[1] + mi := &file_apierror_internal_proto_error_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -138,7 +138,7 @@ func (x *Error_Status) ProtoReflect() protoreflect.Message { // Deprecated: Use Error_Status.ProtoReflect.Descriptor instead. func (*Error_Status) Descriptor() ([]byte, []int) { - return file_error_proto_rawDescGZIP(), []int{0, 0} + return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0, 0} } func (x *Error_Status) GetCode() int32 { @@ -169,53 +169,55 @@ func (x *Error_Status) GetDetails() []*anypb.Any { return nil } -var File_error_proto protoreflect.FileDescriptor +var File_apierror_internal_proto_error_proto protoreflect.FileDescriptor -var file_error_proto_rawDesc = []byte{ - 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, - 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, - 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, - 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +var file_apierror_internal_proto_error_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, + 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, + 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( - file_error_proto_rawDescOnce sync.Once - file_error_proto_rawDescData = file_error_proto_rawDesc + file_apierror_internal_proto_error_proto_rawDescOnce sync.Once + file_apierror_internal_proto_error_proto_rawDescData = file_apierror_internal_proto_error_proto_rawDesc ) -func file_error_proto_rawDescGZIP() []byte { - file_error_proto_rawDescOnce.Do(func() { - file_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_error_proto_rawDescData) +func file_apierror_internal_proto_error_proto_rawDescGZIP() []byte { + file_apierror_internal_proto_error_proto_rawDescOnce.Do(func() { + file_apierror_internal_proto_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_apierror_internal_proto_error_proto_rawDescData) }) - return file_error_proto_rawDescData + return file_apierror_internal_proto_error_proto_rawDescData } -var file_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_error_proto_goTypes = []interface{}{ +var file_apierror_internal_proto_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_apierror_internal_proto_error_proto_goTypes = 
[]interface{}{ (*Error)(nil), // 0: error.Error (*Error_Status)(nil), // 1: error.Error.Status (code.Code)(0), // 2: google.rpc.Code (*anypb.Any)(nil), // 3: google.protobuf.Any } -var file_error_proto_depIdxs = []int32{ +var file_apierror_internal_proto_error_proto_depIdxs = []int32{ 1, // 0: error.Error.error:type_name -> error.Error.Status 2, // 1: error.Error.Status.status:type_name -> google.rpc.Code 3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any @@ -226,13 +228,13 @@ var file_error_proto_depIdxs = []int32{ 0, // [0:3] is the sub-list for field type_name } -func init() { file_error_proto_init() } -func file_error_proto_init() { - if File_error_proto != nil { +func init() { file_apierror_internal_proto_error_proto_init() } +func file_apierror_internal_proto_error_proto_init() { + if File_apierror_internal_proto_error_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_apierror_internal_proto_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Error); i { case 0: return &v.state @@ -244,7 +246,7 @@ func file_error_proto_init() { return nil } } - file_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_apierror_internal_proto_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Error_Status); i { case 0: return &v.state @@ -261,18 +263,18 @@ func file_error_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_error_proto_rawDesc, + RawDescriptor: file_apierror_internal_proto_error_proto_rawDesc, NumEnums: 0, NumMessages: 2, NumExtensions: 0, NumServices: 0, }, - GoTypes: file_error_proto_goTypes, - DependencyIndexes: file_error_proto_depIdxs, - MessageInfos: file_error_proto_msgTypes, + GoTypes: file_apierror_internal_proto_error_proto_goTypes, + DependencyIndexes: file_apierror_internal_proto_error_proto_depIdxs, + MessageInfos: file_apierror_internal_proto_error_proto_msgTypes, }.Build() - File_error_proto = out.File - file_error_proto_rawDesc = nil - file_error_proto_goTypes = nil - file_error_proto_depIdxs = nil + File_apierror_internal_proto_error_proto = out.File + file_apierror_internal_proto_error_proto_rawDesc = nil + file_apierror_internal_proto_error_proto_goTypes = nil + file_apierror_internal_proto_error_proto_depIdxs = nil } diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go index 425a7668d1ee..e092005563bf 100644 --- a/vendor/github.com/googleapis/gax-go/v2/call_option.go +++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -30,9 +30,11 @@ package gax import ( + "errors" "math/rand" "time" + "google.golang.org/api/googleapi" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -119,6 +121,41 @@ func (r *boRetryer) Retry(err error) (time.Duration, bool) { return 0, false } +// OnHTTPCodes returns a Retryer that retries if and only if +// the previous attempt returns a googleapi.Error whose status code is stored in +// cc. Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. 
+func OnHTTPCodes(bo Backoff, cc ...int) Retryer { + codes := make(map[int]bool, len(cc)) + for _, c := range cc { + codes[c] = true + } + + return &httpRetryer{ + backoff: bo, + codes: codes, + } +} + +type httpRetryer struct { + backoff Backoff + codes map[int]bool +} + +func (r *httpRetryer) Retry(err error) (time.Duration, bool) { + var gerr *googleapi.Error + if !errors.As(err, &gerr) { + return 0, false + } + + if r.codes[gerr.Code] { + return r.backoff.Pause(), true + } + + return 0, false +} + // Backoff implements exponential backoff. The wait time between retries is a // random value between 0 and the "retry period" - the time between retries. The // retry period starts at Initial and increases by the factor of Multiplier @@ -173,6 +210,21 @@ func (o grpcOpt) Resolve(s *CallSettings) { s.GRPC = o } +type pathOpt struct { + p string +} + +func (p pathOpt) Resolve(s *CallSettings) { + s.Path = p.p +} + +// WithPath applies a Path override to the HTTP-based APICall. +// +// This is for internal use only. +func WithPath(p string) CallOption { + return &pathOpt{p: p} +} + // WithGRPCOptions allows passing gRPC call options during client creation. func WithGRPCOptions(opt ...grpc.CallOption) CallOption { return grpcOpt(append([]grpc.CallOption(nil), opt...)) @@ -186,4 +238,7 @@ type CallSettings struct { // CallOptions to be forwarded to GRPC. GRPC []grpc.CallOption + + // Path is an HTTP override for an APICall. + Path string } diff --git a/vendor/github.com/googleapis/gax-go/v2/content_type.go b/vendor/github.com/googleapis/gax-go/v2/content_type.go new file mode 100644 index 000000000000..1b53d0a3ac1a --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/content_type.go @@ -0,0 +1,112 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
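(Illustrative sketch, not part of the vendored patch.) The OnHTTPCodes retryer added above is only consulted when gax.Invoke drives the call; a minimal use might look like the following, where "fetch" is a hypothetical caller-supplied closure that surfaces *googleapi.Error values on HTTP failure:

// Sketch only: retry a REST call on 429/503 with the new OnHTTPCodes retryer.
package example

import (
	"context"
	"net/http"
	"time"

	gax "github.com/googleapis/gax-go/v2"
)

func callWithHTTPRetry(ctx context.Context, fetch func(context.Context) error) error {
	retryer := gax.OnHTTPCodes(gax.Backoff{
		Initial:    250 * time.Millisecond,
		Max:        5 * time.Second,
		Multiplier: 2.0,
	}, http.StatusTooManyRequests, http.StatusServiceUnavailable)

	// Invoke re-issues the call whenever the retryer says to, sleeping for the
	// backoff-derived pause between attempts.
	return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		return fetch(ctx)
	}, gax.WithRetry(func() gax.Retryer { return retryer }))
}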
+ +package gax + +import ( + "io" + "io/ioutil" + "net/http" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. + err error // set to any error encountered while reading bytes to be sniffed. + + ctype string // set on first sniff. + sniffed bool // set to true on first sniff. +} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. + _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. + return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +} + +// DetermineContentType determines the content type of the supplied reader. +// The content of media will be sniffed to determine the content type. +// After calling DetectContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned. +func DetermineContentType(media io.Reader) (io.Reader, string) { + // For backwards compatibility, allow clients to set content + // type by providing a ContentTyper for media. + // Note: This is an anonymous interface definition copied from googleapi.ContentTyper. + if typer, ok := media.(interface { + ContentType() string + }); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go index f634b43727eb..36cdfa33e351 100644 --- a/vendor/github.com/googleapis/gax-go/v2/gax.go +++ b/vendor/github.com/googleapis/gax-go/v2/gax.go @@ -35,5 +35,7 @@ // to simplify code generation and to provide more convenient and idiomatic API surfaces. package gax +import "github.com/googleapis/gax-go/v2/internal" + // Version specifies the gax-go version being used. -const Version = "2.1.1" +const Version = internal.Version diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go new file mode 100644 index 000000000000..0ba5da1dd1ea --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -0,0 +1,33 @@ +// Copyright 2022, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package internal + +// Version is the current tagged release of the library. +const Version = "2.7.0" diff --git a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go new file mode 100644 index 000000000000..cc4486eb9e5f --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go @@ -0,0 +1,126 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
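(Illustrative sketch, not part of the vendored patch.) The proto_json_stream.go file that follows adds a ProtoJSONStream reader; a typical consumer drains it with Recv until io.EOF and always closes it. The message type and the io.ReadCloser source here are assumptions for illustration:

// Sketch only: draining a ProtoJSONStream until the closing ']' yields io.EOF.
package example

import (
	"errors"
	"fmt"
	"io"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
)

func drainStream(body io.ReadCloser, typ protoreflect.MessageType) ([]proto.Message, error) {
	stream := gax.NewProtoJSONStreamReader(body, typ)
	defer stream.Close()

	var msgs []proto.Message
	for {
		m, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			// End of the JSON array: the stream is exhausted.
			return msgs, nil
		}
		if err != nil {
			return nil, fmt.Errorf("decoding stream element: %w", err)
		}
		msgs = append(msgs, m)
	}
}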
+ +package gax + +import ( + "encoding/json" + "errors" + "io" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var ( + arrayOpen = json.Delim('[') + arrayClose = json.Delim(']') + errBadOpening = errors.New("unexpected opening token, expected '['") +) + +// ProtoJSONStream represents a wrapper for consuming a stream of protobuf +// messages encoded using protobuf-JSON format. More information on this format +// can be found at https://developers.google.com/protocol-buffers/docs/proto3#json. +// The stream must appear as a comma-delimited, JSON array of obbjects with +// opening and closing square braces. +// +// This is for internal use only. +type ProtoJSONStream struct { + first, closed bool + reader io.ReadCloser + stream *json.Decoder + typ protoreflect.MessageType +} + +// NewProtoJSONStreamReader accepts a stream of bytes via an io.ReadCloser that are +// protobuf-JSON encoded protobuf messages of the given type. The ProtoJSONStream +// must be closed when done. +// +// This is for internal use only. +func NewProtoJSONStreamReader(rc io.ReadCloser, typ protoreflect.MessageType) *ProtoJSONStream { + return &ProtoJSONStream{ + first: true, + reader: rc, + stream: json.NewDecoder(rc), + typ: typ, + } +} + +// Recv decodes the next protobuf message in the stream or returns io.EOF if +// the stream is done. It is not safe to call Recv on the same stream from +// different goroutines, just like it is not safe to do so with a single gRPC +// stream. Type-cast the protobuf message returned to the type provided at +// ProtoJSONStream creation. +// Calls to Recv after calling Close will produce io.EOF. +func (s *ProtoJSONStream) Recv() (proto.Message, error) { + if s.closed { + return nil, io.EOF + } + if s.first { + s.first = false + + // Consume the opening '[' so Decode gets one object at a time. + if t, err := s.stream.Token(); err != nil { + return nil, err + } else if t != arrayOpen { + return nil, errBadOpening + } + } + + // Capture the next block of data for the item (a JSON object) in the stream. + var raw json.RawMessage + if err := s.stream.Decode(&raw); err != nil { + e := err + // To avoid checking the first token of each stream, just attempt to + // Decode the next blob and if that fails, double check if it is just + // the closing token ']'. If it is the closing, return io.EOF. If it + // isn't, return the original error. + if t, _ := s.stream.Token(); t == arrayClose { + e = io.EOF + } + return nil, e + } + + // Initialize a new instance of the protobuf message to unmarshal the + // raw data into. + m := s.typ.New().Interface() + err := protojson.Unmarshal(raw, m) + + return m, err +} + +// Close closes the stream so that resources are cleaned up. +func (s *ProtoJSONStream) Close() error { + // Dereference the *json.Decoder so that the memory is gc'd. 
+ s.stream = nil + s.closed = true + + return s.reader.Close() +} diff --git a/vendor/github.com/googleapis/gax-go/v2/release-please-config.json b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json new file mode 100644 index 000000000000..61ee266a159e --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json @@ -0,0 +1,10 @@ +{ + "release-type": "go-yoshi", + "separate-pull-requests": true, + "include-component-in-tag": false, + "packages": { + "v2": { + "component": "v2" + } + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml deleted file mode 100644 index 1f7ed53bc35d..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml +++ /dev/null @@ -1,533 +0,0 @@ -- job: - name: gophercloud-unittest - parent: golang-test - description: | - Run gophercloud unit test - run: .zuul/playbooks/gophercloud-unittest/run.yaml - nodeset: ubuntu-xenial-ut - -- job: - name: gophercloud-acceptance-test-base - parent: golang-test - run: .zuul/playbooks/gophercloud-acceptance-test/run.yaml - description: | - Base job for all gophercloud acceptance tests - timeout: 18000 # 5 hours - abstract: true - nodeset: ubuntu-focal - irrelevant-files: - - ^.*\.md$ - - ^LICENSE$ - -- job: - name: gophercloud-acceptance-test-compute - parent: gophercloud-acceptance-test-base - description: | - Run gophercloud compute acceptance test on master branch. This runs when any file is patched - except if it's doc. - vars: - # the list of all available tests can generated by: - # find acceptance/openstack -name '*_test.go' -exec dirname {} \; | sort -n | uniq -c | awk '{print $2}' - acceptance_tests: - - acceptance/openstack - - acceptance/openstack/compute/v2 - - acceptance/openstack/container/v1 - - acceptance/openstack/identity/v2 - - acceptance/openstack/identity/v3 - - acceptance/openstack/keymanager/v1 - - acceptance/openstack/orchestration/v1 - - acceptance/openstack/placement/v1 - devstack_services: - - barbican - - heat - - zun - -- job: - name: gophercloud-acceptance-test-networking - parent: gophercloud-acceptance-test-base - description: | - Run gophercloud networking acceptance test on master branch - files: - - ^.*dns.*$ - - ^.*loadbalancer.*$ - - ^.*networking.*$ - vars: - prefetch_amphora: true - devstack_env: - OCTAVIA_AMP_IMAGE_FILE: /opt/octavia-amphora/amphora-x64-haproxy.qcow2 - # the list of all available tests can generated by: - # find acceptance/openstack -name '*_test.go' -exec dirname {} \; | sort -n | uniq -c | awk '{print $2}' - acceptance_tests: - - acceptance/openstack/dns/v2 - - acceptance/openstack/loadbalancer/v2 - - acceptance/openstack/networking/v2 - - acceptance/openstack/networking/v2/extensions - - acceptance/openstack/networking/v2/extensions/agents - - acceptance/openstack/networking/v2/extensions/dns - - acceptance/openstack/networking/v2/extensions/layer3 - - acceptance/openstack/networking/v2/extensions/mtu - - acceptance/openstack/networking/v2/extensions/networkipavailabilities - - acceptance/openstack/networking/v2/extensions/portsbinding - - acceptance/openstack/networking/v2/extensions/qos/policies - - acceptance/openstack/networking/v2/extensions/qos/rules - - acceptance/openstack/networking/v2/extensions/qos/ruletypes - - acceptance/openstack/networking/v2/extensions/quotas - - acceptance/openstack/networking/v2/extensions/rbacpolicies - - acceptance/openstack/networking/v2/extensions/subnetpools - - 
acceptance/openstack/networking/v2/extensions/trunks - - acceptance/openstack/networking/v2/extensions/vlantransparent - devstack_projects: 'openstack/neutron-dynamic-routing' - devstack_services: - - designate - - neutron-dynamic-routing - - neutron-ext - - octavia - -- job: - name: gophercloud-acceptance-test-storage - parent: gophercloud-acceptance-test-base - description: | - Run gophercloud storage acceptance test on master branch - files: - - ^.*blockstorage.*$ - - ^.*imageservice.*$ - - ^.*objectstorage.*$ - - ^.*sharedfilesystems.*$ - vars: - acceptance_tests: - - acceptance/openstack/blockstorage - - acceptance/openstack/blockstorage/extensions - - acceptance/openstack/blockstorage/v3 - - acceptance/openstack/imageservice/v2 - - acceptance/openstack/objectstorage/v1 - - acceptance/openstack/sharedfilesystems/v2 - - acceptance/openstack/sharedfilesystems/v2/messages - devstack_services: - - manila - -- job: - name: gophercloud-acceptance-test-ironic - parent: gophercloud-acceptance-test-base - description: | - Run gophercloud ironic acceptance test on master branch - files: - - ^.*baremetal.*$ - vars: - devstack_services: - - ironic - devstack_override_enabled_services: 'g-api,g-reg,q-agt,q-dhcp,q-l3,q-svc,key,mysql,rabbit,ir-api,ir-cond,s-account,s-container,s-object,s-proxy' - devstack_projects: 'openstack/ironic-python-agent-builder openstack/ironic' - acceptance_tests: - - acceptance/openstack/baremetal/v1 - -- job: - name: gophercloud-acceptance-test-compute-ussuri - parent: gophercloud-acceptance-test-compute - nodeset: ubuntu-bionic - description: | - Run gophercloud compute acceptance test on ussuri branch - vars: - global_env: - OS_BRANCH: stable/ussuri - -- job: - name: gophercloud-acceptance-test-networking-ussuri - parent: gophercloud-acceptance-test-networking - nodeset: ubuntu-bionic - description: | - Run gophercloud networking acceptance test on ussuri branch - vars: - global_env: - OS_BRANCH: stable/ussuri - -- job: - name: gophercloud-acceptance-test-storage-ussuri - parent: gophercloud-acceptance-test-storage - nodeset: ubuntu-bionic - description: | - Run gophercloud storage test on ussuri branch - vars: - global_env: - OS_BRANCH: stable/ussuri - -- job: - name: gophercloud-acceptance-test-compute-train - parent: gophercloud-acceptance-test-compute - nodeset: ubuntu-xenial - description: | - Run gophercloud compute acceptance test on train branch - vars: - global_env: - OS_BRANCH: stable/train - -- job: - name: gophercloud-acceptance-test-networking-train - parent: gophercloud-acceptance-test-networking - nodeset: ubuntu-xenial - description: | - Run gophercloud networking acceptance test on train branch - vars: - global_env: - OS_BRANCH: stable/train - -- job: - name: gophercloud-acceptance-test-storage-train - parent: gophercloud-acceptance-test-storage - nodeset: ubuntu-xenial - description: | - Run gophercloud storage acceptance test on train branch - vars: - global_env: - OS_BRANCH: stable/train - -- job: - name: gophercloud-acceptance-test-compute-stein - parent: gophercloud-acceptance-test-compute - nodeset: ubuntu-xenial - description: | - Run gophercloud compute acceptance test on stein branch - vars: - global_env: - OS_BRANCH: stable/stein - -- job: - name: gophercloud-acceptance-test-networking-stein - parent: gophercloud-acceptance-test-networking - nodeset: ubuntu-xenial - description: | - Run gophercloud networking acceptance test on stein branch - vars: - global_env: - OS_BRANCH: stable/stein - -- job: - name: 
gophercloud-acceptance-test-storage-stein - parent: gophercloud-acceptance-test-storage - nodeset: ubuntu-xenial - description: | - Run gophercloud storage acceptance test on stein branch - vars: - global_env: - OS_BRANCH: stable/stein - -- job: - name: gophercloud-acceptance-test-compute-rocky - parent: gophercloud-acceptance-test-compute - nodeset: ubuntu-xenial - description: | - Run gophercloud compute acceptance test on rocky branch - vars: - global_env: - OS_BRANCH: stable/rocky - -- job: - name: gophercloud-acceptance-test-networking-rocky - parent: gophercloud-acceptance-test-networking - nodeset: ubuntu-xenial - description: | - Run gophercloud networking acceptance test on rocky branch - vars: - global_env: - OS_BRANCH: stable/rocky - -- job: - name: gophercloud-acceptance-test-storage-rocky - parent: gophercloud-acceptance-test-storage - nodeset: ubuntu-xenial - description: | - Run gophercloud storage acceptance test on rocky branch - vars: - global_env: - OS_BRANCH: stable/rocky - -- job: - name: gophercloud-acceptance-test-compute-queens - parent: gophercloud-acceptance-test-compute - description: | - Run gophercloud compute acceptance test on queens branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/queens - -- job: - name: gophercloud-acceptance-test-networking-queens - parent: gophercloud-acceptance-test-networking - description: | - Run gophercloud networking acceptance test on queens branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/queens - -- job: - name: gophercloud-acceptance-test-storage-queens - parent: gophercloud-acceptance-test-storage - description: | - Run gophercloud storage acceptance test on queens branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/queens - -# NOTE: A Pike-based devstack environment is currently -# not building correctly. This might be a temporary issue. -- job: - name: gophercloud-acceptance-test-compute-pike - parent: gophercloud-acceptance-test-compute - description: | - Run gophercloud compute acceptance test on pike branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/pike - -- job: - name: gophercloud-acceptance-test-networking-pike - parent: gophercloud-acceptance-test-networking - description: | - Run gophercloud networking acceptance test on pike branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/pike - -- job: - name: gophercloud-acceptance-test-storage-pike - parent: gophercloud-acceptance-test-storage - description: | - Run gophercloud storage acceptance test on pike branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/pike - -- job: - name: gophercloud-acceptance-test-compute-ocata - parent: gophercloud-acceptance-test-compute - description: | - Run gophercloud compute acceptance test on ocata branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/ocata - -- job: - name: gophercloud-acceptance-test-networking-ocata - parent: gophercloud-acceptance-test-networking - description: | - Run gophercloud networking acceptance test on ocata branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/ocata - -- job: - name: gophercloud-acceptance-test-storage-ocata - parent: gophercloud-acceptance-test-storage - description: | - Run gophercloud storage acceptance test on ocata branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/ocata - -# NOTE: A Newton-based devstack environment is currently -# not building correctly. 
This might be a temporary issue. -- job: - name: gophercloud-acceptance-test-compute-newton - parent: gophercloud-acceptance-test-compute - description: | - Run gophercloud compute acceptance test on newton branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/newton - -- job: - name: gophercloud-acceptance-test-networking-newton - parent: gophercloud-acceptance-test-networking - description: | - Run gophercloud networking acceptance test on newton branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/newton - -- job: - name: gophercloud-acceptance-test-storage-newton - parent: gophercloud-acceptance-test-storage - description: | - Run gophercloud storage acceptance test on newton branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/newton - -# The following jobs are maintained because they are parents -# for gophercloud v0.4.0 acceptance tests in theopenlab/openlab-zuul-jobs. -# TODO(emilien) update the childs to use the new variants. -- job: - name: gophercloud-acceptance-test-legacy - parent: gophercloud-acceptance-test-base - description: | - THIS JOB REMAINS FOR LEGACY. Will be removed soon to be replaced by its variants. - Run gophercloud acceptance test on the master branch. This runs when any file is patched - except if it's doc. - vars: - # the list of all available tests can generated by: - # find acceptance/openstack -name '*_test.go' -exec dirname {} \; | sort -n | uniq -c | awk '{print $2}' - acceptance_tests: - - acceptance/openstack - - acceptance/openstack/blockstorage - - acceptance/openstack/blockstorage/extensions - - acceptance/openstack/blockstorage/v3 - - acceptance/openstack/compute/v2 - - acceptance/openstack/container/v1 - - acceptance/openstack/dns/v2 - - acceptance/openstack/identity/v2 - - acceptance/openstack/identity/v3 - - acceptance/openstack/imageservice/v2 - - acceptance/openstack/keymanager/v1 - - acceptance/openstack/loadbalancer/v2 - - acceptance/openstack/networking/v2 - - acceptance/openstack/networking/v2/extensions - - acceptance/openstack/networking/v2/extensions/agents - - acceptance/openstack/networking/v2/extensions/dns - - acceptance/openstack/networking/v2/extensions/layer3 - - acceptance/openstack/networking/v2/extensions/mtu - - acceptance/openstack/networking/v2/extensions/networkipavailabilities - - acceptance/openstack/networking/v2/extensions/portsbinding - - acceptance/openstack/networking/v2/extensions/qos/policies - - acceptance/openstack/networking/v2/extensions/qos/rules - - acceptance/openstack/networking/v2/extensions/qos/ruletypes - - acceptance/openstack/networking/v2/extensions/quotas - - acceptance/openstack/networking/v2/extensions/rbacpolicies - - acceptance/openstack/networking/v2/extensions/subnetpools - - acceptance/openstack/networking/v2/extensions/trunks - - acceptance/openstack/networking/v2/extensions/vlantransparent - - acceptance/openstack/objectstorage/v1 - - acceptance/openstack/orchestration/v1 - - acceptance/openstack/placement/v1 - - acceptance/openstack/sharedfilesystems/v2 - - acceptance/openstack/sharedfilesystems/v2/messages - devstack_services: - - designate - - manila - - neutron-ext - - octavia - - zun - -- job: - name: gophercloud-acceptance-test-stein - parent: gophercloud-acceptance-test-legacy - nodeset: ubuntu-xenial - description: | - Run gophercloud acceptance test on stein branch - vars: - global_env: - OS_BRANCH: stable/stein - -- job: - name: gophercloud-acceptance-test-rocky - parent: gophercloud-acceptance-test-legacy - nodeset: 
ubuntu-xenial - description: | - Run gophercloud acceptance test on rocky branch - vars: - global_env: - OS_BRANCH: stable/rocky - -- job: - name: gophercloud-acceptance-test-queens - parent: gophercloud-acceptance-test-legacy - description: | - Run gophercloud acceptance test on queens branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/queens - -# NOTE: A Pike-based devstack environment is currently -# not building correctly. This might be a temporary issue. -- job: - name: gophercloud-acceptance-test-pike - parent: gophercloud-acceptance-test-legacy - description: | - Run gophercloud acceptance test on pike branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/pike - -- job: - name: gophercloud-acceptance-test-ocata - parent: gophercloud-acceptance-test-legacy - description: | - Run gophercloud acceptance test on ocata branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/ocata - -# NOTE: A Newton-based devstack environment is currently -# not building correctly. This might be a temporary issue. -- job: - name: gophercloud-acceptance-test-newton - parent: gophercloud-acceptance-test-legacy - description: | - Run gophercloud acceptance test on newton branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/newton - -- project: - name: gophercloud/gophercloud - check: - jobs: - - gophercloud-unittest - - gophercloud-acceptance-test-compute - - gophercloud-acceptance-test-networking - - gophercloud-acceptance-test-storage - - gophercloud-acceptance-test-ironic - recheck-newton: - jobs: - - gophercloud-acceptance-test-compute-newton - - gophercloud-acceptance-test-networking-newton - - gophercloud-acceptance-test-storage-newton - recheck-ocata: - jobs: - - gophercloud-acceptance-test-compute-ocata - - gophercloud-acceptance-test-networking-ocata - - gophercloud-acceptance-test-storage-ocata - recheck-pike: - jobs: - - gophercloud-acceptance-test-compute-pike - - gophercloud-acceptance-test-networking-pike - - gophercloud-acceptance-test-storage-pike - recheck-queens: - jobs: - - gophercloud-acceptance-test-compute-queens - - gophercloud-acceptance-test-networking-queens - - gophercloud-acceptance-test-storage-queens - recheck-rocky: - jobs: - - gophercloud-acceptance-test-compute-rocky - - gophercloud-acceptance-test-networking-rocky - - gophercloud-acceptance-test-storage-rocky - recheck-stein: - jobs: - - gophercloud-acceptance-test-compute-stein - - gophercloud-acceptance-test-networking-stein - - gophercloud-acceptance-test-storage-stein - recheck-train: - jobs: - - gophercloud-acceptance-test-compute-train - - gophercloud-acceptance-test-networking-train - - gophercloud-acceptance-test-storage-train - recheck-ussuri: - jobs: - - gophercloud-acceptance-test-compute-ussuri - - gophercloud-acceptance-test-networking-ussuri - - gophercloud-acceptance-test-storage-ussuri diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md index 25b52a4fb4dd..0c8137c10d2e 100644 --- a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md +++ b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md @@ -1,3 +1,62 @@ +## 1.0.0 (2022-08-29) + +UPGRADE NOTES + PROMISE OF COMPATIBILITY + +* Introducing Gophercloud v1! Like for every other release so far, all clients will upgrade automatically with `go get -d github.com/gophercloud/gophercloud` unless the dependency is pinned in `go.mod`. 
+* Gophercloud v1 comes with a promise of compatibility: no breaking changes are expected to merge before v2.0.0. + +IMPROVEMENTS + +* Added `compute.v2/extensions/services.Delete` [GH-2427](https://github.com/gophercloud/gophercloud/pull/2427) +* Added support for `standard-attr-revisions` to `networking/v2/networks`, `networking/v2/ports`, and `networking/v2/subnets` [GH-2437](https://github.com/gophercloud/gophercloud/pull/2437) +* Added `updated_at` and `created_at` fields to `networking/v2/ports.Port` [GH-2445](https://github.com/gophercloud/gophercloud/pull/2445) + +## 0.25.0 (May 30, 2022) + +BREAKING CHANGES + +* Replaced `blockstorage/noauth.NewBlockStorageNoAuth` with `NewBlockStorageNoAuthV2` and `NewBlockStorageNoAuthV3` [GH-2343](https://github.com/gophercloud/gophercloud/pull/2343) +* Renamed `blockstorage/extensions/schedulerstats.Capabilities`'s `GoodnessFuction` field to `GoodnessFunction` [GH-2346](https://github.com/gophercloud/gophercloud/pull/2346) + +IMPROVEMENTS + +* Added `RequestOpts.OmitHeaders` to provider client [GH-2315](https://github.com/gophercloud/gophercloud/pull/2315) +* Added `identity/v3/extensions/projectendpoints.List` [GH-2304](https://github.com/gophercloud/gophercloud/pull/2304) +* Added `identity/v3/extensions/projectendpoints.Create` [GH-2304](https://github.com/gophercloud/gophercloud/pull/2304) +* Added `identity/v3/extensions/projectendpoints.Delete` [GH-2304](https://github.com/gophercloud/gophercloud/pull/2304) +* Added protocol `any` to `networking/v2/extensions/security/rules.Create` [GH-2310](https://github.com/gophercloud/gophercloud/pull/2310) +* Added `REDIRECT_PREFIX` and `REDIRECT_HTTP_CODE` to `loadbalancer/v2/l7policies.Create` [GH-2324](https://github.com/gophercloud/gophercloud/pull/2324) +* Added `SOURCE_IP_PORT` LB method to `loadbalancer/v2/pools.Create` [GH-2300](https://github.com/gophercloud/gophercloud/pull/2300) +* Added `AllocatedCapacityGB` capability to `blockstorage/extensions/schedulerstats.Capabilities` [GH-2348](https://github.com/gophercloud/gophercloud/pull/2348) +* Added `Metadata` to `dns/v2/recordset.RecordSet` [GH-2353](https://github.com/gophercloud/gophercloud/pull/2353) +* Added missing fields to `compute/v2/extensions/servergroups.List` [GH-2355](https://github.com/gophercloud/gophercloud/pull/2355) +* Added missing labels fields to `containerinfra/v1/nodegroups` [GH-2377](https://github.com/gophercloud/gophercloud/pull/2377) +* Added missing fields to `loadbalancer/v2/listeners.Listener` [GH-2407](https://github.com/gophercloud/gophercloud/pull/2407) +* Added `identity/v3/limits.List` [GH-2360](https://github.com/gophercloud/gophercloud/pull/2360) +* Added `ParentProviderUUID` to `placement/v1/resourceproviders.Create` [GH-2356](https://github.com/gophercloud/gophercloud/pull/2356) +* Added `placement/v1/resourceproviders.Delete` [GH-2357](https://github.com/gophercloud/gophercloud/pull/2357) +* Added `placement/v1/resourceproviders.Get` [GH-2358](https://github.com/gophercloud/gophercloud/pull/2358) +* Added `placement/v1/resourceproviders.Update` [GH-2359](https://github.com/gophercloud/gophercloud/pull/2359) +* Added `networking/v2/extensions/bgp/peers.List` [GH-2241](https://github.com/gophercloud/gophercloud/pull/2241) +* Added `networking/v2/extensions/bgp/peers.Get` [GH-2241](https://github.com/gophercloud/gophercloud/pull/2241) +* Added `networking/v2/extensions/bgp/peers.Create` [GH-2388](https://github.com/gophercloud/gophercloud/pull/2388) +* Added `networking/v2/extensions/bgp/peers.Delete` 
[GH-2388](https://github.com/gophercloud/gophercloud/pull/2388) +* Added `networking/v2/extensions/bgp/peers.Update` [GH-2396](https://github.com/gophercloud/gophercloud/pull/2396) +* Added `networking/v2/extensions/bgp/speakers.Create` [GH-2395](https://github.com/gophercloud/gophercloud/pull/2395) +* Added `networking/v2/extensions/bgp/speakers.Delete` [GH-2395](https://github.com/gophercloud/gophercloud/pull/2395) +* Added `networking/v2/extensions/bgp/speakers.Update` [GH-2400](https://github.com/gophercloud/gophercloud/pull/2400) +* Added `networking/v2/extensions/bgp/speakers.AddBGPPeer` [GH-2400](https://github.com/gophercloud/gophercloud/pull/2400) +* Added `networking/v2/extensions/bgp/speakers.RemoveBGPPeer` [GH-2400](https://github.com/gophercloud/gophercloud/pull/2400) +* Added `networking/v2/extensions/bgp/speakers.GetAdvertisedRoutes` [GH-2406](https://github.com/gophercloud/gophercloud/pull/2406) +* Added `networking/v2/extensions/bgp/speakers.AddGatewayNetwork` [GH-2406](https://github.com/gophercloud/gophercloud/pull/2406) +* Added `networking/v2/extensions/bgp/speakers.RemoveGatewayNetwork` [GH-2406](https://github.com/gophercloud/gophercloud/pull/2406) +* Added `baremetal/v1/nodes.SetMaintenance` and `baremetal/v1/nodes.UnsetMaintenance` [GH-2384](https://github.com/gophercloud/gophercloud/pull/2384) +* Added `sharedfilesystems/v2/services.List` [GH-2350](https://github.com/gophercloud/gophercloud/pull/2350) +* Added `sharedfilesystems/v2/schedulerstats.List` [GH-2350](https://github.com/gophercloud/gophercloud/pull/2350) +* Added `sharedfilesystems/v2/schedulerstats.ListDetail` [GH-2350](https://github.com/gophercloud/gophercloud/pull/2350) +* Added ability to handle 502 and 504 errors [GH-2245](https://github.com/gophercloud/gophercloud/pull/2245) +* Added `IncludeSubtree` to `identity/v3/roles.ListAssignments` [GH-2411](https://github.com/gophercloud/gophercloud/pull/2411) + ## 0.24.0 (December 13, 2021) UPGRADE NOTES diff --git a/vendor/github.com/gophercloud/gophercloud/LICENSE b/vendor/github.com/gophercloud/gophercloud/LICENSE index fbbbc9e4cbad..c3f4f2f7c9ba 100644 --- a/vendor/github.com/gophercloud/gophercloud/LICENSE +++ b/vendor/github.com/gophercloud/gophercloud/LICENSE @@ -1,4 +1,5 @@ Copyright 2012-2013 Rackspace, Inc. +Copyright Gophercloud authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md index b429f090518f..c4fcf47533ad 100644 --- a/vendor/github.com/gophercloud/gophercloud/README.md +++ b/vendor/github.com/gophercloud/gophercloud/README.md @@ -126,7 +126,9 @@ Have a look at the [FAQ](./docs/FAQ.md) for some tips on customizing the way Gop ## Backwards-Compatibility Guarantees -None. Vendor it and write tests covering the parts you use. +Gophercloud versioning follows [semver](https://semver.org/spec/v2.0.0.html). + +Before `v1.0.0`, there were no guarantees. Starting with v1, there will be no breaking changes within a major release. ## Contributing diff --git a/vendor/github.com/gophercloud/gophercloud/auth_options.go b/vendor/github.com/gophercloud/gophercloud/auth_options.go index 4f301305e63e..335ce87957d5 100644 --- a/vendor/github.com/gophercloud/gophercloud/auth_options.go +++ b/vendor/github.com/gophercloud/gophercloud/auth_options.go @@ -12,20 +12,20 @@ provider. 
An example of manually providing authentication information: - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", - TenantID: "{tenant_id}", - } + opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + Username: "{username}", + Password: "{password}", + TenantID: "{tenant_id}", + } - provider, err := openstack.AuthenticatedClient(opts) + provider, err := openstack.AuthenticatedClient(opts) An example of using AuthOptionsFromEnv(), where the environment variables can be read from a file, such as a standard openrc file: - opts, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(opts) + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) */ type AuthOptions struct { // IdentityEndpoint specifies the HTTP endpoint that is required to work with diff --git a/vendor/github.com/gophercloud/gophercloud/doc.go b/vendor/github.com/gophercloud/gophercloud/doc.go index e2623b44e228..19b64d6508c0 100644 --- a/vendor/github.com/gophercloud/gophercloud/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/doc.go @@ -3,7 +3,7 @@ Package gophercloud provides a multi-vendor interface to OpenStack-compatible clouds. The library has a three-level hierarchy: providers, services, and resources. -Authenticating with Providers +# Authenticating with Providers Provider structs represent the cloud providers that offer and manage a collection of services. You will generally want to create one Provider @@ -49,7 +49,7 @@ instead of "project". opts, err := openstack.AuthOptionsFromEnv() provider, err := openstack.AuthenticatedClient(opts) -Service Clients +# Service Clients Service structs are specific to a provider and handle all of the logic and operations for a particular OpenStack service. Examples of services include: @@ -60,7 +60,7 @@ pass in the parent provider, like so: client, err := openstack.NewComputeV2(provider, opts) -Resources +# Resources Resource structs are the domain models that services make use of in order to work with and represent the state of API resources: @@ -144,6 +144,5 @@ An example retry backoff function, which respects the 429 HTTP response code and return nil } - */ package gophercloud diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go index 79271d3bec31..edba02badf76 100644 --- a/vendor/github.com/gophercloud/gophercloud/errors.go +++ b/vendor/github.com/gophercloud/gophercloud/errors.go @@ -156,11 +156,21 @@ type ErrDefault500 struct { ErrUnexpectedResponseCode } +// ErrDefault502 is the default error type returned on a 502 HTTP response code. +type ErrDefault502 struct { + ErrUnexpectedResponseCode +} + // ErrDefault503 is the default error type returned on a 503 HTTP response code. type ErrDefault503 struct { ErrUnexpectedResponseCode } +// ErrDefault504 is the default error type returned on a 504 HTTP response code. 
+type ErrDefault504 struct { + ErrUnexpectedResponseCode +} + func (e ErrDefault400) Error() string { e.DefaultErrString = fmt.Sprintf( "Bad request with: [%s %s], error message: %s", @@ -198,10 +208,16 @@ func (e ErrDefault429) Error() string { func (e ErrDefault500) Error() string { return "Internal Server Error" } +func (e ErrDefault502) Error() string { + return "Bad Gateway" +} func (e ErrDefault503) Error() string { return "The service is currently unable to handle the request due to a temporary" + " overloading or maintenance. This is a temporary condition. Try again later." } +func (e ErrDefault504) Error() string { + return "Gateway Timeout" +} // Err400er is the interface resource error types implement to override the error message // from a 400 error. @@ -257,12 +273,24 @@ type Err500er interface { Error500(ErrUnexpectedResponseCode) error } +// Err502er is the interface resource error types implement to override the error message +// from a 502 error. +type Err502er interface { + Error502(ErrUnexpectedResponseCode) error +} + // Err503er is the interface resource error types implement to override the error message // from a 503 error. type Err503er interface { Error503(ErrUnexpectedResponseCode) error } +// Err504er is the interface resource error types implement to override the error message +// from a 504 error. +type Err504er interface { + Error504(ErrUnexpectedResponseCode) error +} + // ErrTimeOut is the error type returned when an operations times out. type ErrTimeOut struct { BaseError diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go index 72ec69e5037c..656e2de4d7f9 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go @@ -412,19 +412,19 @@ func (opts RebootOpts) ToServerRebootMap() (map[string]interface{}, error) { } /* - Reboot requests that a given server reboot. +Reboot requests that a given server reboot. - Two methods exist for rebooting a server: +Two methods exist for rebooting a server: - HardReboot (aka PowerCycle) starts the server instance by physically cutting - power to the machine, or if a VM, terminating it at the hypervisor level. - It's done. Caput. Full stop. - Then, after a brief while, power is restored or the VM instance restarted. +HardReboot (aka PowerCycle) starts the server instance by physically cutting +power to the machine, or if a VM, terminating it at the hypervisor level. +It's done. Caput. Full stop. +Then, after a brief while, power is restored or the VM instance restarted. - SoftReboot (aka OSReboot) simply tells the OS to restart under its own - procedure. - E.g., in Linux, asking it to enter runlevel 6, or executing - "sudo shutdown -r now", or by asking Windows to rtart the machine. +SoftReboot (aka OSReboot) simply tells the OS to restart under its own +procedure. +E.g., in Linux, asking it to enter runlevel 6, or executing +"sudo shutdown -r now", or by asking Windows to rtart the machine. 
*/ func Reboot(client *gophercloud.ServiceClient, id string, opts RebootOptsBuilder) (r ActionResult) { b, err := opts.ToServerRebootMap() diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go index 8cfb8958e857..b92c666783b7 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go @@ -99,7 +99,8 @@ type GetPasswordResult struct { // If privateKey != nil the password is decrypted with the private key. // If privateKey == nil the encrypted password is returned and can be decrypted // with: -// echo '' | base64 -D | openssl rsautl -decrypt -inkey +// +// echo '' | base64 -D | openssl rsautl -decrypt -inkey func (r GetPasswordResult) ExtractPassword(privateKey *rsa.PrivateKey) (string, error) { var s struct { Password string `json:"password"` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go index 2b64f108cbd9..84f16c3fc28b 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go @@ -89,7 +89,7 @@ func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) (r Creat } resp, err := client.Post(CreateURL(client), b, &r.Body, &gophercloud.RequestOpts{ OkCodes: []int{200, 203}, - MoreHeaders: map[string]string{"X-Auth-Token": ""}, + OmitHeaders: []string{"X-Auth-Token"}, }) _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) return diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/doc.go index 1f6f807fe06e..a30d0faf3a92 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/doc.go @@ -36,6 +36,5 @@ Example to auth a client using EC2 access and secret keys if err != nil { panic(err) } - */ package ec2tokens diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/doc.go index c5b0831ca1f2..4294ef6c8985 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/doc.go @@ -118,6 +118,5 @@ Example to Create a Token using OAuth1 method if err != nil { panic(err) } - */ package oauth1 diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go index 966e128f1281..de74c82ecd21 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go @@ -103,6 +103,5 @@ Example to Create a Token from a Username and Password with Project Name Scope if err != nil { panic(err) } - */ package tokens diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go index d8c455d16022..1af55d813775 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go @@ -135,7 +135,7 @@ func Create(c *gophercloud.ServiceClient, opts AuthOptionsBuilder) (r CreateResu } resp, err := c.Post(tokenURL(c), b, &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: map[string]string{"X-Auth-Token": ""}, + OmitHeaders: []string{"X-Auth-Token"}, }) _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) return diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go index 6282894d3ad2..17b200cd2397 100644 --- a/vendor/github.com/gophercloud/gophercloud/params.go +++ b/vendor/github.com/gophercloud/gophercloud/params.go @@ -15,17 +15,17 @@ BuildRequestBody builds a map[string]interface from the given `struct`. If parent is not an empty string, the final map[string]interface returned will encapsulate the built one. For example: - disk := 1 - createOpts := flavors.CreateOpts{ - ID: "1", - Name: "m1.tiny", - Disk: &disk, - RAM: 512, - VCPUs: 1, - RxTxFactor: 1.0, - } - - body, err := gophercloud.BuildRequestBody(createOpts, "flavor") + disk := 1 + createOpts := flavors.CreateOpts{ + ID: "1", + Name: "m1.tiny", + Disk: &disk, + RAM: 512, + VCPUs: 1, + RxTxFactor: 1.0, + } + + body, err := gophercloud.BuildRequestBody(createOpts, "flavor") The above example can be run as-is, however it is recommended to look at how BuildRequestBody is used within Gophercloud to more fully understand how it @@ -401,22 +401,22 @@ It accepts an arbitrary tagged structure and produces a string map that's suitable for use as the HTTP headers of an outgoing request. Field names are mapped to header names based in "h" tags. - type struct Something { - Bar string `h:"x_bar"` - Baz int `h:"lorem_ipsum"` - } + type struct Something { + Bar string `h:"x_bar"` + Baz int `h:"lorem_ipsum"` + } - instance := Something{ - Bar: "AAA", - Baz: "BBB", - } + instance := Something{ + Bar: "AAA", + Baz: "BBB", + } will be converted into: - map[string]string{ - "x_bar": "AAA", - "lorem_ipsum": "BBB", - } + map[string]string{ + "x_bar": "AAA", + "lorem_ipsum": "BBB", + } Untagged fields and fields left at their zero values are skipped. Integers, booleans and string values are supported. diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go index 207ab88af848..cc263680c082 100644 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -325,10 +325,12 @@ type RequestOpts struct { // OkCodes contains a list of numeric HTTP status codes that should be interpreted as success. If // the response has a different code, an error will be returned. OkCodes []int - // MoreHeaders specifies additional HTTP headers to be provide on the request. If a header is - // provided with a blank value (""), that header will be *omitted* instead: use this to suppress - // the default Accept header or an inferred Content-Type, for example. + // MoreHeaders specifies additional HTTP headers to be provided on the request. + // MoreHeaders will be overridden by OmitHeaders MoreHeaders map[string]string + // OmitHeaders specifies the HTTP headers which should be omitted. 
+ // OmitHeaders will override MoreHeaders + OmitHeaders []string // ErrorContext specifies the resource error type to return if an error is encountered. // This lets resources override default error messages based on the response status code. ErrorContext error @@ -396,7 +398,8 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts req = req.WithContext(client.Context) } - // Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to + // Populate the request headers. + // Apply options.MoreHeaders and options.OmitHeaders, to give the caller the chance to // modify or omit any header. if contentType != nil { req.Header.Set("Content-Type", *contentType) @@ -412,6 +415,10 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts } } + for _, v := range options.OmitHeaders { + req.Header.Del(v) + } + // get latest token from client for k, v := range client.AuthenticatedHeaders() { req.Header.Set(k, v) @@ -556,11 +563,21 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts if error500er, ok := errType.(Err500er); ok { err = error500er.Error500(respErr) } + case http.StatusBadGateway: + err = ErrDefault502{respErr} + if error502er, ok := errType.(Err502er); ok { + err = error502er.Error502(respErr) + } case http.StatusServiceUnavailable: err = ErrDefault503{respErr} if error503er, ok := errType.(Err503er); ok { err = error503er.Error503(respErr) } + case http.StatusGatewayTimeout: + err = ErrDefault504{respErr} + if error504er, ok := errType.(Err504er); ok { + err = error504er.Error504(respErr) + } } if err == nil { diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md index 19aa2e75c824..2517a28715fa 100644 --- a/vendor/github.com/gorilla/websocket/README.md +++ b/vendor/github.com/gorilla/websocket/README.md @@ -6,6 +6,13 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + +--- + +⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)** + +--- + ### Documentation * [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) @@ -30,35 +37,3 @@ The Gorilla WebSocket package passes the server tests in the [Autobahn Test Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). -### Gorilla WebSocket compared with other packages - - - - - - - - - - - - - - - - - - -
 | github.com/gorilla | golang.org/x/net
RFC 6455 Features
Passes Autobahn Test Suite | Yes | No
Receive fragmented message | Yes | No, see note 1
Send close message | Yes | No
Send pings and receive pongs | Yes | No
Get the type of a received data message | Yes | Yes, see note 2
Other Features
Compression Extensions | Experimental | No
Read message using io.Reader | Yes | No, see note 3
Write message using io.WriteCloser | Yes | No, see note 3
- -Notes: - -1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). -2. The application can get the type of a received data message by implementing - a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) - function. -3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. - Read returns when the input buffer is full or a frame boundary is - encountered. Each call to Write sends a single frame message. The Gorilla - io.Reader and io.WriteCloser operate on a single WebSocket message. - diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go index 962c06a391c2..2efd83555d37 100644 --- a/vendor/github.com/gorilla/websocket/client.go +++ b/vendor/github.com/gorilla/websocket/client.go @@ -48,15 +48,23 @@ func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufS } // A Dialer contains options for connecting to WebSocket server. +// +// It is safe to call Dialer's methods concurrently. type Dialer struct { // NetDial specifies the dial function for creating TCP connections. If // NetDial is nil, net.Dial is used. NetDial func(network, addr string) (net.Conn, error) // NetDialContext specifies the dial function for creating TCP connections. If - // NetDialContext is nil, net.DialContext is used. + // NetDialContext is nil, NetDial is used. NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If + // NetDialTLSContext is nil, NetDialContext is used. + // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and + // TLSClientConfig is ignored. + NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) + // Proxy specifies a function to return a proxy for a given // Request. If the function returns a non-nil error, the // request is aborted with the provided error. @@ -65,6 +73,8 @@ type Dialer struct { // TLSClientConfig specifies the TLS configuration to use with tls.Client. // If nil, the default configuration is used. + // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake + // is done there and TLSClientConfig is ignored. TLSClientConfig *tls.Config // HandshakeTimeout specifies the duration for the handshake to complete. @@ -176,7 +186,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } req := &http.Request{ - Method: "GET", + Method: http.MethodGet, URL: u, Proto: "HTTP/1.1", ProtoMajor: 1, @@ -237,13 +247,32 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h // Get network dial function. 
var netDial func(network, add string) (net.Conn, error) - if d.NetDialContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialContext(ctx, network, addr) + switch u.Scheme { + case "http": + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial } - } else if d.NetDial != nil { - netDial = d.NetDial - } else { + case "https": + if d.NetDialTLSContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialTLSContext(ctx, network, addr) + } + } else if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + default: + return nil, nil, errMalformedURL + } + + if netDial == nil { netDialer := &net.Dialer{} netDial = func(network, addr string) (net.Conn, error) { return netDialer.DialContext(ctx, network, addr) @@ -304,7 +333,9 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } }() - if u.Scheme == "https" { + if u.Scheme == "https" && d.NetDialTLSContext == nil { + // If NetDialTLSContext is set, assume that the TLS handshake has already been done + cfg := cloneTLSConfig(d.TLSClientConfig) if cfg.ServerName == "" { cfg.ServerName = hostNoPort @@ -312,11 +343,12 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h tlsConn := tls.Client(netConn, cfg) netConn = tlsConn - var err error - if trace != nil { - err = doHandshakeWithTrace(trace, tlsConn, cfg) - } else { - err = doHandshake(tlsConn, cfg) + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(ctx, tlsConn, cfg) + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) } if err != nil { @@ -348,8 +380,8 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } if resp.StatusCode != 101 || - !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || - !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + !tokenListContainsValue(resp.Header, "Upgrade", "websocket") || + !tokenListContainsValue(resp.Header, "Connection", "upgrade") || resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { // Before closing the network connection on return from this // function, slurp up some of the response to aid application @@ -382,14 +414,9 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h return conn, resp, nil } -func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error { - if err := tlsConn.Handshake(); err != nil { - return err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return err - } +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} } - return nil + return cfg.Clone() } diff --git a/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/gorilla/websocket/client_clone.go deleted file mode 100644 index 4f0d943723a9..000000000000 --- a/vendor/github.com/gorilla/websocket/client_clone.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
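Editor's note, not part of the upstream patch: a minimal sketch of how the NetDialTLSContext hook introduced in the client.go hunk above might be used. The endpoint URL and TLS settings are placeholder assumptions; when the hook is set, Dial treats the TLS handshake as already done and ignores TLSClientConfig.

```go
package main

import (
	"context"
	"crypto/tls"
	"log"
	"net"

	"github.com/gorilla/websocket"
)

func main() {
	d := websocket.Dialer{
		// Perform the TCP dial and TLS handshake ourselves; Dial will not
		// wrap the connection in another TLS layer when this hook is set.
		NetDialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
			td := &tls.Dialer{Config: &tls.Config{MinVersion: tls.VersionTLS12}}
			return td.DialContext(ctx, network, addr)
		},
	}

	conn, _, err := d.DialContext(context.Background(), "wss://example.com/ws", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```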
- -// +build go1.8 - -package websocket - -import "crypto/tls" - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return cfg.Clone() -} diff --git a/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/gorilla/websocket/client_clone_legacy.go deleted file mode 100644 index babb007fb414..000000000000 --- a/vendor/github.com/gorilla/websocket/client_clone_legacy.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.8 - -package websocket - -import "crypto/tls" - -// cloneTLSConfig clones all public fields except the fields -// SessionTicketsDisabled and SessionTicketKey. This avoids copying the -// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a -// config in active use. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } -} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go index ca46d2f793c2..331eebc85009 100644 --- a/vendor/github.com/gorilla/websocket/conn.go +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -13,6 +13,7 @@ import ( "math/rand" "net" "strconv" + "strings" "sync" "time" "unicode/utf8" @@ -401,6 +402,12 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return nil } +func (c *Conn) writeBufs(bufs ...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} + // WriteControl writes a control message with the given deadline. The allowed // message types are CloseMessage, PingMessage and PongMessage. func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { @@ -794,47 +801,69 @@ func (c *Conn) advanceFrame() (int, error) { } // 2. Read and parse first two bytes of frame header. + // To aid debugging, collect and report all errors in the first two bytes + // of the header. 
+ + var errors []string p, err := c.read(2) if err != nil { return noFrame, err } - final := p[0]&finalBit != 0 frameType := int(p[0] & 0xf) + final := p[0]&finalBit != 0 + rsv1 := p[0]&rsv1Bit != 0 + rsv2 := p[0]&rsv2Bit != 0 + rsv3 := p[0]&rsv3Bit != 0 mask := p[1]&maskBit != 0 c.setReadRemaining(int64(p[1] & 0x7f)) c.readDecompress = false - if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { - c.readDecompress = true - p[0] &^= rsv1Bit + if rsv1 { + if c.newDecompressionReader != nil { + c.readDecompress = true + } else { + errors = append(errors, "RSV1 set") + } + } + + if rsv2 { + errors = append(errors, "RSV2 set") } - if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { - return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) + if rsv3 { + errors = append(errors, "RSV3 set") } switch frameType { case CloseMessage, PingMessage, PongMessage: if c.readRemaining > maxControlFramePayloadSize { - return noFrame, c.handleProtocolError("control frame length > 125") + errors = append(errors, "len > 125 for control") } if !final { - return noFrame, c.handleProtocolError("control frame not final") + errors = append(errors, "FIN not set on control") } case TextMessage, BinaryMessage: if !c.readFinal { - return noFrame, c.handleProtocolError("message start before final message frame") + errors = append(errors, "data before FIN") } c.readFinal = final case continuationFrame: if c.readFinal { - return noFrame, c.handleProtocolError("continuation after final message frame") + errors = append(errors, "continuation after FIN") } c.readFinal = final default: - return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + errors = append(errors, "bad opcode "+strconv.Itoa(frameType)) + } + + if mask != c.isServer { + errors = append(errors, "bad MASK") + } + + if len(errors) > 0 { + return noFrame, c.handleProtocolError(strings.Join(errors, ", ")) } // 3. Read and parse frame length as per @@ -872,10 +901,6 @@ func (c *Conn) advanceFrame() (int, error) { // 4. Handle frame masking. - if mask != c.isServer { - return noFrame, c.handleProtocolError("incorrect mask flag") - } - if mask { c.readMaskPos = 0 p, err := c.read(len(c.readMaskKey)) @@ -935,7 +960,7 @@ func (c *Conn) advanceFrame() (int, error) { if len(payload) >= 2 { closeCode = int(binary.BigEndian.Uint16(payload)) if !isValidReceivedCloseCode(closeCode) { - return noFrame, c.handleProtocolError("invalid close code") + return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode)) } closeText = string(payload[2:]) if !utf8.ValidString(closeText) { @@ -952,7 +977,11 @@ func (c *Conn) advanceFrame() (int, error) { } func (c *Conn) handleProtocolError(message string) error { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) + data := FormatCloseMessage(CloseProtocolError, message) + if len(data) > maxControlFramePayloadSize { + data = data[:maxControlFramePayloadSize] + } + c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) return errors.New("websocket: " + message) } diff --git a/vendor/github.com/gorilla/websocket/conn_write.go b/vendor/github.com/gorilla/websocket/conn_write.go deleted file mode 100644 index a509a21f87af..000000000000 --- a/vendor/github.com/gorilla/websocket/conn_write.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.8 - -package websocket - -import "net" - -func (c *Conn) writeBufs(bufs ...[]byte) error { - b := net.Buffers(bufs) - _, err := b.WriteTo(c.conn) - return err -} diff --git a/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/vendor/github.com/gorilla/websocket/conn_write_legacy.go deleted file mode 100644 index 37edaff5a578..000000000000 --- a/vendor/github.com/gorilla/websocket/conn_write_legacy.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.8 - -package websocket - -func (c *Conn) writeBufs(bufs ...[]byte) error { - for _, buf := range bufs { - if len(buf) > 0 { - if _, err := c.conn.Write(buf); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go index 577fce9efd72..d0742bf2a551 100644 --- a/vendor/github.com/gorilla/websocket/mask.go +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -2,6 +2,7 @@ // this source code is governed by a BSD-style license that can be found in the // LICENSE file. +//go:build !appengine // +build !appengine package websocket diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go index 2aac060e52e7..36250ca7c47e 100644 --- a/vendor/github.com/gorilla/websocket/mask_safe.go +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -2,6 +2,7 @@ // this source code is governed by a BSD-style license that can be found in the // LICENSE file. +//go:build appengine // +build appengine package websocket diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go index e87a8c9f0c96..e0f466b72fbb 100644 --- a/vendor/github.com/gorilla/websocket/proxy.go +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -48,7 +48,7 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) } connectReq := &http.Request{ - Method: "CONNECT", + Method: http.MethodConnect, URL: &url.URL{Opaque: addr}, Host: addr, Header: connectHeader, diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go index 887d558918c7..24d53b38abed 100644 --- a/vendor/github.com/gorilla/websocket/server.go +++ b/vendor/github.com/gorilla/websocket/server.go @@ -23,6 +23,8 @@ func (e HandshakeError) Error() string { return e.message } // Upgrader specifies parameters for upgrading an HTTP connection to a // WebSocket connection. +// +// It is safe to call Upgrader's methods concurrently. type Upgrader struct { // HandshakeTimeout specifies the duration for the handshake to complete. HandshakeTimeout time.Duration @@ -115,8 +117,8 @@ func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header // Upgrade upgrades the HTTP server connection to the WebSocket protocol. // // The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie) and the -// application negotiated subprotocol (Sec-WebSocket-Protocol). +// request. Use the responseHeader to specify cookies (Set-Cookie). To specify +// subprotocols supported by the server, set Upgrader.Subprotocols directly. 
// // If the upgrade fails, then Upgrade replies to the client with an HTTP error // response. @@ -131,7 +133,7 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") } - if r.Method != "GET" { + if r.Method != http.MethodGet { return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") } diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go new file mode 100644 index 000000000000..a62b68ccb11e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -0,0 +1,21 @@ +//go:build go1.17 +// +build go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.HandshakeContext(ctx); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go new file mode 100644 index 000000000000..e1b2b44f6e6c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake_116.go @@ -0,0 +1,21 @@ +//go:build !go1.17 +// +build !go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.Handshake(); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/trace.go b/vendor/github.com/gorilla/websocket/trace.go deleted file mode 100644 index 834f122a00db..000000000000 --- a/vendor/github.com/gorilla/websocket/trace.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.8 - -package websocket - -import ( - "crypto/tls" - "net/http/httptrace" -) - -func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { - if trace.TLSHandshakeStart != nil { - trace.TLSHandshakeStart() - } - err := doHandshake(tlsConn, cfg) - if trace.TLSHandshakeDone != nil { - trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) - } - return err -} diff --git a/vendor/github.com/gorilla/websocket/trace_17.go b/vendor/github.com/gorilla/websocket/trace_17.go deleted file mode 100644 index 77d05a0b5748..000000000000 --- a/vendor/github.com/gorilla/websocket/trace_17.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !go1.8 - -package websocket - -import ( - "crypto/tls" - "net/http/httptrace" -) - -func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { - return doHandshake(tlsConn, cfg) -} diff --git a/vendor/github.com/hashicorp/consul/api/LICENSE b/vendor/github.com/hashicorp/consul/api/LICENSE index c33dcc7c928c..c72625e4cc88 100644 --- a/vendor/github.com/hashicorp/consul/api/LICENSE +++ b/vendor/github.com/hashicorp/consul/api/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2013 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. 
Definitions diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go index 0f44494dda8e..bd6d82563278 100644 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -62,6 +62,20 @@ type ACLToken struct { AuthMethodNamespace string `json:",omitempty"` } +type ACLTokenExpanded struct { + ExpandedPolicies []ACLPolicy + ExpandedRoles []ACLRole + + NamespaceDefaultPolicyIDs []string + NamespaceDefaultRoleIDs []string + + AgentACLDefaultPolicy string + AgentACLDownPolicy string + ResolvedByAgent string + + ACLToken +} + type ACLTokenListEntry struct { CreateIndex uint64 ModifyIndex uint64 @@ -788,6 +802,32 @@ func (a *ACL) TokenRead(tokenID string, q *QueryOptions) (*ACLToken, *QueryMeta, return &out, qm, nil } +// TokenReadExpanded retrieves the full token details, as well as the contents of any policies affecting the token. +// The tokenID parameter must be a valid Accessor ID of an existing token. +func (a *ACL) TokenReadExpanded(tokenID string, q *QueryOptions) (*ACLTokenExpanded, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/token/"+tokenID) + r.setQueryOptions(q) + r.params.Set("expanded", "true") + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out ACLTokenExpanded + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + // TokenReadSelf retrieves the full token details of the token currently // assigned to the API Client. In this manner its possible to read a token // by its Secret ID. diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index e3b5d362a073..61e829a64e5c 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "net/http" - "net/url" ) // ServiceKind is the kind of service being registered. @@ -93,6 +92,7 @@ type AgentService struct { ContentHash string `json:",omitempty" bexpr:"-"` Proxy *AgentServiceConnectProxyConfig `json:",omitempty"` Connect *AgentServiceConnect `json:",omitempty"` + PeerName string `json:",omitempty"` // NOTE: If we ever set the ContentHash outside of singular service lookup then we may need // to include the Namespace in the hash. When we do, then we are in for lots of fun with tests. // For now though, ignoring it works well enough. 
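Editor's note, not part of the upstream patch: a short sketch of calling the TokenReadExpanded helper added to acl.go above. The accessor ID is a placeholder, and the call assumes a Consul agent reachable through the default client configuration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Read the token by its accessor ID with the expanded view.
	expanded, _, err := client.ACL().TokenReadExpanded("00000000-0000-0000-0000-000000000000", nil)
	if err != nil {
		log.Fatal(err)
	}

	// ExpandedPolicies lists every policy affecting the token, including
	// those pulled in through roles and namespace defaults.
	for _, p := range expanded.ExpandedPolicies {
		fmt.Println(p.Name)
	}
}
```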
@@ -333,6 +333,7 @@ type AgentServiceCheck struct { Method string `json:",omitempty"` Body string `json:",omitempty"` TCP string `json:",omitempty"` + UDP string `json:",omitempty"` Status string `json:",omitempty"` Notes string `json:",omitempty"` TLSServerName string `json:",omitempty"` @@ -427,6 +428,7 @@ type Upstream struct { DestinationType UpstreamDestType `json:",omitempty"` DestinationPartition string `json:",omitempty"` DestinationNamespace string `json:",omitempty"` + DestinationPeer string `json:",omitempty"` DestinationName string Datacenter string `json:",omitempty"` LocalBindAddress string `json:",omitempty"` @@ -628,7 +630,7 @@ func (a *Agent) AgentHealthServiceByID(serviceID string) (string, *AgentServiceC } func (a *Agent) AgentHealthServiceByIDOpts(serviceID string, q *QueryOptions) (string, *AgentServiceChecksInfo, error) { - path := fmt.Sprintf("/v1/agent/health/service/id/%v", url.PathEscape(serviceID)) + path := fmt.Sprintf("/v1/agent/health/service/id/%v", serviceID) r := a.c.newRequest("GET", path) r.setQueryOptions(q) r.params.Add("format", "json") @@ -669,7 +671,7 @@ func (a *Agent) AgentHealthServiceByName(service string) (string, []AgentService } func (a *Agent) AgentHealthServiceByNameOpts(service string, q *QueryOptions) (string, []AgentServiceChecksInfo, error) { - path := fmt.Sprintf("/v1/agent/health/service/name/%v", url.PathEscape(service)) + path := fmt.Sprintf("/v1/agent/health/service/name/%v", service) r := a.c.newRequest("GET", path) r.setQueryOptions(q) r.params.Add("format", "json") diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index d97f1879f213..87d61a6f148e 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -7,7 +7,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -73,6 +72,14 @@ const ( // other ENV names we use. GRPCAddrEnvName = "CONSUL_GRPC_ADDR" + // GRPCCAFileEnvName defines an environment variable name which sets the + // CA file to use for talking to Consul gRPC over TLS. + GRPCCAFileEnvName = "CONSUL_GRPC_CACERT" + + // GRPCCAPathEnvName defines an environment variable name which sets the + // path to a directory of CA certs to use for talking to Consul gRPC over TLS. + GRPCCAPathEnvName = "CONSUL_GRPC_CAPATH" + // HTTPNamespaceEnvVar defines an environment variable name which sets // the HTTP Namespace to be used by default. This can still be overridden. HTTPNamespaceEnvName = "CONSUL_NAMESPACE" @@ -80,6 +87,12 @@ const ( // HTTPPartitionEnvName defines an environment variable name which sets // the HTTP Partition to be used by default. This can still be overridden. HTTPPartitionEnvName = "CONSUL_PARTITION" + + // QueryBackendStreaming Query backend of type streaming + QueryBackendStreaming = "streaming" + + // QueryBackendBlockingQuery Query backend of type blocking query + QueryBackendBlockingQuery = "blocking-query" ) type StatusError struct { @@ -105,6 +118,9 @@ type QueryOptions struct { // by the Config Datacenter string + // Providing a peer name in the query option + Peer string + // AllowStale allows any Consul server (non-leader) to service // a read. This allows for lower latency and higher throughput AllowStale bool @@ -184,6 +200,12 @@ type QueryOptions struct { // Filter requests filtering data prior to it being returned. The string // is a go-bexpr compatible expression. 
Filter string + + // MergeCentralConfig returns a service definition merged with the + // proxy-defaults/global and service-defaults/:service config entries. + // This can be used to ensure a full service definition is returned in the response + // especially when the service might not be written into the catalog that way. + MergeCentralConfig bool } func (o *QueryOptions) Context() context.Context { @@ -277,6 +299,9 @@ type QueryMeta struct { // response is. CacheAge time.Duration + // QueryBackend represent which backend served the request. + QueryBackend string + // DefaultACLPolicy is used to control the ACL interaction when there is no // defined policy. This can be "allow" which means ACLs are used to // deny-list, or "deny" which means ACLs are allow-lists. @@ -311,6 +336,11 @@ type Config struct { // Scheme is the URI scheme for the Consul server Scheme string + // Prefix for URIs for when consul is behind an API gateway (reverse + // proxy). The API gateway must strip off the PathPrefix before + // passing the request onto consul. + PathPrefix string + // Datacenter to use. If not provided, the default agent datacenter is used. Datacenter string @@ -703,13 +733,25 @@ func NewClient(config *Config) (*Client, error) { return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) } config.Address = parts[1] + + // separate out a reverse proxy prefix, if it is present. + // NOTE: Rewriting this code to use url.Parse() instead of + // strings.SplitN() breaks existing test cases. + switch parts[0] { + case "http", "https": + parts := strings.SplitN(parts[1], "/", 2) + if len(parts) == 2 { + config.Address = parts[0] + config.PathPrefix = "/" + parts[1] + } + } } // If the TokenFile is set, always use that, even if a Token is configured. // This is because when TokenFile is set it is read into the Token field. // We want any derived clients to have to re-read the token file. 
if config.TokenFile != "" { - data, err := ioutil.ReadFile(config.TokenFile) + data, err := os.ReadFile(config.TokenFile) if err != nil { return nil, fmt.Errorf("Error loading token file: %s", err) } @@ -780,6 +822,9 @@ func (r *request) setQueryOptions(q *QueryOptions) { if q.Datacenter != "" { r.params.Set("dc", q.Datacenter) } + if q.Peer != "" { + r.params.Set("peer", q.Peer) + } if q.AllowStale { r.params.Set("stale", "") } @@ -832,6 +877,9 @@ func (r *request) setQueryOptions(q *QueryOptions) { r.header.Set("Cache-Control", strings.Join(cc, ", ")) } } + if q.MergeCentralConfig { + r.params.Set("merge-central-config", "") + } r.ctx = q.ctx } @@ -944,7 +992,7 @@ func (c *Client) newRequest(method, path string) *request { url: &url.URL{ Scheme: c.config.Scheme, Host: c.config.Address, - Path: path, + Path: c.config.PathPrefix + path, }, params: make(map[string][]string), header: c.Headers(), @@ -1024,7 +1072,7 @@ func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (* if err := decodeBody(resp, &out); err != nil { return nil, err } - } else if _, err := ioutil.ReadAll(resp.Body); err != nil { + } else if _, err := io.ReadAll(resp.Body); err != nil { return nil, err } return wm, nil @@ -1096,6 +1144,10 @@ func parseQueryMeta(resp *http.Response, q *QueryMeta) error { q.CacheAge = time.Duration(age) * time.Second } + switch v := header.Get("X-Consul-Query-Backend"); v { + case QueryBackendStreaming, QueryBackendBlockingQuery: + q.QueryBackend = v + } return nil } @@ -1138,7 +1190,7 @@ func requireHttpCodes(resp *http.Response, httpCodes ...int) error { // is necessary to ensure that the http.Client's underlying RoundTripper is able // to re-use the TCP connection. See godoc on net/http.Client.Do. func closeResponseBody(resp *http.Response) error { - _, _ = io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(io.Discard, resp.Body) return resp.Body.Close() } diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go index 80ae325eac55..84a2bdbc6531 100644 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -20,6 +20,7 @@ type Node struct { CreateIndex uint64 ModifyIndex uint64 Partition string `json:",omitempty"` + PeerName string `json:",omitempty"` } type ServiceAddress struct { diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go index 91c407bb533a..b1827fb595c1 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -177,6 +177,19 @@ type UpstreamConfig struct { // MeshGatewayConfig controls how Mesh Gateways are configured and used MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway" ` + + // BalanceOutboundConnections indicates that the proxy should attempt to evenly distribute + // outbound connections across worker threads. Only used by envoy proxies. + BalanceOutboundConnections string `json:",omitempty" alias:"balance_outbound_connections"` +} + +// DestinationConfig represents a virtual service, i.e. 
one that is external to Consul +type DestinationConfig struct { + // Addresses of the endpoint; hostname or IP + Addresses []string `json:",omitempty"` + + // Port allowed within this endpoint + Port int `json:",omitempty"` } type PassiveHealthCheck struct { @@ -187,6 +200,11 @@ type PassiveHealthCheck struct { // MaxFailures is the count of consecutive failures that results in a host // being removed from the pool. MaxFailures uint32 `alias:"max_failures"` + + // EnforcingConsecutive5xx is the % chance that a host will be actually ejected + // when an outlier status is detected through consecutive 5xx. + // This setting can be used to disable ejection or to ramp it up slowly. + EnforcingConsecutive5xx *uint32 `json:",omitempty" alias:"enforcing_consecutive_5xx"` } // UpstreamLimits describes the limits that are associated with a specific @@ -209,21 +227,25 @@ type UpstreamLimits struct { } type ServiceConfigEntry struct { - Kind string - Name string - Partition string `json:",omitempty"` - Namespace string `json:",omitempty"` - Protocol string `json:",omitempty"` - Mode ProxyMode `json:",omitempty"` - TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` - MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` - Expose ExposeConfig `json:",omitempty"` - ExternalSNI string `json:",omitempty" alias:"external_sni"` - UpstreamConfig *UpstreamConfiguration `json:",omitempty" alias:"upstream_config"` - - Meta map[string]string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex uint64 + Kind string + Name string + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + Protocol string `json:",omitempty"` + Mode ProxyMode `json:",omitempty"` + TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` + MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` + Expose ExposeConfig `json:",omitempty"` + ExternalSNI string `json:",omitempty" alias:"external_sni"` + UpstreamConfig *UpstreamConfiguration `json:",omitempty" alias:"upstream_config"` + Destination *DestinationConfig `json:",omitempty"` + MaxInboundConnections int `json:",omitempty" alias:"max_inbound_connections"` + LocalConnectTimeoutMs int `json:",omitempty" alias:"local_connect_timeout_ms"` + LocalRequestTimeoutMs int `json:",omitempty" alias:"local_request_timeout_ms"` + BalanceInboundConnections string `json:",omitempty" alias:"balance_inbound_connections"` + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 } func (s *ServiceConfigEntry) GetKind() string { return s.Kind } @@ -244,9 +266,10 @@ type ProxyConfigEntry struct { Config map[string]interface{} `json:",omitempty"` MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` Expose ExposeConfig `json:",omitempty"` - Meta map[string]string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex uint64 + + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 } func (p *ProxyConfigEntry) GetKind() string { return p.Kind } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go index dfb2bcc10104..ff92125dcdc9 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go @@ -69,9 +69,11 @@ type ServiceRouteDestination struct { Partition string `json:",omitempty"` PrefixRewrite string 
`json:",omitempty" alias:"prefix_rewrite"` RequestTimeout time.Duration `json:",omitempty" alias:"request_timeout"` + IdleTimeout time.Duration `json:",omitempty" alias:"idle_timeout"` NumRetries uint32 `json:",omitempty" alias:"num_retries"` RetryOnConnectFailure bool `json:",omitempty" alias:"retry_on_connect_failure"` RetryOnStatusCodes []uint32 `json:",omitempty" alias:"retry_on_status_codes"` + RetryOn []string `json:",omitempty" alias:"retry_on"` RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"` ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"` } @@ -80,14 +82,19 @@ func (e *ServiceRouteDestination) MarshalJSON() ([]byte, error) { type Alias ServiceRouteDestination exported := &struct { RequestTimeout string `json:",omitempty"` + IdleTimeout string `json:",omitempty"` *Alias }{ RequestTimeout: e.RequestTimeout.String(), + IdleTimeout: e.IdleTimeout.String(), Alias: (*Alias)(e), } if e.RequestTimeout == 0 { exported.RequestTimeout = "" } + if e.IdleTimeout == 0 { + exported.IdleTimeout = "" + } return json.Marshal(exported) } @@ -96,6 +103,7 @@ func (e *ServiceRouteDestination) UnmarshalJSON(data []byte) error { type Alias ServiceRouteDestination aux := &struct { RequestTimeout string + IdleTimeout string *Alias }{ Alias: (*Alias)(e), @@ -109,6 +117,11 @@ func (e *ServiceRouteDestination) UnmarshalJSON(data []byte) error { return err } } + if aux.IdleTimeout != "" { + if e.IdleTimeout, err = time.ParseDuration(aux.IdleTimeout); err != nil { + return err + } + } return nil } @@ -219,14 +232,25 @@ type ServiceResolverRedirect struct { Namespace string `json:",omitempty"` Partition string `json:",omitempty"` Datacenter string `json:",omitempty"` + Peer string `json:",omitempty"` } type ServiceResolverFailover struct { Service string `json:",omitempty"` ServiceSubset string `json:",omitempty" alias:"service_subset"` // Referencing other partitions is not supported. - Namespace string `json:",omitempty"` - Datacenters []string `json:",omitempty"` + Namespace string `json:",omitempty"` + Datacenters []string `json:",omitempty"` + Targets []ServiceResolverFailoverTarget `json:",omitempty"` +} + +type ServiceResolverFailoverTarget struct { + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + Datacenter string `json:",omitempty"` + Peer string `json:",omitempty"` } // LoadBalancer determines the load balancing policy and configuration for services diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_exports.go b/vendor/github.com/hashicorp/consul/api/config_entry_exports.go index ae9cb2ff62c4..52b0491f7cbc 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_exports.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_exports.go @@ -16,7 +16,7 @@ type ExportedServicesConfigEntry struct { // Services is a list of services to be exported and the list of partitions // to expose them to. - Services []ExportedService + Services []ExportedService `json:",omitempty"` Meta map[string]string `json:",omitempty"` @@ -40,19 +40,23 @@ type ExportedService struct { Namespace string `json:",omitempty"` // Consumers is a list of downstream consumers of the service to be exported. - Consumers []ServiceConsumer + Consumers []ServiceConsumer `json:",omitempty"` } // ServiceConsumer represents a downstream consumer of the service to be exported. 
+// At most one of Partition or Peer must be specified. type ServiceConsumer struct { // Partition is the admin partition to export the service to. - Partition string + Partition string `json:",omitempty"` + + // Peer is the name of the peer to export the service to. + Peer string `json:",omitempty" alias:"peer_name"` } func (e *ExportedServicesConfigEntry) GetKind() string { return ExportedServices } func (e *ExportedServicesConfigEntry) GetName() string { return e.Name } func (e *ExportedServicesConfigEntry) GetPartition() string { return e.Name } -func (e *ExportedServicesConfigEntry) GetNamespace() string { return IntentionDefaultNamespace } +func (e *ExportedServicesConfigEntry) GetNamespace() string { return "" } func (e *ExportedServicesConfigEntry) GetMeta() map[string]string { return e.Meta } func (e *ExportedServicesConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } func (e *ExportedServicesConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go index 0792ad824eeb..63b323e6ba86 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go @@ -27,6 +27,9 @@ type IngressGatewayConfigEntry struct { Meta map[string]string `json:",omitempty"` + // Defaults is default configuration for all upstream services + Defaults *IngressServiceConfig `json:",omitempty"` + // CreateIndex is the Raft index this entry was created at. This is a // read-only field. CreateIndex uint64 @@ -37,12 +40,25 @@ type IngressGatewayConfigEntry struct { ModifyIndex uint64 } +type IngressServiceConfig struct { + MaxConnections *uint32 + MaxPendingRequests *uint32 + MaxConcurrentRequests *uint32 +} + type GatewayTLSConfig struct { // Indicates that TLS should be enabled for this gateway service. Enabled bool // SDS allows configuring TLS certificate from an SDS service. SDS *GatewayTLSSDSConfig `json:",omitempty"` + + TLSMinVersion string `json:",omitempty" alias:"tls_min_version"` + TLSMaxVersion string `json:",omitempty" alias:"tls_max_version"` + + // Define a subset of cipher suites to restrict + // Only applicable to connections negotiated via TLS 1.2 or earlier + CipherSuites []string `json:",omitempty" alias:"cipher_suites"` } type GatewayServiceTLSConfig struct { @@ -117,6 +133,10 @@ type IngressService struct { // Allow HTTP header manipulation to be configured. 
RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"` ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"` + + MaxConnections *uint32 `json:",omitempty" alias:"max_connections"` + MaxPendingRequests *uint32 `json:",omitempty" alias:"max_pending_requests"` + MaxConcurrentRequests *uint32 `json:",omitempty" alias:"max_concurrent_requests"` } func (i *IngressGatewayConfigEntry) GetKind() string { return i.Kind } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go b/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go index 3741e0a5907b..0bff5e8e3976 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go @@ -18,6 +18,7 @@ type ServiceIntentionsConfigEntry struct { type SourceIntention struct { Name string + Peer string `json:",omitempty"` Partition string `json:",omitempty"` Namespace string `json:",omitempty"` Action IntentionAction `json:",omitempty"` diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go b/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go index f58fabc17de7..98b882247d6a 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_mesh.go @@ -1,6 +1,8 @@ package api -import "encoding/json" +import ( + "encoding/json" +) // MeshConfigEntry manages the global configuration for all service mesh // proxies. @@ -17,6 +19,12 @@ type MeshConfigEntry struct { // in transparent mode. TransparentProxy TransparentProxyMeshConfig `alias:"transparent_proxy"` + TLS *MeshTLSConfig `json:",omitempty"` + + HTTP *MeshHTTPConfig `json:",omitempty"` + + Peering *PeeringMeshConfig `json:",omitempty"` + Meta map[string]string `json:",omitempty"` // CreateIndex is the Raft index this entry was created at. This is a @@ -33,6 +41,25 @@ type TransparentProxyMeshConfig struct { MeshDestinationsOnly bool `alias:"mesh_destinations_only"` } +type MeshTLSConfig struct { + Incoming *MeshDirectionalTLSConfig `json:",omitempty"` + Outgoing *MeshDirectionalTLSConfig `json:",omitempty"` +} + +type MeshDirectionalTLSConfig struct { + TLSMinVersion string `json:",omitempty" alias:"tls_min_version"` + TLSMaxVersion string `json:",omitempty" alias:"tls_max_version"` + CipherSuites []string `json:",omitempty" alias:"cipher_suites"` +} + +type MeshHTTPConfig struct { + SanitizeXForwardedClientCert bool `alias:"sanitize_x_forwarded_client_cert"` +} + +type PeeringMeshConfig struct { + PeerThroughMeshGateways bool `json:",omitempty" alias:"peer_through_mesh_gateways"` +} + func (e *MeshConfigEntry) GetKind() string { return MeshConfig } func (e *MeshConfigEntry) GetName() string { return MeshConfigMesh } func (e *MeshConfigEntry) GetPartition() string { return e.Partition } diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go index 734d4ab0fde7..0c2500fd0618 100644 --- a/vendor/github.com/hashicorp/consul/api/connect_intention.go +++ b/vendor/github.com/hashicorp/consul/api/connect_intention.go @@ -35,6 +35,11 @@ type Intention struct { SourcePartition string `json:",omitempty"` DestinationPartition string `json:",omitempty"` + // SourcePeer cannot be a wildcard "*" and is not compatible with legacy + // intentions. Cannot be used with SourcePartition, as both represent the + // same level of tenancy (partition is local to cluster, peer is remote). 
+ SourcePeer string `json:",omitempty"` + // SourceType is the type of the value for the source. SourceType IntentionSourceType diff --git a/vendor/github.com/hashicorp/consul/api/debug.go b/vendor/github.com/hashicorp/consul/api/debug.go index 0dfbfd846be0..b7e80b88d01f 100644 --- a/vendor/github.com/hashicorp/consul/api/debug.go +++ b/vendor/github.com/hashicorp/consul/api/debug.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "strconv" ) @@ -36,7 +35,7 @@ func (d *Debug) Heap() ([]byte, error) { // We return a raw response because we're just passing through a response // from the pprof handlers - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("error decoding body: %s", err) } @@ -62,7 +61,7 @@ func (d *Debug) Profile(seconds int) ([]byte, error) { // We return a raw response because we're just passing through a response // from the pprof handlers - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("error decoding body: %s", err) } @@ -107,7 +106,7 @@ func (d *Debug) Trace(seconds int) ([]byte, error) { // We return a raw response because we're just passing through a response // from the pprof handlers - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("error decoding body: %s", err) } @@ -130,7 +129,7 @@ func (d *Debug) Goroutine() ([]byte, error) { // We return a raw response because we're just passing through a response // from the pprof handlers - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("error decoding body: %s", err) } diff --git a/vendor/github.com/hashicorp/consul/api/discovery_chain.go b/vendor/github.com/hashicorp/consul/api/discovery_chain.go index 29bda8591231..4217603cf9f3 100644 --- a/vendor/github.com/hashicorp/consul/api/discovery_chain.go +++ b/vendor/github.com/hashicorp/consul/api/discovery_chain.go @@ -109,9 +109,17 @@ type CompiledDiscoveryChain struct { // non-customized versions. CustomizationHash string + // Default indicates if this discovery chain is based on no + // service-resolver, service-splitter, or service-router config entries. + Default bool + // Protocol is the overall protocol shared by everything in the chain. Protocol string + // ServiceMeta is the metadata from the underlying service-defaults config + // entry for the service named ServiceName. + ServiceMeta map[string]string + // StartNode is the first key into the Nodes map that should be followed // when walking the discovery chain. 
StartNode string @@ -226,9 +234,46 @@ type DiscoveryTarget struct { Namespace string Datacenter string - MeshGateway MeshGatewayConfig - Subset ServiceResolverSubset - External bool - SNI string - Name string + MeshGateway MeshGatewayConfig + Subset ServiceResolverSubset + ConnectTimeout time.Duration + External bool + SNI string + Name string +} + +func (t *DiscoveryTarget) MarshalJSON() ([]byte, error) { + type Alias DiscoveryTarget + exported := &struct { + ConnectTimeout string `json:",omitempty"` + *Alias + }{ + ConnectTimeout: t.ConnectTimeout.String(), + Alias: (*Alias)(t), + } + if t.ConnectTimeout == 0 { + exported.ConnectTimeout = "" + } + + return json.Marshal(exported) +} + +func (t *DiscoveryTarget) UnmarshalJSON(data []byte) error { + type Alias DiscoveryTarget + aux := &struct { + ConnectTimeout string + *Alias + }{ + Alias: (*Alias)(t), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + var err error + if aux.ConnectTimeout != "" { + if t.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil { + return err + } + } + return nil } diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go index 2785c4c91c0e..a89b4b7273f5 100644 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -45,6 +45,8 @@ type HealthCheck struct { Type string Namespace string `json:",omitempty"` Partition string `json:",omitempty"` + ExposedPort int + PeerName string `json:",omitempty"` Definition HealthCheckDefinition @@ -62,6 +64,10 @@ type HealthCheckDefinition struct { TLSServerName string TLSSkipVerify bool TCP string + UDP string + GRPC string + OSService string + GRPCUseTLS bool IntervalDuration time.Duration `json:"-"` TimeoutDuration time.Duration `json:"-"` DeregisterCriticalServiceAfterDuration time.Duration `json:"-"` @@ -173,8 +179,7 @@ type HealthChecks []*HealthCheck // attached, this function determines the best representative of the status as // as single string using the following heuristic: // -// maintenance > critical > warning > passing -// +// maintenance > critical > warning > passing func (c HealthChecks) AggregatedStatus() string { var passing, warning, critical, maintenance bool for _, check := range c { diff --git a/vendor/github.com/hashicorp/consul/api/namespace.go b/vendor/github.com/hashicorp/consul/api/namespace.go index 213cd8cf4411..65cc6f3f3ba9 100644 --- a/vendor/github.com/hashicorp/consul/api/namespace.go +++ b/vendor/github.com/hashicorp/consul/api/namespace.go @@ -1,6 +1,7 @@ package api import ( + "encoding/json" "fmt" "time" ) @@ -38,6 +39,25 @@ type Namespace struct { ModifyIndex uint64 `json:"ModifyIndex,omitempty"` } +func (n *Namespace) UnmarshalJSON(data []byte) error { + type Alias Namespace + aux := struct { + DeletedAtSnake *time.Time `json:"deleted_at"` + *Alias + }{ + Alias: (*Alias)(n), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + if n.DeletedAt == nil && aux.DeletedAtSnake != nil { + n.DeletedAt = aux.DeletedAtSnake + } + + return nil +} + // NamespaceACLConfig is the Namespace specific ACL configuration container type NamespaceACLConfig struct { // PolicyDefaults is the list of policies that should be used for the parent authorizer @@ -48,12 +68,38 @@ type NamespaceACLConfig struct { RoleDefaults []ACLLink `json:"RoleDefaults" alias:"role_defaults"` } +func (n *NamespaceACLConfig) UnmarshalJSON(data []byte) error { + type Alias NamespaceACLConfig + aux := 
struct { + PolicyDefaultsSnake []ACLLink `json:"policy_defaults"` + RoleDefaultsSnake []ACLLink `json:"role_defaults"` + *Alias + }{ + Alias: (*Alias)(n), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + if n.PolicyDefaults == nil { + for _, pd := range aux.PolicyDefaultsSnake { + n.PolicyDefaults = append(n.PolicyDefaults, pd) + } + } + if n.RoleDefaults == nil { + for _, pd := range aux.RoleDefaultsSnake { + n.RoleDefaults = append(n.RoleDefaults, pd) + } + } + return nil +} + // Namespaces can be used to manage Namespaces in Consul Enterprise.. type Namespaces struct { c *Client } -// Operator returns a handle to the operator endpoints. +// Namespaces returns a handle to the namespaces endpoints. func (c *Client) Namespaces() *Namespaces { return &Namespaces{c} } diff --git a/vendor/github.com/hashicorp/consul/api/peering.go b/vendor/github.com/hashicorp/consul/api/peering.go new file mode 100644 index 000000000000..8748b63fcc11 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/peering.go @@ -0,0 +1,282 @@ +package api + +import ( + "context" + "fmt" + "time" +) + +// PeeringState enumerates all the states a peering can be in +type PeeringState string + +const ( + // PeeringStateUndefined represents an unset value for PeeringState during + // writes. + PeeringStateUndefined PeeringState = "UNDEFINED" + + // PeeringStatePending means the peering was created by generating a peering token. + // Peerings stay in a pending state until the peer uses the token to dial + // the local cluster. + PeeringStatePending PeeringState = "PENDING" + + // PeeringStateEstablishing means the peering is being established from a peering token. + // This is the initial state for dialing peers. + PeeringStateEstablishing PeeringState = "ESTABLISHING" + + // PeeringStateActive means that the peering connection is active and + // healthy. + PeeringStateActive PeeringState = "ACTIVE" + + // PeeringStateFailing means the peering connection has been interrupted + // but has not yet been terminated. + PeeringStateFailing PeeringState = "FAILING" + + // PeeringStateDeleting means a peering was marked for deletion and is in the process + // of being deleted. + PeeringStateDeleting PeeringState = "DELETING" + + // PeeringStateTerminated means the peering relationship has been removed. + PeeringStateTerminated PeeringState = "TERMINATED" +) + +type PeeringRemoteInfo struct { + // Partition is the remote peer's partition. + Partition string + // Datacenter is the remote peer's datacenter. + Datacenter string +} + +type Peering struct { + // ID is a datacenter-scoped UUID for the peering. + ID string + // Name is the local alias for the peering relationship. + Name string + // Partition is the local partition connecting to the peer. + Partition string `json:",omitempty"` + // DeletedAt is the time when the Peering was marked for deletion + DeletedAt *time.Time `json:",omitempty" alias:"deleted_at"` + // Meta is a mapping of some string value to any other string value + Meta map[string]string `json:",omitempty"` + // State is one of the valid PeeringState values to represent the status of + // peering relationship. + State PeeringState + // PeerID is the ID that our peer assigned to this peering. This ID is to + // be used when dialing the peer, so that it can know who dialed it. + PeerID string `json:",omitempty"` + // PeerCAPems contains all the CA certificates for the remote peer. 
+ PeerCAPems []string `json:",omitempty"` + // PeerServerName is the name of the remote server as it relates to TLS. + PeerServerName string `json:",omitempty"` + // PeerServerAddresses contains all the connection addresses for the remote peer. + PeerServerAddresses []string `json:",omitempty"` + // StreamStatus contains information computed on read based on the state of the stream. + StreamStatus PeeringStreamStatus + // CreateIndex is the Raft index at which the Peering was created. + CreateIndex uint64 + // ModifyIndex is the latest Raft index at which the Peering was modified. + ModifyIndex uint64 + // Remote contains metadata for the remote peer. + Remote PeeringRemoteInfo +} + +type PeeringStreamStatus struct { + // ImportedServices is the list of services imported from this peering. + ImportedServices []string + // ExportedServices is the list of services exported to this peering. + ExportedServices []string + // LastHeartbeat represents when the last heartbeat message was received. + LastHeartbeat time.Time + // LastReceive represents when any message was last received, regardless of success or error. + LastReceive time.Time + // LastSend represents when any message was last sent, regardless of success or error. + LastSend time.Time +} + +type PeeringReadResponse struct { + Peering *Peering +} + +type PeeringGenerateTokenRequest struct { + // PeerName is the name of the remote peer. + PeerName string + // Partition to be peered. + Partition string `json:",omitempty"` + // Meta is a mapping of some string value to any other string value + Meta map[string]string `json:",omitempty"` + // ServerExternalAddresses is a list of addresses to put into the generated token. This could be used to specify + // load balancer(s) or external IPs to reach the servers from the dialing side, and will override any server + // addresses obtained from the "consul" service. + ServerExternalAddresses []string `json:",omitempty"` +} + +type PeeringGenerateTokenResponse struct { + // PeeringToken is an opaque string provided to the remote peer for it to complete + // the peering initialization handshake. + PeeringToken string +} + +type PeeringEstablishRequest struct { + // Name of the remote peer. + PeerName string + // The peering token returned from the peer's GenerateToken endpoint. + PeeringToken string `json:",omitempty"` + // Partition to be peered. + Partition string `json:",omitempty"` + // Meta is a mapping of some string value to any other string value + Meta map[string]string `json:",omitempty"` +} + +type PeeringEstablishResponse struct { +} + +type PeeringListRequest struct { + // future proofing in case we extend List functionality +} + +type Peerings struct { + c *Client +} + +// Peerings returns a handle to the operator endpoints. 
+func (c *Client) Peerings() *Peerings { + return &Peerings{c: c} +} + +func (p *Peerings) Read(ctx context.Context, name string, q *QueryOptions) (*Peering, *QueryMeta, error) { + if name == "" { + return nil, nil, fmt.Errorf("peering name cannot be empty") + } + + req := p.c.newRequest("GET", fmt.Sprintf("/v1/peering/%s", name)) + req.setQueryOptions(q) + req.ctx = ctx + + rtt, resp, err := p.c.doRequest(req) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + found, resp, err := requireNotFoundOrOK(resp) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out Peering + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +func (p *Peerings) Delete(ctx context.Context, name string, q *WriteOptions) (*WriteMeta, error) { + if name == "" { + return nil, fmt.Errorf("peering name cannot be empty") + } + + req := p.c.newRequest("DELETE", fmt.Sprintf("/v1/peering/%s", name)) + req.setWriteOptions(q) + req.ctx = ctx + + rtt, resp, err := p.c.doRequest(req) + if err != nil { + return nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// TODO(peering): verify this is the ultimate signature we want +func (p *Peerings) GenerateToken(ctx context.Context, g PeeringGenerateTokenRequest, wq *WriteOptions) (*PeeringGenerateTokenResponse, *WriteMeta, error) { + if g.PeerName == "" { + return nil, nil, fmt.Errorf("peer name cannot be empty") + } + + req := p.c.newRequest("POST", fmt.Sprint("/v1/peering/token")) + req.setWriteOptions(wq) + req.ctx = ctx + req.obj = g + + rtt, resp, err := p.c.doRequest(req) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + + var out PeeringGenerateTokenResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// TODO(peering): verify this is the ultimate signature we want +func (p *Peerings) Establish(ctx context.Context, i PeeringEstablishRequest, wq *WriteOptions) (*PeeringEstablishResponse, *WriteMeta, error) { + req := p.c.newRequest("POST", fmt.Sprint("/v1/peering/establish")) + req.setWriteOptions(wq) + req.ctx = ctx + req.obj = i + + rtt, resp, err := p.c.doRequest(req) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + wm := &WriteMeta{RequestTime: rtt} + + var out PeeringEstablishResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +func (p *Peerings) List(ctx context.Context, q *QueryOptions) ([]*Peering, *QueryMeta, error) { + req := p.c.newRequest("GET", "/v1/peerings") + req.setQueryOptions(q) + req.ctx = ctx + + rtt, resp, err := p.c.doRequest(req) + if err != nil { + return nil, nil, err + } + defer closeResponseBody(resp) + if err := requireOK(resp); err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Peering + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go 
b/vendor/github.com/hashicorp/consul/api/prepared_query.go index b3dd7be6f2f7..753aeb0ea4a3 100644 --- a/vendor/github.com/hashicorp/consul/api/prepared_query.go +++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go @@ -1,8 +1,8 @@ package api -// QueryDatacenterOptions sets options about how we fail over if there are no +// QueryFailoverOptions sets options about how we fail over if there are no // healthy nodes in the local datacenter. -type QueryDatacenterOptions struct { +type QueryFailoverOptions struct { // NearestN is set to the number of remote datacenters to try, based on // network coordinates. NearestN int @@ -11,6 +11,21 @@ type QueryDatacenterOptions struct { // never try a datacenter multiple times, so those are subtracted from // this list before proceeding. Datacenters []string + + // Targets is a fixed list of datacenters and peers to try. This field cannot + // be populated with NearestN or Datacenters. + Targets []QueryFailoverTarget +} + +// Deprecated: use QueryFailoverOptions instead. +type QueryDatacenterOptions = QueryFailoverOptions + +type QueryFailoverTarget struct { + // Peer specifies a peer to try during failover. + Peer string + + // Datacenter specifies a datacenter to try during failover. + Datacenter string } // QueryDNSOptions controls settings when query results are served over DNS. @@ -35,7 +50,7 @@ type ServiceQuery struct { // Failover controls what we do if there are no healthy nodes in the // local datacenter. - Failover QueryDatacenterOptions + Failover QueryFailoverOptions // IgnoreCheckIDs is an optional list of health check IDs to ignore when // considering which nodes are healthy. It is useful as an emergency measure diff --git a/vendor/github.com/hashicorp/consul/api/txn.go b/vendor/github.com/hashicorp/consul/api/txn.go index 59fd1c0d979a..4aa06d9f5488 100644 --- a/vendor/github.com/hashicorp/consul/api/txn.go +++ b/vendor/github.com/hashicorp/consul/api/txn.go @@ -67,6 +67,7 @@ const ( KVLock KVOp = "lock" KVUnlock KVOp = "unlock" KVGet KVOp = "get" + KVGetOrEmpty KVOp = "get-or-empty" KVGetTree KVOp = "get-tree" KVCheckSession KVOp = "check-session" KVCheckIndex KVOp = "check-index" diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go index a733bef18c05..44e368e56922 100644 --- a/vendor/github.com/hashicorp/errwrap/errwrap.go +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -44,6 +44,8 @@ func Wrap(outer, inner error) error { // // format is the format of the error message. The string '{{err}}' will // be replaced with the original error message. +// +// Deprecated: Use fmt.Errorf() func Wrapf(format string, err error) error { outerMsg := "" if err != nil { @@ -148,6 +150,9 @@ func Walk(err error, cb WalkFunc) { for _, err := range e.WrappedErrors() { Walk(err, cb) } + case interface{ Unwrap() error }: + cb(err) + Walk(e.Unwrap(), cb) default: cb(err) } @@ -167,3 +172,7 @@ func (w *wrappedError) Error() string { func (w *wrappedError) WrappedErrors() []error { return []error{w.Outer, w.Inner} } + +func (w *wrappedError) Unwrap() error { + return w.Inner +} diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md index 5d56f4b59c3f..21a17c5af39b 100644 --- a/vendor/github.com/hashicorp/go-hclog/README.md +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -17,11 +17,8 @@ JSON output mode for production. 
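Editor's note, not part of the upstream patch: a small sketch of the effect of the Unwrap case added to errwrap.Walk in the errwrap.go hunk above; errors wrapped with fmt.Errorf's %w verb are now visited as well. The error messages are illustrative only.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/errwrap"
)

func main() {
	root := errors.New("connection refused")
	wrapped := fmt.Errorf("dialing upstream: %w", root)

	// Walk now follows the standard Unwrap() chain, so the callback sees the
	// wrapping error first and then the underlying root cause.
	errwrap.Walk(wrapped, func(err error) {
		fmt.Println(err)
	})
}
```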
## Stability Note -While this library is fully open source and HashiCorp will be maintaining it -(since we are and will be making extensive use of it), the API and output -format is subject to minor changes as we fully bake and vet it in our projects. -This notice will be removed once it's fully integrated into our major projects -and no further changes are anticipated. +This library has reached 1.0 stability. Its API can be considered solidified +and promised through future versions. ## Installation and Docs @@ -102,7 +99,7 @@ into all the callers. ### Using `hclog.Fmt()` ```go -var int totalBandwidth = 200 +totalBandwidth := 200 appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth)) ``` @@ -146,3 +143,6 @@ log.Printf("[DEBUG] %d", 42) Notice that if `appLogger` is initialized with the `INFO` log level _and_ you specify `InferLevels: true`, you will not see any output here. You must change `appLogger` to `DEBUG` to see output. See the docs for more information. + +If the log lines start with a timestamp you can use the +`InferLevelsWithTimestamp` option to try and ignore them. diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go index 44aa9bf2c620..99cc176a416b 100644 --- a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go +++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package hclog @@ -7,7 +8,7 @@ import ( ) // setColorization will mutate the values of this logger -// to approperately configure colorization options. It provides +// to appropriately configure colorization options. It provides // a wrapper to the output stream on Windows systems. func (l *intLogger) setColorization(opts *LoggerOptions) { switch opts.Color { @@ -21,6 +22,7 @@ func (l *intLogger) setColorization(opts *LoggerOptions) { isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd()) isTerm := isUnixTerm || isCygwinTerm if !isTerm { + l.headerColor = ColorOff l.writer.color = ColorOff } } diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go index 23486b6d74f8..26f8cef8d121 100644 --- a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go +++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package hclog @@ -10,7 +11,7 @@ import ( ) // setColorization will mutate the values of this logger -// to approperately configure colorization options. It provides +// to appropriately configure colorization options. It provides // a wrapper to the output stream on Windows systems. 
func (l *intLogger) setColorization(opts *LoggerOptions) { switch opts.Color { @@ -26,8 +27,12 @@ func (l *intLogger) setColorization(opts *LoggerOptions) { isTerm := isUnixTerm || isCygwinTerm if !isTerm { l.writer.color = ColorOff + l.headerColor = ColorOff return } - l.writer.w = colorable.NewColorable(fi) + + if l.headerColor == ColorOff { + l.writer.w = colorable.NewColorable(fi) + } } } diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go index 22ebc57d877f..48ff1f3a4e90 100644 --- a/vendor/github.com/hashicorp/go-hclog/global.go +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -2,6 +2,7 @@ package hclog import ( "sync" + "time" ) var ( @@ -14,17 +15,18 @@ var ( DefaultOptions = &LoggerOptions{ Level: DefaultLevel, Output: DefaultOutput, + TimeFn: time.Now, } ) // Default returns a globally held logger. This can be a good starting -// place, and then you can use .With() and .Name() to create sub-loggers +// place, and then you can use .With() and .Named() to create sub-loggers // to be used in more specific contexts. // The value of the Default logger can be set via SetDefault() or by // changing the options in DefaultOptions. // // This method is goroutine safe, returning a global from memory, but -// cause should be used if SetDefault() is called it random times +// care should be used if SetDefault() is called it random times // in the program as that may result in race conditions and an unexpected // Logger being returned. func Default() Logger { diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go index 631baf2f0cc1..ff42f1bfc1dd 100644 --- a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -180,9 +180,10 @@ func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) i func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { return &stdlogAdapter{ - log: i, - inferLevels: opts.InferLevels, - forceLevel: opts.ForceLevel, + log: i, + inferLevels: opts.InferLevels, + inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp, + forceLevel: opts.ForceLevel, } } diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index d491ae8f9789..e2ebdb01a8fe 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -60,6 +60,7 @@ type intLogger struct { callerOffset int name string timeFormat string + timeFn TimeFunction disableTime bool // This is an interface so that it's shared by any derived loggers, since @@ -68,6 +69,8 @@ type intLogger struct { writer *writer level *int32 + headerColor ColorOption + implied []interface{} exclude func(level Level, msg string, args ...interface{}) bool @@ -112,16 +115,28 @@ func newLogger(opts *LoggerOptions) *intLogger { mutex = new(sync.Mutex) } + var primaryColor, headerColor ColorOption + + if opts.ColorHeaderOnly { + primaryColor = ColorOff + headerColor = opts.Color + } else { + primaryColor = opts.Color + headerColor = ColorOff + } + l := &intLogger{ json: opts.JSONFormat, name: opts.Name, timeFormat: TimeFormat, + timeFn: time.Now, disableTime: opts.DisableTime, mutex: mutex, - writer: newWriter(output, opts.Color), + writer: newWriter(output, primaryColor), level: new(int32), exclude: opts.Exclude, independentLevels: opts.IndependentLevels, + headerColor: headerColor, } if 
opts.IncludeLocation { l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset @@ -130,6 +145,9 @@ func newLogger(opts *LoggerOptions) *intLogger { if l.json { l.timeFormat = TimeFormatJSON } + if opts.TimeFn != nil { + l.timeFn = opts.TimeFn + } if opts.TimeFormat != "" { l.timeFormat = opts.TimeFormat } @@ -142,7 +160,7 @@ func newLogger(opts *LoggerOptions) *intLogger { } // offsetIntLogger is the stack frame offset in the call stack for the caller to -// one of the Warn,Info,Log,etc methods. +// one of the Warn, Info, Log, etc methods. const offsetIntLogger = 3 // Log a message and a set of key/value pairs if the given level is at @@ -152,7 +170,7 @@ func (l *intLogger) log(name string, level Level, msg string, args ...interface{ return } - t := time.Now() + t := l.timeFn() l.mutex.Lock() defer l.mutex.Unlock() @@ -199,6 +217,24 @@ func trimCallerPath(path string) string { return path[idx+1:] } +// isNormal indicates if the rune is one allowed to exist as an unquoted +// string value. This is a subset of ASCII, `-` through `~`. +func isNormal(r rune) bool { + return 0x2D <= r && r <= 0x7E // - through ~ +} + +// needsQuoting returns false if all the runes in string are normal, according +// to isNormal +func needsQuoting(str string) bool { + for _, r := range str { + if !isNormal(r) { + return true + } + } + + return false +} + // Non-JSON logging format function func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { @@ -209,7 +245,12 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, s, ok := _levelToBracket[level] if ok { - l.writer.WriteString(s) + if l.headerColor != ColorOff { + color := _levelToColor[level] + color.Fprint(l.writer, s) + } else { + l.writer.WriteString(s) + } } else { l.writer.WriteString("[?????]") } @@ -228,11 +269,14 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, if name != "" { l.writer.WriteString(name) - l.writer.WriteString(": ") + if msg != "" { + l.writer.WriteString(": ") + l.writer.WriteString(msg) + } + } else if msg != "" { + l.writer.WriteString(msg) } - l.writer.WriteString(msg) - args = append(l.implied, args...) 
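The isNormal/needsQuoting pair introduced above widens quoting in the plain formatter from just spaces and tabs to anything outside the ASCII range '-'..'~', with strconv.Quote doing the escaping (see the following hunk). A standalone sketch of the same rule, useful for reasoning about which values end up quoted:

```go
package main

import (
	"fmt"
	"strconv"
)

// Mirrors hclog's isNormal/needsQuoting: only '-' (0x2D) through '~' (0x7E)
// may appear unquoted.
func needsQuoting(s string) bool {
	for _, r := range s {
		if r < 0x2D || r > 0x7E {
			return true
		}
	}
	return false
}

func main() {
	for _, v := range []string{"simple-value", "has space", "tab\tsep", "emoji🙂", "k=v"} {
		if needsQuoting(v) {
			fmt.Println(strconv.Quote(v)) // quoted and escaped
		} else {
			fmt.Println(v) // written as-is
		}
	}
}
```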
var stacktrace CapturedStacktrace @@ -263,6 +307,7 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, val = st if st == "" { val = `""` + raw = true } case int: val = strconv.FormatInt(int64(st), 10) @@ -323,13 +368,11 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, l.writer.WriteString("=\n") writeIndent(l.writer, val, " | ") l.writer.WriteString(" ") - } else if !raw && strings.ContainsAny(val, " \t") { + } else if !raw && needsQuoting(val) { l.writer.WriteByte(' ') l.writer.WriteString(key) l.writer.WriteByte('=') - l.writer.WriteByte('"') - l.writer.WriteString(val) - l.writer.WriteByte('"') + l.writer.WriteString(strconv.Quote(val)) } else { l.writer.WriteByte(' ') l.writer.WriteString(key) @@ -687,9 +730,10 @@ func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { newLog.callerOffset = l.callerOffset + 4 } return &stdlogAdapter{ - log: &newLog, - inferLevels: opts.InferLevels, - forceLevel: opts.ForceLevel, + log: &newLog, + inferLevels: opts.InferLevels, + inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp, + forceLevel: opts.ForceLevel, } } diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 6a4665ba9fea..f37187401a23 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -5,10 +5,11 @@ import ( "log" "os" "strings" + "time" ) var ( - //DefaultOutput is used as the default log output. + // DefaultOutput is used as the default log output. DefaultOutput io.Writer = os.Stderr // DefaultLevel is used as the default log level. @@ -27,7 +28,7 @@ const ( // of actions in code, such as function enters/exits, etc. Trace Level = 1 - // Debug information for programmer lowlevel analysis. + // Debug information for programmer low-level analysis. Debug Level = 2 // Info information about steady state operations. @@ -43,13 +44,13 @@ const ( Off Level = 6 ) -// Format is a simple convience type for when formatting is required. When +// Format is a simple convenience type for when formatting is required. When // processing a value of this type, the logger automatically treats the first // argument as a Printf formatting string and passes the rest as the values // to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}). type Format []interface{} -// Fmt returns a Format type. This is a convience function for creating a Format +// Fmt returns a Format type. This is a convenience function for creating a Format // type. func Fmt(str string, args ...interface{}) Format { return append(Format{str}, args...) @@ -133,7 +134,7 @@ func (l Level) String() string { } } -// Logger describes the interface that must be implemeted by all loggers. +// Logger describes the interface that must be implemented by all loggers. type Logger interface { // Args are alternating key, val pairs // keys must be strings @@ -212,6 +213,15 @@ type StandardLoggerOptions struct { // [DEBUG] and strip it off before reapplying it. InferLevels bool + // Indicate that some minimal parsing should be done on strings to try + // and detect their level and re-emit them while ignoring possible + // timestamp values in the beginning of the string. + // This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO], + // [DEBUG] and strip it off before reapplying it. + // The timestamp detection may result in false positives and incomplete + // string outputs. 
+ InferLevelsWithTimestamp bool + // ForceLevel is used to force all output from the standard logger to be at // the specified level. Similar to InferLevels, this will strip any level // prefix contained in the logged string before applying the forced level. @@ -219,12 +229,14 @@ type StandardLoggerOptions struct { ForceLevel Level } +type TimeFunction = func() time.Time + // LoggerOptions can be used to configure a new logger. type LoggerOptions struct { // Name of the subsystem to prefix logs with Name string - // The threshold for the logger. Anything less severe is supressed + // The threshold for the logger. Anything less severe is suppressed Level Level // Where to write the logs to. Defaults to os.Stderr if nil @@ -248,14 +260,20 @@ type LoggerOptions struct { // The time format to use instead of the default TimeFormat string + // A function which is called to get the time object that is formatted using `TimeFormat` + TimeFn TimeFunction + // Control whether or not to display the time at all. This is required // because setting TimeFormat to empty assumes the default format. DisableTime bool - // Color the output. On Windows, colored logs are only avaiable for io.Writers that + // Color the output. On Windows, colored logs are only available for io.Writers that // are concretely instances of *os.File. Color ColorOption + // Only color the header, not the body. This can help with readability of long messages. + ColorHeaderOnly bool + // A function which is called with the log information and if it returns true the value // should not be logged. // This is useful when interacting with a system that you wish to suppress the log @@ -264,8 +282,8 @@ type LoggerOptions struct { // IndependentLevels causes subloggers to be created with an independent // copy of this logger's level. This means that using SetLevel on this - // logger will not effect any subloggers, and SetLevel on any subloggers - // will not effect the parent or sibling loggers. + // logger will not affect any subloggers, and SetLevel on any subloggers + // will not affect the parent or sibling loggers. IndependentLevels bool } diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go index 271d546d5c92..641f20ccbcc8 100644 --- a/vendor/github.com/hashicorp/go-hclog/stdlog.go +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -3,16 +3,22 @@ package hclog import ( "bytes" "log" + "regexp" "strings" ) +// Regex to ignore characters commonly found in timestamp formats from the +// beginning of inputs. +var logTimestampRegexp = regexp.MustCompile(`^[\d\s\:\/\.\+-TZ]*`) + // Provides a io.Writer to shim the data out of *log.Logger // and back into our Logger. This is basically the only way to // build upon *log.Logger. 
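A short sketch of the two new LoggerOptions knobs added above, TimeFn and ColorHeaderOnly. The fixed clock is only for illustration, and AutoColor is hclog's existing color option assumed to be appropriate here.

```go
package main

import (
	"time"

	"github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.New(&hclog.LoggerOptions{
		Name:  "app",
		Level: hclog.Debug,
		// Color only the "[INFO ]"-style header, leaving the message body plain.
		Color:           hclog.AutoColor,
		ColorHeaderOnly: true,
		// Override the clock, e.g. to make log output deterministic in tests.
		TimeFn: func() time.Time {
			return time.Date(2023, 2, 13, 22, 37, 41, 0, time.UTC)
		},
	})

	logger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", 200))
}
```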
type stdlogAdapter struct { - log Logger - inferLevels bool - forceLevel Level + log Logger + inferLevels bool + inferLevelsWithTimestamp bool + forceLevel Level } // Take the data, infer the levels if configured, and send it through @@ -28,6 +34,10 @@ func (s *stdlogAdapter) Write(data []byte) (int, error) { // Log at the forced level s.dispatch(str, s.forceLevel) } else if s.inferLevels { + if s.inferLevelsWithTimestamp { + str = s.trimTimestamp(str) + } + level, str := s.pickLevel(str) s.dispatch(str, level) } else { @@ -74,6 +84,11 @@ func (s *stdlogAdapter) pickLevel(str string) (Level, string) { } } +func (s *stdlogAdapter) trimTimestamp(str string) string { + idx := logTimestampRegexp.FindStringIndex(str) + return str[idx[1]:] +} + type logWriter struct { l *log.Logger } diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml deleted file mode 100644 index 24b80388f72a..000000000000 --- a/vendor/github.com/hashicorp/go-multierror/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: - - 1.x - -branches: - only: - - master - -script: env GO111MODULE=on make test testrace diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md index e92fa614cd61..71dd308ed811 100644 --- a/vendor/github.com/hashicorp/go-multierror/README.md +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -1,10 +1,11 @@ # go-multierror -[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] -[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] +[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror) -[travis]: https://travis-ci.org/hashicorp/go-multierror -[godocs]: https://godoc.org/github.com/hashicorp/go-multierror +[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror +[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror `go-multierror` is a package for Go that provides a mechanism for representing a list of `error` values as a single `error`. @@ -24,7 +25,25 @@ for introspecting on error values. Install using `go get github.com/hashicorp/go-multierror`. Full documentation is available at -http://godoc.org/github.com/hashicorp/go-multierror +https://pkg.go.dev/github.com/hashicorp/go-multierror + +### Requires go version 1.13 or newer + +`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced +[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which +this library takes advantage of. + +If you need to use an earlier version of go, you can use the +[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) +tag, which doesn't rely on features in go 1.13. 
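The inferLevelsWithTimestamp path above strips anything matching the leading-timestamp regexp before level detection. A small sketch of routing a standard-library logger through it, assuming hclog's existing StandardLogger helper:

```go
package main

import (
	"github.com/hashicorp/go-hclog"
)

func main() {
	appLogger := hclog.New(&hclog.LoggerOptions{Name: "app", Level: hclog.Debug})

	// Standard-library *log.Logger whose output is re-levelled by hclog.
	stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
		InferLevels:              true,
		InferLevelsWithTimestamp: true, // tolerate a leading "2023/02/13 22:37:41 "-style prefix
	})

	// The date/time prefix and the [DEBUG] tag are stripped before the line
	// is re-emitted at Debug level.
	stdLogger.Printf("2023/02/13 22:37:41 [DEBUG] reconnecting to upstream")
}
```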
+ +If you see compile errors that look like the below, it's likely that +you're on an older version of go: + +``` +/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As +/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is +``` ## Usage diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go index 775b6e753e77..3e2589bfde0c 100644 --- a/vendor/github.com/hashicorp/go-multierror/append.go +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -6,6 +6,8 @@ package multierror // If err is not a multierror.Error, then it will be turned into // one. If any of the errs are multierr.Error, they will be flattened // one level into err. +// Any nil errors within errs will be ignored. If err is nil, a new +// *Error will be returned. func Append(err error, errs ...error) *Error { switch err := err.(type) { case *Error: diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go index d05dd9269873..f54574326461 100644 --- a/vendor/github.com/hashicorp/go-multierror/multierror.go +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -40,14 +40,17 @@ func (e *Error) GoString() string { return fmt.Sprintf("*%#v", *e) } -// WrappedErrors returns the list of errors that this Error is wrapping. -// It is an implementation of the errwrap.Wrapper interface so that -// multierror.Error can be used with that library. +// WrappedErrors returns the list of errors that this Error is wrapping. It is +// an implementation of the errwrap.Wrapper interface so that multierror.Error +// can be used with that library. // -// This method is not safe to be called concurrently and is no different -// than accessing the Errors field directly. It is implemented only to -// satisfy the errwrap.Wrapper interface. +// This method is not safe to be called concurrently. Unlike accessing the +// Errors field directly, this function also checks if the multierror is nil to +// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface. func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } return e.Errors } diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE index e87a115e462e..a320b309c44a 100644 --- a/vendor/github.com/hashicorp/go-uuid/LICENSE +++ b/vendor/github.com/hashicorp/go-uuid/LICENSE @@ -1,3 +1,5 @@ +Copyright © 2015-2022 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/memberlist/.golangci.yml b/vendor/github.com/hashicorp/memberlist/.golangci.yml new file mode 100644 index 000000000000..2eb081ea7e11 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/.golangci.yml @@ -0,0 +1,80 @@ +linters: + disable-all: true + enable: + # (TODO) uncomment after fixing the lint issues + # - gofmt + - govet + # - unconvert + # - staticcheck + # - ineffassign + # - unparam + - forbidigo + +issues: + # Disable the default exclude list so that all excludes are explicitly + # defined in this file. 
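Since go-multierror now requires Go 1.13 and its *Error participates in error wrapping, the accumulated errors can be inspected with the standard errors package. A minimal sketch:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/hashicorp/go-multierror"
)

func main() {
	var result error
	// Append tolerates a nil initial error and ignores nil errors in errs.
	result = multierror.Append(result, fmt.Errorf("step 1: %w", os.ErrNotExist))
	result = multierror.Append(result, errors.New("step 2 failed"))

	// errors.Is/As unwrap through the multierror thanks to Go 1.13 wrapping support.
	if errors.Is(result, os.ErrNotExist) {
		fmt.Println("at least one step hit a missing file")
	}
	fmt.Println(result)
}
```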
+ exclude-use-default: false + + exclude-rules: + # Temp Ignore SA9004: only the first constant in this group has an explicit type + # https://staticcheck.io/docs/checks#SA9004 + - linters: [staticcheck] + text: 'SA9004:' + + - linters: [staticcheck] + text: 'SA1019: Package github.com/golang/protobuf/jsonpb is deprecated' + + - linters: [staticcheck] + text: 'SA1019: Package github.com/golang/protobuf/proto is deprecated' + + - linters: [staticcheck] + text: 'SA1019: ptypes.MarshalAny is deprecated' + + - linters: [staticcheck] + text: 'SA1019: ptypes.UnmarshalAny is deprecated' + + - linters: [staticcheck] + text: 'SA1019: package github.com/golang/protobuf/ptypes is deprecated' + + # An argument that always receives the same value is often not a problem. + - linters: [unparam] + text: 'always receives' + + # Often functions will implement an interface that returns an error without + # needing to return an error. Sometimes the error return value is unnecessary + # but a linter can not tell the difference. + - linters: [unparam] + text: 'result \d+ \(error\) is always nil' + + # Allow unused parameters to start with an underscore. Arguments with a name + # of '_' are already ignored. + # Ignoring longer names that start with underscore allow for better + # self-documentation than a single underscore by itself. Underscore arguments + # should generally only be used when a function is implementing an interface. + - linters: [unparam] + text: '`_[^`]*` is unused' + + # Temp ignore some common unused parameters so that unparam can be added + # incrementally. + - linters: [unparam] + text: '`(t|resp|req|entMeta)` is unused' + +linters-settings: + gofmt: + simplify: true + forbidigo: + # Forbid the following identifiers (list of regexp). + forbid: + - '\brequire\.New\b(# Use package-level functions with explicit TestingT)?' + - '\bassert\.New\b(# Use package-level functions with explicit TestingT)?' + - '\bmetrics\.IncrCounter\b(# Use labeled metrics)?' + - '\bmetrics\.AddSample\b(# Use labeled metrics)?' + - '\bmetrics\.MeasureSince\b(# Use labeled metrics)?' + - '\bmetrics\.SetGauge\b(# Use labeled metrics)?' + # Exclude godoc examples from forbidigo checks. + # Default: true + exclude_godoc_examples: false + +run: + timeout: 10m + concurrency: 4 diff --git a/vendor/github.com/hashicorp/memberlist/awareness.go b/vendor/github.com/hashicorp/memberlist/awareness.go index ea95c7538800..53b1bb313617 100644 --- a/vendor/github.com/hashicorp/memberlist/awareness.go +++ b/vendor/github.com/hashicorp/memberlist/awareness.go @@ -21,13 +21,17 @@ type awareness struct { // score is the current awareness score. Lower values are healthier and // zero is the minimum value. score int + + // metricLabels is the slice of labels to put on all emitted metrics + metricLabels []metrics.Label } // newAwareness returns a new awareness object. 
-func newAwareness(max int) *awareness { +func newAwareness(max int, metricLabels []metrics.Label) *awareness { return &awareness{ - max: max, - score: 0, + max: max, + score: 0, + metricLabels: metricLabels, } } @@ -47,7 +51,7 @@ func (a *awareness) ApplyDelta(delta int) { a.Unlock() if initial != final { - metrics.SetGauge([]string{"memberlist", "health", "score"}, float32(final)) + metrics.SetGaugeWithLabels([]string{"memberlist", "health", "score"}, float32(final), a.metricLabels) } } diff --git a/vendor/github.com/hashicorp/memberlist/config.go b/vendor/github.com/hashicorp/memberlist/config.go index d7fe4c37b059..29f62ad6f543 100644 --- a/vendor/github.com/hashicorp/memberlist/config.go +++ b/vendor/github.com/hashicorp/memberlist/config.go @@ -9,6 +9,7 @@ import ( "strings" "time" + "github.com/armon/go-metrics" multierror "github.com/hashicorp/go-multierror" ) @@ -244,10 +245,18 @@ type Config struct { // RequireNodeNames controls if the name of a node is required when sending // a message to that node. RequireNodeNames bool + // CIDRsAllowed If nil, allow any connection (default), otherwise specify all networks // allowed to connect (you must specify IPv6/IPv4 separately) // Using [] will block all connections. CIDRsAllowed []net.IPNet + + // MetricLabels is a map of optional labels to apply to all metrics emitted. + MetricLabels []metrics.Label + + // QueueCheckInterval is the interval at which we check the message + // queue to apply the warning and max depth. + QueueCheckInterval time.Duration } // ParseCIDRs return a possible empty list of all Network that have been parsed @@ -317,6 +326,8 @@ func DefaultLANConfig() *Config { HandoffQueueDepth: 1024, UDPBufferSize: 1400, CIDRsAllowed: nil, // same as allow all + + QueueCheckInterval: 30 * time.Second, } } diff --git a/vendor/github.com/hashicorp/memberlist/memberlist.go b/vendor/github.com/hashicorp/memberlist/memberlist.go index cab6db69fd4c..db06dcbaef72 100644 --- a/vendor/github.com/hashicorp/memberlist/memberlist.go +++ b/vendor/github.com/hashicorp/memberlist/memberlist.go @@ -27,6 +27,7 @@ import ( "sync/atomic" "time" + "github.com/armon/go-metrics" multierror "github.com/hashicorp/go-multierror" sockaddr "github.com/hashicorp/go-sockaddr" "github.com/miekg/dns" @@ -77,6 +78,9 @@ type Memberlist struct { broadcasts *TransmitLimitedQueue logger *log.Logger + + // metricLabels is the slice of labels to put on all emitted metrics + metricLabels []metrics.Label } // BuildVsnArray creates the array of Vsn @@ -135,9 +139,10 @@ func newMemberlist(conf *Config) (*Memberlist, error) { transport := conf.Transport if transport == nil { nc := &NetTransportConfig{ - BindAddrs: []string{conf.BindAddr}, - BindPort: conf.BindPort, - Logger: logger, + BindAddrs: []string{conf.BindAddr}, + BindPort: conf.BindPort, + Logger: logger, + MetricLabels: conf.MetricLabels, } // See comment below for details about the retry in here. 
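The memberlist Config gains MetricLabels and QueueCheckInterval above. A sketch of setting them when creating a member, assuming the usual memberlist.Create entry point and the go-metrics Label type; the node and label values are placeholders.

```go
package main

import (
	"log"
	"time"

	metrics "github.com/armon/go-metrics"
	"github.com/hashicorp/memberlist"
)

func main() {
	conf := memberlist.DefaultLANConfig()
	conf.Name = "node-1"

	// Attached to every metric this instance emits
	// (health score, tcp/udp counters, probe timings, queue depth, ...).
	conf.MetricLabels = []metrics.Label{
		{Name: "cluster", Value: "primary"},
	}

	// How often the broadcast queue depth is sampled
	// (DefaultLANConfig sets 30s).
	conf.QueueCheckInterval = 15 * time.Second

	list, err := memberlist.Create(conf)
	if err != nil {
		log.Fatalf("failed to create memberlist: %v", err)
	}
	defer list.Shutdown()
}
```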
@@ -208,10 +213,11 @@ func newMemberlist(conf *Config) (*Memberlist, error) { lowPriorityMsgQueue: list.New(), nodeMap: make(map[string]*nodeState), nodeTimers: make(map[string]*suspicion), - awareness: newAwareness(conf.AwarenessMaxMultiplier), + awareness: newAwareness(conf.AwarenessMaxMultiplier, conf.MetricLabels), ackHandlers: make(map[uint32]*ackHandler), broadcasts: &TransmitLimitedQueue{RetransmitMult: conf.RetransmitMult}, logger: logger, + metricLabels: conf.MetricLabels, } m.broadcasts.NumNodes = func() int { return m.estNumNodes() @@ -227,6 +233,7 @@ func newMemberlist(conf *Config) (*Memberlist, error) { go m.streamListen() go m.packetListen() go m.packetHandler() + go m.checkBroadcastQueueDepth() return m, nil } @@ -770,3 +777,17 @@ func (m *Memberlist) changeNode(addr string, f func(*nodeState)) { n := m.nodeMap[addr] f(n) } + +// checkBroadcastQueueDepth periodically checks the size of the broadcast queue +// to see if it is too large +func (m *Memberlist) checkBroadcastQueueDepth() { + for { + select { + case <-time.After(m.config.QueueCheckInterval): + numq := m.broadcasts.NumQueued() + metrics.AddSampleWithLabels([]string{"memberlist", "queue", "broadcasts"}, float32(numq), m.metricLabels) + case <-m.shutdownCh: + return + } + } +} diff --git a/vendor/github.com/hashicorp/memberlist/net.go b/vendor/github.com/hashicorp/memberlist/net.go index 1d015afb2989..c89d38c0ff7c 100644 --- a/vendor/github.com/hashicorp/memberlist/net.go +++ b/vendor/github.com/hashicorp/memberlist/net.go @@ -7,6 +7,7 @@ import ( "fmt" "hash/crc32" "io" + "math" "net" "sync/atomic" "time" @@ -233,7 +234,7 @@ func (m *Memberlist) handleConn(conn net.Conn) { defer conn.Close() m.logger.Printf("[DEBUG] memberlist: Stream connection %s", LogConn(conn)) - metrics.IncrCounter([]string{"memberlist", "tcp", "accept"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "tcp", "accept"}, 1, m.metricLabels) conn.SetDeadline(time.Now().Add(m.config.TCPTimeout)) @@ -868,7 +869,7 @@ func (m *Memberlist) rawSendMsgPacket(a Address, node *Node, msg []byte) error { msg = buf.Bytes() } - metrics.IncrCounter([]string{"memberlist", "udp", "sent"}, float32(len(msg))) + metrics.IncrCounterWithLabels([]string{"memberlist", "udp", "sent"}, float32(len(msg)), m.metricLabels) _, err := m.transport.WriteToAddress(msg, a) return err } @@ -897,7 +898,7 @@ func (m *Memberlist) rawSendMsgStream(conn net.Conn, sendBuf []byte, streamLabel } // Write out the entire send buffer - metrics.IncrCounter([]string{"memberlist", "tcp", "sent"}, float32(len(sendBuf))) + metrics.IncrCounterWithLabels([]string{"memberlist", "tcp", "sent"}, float32(len(sendBuf)), m.metricLabels) if n, err := conn.Write(sendBuf); err != nil { return err @@ -952,7 +953,7 @@ func (m *Memberlist) sendAndReceiveState(a Address, join bool) ([]pushNodeState, } defer conn.Close() m.logger.Printf("[DEBUG] memberlist: Initiating push/pull sync with: %s %s", a.Name, conn.RemoteAddr()) - metrics.IncrCounter([]string{"memberlist", "tcp", "connect"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "tcp", "connect"}, 1, m.metricLabels) // Send our state if err := m.sendLocalState(conn, join, m.config.Label); err != nil { @@ -1006,6 +1007,22 @@ func (m *Memberlist) sendLocalState(conn net.Conn, join bool, streamLabel string } m.nodeLock.RUnlock() + nodeStateCounts := make(map[string]int) + nodeStateCounts[StateAlive.metricsString()] = 0 + nodeStateCounts[StateLeft.metricsString()] = 0 + nodeStateCounts[StateDead.metricsString()] = 0 + 
nodeStateCounts[StateSuspect.metricsString()] = 0 + + for _, n := range localNodes { + nodeStateCounts[n.State.metricsString()]++ + } + + for nodeState, cnt := range nodeStateCounts { + metrics.SetGaugeWithLabels([]string{"memberlist", "node", "instances"}, + float32(cnt), + append(m.metricLabels, metrics.Label{Name: "node_state", Value: nodeState})) + } + // Get the delegate state var userData []byte if m.config.Delegate != nil { @@ -1041,6 +1058,9 @@ func (m *Memberlist) sendLocalState(conn net.Conn, join bool, streamLabel string } } + moreBytes := binary.BigEndian.Uint32(bufConn.Bytes()[1:5]) + metrics.SetGaugeWithLabels([]string{"memberlist", "size", "local"}, float32(moreBytes), m.metricLabels) + // Get the send buffer return m.rawSendMsgStream(conn, bufConn.Bytes(), streamLabel) } @@ -1087,8 +1107,16 @@ func (m *Memberlist) decryptRemoteState(bufConn io.Reader, streamLabel string) ( // Ensure we aren't asked to download too much. This is to guard against // an attack vector where a huge amount of state is sent moreBytes := binary.BigEndian.Uint32(cipherText.Bytes()[1:5]) + metrics.AddSampleWithLabels([]string{"memberlist", "size", "remote"}, float32(moreBytes), m.metricLabels) + if moreBytes > maxPushStateBytes { return nil, fmt.Errorf("Remote node state is larger than limit (%d)", moreBytes) + + } + + //Start reporting the size before you cross the limit + if moreBytes > uint32(math.Floor(.6*maxPushStateBytes)) { + m.logger.Printf("[WARN] memberlist: Remote node state size is (%d) limit is (%d)", moreBytes, maxPushStateBytes) } // Read in the rest of the payload diff --git a/vendor/github.com/hashicorp/memberlist/net_transport.go b/vendor/github.com/hashicorp/memberlist/net_transport.go index 05830117297c..a379c855c2e7 100644 --- a/vendor/github.com/hashicorp/memberlist/net_transport.go +++ b/vendor/github.com/hashicorp/memberlist/net_transport.go @@ -35,6 +35,10 @@ type NetTransportConfig struct { // Logger is a logger for operator messages. Logger *log.Logger + + // MetricLabels is a map of optional labels to apply to all metrics + // emitted by this transport. + MetricLabels []metrics.Label } // NetTransport is a Transport implementation that uses connectionless UDP for @@ -48,6 +52,8 @@ type NetTransport struct { tcpListeners []*net.TCPListener udpListeners []*net.UDPConn shutdown int32 + + metricLabels []metrics.Label } var _ NodeAwareTransport = (*NetTransport)(nil) @@ -64,10 +70,11 @@ func NewNetTransport(config *NetTransportConfig) (*NetTransport, error) { // Build out the new transport. var ok bool t := NetTransport{ - config: config, - packetCh: make(chan *Packet), - streamCh: make(chan net.Conn), - logger: config.Logger, + config: config, + packetCh: make(chan *Packet), + streamCh: make(chan net.Conn), + logger: config.Logger, + metricLabels: config.MetricLabels, } // Clean up listeners if there's an error. @@ -341,7 +348,7 @@ func (t *NetTransport) udpListen(udpLn *net.UDPConn) { } // Ingest the packet. 
- metrics.IncrCounter([]string{"memberlist", "udp", "received"}, float32(n)) + metrics.IncrCounterWithLabels([]string{"memberlist", "udp", "received"}, float32(n), t.metricLabels) t.packetCh <- &Packet{ Buf: buf[:n], From: addr, diff --git a/vendor/github.com/hashicorp/memberlist/state.go b/vendor/github.com/hashicorp/memberlist/state.go index a6351b4b0e84..4e3590995374 100644 --- a/vendor/github.com/hashicorp/memberlist/state.go +++ b/vendor/github.com/hashicorp/memberlist/state.go @@ -15,6 +15,21 @@ import ( type NodeStateType int +func (t NodeStateType) metricsString() string { + switch t { + case StateAlive: + return "alive" + case StateDead: + return "dead" + case StateSuspect: + return "suspect" + case StateLeft: + return "left" + default: + return fmt.Sprintf("unhandled-value-%d", t) + } +} + const ( StateAlive NodeStateType = iota StateSuspect @@ -286,14 +301,14 @@ func failedRemote(err error) bool { // probeNode handles a single round of failure checking on a node. func (m *Memberlist) probeNode(node *nodeState) { - defer metrics.MeasureSince([]string{"memberlist", "probeNode"}, time.Now()) + defer metrics.MeasureSinceWithLabels([]string{"memberlist", "probeNode"}, time.Now(), m.metricLabels) // We use our health awareness to scale the overall probe interval, so we // slow down if we detect problems. The ticker that calls us can handle // us running over the base interval, and will skip missed ticks. probeInterval := m.awareness.ScaleTimeout(m.config.ProbeInterval) if probeInterval > m.config.ProbeInterval { - metrics.IncrCounter([]string{"memberlist", "degraded", "probe"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "degraded", "probe"}, 1, m.metricLabels) } // Prepare a ping message and setup an ack handler. @@ -329,7 +344,7 @@ func (m *Memberlist) probeNode(node *nodeState) { }() if node.State == StateAlive { if err := m.encodeAndSendMsg(node.FullAddress(), pingMsg, &ping); err != nil { - m.logger.Printf("[ERR] memberlist: Failed to send ping: %s", err) + m.logger.Printf("[ERR] memberlist: Failed to send UDP ping: %s", err) if failedRemote(err) { goto HANDLE_REMOTE_FAILURE } else { @@ -339,7 +354,7 @@ func (m *Memberlist) probeNode(node *nodeState) { } else { var msgs [][]byte if buf, err := encode(pingMsg, &ping); err != nil { - m.logger.Printf("[ERR] memberlist: Failed to encode ping message: %s", err) + m.logger.Printf("[ERR] memberlist: Failed to encode UDP ping message: %s", err) return } else { msgs = append(msgs, buf.Bytes()) @@ -354,7 +369,7 @@ func (m *Memberlist) probeNode(node *nodeState) { compound := makeCompoundMessage(msgs) if err := m.rawSendMsgPacket(node.FullAddress(), &node.Node, compound.Bytes()); err != nil { - m.logger.Printf("[ERR] memberlist: Failed to send compound ping and suspect message to %s: %s", addr, err) + m.logger.Printf("[ERR] memberlist: Failed to send UDP compound ping and suspect message to %s: %s", addr, err) if failedRemote(err) { goto HANDLE_REMOTE_FAILURE } else { @@ -393,7 +408,7 @@ func (m *Memberlist) probeNode(node *nodeState) { // probe interval it will give the TCP fallback more time, which // is more active in dealing with lost packets, and it gives more // time to wait for indirect acks/nacks. 
- m.logger.Printf("[DEBUG] memberlist: Failed ping: %s (timeout reached)", node.Name) + m.logger.Printf("[DEBUG] memberlist: Failed UDP ping: %s (timeout reached)", node.Name) } HANDLE_REMOTE_FAILURE: @@ -426,7 +441,7 @@ HANDLE_REMOTE_FAILURE: } if err := m.encodeAndSendMsg(peer.FullAddress(), indirectPingMsg, &ind); err != nil { - m.logger.Printf("[ERR] memberlist: Failed to send indirect ping: %s", err) + m.logger.Printf("[ERR] memberlist: Failed to send indirect UDP ping: %s", err) } } @@ -449,7 +464,11 @@ HANDLE_REMOTE_FAILURE: defer close(fallbackCh) didContact, err := m.sendPingAndWaitForAck(node.FullAddress(), ping, deadline) if err != nil { - m.logger.Printf("[ERR] memberlist: Failed fallback ping: %s", err) + var to string + if ne, ok := err.(net.Error); ok && ne.Timeout() { + to = fmt.Sprintf("timeout %s: ", probeInterval) + } + m.logger.Printf("[ERR] memberlist: Failed fallback TCP ping: %s%s", to, err) } else { fallbackCh <- didContact } @@ -474,7 +493,7 @@ HANDLE_REMOTE_FAILURE: // any additional time here. for didContact := range fallbackCh { if didContact { - m.logger.Printf("[WARN] memberlist: Was able to connect to %s but other probes failed, network may be misconfigured", node.Name) + m.logger.Printf("[WARN] memberlist: Was able to connect to %s over TCP but UDP probes failed, network may be misconfigured", node.Name) return } } @@ -569,7 +588,7 @@ func (m *Memberlist) resetNodes() { // gossip is invoked every GossipInterval period to broadcast our gossip // messages to a few random nodes. func (m *Memberlist) gossip() { - defer metrics.MeasureSince([]string{"memberlist", "gossip"}, time.Now()) + defer metrics.MeasureSinceWithLabels([]string{"memberlist", "gossip"}, time.Now(), m.metricLabels) // Get some random live, suspect, or recently dead nodes m.nodeLock.RLock() @@ -611,10 +630,12 @@ func (m *Memberlist) gossip() { m.logger.Printf("[ERR] memberlist: Failed to send gossip to %s: %s", addr, err) } } else { - // Otherwise create and send a compound message - compound := makeCompoundMessage(msgs) - if err := m.rawSendMsgPacket(node.FullAddress(), &node, compound.Bytes()); err != nil { - m.logger.Printf("[ERR] memberlist: Failed to send gossip to %s: %s", addr, err) + // Otherwise create and send one or more compound messages + compounds := makeCompoundMessages(msgs) + for _, compound := range compounds { + if err := m.rawSendMsgPacket(node.FullAddress(), &node, compound.Bytes()); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to send gossip to %s: %s", addr, err) + } } } } @@ -647,7 +668,7 @@ func (m *Memberlist) pushPull() { // pushPullNode does a complete state exchange with a specific node. 
func (m *Memberlist) pushPullNode(a Address, join bool) error { - defer metrics.MeasureSince([]string{"memberlist", "pushPullNode"}, time.Now()) + defer metrics.MeasureSinceWithLabels([]string{"memberlist", "pushPullNode"}, time.Now(), m.metricLabels) // Attempt to send and receive with the node remote, userState, err := m.sendAndReceiveState(a, join) @@ -1119,7 +1140,7 @@ func (m *Memberlist) aliveNode(a *alive, notify chan struct{}, bootstrap bool) { } // Update metrics - metrics.IncrCounter([]string{"memberlist", "msg", "alive"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "msg", "alive"}, 1, m.metricLabels) // Notify the delegate of any relevant updates if m.config.Events != nil { @@ -1177,7 +1198,7 @@ func (m *Memberlist) suspectNode(s *suspect) { } // Update metrics - metrics.IncrCounter([]string{"memberlist", "msg", "suspect"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "msg", "suspect"}, 1, m.metricLabels) // Update the state state.Incarnation = s.Incarnation @@ -1215,7 +1236,7 @@ func (m *Memberlist) suspectNode(s *suspect) { if timeout { if k > 0 && numConfirmations < k { - metrics.IncrCounter([]string{"memberlist", "degraded", "timeout"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "degraded", "timeout"}, 1, m.metricLabels) } m.logger.Printf("[INFO] memberlist: Marking %s as failed, suspect timeout reached (%d peer confirmations)", @@ -1268,7 +1289,7 @@ func (m *Memberlist) deadNode(d *dead) { } // Update metrics - metrics.IncrCounter([]string{"memberlist", "msg", "dead"}, 1) + metrics.IncrCounterWithLabels([]string{"memberlist", "msg", "dead"}, 1, m.metricLabels) // Update the state state.Incarnation = d.Incarnation diff --git a/vendor/github.com/hashicorp/memberlist/util.go b/vendor/github.com/hashicorp/memberlist/util.go index 16a7d36d0b65..24112210dfea 100644 --- a/vendor/github.com/hashicorp/memberlist/util.go +++ b/vendor/github.com/hashicorp/memberlist/util.go @@ -96,13 +96,13 @@ func pushPullScale(interval time.Duration, n int) time.Duration { return time.Duration(multiplier) * interval } -// moveDeadNodes moves nodes that are dead and beyond the gossip to the dead interval +// moveDeadNodes moves dead and left nodes that that have not changed during the gossipToTheDeadTime interval // to the end of the slice and returns the index of the first moved node. func moveDeadNodes(nodes []*nodeState, gossipToTheDeadTime time.Duration) int { numDead := 0 n := len(nodes) for i := 0; i < n-numDead; i++ { - if nodes[i].State != StateDead { + if !nodes[i].DeadOrLeft() { continue } @@ -152,6 +152,23 @@ OUTER: return kNodes } +// makeCompoundMessages takes a list of messages and packs +// them into one or multiple messages based on the limitations +// of compound messages (255 messages each). 
+func makeCompoundMessages(msgs [][]byte) []*bytes.Buffer { + const maxMsgs = 255 + bufs := make([]*bytes.Buffer, 0, (len(msgs)+(maxMsgs-1))/maxMsgs) + + for ; len(msgs) > maxMsgs; msgs = msgs[maxMsgs:] { + bufs = append(bufs, makeCompoundMessage(msgs[:maxMsgs])) + } + if len(msgs) > 0 { + bufs = append(bufs, makeCompoundMessage(msgs)) + } + + return bufs +} + // makeCompoundMessage takes a list of messages and generates // a single compound message containing all of them func makeCompoundMessage(msgs [][]byte) *bytes.Buffer { diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go index 3582ee4dae2e..32124a73a208 100644 --- a/vendor/github.com/hashicorp/serf/coordinate/client.go +++ b/vendor/github.com/hashicorp/serf/coordinate/client.go @@ -218,7 +218,7 @@ func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coo return nil, fmt.Errorf("round trip time not in valid range, duration %v is not a positive value less than %v ", rtt, maxRTT) } if rtt == 0 { - metrics.IncrCounter([]string{"serf", "coordinate", "zero-rtt"}, 1) + metrics.IncrCounterWithLabels([]string{"serf", "coordinate", "zero-rtt"}, 1, c.config.MetricLabels) } rttSeconds := c.latencyFilter(node, rtt.Seconds()) diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go index b85a8ab7b00c..09c0cafe830f 100644 --- a/vendor/github.com/hashicorp/serf/coordinate/config.go +++ b/vendor/github.com/hashicorp/serf/coordinate/config.go @@ -1,5 +1,9 @@ package coordinate +import ( + "github.com/armon/go-metrics" +) + // Config is used to set the parameters of the Vivaldi-based coordinate mapping // algorithm. // @@ -52,6 +56,9 @@ type Config struct { // GravityRho is a tuning factor that sets how much gravity has an effect // to try to re-center coordinates. See [2] for more details. GravityRho float64 + + // metricLabels is the slice of labels to put on all emitted metrics + MetricLabels []metrics.Label } // DefaultConfig returns a Config that has some default values suitable for diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go index 2aff57aab4d6..f427cc84e2ee 100644 --- a/vendor/github.com/huandu/xstrings/common.go +++ b/vendor/github.com/huandu/xstrings/common.go @@ -3,15 +3,11 @@ package xstrings -import ( - "bytes" -) - const bufferMaxInitGrowSize = 2048 // Lazy initialize a buffer. -func allocBuffer(orig, cur string) *bytes.Buffer { - output := &bytes.Buffer{} +func allocBuffer(orig, cur string) *stringBuilder { + output := &stringBuilder{} maxSize := len(orig) * 4 // Avoid to reserve too much memory at once. 
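The serf coordinate Config picks up the same MetricLabels pattern as memberlist above; a brief sketch, assuming the package's existing DefaultConfig/NewClient constructors:

```go
package main

import (
	"log"

	metrics "github.com/armon/go-metrics"
	"github.com/hashicorp/serf/coordinate"
)

func main() {
	cfg := coordinate.DefaultConfig()
	// Labels attached to coordinate metrics such as serf.coordinate.zero-rtt.
	cfg.MetricLabels = []metrics.Label{{Name: "datacenter", Value: "dc1"}}

	client, err := coordinate.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```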
diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go index 3d58fa81ae0e..3d5a34950b0c 100644 --- a/vendor/github.com/huandu/xstrings/convert.go +++ b/vendor/github.com/huandu/xstrings/convert.go @@ -4,7 +4,6 @@ package xstrings import ( - "bytes" "math/rand" "unicode" "unicode/utf8" @@ -23,7 +22,7 @@ func ToCamelCase(str string) string { return "" } - buf := &bytes.Buffer{} + buf := &stringBuilder{} var r0, r1 rune var size int @@ -112,7 +111,7 @@ func camelCaseToLowerCase(str string, connector rune) string { return "" } - buf := &bytes.Buffer{} + buf := &stringBuilder{} wt, word, remaining := nextWord(str) for len(remaining) > 0 { @@ -374,7 +373,7 @@ func nextValidRune(str string, prev rune) (r rune, size int) { return } -func toLower(buf *bytes.Buffer, wt wordType, str string, connector rune) { +func toLower(buf *stringBuilder, wt wordType, str string, connector rune) { buf.Grow(buf.Len() + len(str)) if wt != upperCaseWord && wt != connectorWord { @@ -401,7 +400,7 @@ func SwapCase(str string) string { var r rune var size int - buf := &bytes.Buffer{} + buf := &stringBuilder{} for len(str) > 0 { r, size = utf8.DecodeRuneInString(str) @@ -435,7 +434,7 @@ func FirstRuneToUpper(str string) string { return str } - buf := &bytes.Buffer{} + buf := &stringBuilder{} buf.WriteRune(unicode.ToUpper(r)) buf.WriteString(str[size:]) return buf.String() @@ -453,7 +452,7 @@ func FirstRuneToLower(str string) string { return str } - buf := &bytes.Buffer{} + buf := &stringBuilder{} buf.WriteRune(unicode.ToLower(r)) buf.WriteString(str[size:]) return buf.String() @@ -566,7 +565,7 @@ func Successor(str string) string { // Needs to add one character for carry. if i < 0 && carry != ' ' { - buf := &bytes.Buffer{} + buf := &stringBuilder{} buf.Grow(l + 4) // Reserve enough space for write. 
if lastAlphanumeric != 0 { diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go index 2d02df1c042f..8cd76c525ccb 100644 --- a/vendor/github.com/huandu/xstrings/format.go +++ b/vendor/github.com/huandu/xstrings/format.go @@ -4,7 +4,6 @@ package xstrings import ( - "bytes" "unicode/utf8" ) @@ -28,7 +27,7 @@ func ExpandTabs(str string, tabSize int) string { var r rune var i, size, column, expand int - var output *bytes.Buffer + var output *stringBuilder orig := str @@ -43,7 +42,7 @@ func ExpandTabs(str string, tabSize int) string { } for i = 0; i < expand; i++ { - output.WriteByte(byte(' ')) + output.WriteRune(' ') } column += expand @@ -88,7 +87,7 @@ func LeftJustify(str string, length int, pad string) string { remains := length - l padLen := Len(pad) - output := &bytes.Buffer{} + output := &stringBuilder{} output.Grow(len(str) + (remains/padLen+1)*len(pad)) output.WriteString(str) writePadString(output, pad, padLen, remains) @@ -114,7 +113,7 @@ func RightJustify(str string, length int, pad string) string { remains := length - l padLen := Len(pad) - output := &bytes.Buffer{} + output := &stringBuilder{} output.Grow(len(str) + (remains/padLen+1)*len(pad)) writePadString(output, pad, padLen, remains) output.WriteString(str) @@ -140,7 +139,7 @@ func Center(str string, length int, pad string) string { remains := length - l padLen := Len(pad) - output := &bytes.Buffer{} + output := &stringBuilder{} output.Grow(len(str) + (remains/padLen+1)*len(pad)) writePadString(output, pad, padLen, remains/2) output.WriteString(str) @@ -148,7 +147,7 @@ func Center(str string, length int, pad string) string { return output.String() } -func writePadString(output *bytes.Buffer, pad string, padLen, remains int) { +func writePadString(output *stringBuilder, pad string, padLen, remains int) { var r rune var size int diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go index 0eefb43ed71d..64075f9bb8a7 100644 --- a/vendor/github.com/huandu/xstrings/manipulate.go +++ b/vendor/github.com/huandu/xstrings/manipulate.go @@ -4,7 +4,6 @@ package xstrings import ( - "bytes" "strings" "unicode/utf8" ) @@ -131,7 +130,7 @@ func Insert(dst, src string, index int) string { // Scrub scrubs invalid utf8 bytes with repl string. // Adjacent invalid bytes are replaced only once. 
func Scrub(str, repl string) string { - var buf *bytes.Buffer + var buf *stringBuilder var r rune var size, pos int var hasError bool @@ -144,7 +143,7 @@ func Scrub(str, repl string) string { if r == utf8.RuneError { if !hasError { if buf == nil { - buf = &bytes.Buffer{} + buf = &stringBuilder{} } buf.WriteString(origin[:pos]) diff --git a/vendor/github.com/huandu/xstrings/stringbuilder.go b/vendor/github.com/huandu/xstrings/stringbuilder.go new file mode 100644 index 000000000000..bb0919d32f77 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/stringbuilder.go @@ -0,0 +1,7 @@ +//+build go1.10 + +package xstrings + +import "strings" + +type stringBuilder = strings.Builder diff --git a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go new file mode 100644 index 000000000000..dac389d139e7 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go @@ -0,0 +1,9 @@ +//+build !go1.10 + +package xstrings + +import "bytes" + +type stringBuilder struct { + bytes.Buffer +} diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go index 66e23f86d030..42e694fb1761 100644 --- a/vendor/github.com/huandu/xstrings/translate.go +++ b/vendor/github.com/huandu/xstrings/translate.go @@ -4,7 +4,6 @@ package xstrings import ( - "bytes" "unicode" "unicode/utf8" ) @@ -152,12 +151,12 @@ func NewTranslator(from, to string) *Translator { continue } - fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) + _, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) fromEnd = utf8.RuneError } if fromEnd != utf8.RuneError { - singleRunes = tr.addRune(fromEnd, toStart, singleRunes) + tr.addRune(fromEnd, toStart, singleRunes) } tr.reverted = reverted @@ -303,7 +302,7 @@ func (tr *Translator) Translate(str string) string { orig := str - var output *bytes.Buffer + var output *stringBuilder for len(str) > 0 { r, size = utf8.DecodeRuneInString(str) @@ -500,7 +499,7 @@ func Squeeze(str, pattern string) string { var size int var skipSqueeze, matched bool var tr *Translator - var output *bytes.Buffer + var output *stringBuilder orig := str last = -1 diff --git a/vendor/github.com/influxdata/telegraf/.gitattributes b/vendor/github.com/influxdata/telegraf/.gitattributes index 21bc439bf797..7769daa83cb0 100644 --- a/vendor/github.com/influxdata/telegraf/.gitattributes +++ b/vendor/github.com/influxdata/telegraf/.gitattributes @@ -3,3 +3,4 @@ README.md merge=union go.sum merge=union plugins/inputs/all/all.go merge=union plugins/outputs/all/all.go merge=union +**/testdata/** test eol=lf diff --git a/vendor/github.com/influxdata/telegraf/.gitignore b/vendor/github.com/influxdata/telegraf/.gitignore index df2b3d06643c..c3945f1c9a78 100644 --- a/vendor/github.com/influxdata/telegraf/.gitignore +++ b/vendor/github.com/influxdata/telegraf/.gitignore @@ -3,5 +3,15 @@ /telegraf /telegraf.exe /telegraf.gz +/tools/package_lxd_test/package_lxd_test +/tools/license_checker/license_checker* +/tools/readme_config_includer/generator* +/tools/custom_builder/custom_builder* /vendor .DS_Store +process.yml +/.vscode +/*.toml +/*.conf +resource.syso +versioninfo.json diff --git a/vendor/github.com/influxdata/telegraf/.golangci.yml b/vendor/github.com/influxdata/telegraf/.golangci.yml new file mode 100644 index 000000000000..045c430452f9 --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/.golangci.yml @@ -0,0 +1,219 @@ +linters: + disable-all: true + 
enable: + # - telegraflinter + - asasalint + - asciicheck + - bidichk + - bodyclose + - depguard + - dogsled + - errcheck + - errname + - exportloopref + - gocheckcompilerdirectives + - goprintffuncname + - gosimple + - govet + - ineffassign + - interfacebloat + - lll + - makezero + - nakedret + - nilerr + - nolintlint + - prealloc + - predeclared + - revive + - sqlclosecheck + - staticcheck + - tenv + - tparallel + - typecheck + - unconvert + - unparam + - unused + +linters-settings: + depguard: + # Kind of list is passed in. + # Allowed values: allowlist|denylist + # Default: denylist + list-type: denylist + # Check the list against standard lib. + # Default: false + include-go-root: true + # A list of packages for the list type specified. + # Can accept both string prefixes and string glob patterns. + # Default: [] + packages: + - log + # A list of packages for the list type specified. + # Specify an error message to output when a denied package is used. + # Default: [] + packages-with-error-message: + - log: 'Use injected telegraf.Logger instead' + # Specify rules by which the linter ignores certain files for consideration. + # Can accept both string prefixes and string glob patterns. + # The ! character in front of the rule is a special character + # which signals that the linter should negate the rule. + # This allows for more precise control, but it is only available for glob patterns. + # Default: [] + ignore-file-rules: + - "**/agent/**" + - "**/cmd/**" + - "**/config/**" + - "**/filter/**" + - "**/internal/**" + - "**/logger/**" + - "**/metric/**" + - "**/models/**" + - "**/plugins/serializers/**" + - "**/scripts/**" + - "**/selfstat/**" + - "**/testutil/**" + - "**/tools/**" + - "**/*_test.go" + lll: + # Max line length, lines longer will be reported. + # '\t' is counted as 1 character by default, and can be changed with the tab-width option. + # Default: 120. + line-length: 160 + # Tab width in spaces. + # Default: 1 + tab-width: 4 + nolintlint: + # Enable to require an explanation of nonzero length after each nolint directive. + # Default: false + require-explanation: true + # Enable to require nolint directives to mention the specific linter being suppressed. + # Default: false + require-specific: true + prealloc: + # Report pre-allocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. 
+ # Default: true + simple: false + revive: + rules: + - name: argument-limit + arguments: [ 6 ] + - name: atomic + - name: bare-return + - name: blank-imports + - name: bool-literal-in-expr + - name: call-to-gc + - name: confusing-naming + - name: confusing-results + - name: constant-logical-expr + - name: context-as-argument + - name: context-keys-type + - name: deep-exit + - name: defer + - name: dot-imports + - name: duplicated-imports + - name: early-return + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: function-result-limit + arguments: [ 3 ] + - name: identical-branches + - name: if-return + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + - name: modifies-parameter + - name: modifies-value-receiver + - name: package-comments + - name: range + - name: range-val-address + - name: range-val-in-closure + - name: receiver-naming + - name: redefines-builtin-id + - name: string-of-int + - name: struct-tag + - name: superfluous-else + - name: time-naming + - name: unconditional-recursion + - name: unexported-naming + - name: unhandled-error + arguments: [ "fmt.Printf", "fmt.Println", "fmt.Print", "fmt.Fprintf", "fmt.Fprint", "fmt.Fprintln" ] + - name: unnecessary-stmt + - name: unreachable-code + - name: unused-parameter + - name: var-declaration + - name: var-naming + - name: waitgroup-by-value + nakedret: + # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 + max-func-lines: 1 + tenv: + # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures. + # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked. + # Default: false + all: true + +run: + # timeout for analysis, e.g. 30s, 5m, default is 1m + timeout: 10m + + # which dirs to skip: issues from them won't be reported; + # can use regexp here: generated.*, regexp is applied on full path; + # default value is empty list, but default dirs are skipped independently + # from this option's value (see skip-dirs-use-default). + # "/" will be replaced by current OS file path separator to properly work + # on Windows. + skip-dirs: + - assets + - docs + - etc + + # which files to skip: they will be analyzed, but issues from them + # won't be reported. Default value is empty list, but there is + # no need to include all autogenerated files, we confidently recognize + # autogenerated files. If it's not please let us know. + # "/" will be replaced by current OS file path separator to properly work + # on Windows. + skip-files: + - plugins/parsers/influx/machine.go* + +issues: + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-issues-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. 
+ max-same-issues: 0 + + exclude: + - don't use an underscore in package name #revive:var-naming + + exclude-rules: + - path: plugins/parsers/influx + linters: + - govet + + - path: cmd/telegraf/(main|printer).go + text: "Error return value of `outputBuffer.Write` is not checked" + + - path: cmd/telegraf/(main|printer).go + text: "unhandled-error: Unhandled error in call to function outputBuffer.Write" + +# output configuration options +output: + # Format: colored-line-number|line-number|json|tab|checkstyle|code-climate|junit-xml|github-actions + # + # Multiple can be specified by separating them by comma, output can be provided + # for each of them by separating format name and path by colon symbol. + # Output path can be either `stdout`, `stderr` or path to the file to write to. + # Example: "checkstyle:report.json,colored-line-number" + # + # Default: colored-line-number + format: tab + # Make issues output unique by line. + # Default: true + uniq-by-line: false + # Sort results by: filepath, line and column. + sort-results: true diff --git a/vendor/github.com/influxdata/telegraf/.markdownlint.yml b/vendor/github.com/influxdata/telegraf/.markdownlint.yml new file mode 100644 index 000000000000..893179487d31 --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/.markdownlint.yml @@ -0,0 +1,6 @@ +{ + "MD013": false, + "MD033": { + "allowed_elements": ["br"] + } +} diff --git a/vendor/github.com/influxdata/telegraf/.markdownlintignore b/vendor/github.com/influxdata/telegraf/.markdownlintignore new file mode 100644 index 000000000000..56ddc43963ba --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/.markdownlintignore @@ -0,0 +1 @@ +docs/includes/* diff --git a/vendor/github.com/influxdata/telegraf/CHANGELOG.md b/vendor/github.com/influxdata/telegraf/CHANGELOG.md index a1c35eb1de9b..1531324feeb4 100644 --- a/vendor/github.com/influxdata/telegraf/CHANGELOG.md +++ b/vendor/github.com/influxdata/telegraf/CHANGELOG.md @@ -1,199 +1,1880 @@ + +# Changelog + +## v1.25.2 [2023-02-13] + +### Bugfixes + +- [#12607](https://github.com/influxdata/telegraf/pull/12607) `agent` Only read the config once +- [#12586](https://github.com/influxdata/telegraf/pull/12586) `docs` Fix link to license for Google flatbuffers +- [#12637](https://github.com/influxdata/telegraf/pull/12637) `inputs.cisco_telemetry_mdt` Check subfield sizes to avoid panics +- [#12657](https://github.com/influxdata/telegraf/pull/12657) `inputs.cloudwatch` Enable custom endpoint support +- [#12603](https://github.com/influxdata/telegraf/pull/12603) `inputs.conntrack` Resolve segfault when setting collect field +- [#12512](https://github.com/influxdata/telegraf/pull/12512) `inputs.gnmi` Handle both new-style `tag_subscription` and old-style `tag_only` +- [#12599](https://github.com/influxdata/telegraf/pull/12599) `inputs.mongodb` Improve error logging +- [#12604](https://github.com/influxdata/telegraf/pull/12604) `inputs.mongodb` SIGSEGV when restarting MongoDB node +- [#12576](https://github.com/influxdata/telegraf/pull/12576) `inputs.mysql` Avoid side-effects for TLS between plugin instances +- [#12626](https://github.com/influxdata/telegraf/pull/12626) `inputs.prometheus` Deprecate and rename the timeout variable +- [#12648](https://github.com/influxdata/telegraf/pull/12648) `inputs.tail` Fix typo in the README +- [#12543](https://github.com/influxdata/telegraf/pull/12543) `inputs.upsd` Add additional fields +- [#12629](https://github.com/influxdata/telegraf/pull/12629) `inputs.x509_cert` Fix Windows path handling +- 
[#12560](https://github.com/influxdata/telegraf/pull/12560) `outputs.prometheus_client` Expire with ticker, not add/collect +- [#12644](https://github.com/influxdata/telegraf/pull/12644) `secretstores` Check store id format and presence + +### Dependency Updates + +- [#12630](https://github.com/influxdata/telegraf/pull/12630) `deps` Bump cloud.google.com/go/bigquery from 1.44.0 to 1.45.0 +- [#12568](https://github.com/influxdata/telegraf/pull/12568) `deps` Bump github.com/99designs/keyring from 1.2.1 to 1.2.2 +- [#12634](https://github.com/influxdata/telegraf/pull/12634) `deps` Bump github.com/antchfx/xmlquery from 1.3.12 to 1.3.15 +- [#12633](https://github.com/influxdata/telegraf/pull/12633) `deps` Bump github.com/antchfx/xpath from 1.2.2 to 1.2.3 +- [#12571](https://github.com/influxdata/telegraf/pull/12571) `deps` Bump github.com/coreos/go-semver from 0.3.0 to 0.3.1 +- [#12632](https://github.com/influxdata/telegraf/pull/12632) `deps` Bump github.com/moby/ipvs from 1.0.2 to 1.1.0 +- [#12572](https://github.com/influxdata/telegraf/pull/12572) `deps` Bump github.com/multiplay/go-ts3 from 1.0.1 to 1.1.0 +- [#12581](https://github.com/influxdata/telegraf/pull/12581) `deps` Bump github.com/prometheus/client_golang from 1.13.1 to 1.14.0 +- [#12580](https://github.com/influxdata/telegraf/pull/12580) `deps` Bump github.com/shirou/gopsutil from 3.22.9 to 3.22.12 +- [#12570](https://github.com/influxdata/telegraf/pull/12570) `deps` Bump go.mongodb.org/mongo-driver from 1.11.0 to 1.11.1 +- [#12582](https://github.com/influxdata/telegraf/pull/12582) `deps` Bump golang/x dependencies +- [#12583](https://github.com/influxdata/telegraf/pull/12583) `deps` Bump google.golang.org/grpc from 1.51.0 to 1.52.0 +- [#12631](https://github.com/influxdata/telegraf/pull/12631) `deps` Bump google.golang.org/grpc from 1.52.0 to 1.52.3 + +## v1.25.1 [2023-01-30] + +### Bugfixes + +- [#12549](https://github.com/influxdata/telegraf/pull/12549) `agent` Catch non-existing commands and error out +- [#12453](https://github.com/influxdata/telegraf/pull/12453) `agent` Correctly reload configuration files +- [#12491](https://github.com/influxdata/telegraf/pull/12491) `agent` Handle float time with fractions of seconds correctly +- [#12457](https://github.com/influxdata/telegraf/pull/12457) `agent` Only set default snmp after reading all configs +- [#12515](https://github.com/influxdata/telegraf/pull/12515) `common.cookie` Allow any 2xx status code +- [#12459](https://github.com/influxdata/telegraf/pull/12459) `common.kafka` Add keep-alive period setting for input and output +- [#12240](https://github.com/influxdata/telegraf/pull/12240) `inputs.cisco_telemetry_mdt` Add operation-metric and class-policy prefix +- [#12533](https://github.com/influxdata/telegraf/pull/12533) `inputs.exec` Restore pre-v1.21 behavior for CSV data_format +- [#12415](https://github.com/influxdata/telegraf/pull/12415) `inputs.gnmi` Update configuration documentation +- [#12536](https://github.com/influxdata/telegraf/pull/12536) `inputs.logstash` Collect opensearch specific stats +- [#12409](https://github.com/influxdata/telegraf/pull/12409) `inputs.mysql` Revert slice declarations with non-zero initial length +- [#12529](https://github.com/influxdata/telegraf/pull/12529) `inputs.opcua` Fix opcua and opcua-listener for servers using password-based auth +- [#12522](https://github.com/influxdata/telegraf/pull/12522) `inputs.prometheus` Correctly track deleted pods +- [#12559](https://github.com/influxdata/telegraf/pull/12559) `inputs.prometheus` Set the 
timeout for slow running API endpoints correctly +- [#12384](https://github.com/influxdata/telegraf/pull/12384) `inputs.sqlserver` Add more precise version check +- [#12387](https://github.com/influxdata/telegraf/pull/12387) `inputs.sqlserver` Added own SPID filter +- [#12386](https://github.com/influxdata/telegraf/pull/12386) `inputs.sqlserver` SqlRequests include sleeping sessions with open transactions +- [#12528](https://github.com/influxdata/telegraf/pull/12528) `inputs.sqlserver` Suppress error on secondary replicas +- [#12516](https://github.com/influxdata/telegraf/pull/12516) `inputs.upsd` Always convert to float +- [#12486](https://github.com/influxdata/telegraf/pull/12486) `inputs.upsd` Ensure firmware is always a string +- [#12375](https://github.com/influxdata/telegraf/pull/12375) `inputs.win_eventlog` Handle remote events more robustly +- [#12404](https://github.com/influxdata/telegraf/pull/12404) `inputs.x509_cert` Fix off-by-one when adding intermediate certificates +- [#12399](https://github.com/influxdata/telegraf/pull/12399) `outputs.loki` Return response body on error +- [#12440](https://github.com/influxdata/telegraf/pull/12440) `parsers.json_v2` In case of invalid json, log message to debug log +- [#12401](https://github.com/influxdata/telegraf/pull/12401) `secretstores` Cleanup duplicate printing +- [#12468](https://github.com/influxdata/telegraf/pull/12468) `secretstores` Fix handling of "id" and print failing secret-store +- [#12490](https://github.com/influxdata/telegraf/pull/12490) `secretstores` Fix handling of TOML strings + +### Dependency Updates + +- [#12385](https://github.com/influxdata/telegraf/pull/12385) `deps` Bump cloud.google.com/go/storage from 1.23.0 to 1.28.1 +- [#12511](https://github.com/influxdata/telegraf/pull/12511) `deps` Bump github.com/antchfx/jsonquery from 1.3.0 to 1.3.1 +- [#12420](https://github.com/influxdata/telegraf/pull/12420) `deps` Bump github.com/aws/aws-sdk-go-v2 from 1.17.1 to 1.17.3 +- [#12538](https://github.com/influxdata/telegraf/pull/12538) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.54.4 to 1.80.1 +- [#12476](https://github.com/influxdata/telegraf/pull/12476) `deps` Bump github.com/denisenkom/go-mssqldb from 0.12.0 to 0.12.3 +- [#12378](https://github.com/influxdata/telegraf/pull/12378) `deps` Bump github.com/eclipse/paho.mqtt.golang from 1.4.1 to 1.4.2 +- [#12381](https://github.com/influxdata/telegraf/pull/12381) `deps` Bump github.com/hashicorp/consul/api from 1.15.2 to 1.18.0 +- [#12417](https://github.com/influxdata/telegraf/pull/12417) `deps` Bump github.com/karrick/godirwalk from 1.16.1 to 1.17.0 +- [#12418](https://github.com/influxdata/telegraf/pull/12418) `deps` Bump github.com/kardianos/service from 1.2.1 to 1.2.2 +- [#12379](https://github.com/influxdata/telegraf/pull/12379) `deps` Bump github.com/nats-io/nats-server/v2 from 2.9.4 to 2.9.9 + +## v1.25.0 [2022-12-12] + +### New Plugins + +- [#10103](https://github.com/influxdata/telegraf/pull/10103) `inputs.azure_monitor` Azure Monitor +- [#8413](https://github.com/influxdata/telegraf/pull/8413) `inputs.gcs` Google Cloud Storage +- [#11824](https://github.com/influxdata/telegraf/pull/11824) `inputs.intel_dlb` Intel DLB +- [#11814](https://github.com/influxdata/telegraf/pull/11814) `inputs.libvirt` libvirt +- [#12108](https://github.com/influxdata/telegraf/pull/12108) `inputs.netflow` netflow v5, v9, and IPFIX +- [#11786](https://github.com/influxdata/telegraf/pull/11786) `inputs.opcua_listener` OPC UA Event subscriptions + +### Features + +- 
[#12130](https://github.com/influxdata/telegraf/pull/12130) Add arm64 Windows builds to nightly and CI +- [#11987](https://github.com/influxdata/telegraf/pull/11987) `agent` Add method to inform of deprecated plugin option values +- [#11232](https://github.com/influxdata/telegraf/pull/11232) `agent` Secret-store implementation +- [#12358](https://github.com/influxdata/telegraf/pull/12358) `agent` Deprecate active usage of netsnmp translator +- [#12302](https://github.com/influxdata/telegraf/pull/12302) `agent.tls` Allow setting renegotiation method +- [#12111](https://github.com/influxdata/telegraf/pull/12111) `common.kafka` Add exponential backoff when connecting or reconnecting and allow plugin to start without making initial connection +- [#11860](https://github.com/influxdata/telegraf/pull/11860) `inputs.amqp_consumer` Determine content encoding automatically +- [#12014](https://github.com/influxdata/telegraf/pull/12014) `inputs.apcupsd` Add new fields +- [#12342](https://github.com/influxdata/telegraf/pull/12342) `inputs.cgroups` Do not abort on first error, print message once +- [#8958](https://github.com/influxdata/telegraf/pull/8958) `inputs.conntrack` Parse conntrack stats +- [#11703](https://github.com/influxdata/telegraf/pull/11703) `inputs.diskio` Allow selecting devices by ID +- [#11895](https://github.com/influxdata/telegraf/pull/11895) `inputs.ethtool` Gather statistics from namespaces +- [#12087](https://github.com/influxdata/telegraf/pull/12087) `inputs.ethtool` Possibility to skip gathering metrics for downed interfaces +- [#12324](https://github.com/influxdata/telegraf/pull/12324) `inputs.http_response` Add User-Agent header +- [#12304](https://github.com/influxdata/telegraf/pull/12304) `inputs.kafka_consumer` Add sarama debug logs +- [#11783](https://github.com/influxdata/telegraf/pull/11783) `inputs.knx_listener` Support TCP as transport protocol +- [#12301](https://github.com/influxdata/telegraf/pull/12301) `inputs.kubernetes` Allow fetching kubelet metrics remotely + +- [#12255](https://github.com/influxdata/telegraf/pull/12255) `inputs.modbus` Add 8-bit integer types +- [#11983](https://github.com/influxdata/telegraf/pull/11983) `inputs.modbus` Add config option to pause after connect +- [#12340](https://github.com/influxdata/telegraf/pull/12340) `inputs.modbus` Add support for half-precision float (float16) +- [#11106](https://github.com/influxdata/telegraf/pull/11106) `inputs.modbus` Optimize grouped requests +- [#11273](https://github.com/influxdata/telegraf/pull/11273) `inputs.modbus` Optimize requests +- [#11630](https://github.com/influxdata/telegraf/pull/11630) `inputs.opcua` Add use regular reads workaround +- [#9633](https://github.com/influxdata/telegraf/pull/9633) `inputs.powerdns_recursor` Support for new PowerDNS recursor control protocol +- [#12050](https://github.com/influxdata/telegraf/pull/12050) `inputs.prometheus` Add support for custom header +- [#11962](https://github.com/influxdata/telegraf/pull/11962) `inputs.prometheus` Allow explicit scrape configuration without annotations +- [#11729](https://github.com/influxdata/telegraf/pull/11729) `inputs.prometheus` Use system wide proxy settings +- [#12329](https://github.com/influxdata/telegraf/pull/12329) `inputs.smart` Add additional SMART metrics that indicate/predict device failure +- [#11872](https://github.com/influxdata/telegraf/pull/11872) `inputs.snmp` Convert enum values +- [#12187](https://github.com/influxdata/telegraf/pull/12187) `inputs.socket_listener` Allow to specify message
separator for streams +- [#12351](https://github.com/influxdata/telegraf/pull/12351) `inputs.sqlserver` Add @@SERVICENAME and SERVERPROPERTY(IsClustered) in measurement sqlserver_server_properties +- [#12126](https://github.com/influxdata/telegraf/pull/12126) `inputs.sqlserver` Add data and log used space metrics for Azure SQL DB +- [#12292](https://github.com/influxdata/telegraf/pull/12292) `inputs.sqlserver` Add metric available_physical_memory_kb in sqlserver_server_properties +- [#12319](https://github.com/influxdata/telegraf/pull/12319) `inputs.sqlserver` Introduce timeout for query execution +- [#12147](https://github.com/influxdata/telegraf/pull/12147) `inputs.system` Collect unique user count logged in +- [#12281](https://github.com/influxdata/telegraf/pull/12281) `inputs.tail` Add option to preserve newlines for multiline data +- [#11762](https://github.com/influxdata/telegraf/pull/11762) `inputs.tail` Allow handling of quoted strings spanning multiple lines +- [#12170](https://github.com/influxdata/telegraf/pull/12170) `inputs.tomcat` Add source tag +- [#11874](https://github.com/influxdata/telegraf/pull/11874) `outputs.azure_data_explorer` Add support for streaming ingestion for ADX output plugin +- [#11991](https://github.com/influxdata/telegraf/pull/11991) `outputs.event_hubs` Expose max message size batch option +- [#11950](https://github.com/influxdata/telegraf/pull/11950) `outputs.graylog` Implement optional connection retries +- [#11385](https://github.com/influxdata/telegraf/pull/11385) `outputs.timestream` Support ingesting multi-measures +- [#12232](https://github.com/influxdata/telegraf/pull/12232) `parsers.binary` Handle hex-encoded inputs +- [#12008](https://github.com/influxdata/telegraf/pull/12008) `parsers.csv` Add option for overwrite tags +- [#12247](https://github.com/influxdata/telegraf/pull/12247) `parsers.csv` Support null delimiters +- [#12320](https://github.com/influxdata/telegraf/pull/12320) `parsers.grok` Add option to allow multiline messages +- [#11933](https://github.com/influxdata/telegraf/pull/11933) `parsers.xpath` Add option to skip (header) bytes +- [#11999](https://github.com/influxdata/telegraf/pull/11999) `parsers.xpath` Allow to specify byte-array fields to encode in HEX +- [#11552](https://github.com/influxdata/telegraf/pull/11552) `parsers` Add binary parser +- [#12260](https://github.com/influxdata/telegraf/pull/12260) `serializers.json` Support serializing JSON nested in string fields + +### Bugfixes + +- [#12113](https://github.com/influxdata/telegraf/pull/12113) `agent` Run processors in config order +- [#12127](https://github.com/influxdata/telegraf/pull/12127) `agent` Watch for changes in configuration files in config directories +- [#12062](https://github.com/influxdata/telegraf/pull/12062) `inputs.conntrack` Skip gather tests if conntrack kernel module is not loaded +- [#12295](https://github.com/influxdata/telegraf/pull/12295) `inputs.filecount` Revert library version +- [#12284](https://github.com/influxdata/telegraf/pull/12284) `inputs.kube_inventory` Change default token path, use in-cluster config by default +- [#12235](https://github.com/influxdata/telegraf/pull/12235) `inputs.modbus` Add workaround to read field in separate requests +- [#12339](https://github.com/influxdata/telegraf/pull/12339) `inputs.modbus` Fix Windows COM-port path +- [#12367](https://github.com/influxdata/telegraf/pull/12367) `inputs.modbus` Fix default value of transmission mode +- [#12330](https://github.com/influxdata/telegraf/pull/12330) 
`inputs.mongodb` Fix connection leak triggered by config reload +- [#12101](https://github.com/influxdata/telegraf/pull/12101) `inputs.opcua` Add support for opcua datetime values +- [#12376](https://github.com/influxdata/telegraf/pull/12376) `inputs.opcua` Parse full range of status codes with uint32 +- [#12278](https://github.com/influxdata/telegraf/pull/12278) `inputs.prometheus` Respect selectors when scraping pods +- [#12323](https://github.com/influxdata/telegraf/pull/12323) `inputs.sql` Cast measurement_column to string +- [#12259](https://github.com/influxdata/telegraf/pull/12259) `inputs.vsphere` Eliminated duplicate samples +- [#12307](https://github.com/influxdata/telegraf/pull/12307) `inputs.zfs` Unbreak datasets stats gathering in case listsnaps is enabled on a zfs pool +- [#12291](https://github.com/influxdata/telegraf/pull/12291) `outputs.azure_data_explorer` Update test call to NewSerializer +- [#12357](https://github.com/influxdata/telegraf/pull/12357) `processors.parser` Handle empty metric names correctly + +### Dependency Updates + +- [#12334](https://github.com/influxdata/telegraf/pull/12334) `deps` Update github.com/aliyun/alibaba-cloud-sdk-go from 1.61.1836 to 1.62.77 +- [#12355](https://github.com/influxdata/telegraf/pull/12355) `deps` Update github.com/gosnmp/gosnmp from 1.34.0 to 1.35.0 +- [#12372](https://github.com/influxdata/telegraf/pull/12372) `deps` Update OpenTelemetry from 0.2.30 to 0.2.33 + +## v1.24.4 [2022-11-29] + +### Bugfixes + +- [#12177](https://github.com/influxdata/telegraf/pull/12177) `inputs.cloudwatch` Correctly handle multiple namespaces +- [#12294](https://github.com/influxdata/telegraf/pull/12294) `inputs.directory_monitor` Close input file before removal +- [#12140](https://github.com/influxdata/telegraf/pull/12140) `inputs.gnmi` Handle decimal_val as per gnmi v0.8.0 +- [#12275](https://github.com/influxdata/telegraf/pull/12275) `inputs.gnmi` Do not provide empty prefix for subscription request +- [#12258](https://github.com/influxdata/telegraf/pull/12258) `inputs.gnmi` Fix empty name for Sonic devices +- [#12171](https://github.com/influxdata/telegraf/pull/12171) `inputs.ping` Avoid -x/-X on FreeBSD 13 and newer with ping6 +- [#12282](https://github.com/influxdata/telegraf/pull/12282) `inputs.prometheus` Correctly default to port 9102 +- [#12229](https://github.com/influxdata/telegraf/pull/12229) `input.redis_sentinel` Fix sentinel and replica stats gathering +- [#12280](https://github.com/influxdata/telegraf/pull/12280) `inputs.socket_listener` Ensure closed connection +- [#12201](https://github.com/influxdata/telegraf/pull/12201) `output.datadog` Log response in case of non 2XX response from API +- [#12160](https://github.com/influxdata/telegraf/pull/12160) `outputs.prometheus` Expire metrics correctly during adds +- [#12156](https://github.com/influxdata/telegraf/pull/12156) `outputs.yandex_cloud_monitoring` Catch int64 values + +### Dependency Updates + +- [#12132](https://github.com/influxdata/telegraf/pull/12132) `deps` Bump github.com/aliyun/alibaba-cloud-sdk-go from 1.61.1818 to 1.61.1836 +- [#12197](https://github.com/influxdata/telegraf/pull/12197) `deps` Bump github.com/prometheus/client_golang from 1.13.0 to 1.13.1 +- [#12196](https://github.com/influxdata/telegraf/pull/12196) `deps` Bump github.com/aws/aws-sdk-go-v2/service/timestreamwrite from 1.13.12 to 1.14.5 +- [#12198](https://github.com/influxdata/telegraf/pull/12198) `deps` Bump github.com/aws/aws-sdk-go-v2/feature/ec2/imds from 1.12.17 to 1.12.19 +-
[#12236](https://github.com/influxdata/telegraf/pull/12236) `deps` Bump github.com/gofrs/uuid from v4.3.0 to v4.3.1 +- [#12237](https://github.com/influxdata/telegraf/pull/12237) `deps` Bump github.com/aws/aws-sdk-go-v2/service/sts from 1.16.19 to 1.17.2 +- [#12238](https://github.com/influxdata/telegraf/pull/12238) `deps` Bump github.com/urfave/cli/v2 from 2.16.3 to 2.23.5 +- [#12239](https://github.com/influxdata/telegraf/pull/12239) `deps` Bump github.com/Azure/azure-event-hubs-go/v3 from 3.3.18 to 3.3.20 +- [#12248](https://github.com/influxdata/telegraf/pull/12248) `deps` Bump github.com/showwin/speedtest-go from 1.1.5 to 1.2.1 +- [#12269](https://github.com/influxdata/telegraf/pull/12269) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.12.21 to 1.13.2 +- [#12268](https://github.com/influxdata/telegraf/pull/12268) `deps` Bump github.com/yuin/goldmark from 1.5.2 to 1.5.3 +- [#12267](https://github.com/influxdata/telegraf/pull/12267) `deps` Bump cloud.google.com/go/pubsub from 1.25.1 to 1.26.0 +- [#12266](https://github.com/influxdata/telegraf/pull/12266) `deps` Bump go.mongodb.org/mongo-driver from 1.10.2 to 1.11.0 + +## v1.24.3 [2022-11-02] + +### Bugfixes + +- [#12063](https://github.com/influxdata/telegraf/pull/12063) Restore warning on unused config option(s) +- [#11941](https://github.com/influxdata/telegraf/pull/11941) Setting `enable_tls` has incorrect default value +- [#12093](https://github.com/influxdata/telegraf/pull/12093) Update systemd unit description +- [#12077](https://github.com/influxdata/telegraf/pull/12077) `agent` Fix panic due to tickers slice was off-by-one in size +- [#12076](https://github.com/influxdata/telegraf/pull/12076) `config` Set default parser +- [#12124](https://github.com/influxdata/telegraf/pull/12124) `inputs.directory_monitor` Allow cross filesystem directories +- [#12064](https://github.com/influxdata/telegraf/pull/12064) `inputs.kafka` Switch to sarama's new consumer group rebalance strategy setting +- [#12038](https://github.com/influxdata/telegraf/pull/12038) `inputs.modbus` Add slave id to failing connection +- [#12109](https://github.com/influxdata/telegraf/pull/12109) `inputs.modbus` Handle field-measurement definitions correctly on duplicate field check +- [#11912](https://github.com/influxdata/telegraf/pull/11912) `inputs.modbus` Improve duplicate field checks +- [#11993](https://github.com/influxdata/telegraf/pull/11993) `inputs.opcua` Add metric tags to node +- [#11997](https://github.com/influxdata/telegraf/pull/11997) `inputs.syslog` Print error when no error or message given +- [#12023](https://github.com/influxdata/telegraf/pull/12023) `inputs.zookeeper` Add the ability to parse floats as floats +- [#11926](https://github.com/influxdata/telegraf/pull/11926) `parsers.json_v2` Remove BOM before parsing +- [#12116](https://github.com/influxdata/telegraf/pull/12116) `processors.parser` Keep name of original metric if parser doesn't return one +- [#12081](https://github.com/influxdata/telegraf/pull/12081) `processors` Correctly setup processors +- [#12016](https://github.com/influxdata/telegraf/pull/12016) `regression` Fixes problem with metrics not exposed by plugins. 
+- [#12024](https://github.com/influxdata/telegraf/pull/12024) `serializers.splunkmetric` Provide option to remove event metric tag + +### Features + +- [#12075](https://github.com/influxdata/telegraf/pull/12075) `tools` Allow markdown includes for sections + +### Dependency Updates + +- [#11886](https://github.com/influxdata/telegraf/pull/11886) `deps` Bump github.com/snowflakedb/gosnowflake from 1.6.2 to 1.6.13 +- [#11928](https://github.com/influxdata/telegraf/pull/11928) `deps` Bump github.com/sensu/sensu-go/api/core/v2 from 2.14.0 to 2.15.0 +- [#11935](https://github.com/influxdata/telegraf/pull/11935) `deps` Bump github.com/gofrs/uuid from 4.2.0 to 4.3.0 +- [#11894](https://github.com/influxdata/telegraf/pull/11894) `deps` Bump github.com/hashicorp/consul/api from 1.14.0 to 1.15.2 +- [#11936](https://github.com/influxdata/telegraf/pull/11936) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.12.5 to 1.12.21 +- [#11972](https://github.com/influxdata/telegraf/pull/11972) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch +- [#11979](https://github.com/influxdata/telegraf/pull/11979) `deps` Bump github.com/aws/aws-sdk-go-v2/config +- [#11938](https://github.com/influxdata/telegraf/pull/11938) `deps` Bump k8s.io/apimachinery from 0.25.1 to 0.25.2 +- [#12001](https://github.com/influxdata/telegraf/pull/12001) `deps` Bump k8s.io/api from 0.25.0 to 0.25.2 +- [#12029](https://github.com/influxdata/telegraf/pull/12029) `deps` Bump k8s.io/api from 0.25.2 to 0.25.3 +- [#12030](https://github.com/influxdata/telegraf/pull/12030) `deps` Bump modernc.org/sqlite from 1.17.3 to 1.19.2 +- [#12034](https://github.com/influxdata/telegraf/pull/12034) `deps` Bump github.com/signalfx/golib/v3 from 3.3.45 to 3.3.46 +- [#12035](https://github.com/influxdata/telegraf/pull/12035) `deps` Bump github.com/yuin/goldmark from 1.4.13 to 1.5.2 +- [#11937](https://github.com/influxdata/telegraf/pull/11937) `deps` Bump cloud.google.com/go/bigquery from 1.40.0 to 1.42.0 +- [#12037](https://github.com/influxdata/telegraf/pull/12037) `deps` Bump github.com/aws/aws-sdk-go-v2/service/kinesis +- [#12036](https://github.com/influxdata/telegraf/pull/12036) `deps` Bump github.com/aliyun/alibaba-cloud-sdk-go +- [#11980](https://github.com/influxdata/telegraf/pull/11980) `deps` Bump github.com/Shopify/sarama from 1.36.0 to 1.37.2 +- [#12039](https://github.com/influxdata/telegraf/pull/12039) `deps` Bump testcontainers-go from 0.13.0 to 0.14.0 and address breaking change +- [#12090](https://github.com/influxdata/telegraf/pull/12090) `deps` Bump modernc.org/libc from v1.20.3 to v1.21.2 +- [#12098](https://github.com/influxdata/telegraf/pull/12098) `deps` Bump github.com/aws/aws-sdk-go-v2/service/dynamodb +- [#12096](https://github.com/influxdata/telegraf/pull/12096) `deps` Bump google.golang.org/api from 0.95.0 to 0.100.0 +- [#12095](https://github.com/influxdata/telegraf/pull/12095) `deps` Bump github.com/gopcua/opcua from 0.3.3 to 0.3.7 +- [#12097](https://github.com/influxdata/telegraf/pull/12097) `deps` Bump github.com/prometheus/client_model from 0.2.0 to 0.3.0 +- [#12135](https://github.com/influxdata/telegraf/pull/12135) `deps` Bump cloud.google.com/go/monitoring from 1.5.0 to 1.7.0 +- [#12134](https://github.com/influxdata/telegraf/pull/12134) `deps` Bump github.com/nats-io/nats-server/v2 from 2.8.4 to 2.9.4 + +## v1.24.2 [2022-10-03] + +### Bugfixes + +- [#11806](https://github.com/influxdata/telegraf/pull/11806) Re-allow specifying the influx parser type +-
[#11896](https://github.com/influxdata/telegraf/pull/11896) `cli` Support old style of filtering sample configs +- [#11519](https://github.com/influxdata/telegraf/pull/11519) `common.kafka` Enable TLS in Kafka plugins without custom config +- [#11866](https://github.com/influxdata/telegraf/pull/11866) `inputs.influxdb_listener` Error on invalid precision +- [#11877](https://github.com/influxdata/telegraf/pull/11877) `inputs.internet_speed` Rename enable_file_download to match upstream intent +- [#11849](https://github.com/influxdata/telegraf/pull/11849) `inputs.mongodb` Start plugin correctly +- [#10696](https://github.com/influxdata/telegraf/pull/10696) `inputs.mqtt_consumer` Rework connection and message tracking +- [#11696](https://github.com/influxdata/telegraf/pull/11696) `internal.ethtool` Avoid internal name conflict with aws +- [#11875](https://github.com/influxdata/telegraf/pull/11875) `parser.xpath` Handle floating-point times correctly + +### Dependency Updates + +- [#11861](https://github.com/influxdata/telegraf/pull/11861) Update dependencies for OpenBSD support +- [#11840](https://github.com/influxdata/telegraf/pull/11840) `deps` Bump k8s.io/apimachinery from 0.25.0 to 0.25.1 +- [#11844](https://github.com/influxdata/telegraf/pull/11844) `deps` Bump github.com/aerospike/aerospike-client-go/v5 from 5.9.0 to 5.10.0 +- [#11839](https://github.com/influxdata/telegraf/pull/11839) `deps` Bump github.com/nats-io/nats.go from 1.16.0 to 1.17.0 +- [#11836](https://github.com/influxdata/telegraf/pull/11836) `deps` Replace go-ping by pro-bing +- [#11887](https://github.com/influxdata/telegraf/pull/11887) `deps` Bump go.mongodb.org/mongo-driver from 1.10.1 to 1.10.2 +- [#11890](https://github.com/influxdata/telegraf/pull/11890) `deps` Bump github.com/aws/smithy-go from 1.13.2 to 1.13.3 +- [#11891](https://github.com/influxdata/telegraf/pull/11891) `deps` Bump github.com/rabbitmq/amqp091-go from 1.4.0 to 1.5.0 +- [#11893](https://github.com/influxdata/telegraf/pull/11893) `deps` Bump github.com/docker/distribution from v2.7.1 to v2.8.1 + +## v1.24.1 [2022-09-19] + +### Bugfixes + +- [#11787](https://github.com/influxdata/telegraf/pull/11787) Clear error message when provided config is not a text file +- [#11835](https://github.com/influxdata/telegraf/pull/11835) Enable global confirmation for installing mingw +- [#10797](https://github.com/influxdata/telegraf/pull/10797) `inputs.ceph` Modernize Ceph input plugin metrics +- [#11785](https://github.com/influxdata/telegraf/pull/11785) `inputs.modbus` Do not fail if a single slave reports errors +- [#11827](https://github.com/influxdata/telegraf/pull/11827) `inputs.ntpq` Handle pools with "-" when +- [#11825](https://github.com/influxdata/telegraf/pull/11825) `parsers.csv` Remove direct checks for the parser type +- [#11781](https://github.com/influxdata/telegraf/pull/11781) `parsers.xpath` Add array index when expanding names. +- [#11815](https://github.com/influxdata/telegraf/pull/11815) `parsers` Memory leak for plugins using ParserFunc. 
+- [#11826](https://github.com/influxdata/telegraf/pull/11826) `parsers` Unwrap parser and remove some special handling + +### Features + +- [#11228](https://github.com/influxdata/telegraf/pull/11228) `processors.parser` Add option to parse tags + +### Dependency Updates + +- [#11788](https://github.com/influxdata/telegraf/pull/11788) `deps` Bump cloud.google.com/go/pubsub from 1.24.0 to 1.25.1 +- [#11794](https://github.com/influxdata/telegraf/pull/11794) `deps` Bump github.com/urfave/cli/v2 from 2.14.1 to 2.16.3 +- [#11789](https://github.com/influxdata/telegraf/pull/11789) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 +- [#11799](https://github.com/influxdata/telegraf/pull/11799) `deps` Bump github.com/wavefronthq/wavefront-sdk-go +- [#11796](https://github.com/influxdata/telegraf/pull/11796) `deps` Bump cloud.google.com/go/bigquery from 1.33.0 to 1.40.0 + +## v1.24.0 [2022-09-12] + +### Bugfixes + +- [#11779](https://github.com/influxdata/telegraf/pull/11779) Add missing entry json_transformation to missingTomlField +- [#11288](https://github.com/influxdata/telegraf/pull/11288) Add reset-mode flag for CSV parser +- [#11512](https://github.com/influxdata/telegraf/pull/11512) Add version number to MacOS packages +- [#11489](https://github.com/influxdata/telegraf/pull/11489) Backport sync sample.conf and README.md files +- [#11777](https://github.com/influxdata/telegraf/pull/11777) Do not error out for parsing errors in datadog mode +- [#11521](https://github.com/influxdata/telegraf/pull/11521) Make docs & go.mod cleanup post-redis merge +- [#11656](https://github.com/influxdata/telegraf/pull/11656) Refactor telegraf version +- [#11563](https://github.com/influxdata/telegraf/pull/11563) Remove shell execution for license-checker +- [#11755](https://github.com/influxdata/telegraf/pull/11755) Sort labels in prometheusremotewrite serializer +- [#11440](https://github.com/influxdata/telegraf/pull/11440) Update prometheus parser to be a new style parser plugin +- [#11456](https://github.com/influxdata/telegraf/pull/11456) Update prometheusremotewrite parser to be a new style parser plugin +- [#10570](https://github.com/influxdata/telegraf/pull/10570) Use os-agnostic systemd detection, remove sysv in RPM packaging +- [#11615](https://github.com/influxdata/telegraf/pull/11615) `agent` Add flushBatch method +- [#11692](https://github.com/influxdata/telegraf/pull/11692) `inputs.jolokia2` Add optional origin header +- [#11629](https://github.com/influxdata/telegraf/pull/11629) `inputs.mongodb` Add an option to bypass connection errors on start +- [#11723](https://github.com/influxdata/telegraf/pull/11723) `inputs.opcua` Assign node id correctly +- [#11673](https://github.com/influxdata/telegraf/pull/11673) `inputs.prometheus` Plugin run outside k8s cluster error +- [#11701](https://github.com/influxdata/telegraf/pull/11701) `inputs.sqlserver` Fixing wrong filtering for sqlAzureMIRequests and sqlAzureDBRequests +- [#11471](https://github.com/influxdata/telegraf/pull/11471) `inputs.upsd` Move to new sample.conf style +- [#11613](https://github.com/influxdata/telegraf/pull/11613) `inputs.x509` Multiple sources with non-overlapping DNS entries +- [#11767](https://github.com/influxdata/telegraf/pull/11767) `outputs.execd` Fixing the execd behavior to not throw error when partially unserializable metrics are written +- [#11560](https://github.com/influxdata/telegraf/pull/11560) `outputs.wavefront` Update wavefront sdk and use non-deprecated APIs + +### Features + +-
[#11307](https://github.com/influxdata/telegraf/pull/11307) `serializers.csv` Add CSV serializer +- [#11054](https://github.com/influxdata/telegraf/pull/11054) `outputs.redistimeseries` Add RedisTimeSeries plugin +- [#7995](https://github.com/influxdata/telegraf/pull/7995) `outputs.stomp` Add Stomp (Active MQ) output plugin +- [#11300](https://github.com/influxdata/telegraf/pull/11300) Add default appType as config option to groundwork output +- [#11398](https://github.com/influxdata/telegraf/pull/11398) Add license checking tool +- [#11399](https://github.com/influxdata/telegraf/pull/11399) Add proxy support for outputs/cloudwatch +- [#11516](https://github.com/influxdata/telegraf/pull/11516) Added metrics for member and replica-set avg health of MongoDB +- [#11233](https://github.com/influxdata/telegraf/pull/11233) Adding aws metric streams input plugin +- [#9717](https://github.com/influxdata/telegraf/pull/9717) Allow collecting node-level metrics for Couchbase buckets +- [#11282](https://github.com/influxdata/telegraf/pull/11282) Make the command config a subcommand +- [#11367](https://github.com/influxdata/telegraf/pull/11367) Migrate collectd parser to new style +- [#11371](https://github.com/influxdata/telegraf/pull/11371) Migrate dropwizard parser to new style +- [#11381](https://github.com/influxdata/telegraf/pull/11381) Migrate form_urlencoded parser to new style +- [#11405](https://github.com/influxdata/telegraf/pull/11405) Migrate graphite parser to new style +- [#11408](https://github.com/influxdata/telegraf/pull/11408) Migrate grok to new parser style +- [#11432](https://github.com/influxdata/telegraf/pull/11432) Migrate influx and influx_upstream parsers to new style +- [#11226](https://github.com/influxdata/telegraf/pull/11226) Migrate json parser to new style +- [#11343](https://github.com/influxdata/telegraf/pull/11343) Migrate json_v2 parser to new style +- [#11366](https://github.com/influxdata/telegraf/pull/11366) Migrate logfmt parser to new style +- [#11402](https://github.com/influxdata/telegraf/pull/11402) Migrate nagios parser to new style +- [#11700](https://github.com/influxdata/telegraf/pull/11700) Migrate to urfave/cli +- [#11407](https://github.com/influxdata/telegraf/pull/11407) Migrate value parser to new style +- [#11374](https://github.com/influxdata/telegraf/pull/11374) Migrate wavefront parser to new style +- [#11373](https://github.com/influxdata/telegraf/pull/11373) `inputs.nats_consumer` Add simple support for jetstream subjects +- [#9015](https://github.com/influxdata/telegraf/pull/9015) `inputs.supervisor` Add Supervisord input plugin +- [#11524](https://github.com/influxdata/telegraf/pull/11524) Tool to build custom Telegraf builds +- [#11493](https://github.com/influxdata/telegraf/pull/11493) `common.tls` Implement minimum TLS version for clients +- [#11619](https://github.com/influxdata/telegraf/pull/11619) `external` Add nsdp external plugin +- [#9890](https://github.com/influxdata/telegraf/pull/9890) `inputs.upsd` Add upsd implementation +- [#11458](https://github.com/influxdata/telegraf/pull/11458) `inputs.cisco_telemetry_mdt` Add GRPC Keepalive/timeout config options +- [#11784](https://github.com/influxdata/telegraf/pull/11784) `inputs.directory_monitor` Support paths for files_to_ignore and files_to_monitor +- [#11773](https://github.com/influxdata/telegraf/pull/11773) `inputs.directory_monitor` Traverse sub-directories +- [#11220](https://github.com/influxdata/telegraf/pull/11220) `inputs.kafka_consumer` Option to set default fetch 
message bytes +- [#8988](https://github.com/influxdata/telegraf/pull/8988) `inputs.linux_cpu` Add plugin to collect CPU metrics on Linux +- [#9185](https://github.com/influxdata/telegraf/pull/9185) `inputs.logstash` Record number of failures +- [#11469](https://github.com/influxdata/telegraf/pull/11469) `inputs.modbus` Error out on requests with no fields defined +- [#11426](https://github.com/influxdata/telegraf/pull/11426) `inputs.mqtt_consumer` Add incoming mqtt message size calculation +- [#10874](https://github.com/influxdata/telegraf/pull/10874) `inputs.nginx_plus_api` Gather limit_reqs metrics +- [#11593](https://github.com/influxdata/telegraf/pull/11593) `inputs.ntpq` Add option to specify command flags +- [#11592](https://github.com/influxdata/telegraf/pull/11592) `inputs.ntpq` Add possibility to query remote servers +- [#11594](https://github.com/influxdata/telegraf/pull/11594) `inputs.ntpq` Allow to specify `reach` output format +- [#11572](https://github.com/influxdata/telegraf/pull/11572) `inputs.openstack` Add allow_reauth config option for openstack client +- [#11391](https://github.com/influxdata/telegraf/pull/11391) `inputs.smart` Collect SSD endurance information where available in smartctl +- [#11688](https://github.com/influxdata/telegraf/pull/11688) `inputs.sqlserver` Add db name to io stats for MI +- [#11709](https://github.com/influxdata/telegraf/pull/11709) `inputs.sqlserver` Improved filtering for active requests +- [#11518](https://github.com/influxdata/telegraf/pull/11518) `inputs.statsd` Add median timing calculation to statsd input plugin +- [#9440](https://github.com/influxdata/telegraf/pull/9440) `inputs.syslog` Log remote host as source tag +- [#11271](https://github.com/influxdata/telegraf/pull/11271) `inputs.x509_cert` Add smtp protocol +- [#11284](https://github.com/influxdata/telegraf/pull/11284) `output.mqtt` Add support for MQTT protocol version 5 +- [#11649](https://github.com/influxdata/telegraf/pull/11649) `outputs.amqp` Add proxy support +- [#11439](https://github.com/influxdata/telegraf/pull/11439) `outputs.graphite` Retry connecting to servers with failed send attempts +- [#11443](https://github.com/influxdata/telegraf/pull/11443) `outputs.groundwork` Improve metric parsing to extend output +- [#11557](https://github.com/influxdata/telegraf/pull/11557) `outputs.iotdb` Add new output plugin to support Apache IoTDB +- [#11672](https://github.com/influxdata/telegraf/pull/11672) `outputs.postgresql` Add Postgresql output +- [#11529](https://github.com/influxdata/telegraf/pull/11529) `outputs.redistimeseries` Add integration test +- [#11551](https://github.com/influxdata/telegraf/pull/11551) `outputs.sql` Add settings for go sql.DB settings +- [#11251](https://github.com/influxdata/telegraf/pull/11251) `parsers.json` Allow JSONata based transformations in JSON serializer +- [#11558](https://github.com/influxdata/telegraf/pull/11558) `parsers.xpath` Add support for returning underlying data-types +- [#11306](https://github.com/influxdata/telegraf/pull/11306) `processors.starlark` Add starlark benchmark for tag-concatenation +- [#11475](https://github.com/influxdata/telegraf/pull/11475) `inputs.rabbitmq` Add support for head_message_timestamp metric +- [#9333](https://github.com/influxdata/telegraf/pull/9333) `inputs.redis` Add Redis 6 ACL auth support +- [#11690](https://github.com/influxdata/telegraf/pull/11690) `serializers.prometheus` Provide option to reduce payload size by removing HELP from payload +- 
[#9319](https://github.com/influxdata/telegraf/pull/9319) `proxy.x509_cert` Add proxy support + +### Dependency Updates + +- [#11671](https://github.com/influxdata/telegraf/pull/11671) Update github.com/jackc/pgx/v4 from 4.16.1 to 4.17.0 +- [#11669](https://github.com/influxdata/telegraf/pull/11669) Update github.com/Azure/go-autorest/autorest from 0.11.24 to 0.11.28 +- [#11670](https://github.com/influxdata/telegraf/pull/11670) Update github.com/aws/aws-sdk-go-v2/service/ec2 from 1.51.2 to 1.52.1 +- [#11675](https://github.com/influxdata/telegraf/pull/11675) Update github.com/urfave/cli/v2 from 2.3.0 to 2.11.2 +- [#11679](https://github.com/influxdata/telegraf/pull/11679) Update github.com/aws/aws-sdk-go-v2/service/timestreamwrite from 1.13.6 to 1.13.12 +- [#11695](https://github.com/influxdata/telegraf/pull/11695) Update github.com/aliyun/alibaba-cloud-sdk-go from 1.61.1695 to 1.61.1727 +- [#11676](https://github.com/influxdata/telegraf/pull/11676) Update go.mongodb.org/mongo-driver from 1.9.1 to 1.10.1 +- [#11710](https://github.com/influxdata/telegraf/pull/11710) Update github.com/wavefronthq/wavefront-sdk-go from 0.10.1 to 0.10.2 +- [#11711](https://github.com/influxdata/telegraf/pull/11711) Update github.com/aws/aws-sdk-go-v2/service/sts from 1.16.7 to 1.16.13 +- [#11716](https://github.com/influxdata/telegraf/pull/11716) Update github.com/aerospike/aerospike-client-go/v5 from 5.7.0 to 5.9.0 +- [#11717](https://github.com/influxdata/telegraf/pull/11717) Update github.com/hashicorp/consul/api from 1.13.1 to 1.14.0 +- [#11721](https://github.com/influxdata/telegraf/pull/11721) Update github.com/tidwall/gjson from 1.14.1 to 1.14.3 +- [#11699](https://github.com/influxdata/telegraf/pull/11699) Update github.com/rabbitmq/amqp091-go from 1.3.4 to 1.4.0 +- [#11743](https://github.com/influxdata/telegraf/pull/11743) Update github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.15.10 to 1.16.1 +- [#11744](https://github.com/influxdata/telegraf/pull/11744) Update github.com/gophercloud/gophercloud from 0.25.0 to 1.0.0 +- [#11745](https://github.com/influxdata/telegraf/pull/11745) Update k8s.io/client-go from 0.24.3 to 0.25.0 +- [#11747](https://github.com/influxdata/telegraf/pull/11747) Update github.com/aws/aws-sdk-go-v2/feature/ec2/imds from 1.12.11 to 1.12.13 +- [#11763](https://github.com/influxdata/telegraf/pull/11763) Update github.com/urfave/cli/v2 from 2.11.2 to 2.14.1 +- [#11764](https://github.com/influxdata/telegraf/pull/11764) Update gonum.org/v1/gonum from 0.11.0 to 0.12.0 +- [#11770](https://github.com/influxdata/telegraf/pull/11770) Update github.com/Azure/azure-kusto-go from 0.7.0 to 0.8.0 +- [#11746](https://github.com/influxdata/telegraf/pull/11746) Update google.golang.org/grpc from 1.48.0 to 1.49.0 + +### BREAKING CHANGES + +- [#11493](https://github.com/influxdata/telegraf/pull/11493) `common.tls` Set default minimum TLS version to v1.2 for security reasons on both server and client connections. This is a change from the previous defaults (TLS v1.0) on the server configuration and might break clients relying on older TLS versions. You can manually revert to older versions on a per-plugin basis using the `tls_min_version` option in the plugins that require it. + +## v1.23.4 [2022-08-16] + +### Bugfixes + +- [#11647](https://github.com/influxdata/telegraf/pull/11647) Bump github.com/lxc/lxd to be able to run tests +- [#11664](https://github.com/influxdata/telegraf/pull/11664) Sync sql output and input build constraints to handle loong64 in go1.19.
+- [#10841](https://github.com/influxdata/telegraf/pull/10841) Updating credentials file to not use endpoint_url parameter +- [#10851](https://github.com/influxdata/telegraf/pull/10851) `inputs.cloudwatch` Customizable batch size when querying +- [#11577](https://github.com/influxdata/telegraf/pull/11577) `inputs.kube_inventory` Send file location to enable token auto-refresh +- [#11578](https://github.com/influxdata/telegraf/pull/11578) `inputs.kubernetes` Refresh token from file at each read +- [#11635](https://github.com/influxdata/telegraf/pull/11635) `inputs.mongodb` Update version check for newer versions +- [#11539](https://github.com/influxdata/telegraf/pull/11539) `inputs.opcua` Return an error with mismatched types +- [#11548](https://github.com/influxdata/telegraf/pull/11548) `inputs.sqlserver` Set lower deadlock priority +- [#11556](https://github.com/influxdata/telegraf/pull/11556) `inputs.stackdriver` Handle when no buckets available +- [#11576](https://github.com/influxdata/telegraf/pull/11576) `inputs` Linter issues +- [#11595](https://github.com/influxdata/telegraf/pull/11595) `outputs` Linter issues +- [#11607](https://github.com/influxdata/telegraf/pull/11607) `parsers` Linter issues + +### Features + +- [#11622](https://github.com/influxdata/telegraf/pull/11622) Add coralogix dialect to opentelemetry + +### Dependency Updates + +- [#11412](https://github.com/influxdata/telegraf/pull/11412) `deps` Bump github.com/testcontainers/testcontainers-go from 0.12.0 to 0.13.0 +- [#11565](https://github.com/influxdata/telegraf/pull/11565) `deps` Bump github.com/apache/thrift from 0.15.0 to 0.16.0 +- [#11567](https://github.com/influxdata/telegraf/pull/11567) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.46.0 to 1.51.0 +- [#11494](https://github.com/influxdata/telegraf/pull/11494) `deps` Update all go.opentelemetry.io dependencies +- [#11569](https://github.com/influxdata/telegraf/pull/11569) `deps` Bump github.com/go-ldap/ldap/v3 from 3.4.1 to 3.4.4 +- [#11574](https://github.com/influxdata/telegraf/pull/11574) `deps` Bump github.com/karrick/godirwalk from 1.16.1 to 1.17.0 +- [#11568](https://github.com/influxdata/telegraf/pull/11568) `deps` Bump github.com/vmware/govmomi from 0.28.0 to 0.29.0 +- [#11347](https://github.com/influxdata/telegraf/pull/11347) `deps` Bump github.com/eclipse/paho.mqtt.golang from 1.3.5 to 1.4.1 +- [#11580](https://github.com/influxdata/telegraf/pull/11580) `deps` Bump github.com/shirou/gopsutil/v3 from 3.22.4 to 3.22.7 +- [#11582](https://github.com/influxdata/telegraf/pull/11582) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs +- [#11583](https://github.com/influxdata/telegraf/pull/11583) `deps` Bump github.com/Azure/go-autorest/autorest/adal +- [#11581](https://github.com/influxdata/telegraf/pull/11581) `deps` Bump github.com/pion/dtls/v2 from 2.0.13 to 2.1.5 +- [#11590](https://github.com/influxdata/telegraf/pull/11590) `deps` Bump github.com/Azure/azure-event-hubs-go/v3 +- [#11586](https://github.com/influxdata/telegraf/pull/11586) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch +- [#11585](https://github.com/influxdata/telegraf/pull/11585) `deps` Bump github.com/aws/aws-sdk-go-v2/service/kinesis +- [#11584](https://github.com/influxdata/telegraf/pull/11584) `deps` Bump github.com/aws/aws-sdk-go-v2/service/dynamodb +- [#11598](https://github.com/influxdata/telegraf/pull/11598) `deps` Bump github.com/signalfx/golib/v3 from 3.3.43 to 3.3.45 +- [#11605](https://github.com/influxdata/telegraf/pull/11605) `deps` 
Update github.com/BurntSushi/toml from 0.4.1 to 1.2.0 +- [#11604](https://github.com/influxdata/telegraf/pull/11604) `deps` Update cloud.google.com/go/pubsub from 1.23.0 to 1.24.0 +- [#11602](https://github.com/influxdata/telegraf/pull/11602) `deps` Update k8s.io/apimachinery from 0.24.2 to 0.24.3 +- [#11603](https://github.com/influxdata/telegraf/pull/11603) `deps` Update github.com/Shopify/sarama from 1.34.1 to 1.35.0 +- [#11616](https://github.com/influxdata/telegraf/pull/11616) `deps` Bump github.com/sirupsen/logrus from 1.8.1 to 1.9.0 +- [#11636](https://github.com/influxdata/telegraf/pull/11636) `deps` Bump github.com/emicklei/go-restful from v2.9.5+incompatible to v3.8.0 +- [#11641](https://github.com/influxdata/telegraf/pull/11641) `deps` Bump github.com/hashicorp/consul/api from 1.12.0 to 1.13.1 +- [#11640](https://github.com/influxdata/telegraf/pull/11640) `deps` Bump github.com/prometheus/client_golang from 1.12.2 to 1.13.0 +- [#11643](https://github.com/influxdata/telegraf/pull/11643) `deps` Bump google.golang.org/api from 0.85.0 to 0.91.0 +- [#11644](https://github.com/influxdata/telegraf/pull/11644) `deps` Bump github.com/antchfx/xmlquery from 1.3.9 to 1.3.12 +- [#11651](https://github.com/influxdata/telegraf/pull/11651) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 +- [#11652](https://github.com/influxdata/telegraf/pull/11652) `deps` Bump github.com/aws/aws-sdk-go-v2/feature/ec2/imds +- [#11653](https://github.com/influxdata/telegraf/pull/11653) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs + +## v1.23.3 [2022-07-25] + +### Bugfixes + +- [#11481](https://github.com/influxdata/telegraf/pull/11481) `inputs.openstack` Use v3 volume library +- [#11482](https://github.com/influxdata/telegraf/pull/11482) `common.cookie` Use reader over readcloser, regen cookie-jar at reauth +- [#11527](https://github.com/influxdata/telegraf/pull/11527) `inputs.mqtt_consumer` Topic parsing error when topic having prefix '/' +- [#11534](https://github.com/influxdata/telegraf/pull/11534) `inputs.snmp_trap` Nil map panic when use snmp_trap with netsnmp translator +- [#11522](https://github.com/influxdata/telegraf/pull/11522) `inputs.sqlserver` Set lower deadlock priority on queries +- [#11486](https://github.com/influxdata/telegraf/pull/11486) `parsers.prometheus` Histogram infinity bucket must be always present + +### Dependency Updates + +- [#11461](https://github.com/influxdata/telegraf/pull/11461) Bump github.com/antchfx/jsonquery from 1.1.5 to 1.2.0 + +## v1.23.2 [2022-07-11] + +### Bugfixes + +- [#11460](https://github.com/influxdata/telegraf/pull/11460) Deprecation warnings for non-deprecated packages +- [#11472](https://github.com/influxdata/telegraf/pull/11472) `common.http` Allow 201 for cookies, update header docs +- [#11448](https://github.com/influxdata/telegraf/pull/11448) `inputs.sqlserver` Use bigint for backupsize in sqlserver +- [#11011](https://github.com/influxdata/telegraf/pull/11011) `inputs.gnmi` Refactor tag-only subs for complex keys +- [#10331](https://github.com/influxdata/telegraf/pull/10331) `inputs.snmp` Snmp UseUnconnectedUDPSocket when using udp + +### Dependency Updates + +- [#11438](https://github.com/influxdata/telegraf/pull/11438) Bump github.com/docker/docker from 20.10.14 to 20.10.17 + +## v1.23.1 [2022-07-05] + +### Bugfixes + +- [#11335](https://github.com/influxdata/telegraf/pull/11335) Bring back old xpath section names +- [#9315](https://github.com/influxdata/telegraf/pull/9315) `inputs.rabbitmq` Don't require listeners to be present 
in overview +- [#11280](https://github.com/influxdata/telegraf/pull/11280) Filter out views in mongodb lookup +- [#11311](https://github.com/influxdata/telegraf/pull/11311) Fix race condition in configuration and prevent concurrent map writes to c.UnusedFields +- [#11397](https://github.com/influxdata/telegraf/pull/11397) Resolve jolokia2 panic on null response +- [#11276](https://github.com/influxdata/telegraf/pull/11276) Restore sample configurations broken during initial migration +- [#11413](https://github.com/influxdata/telegraf/pull/11413) Sync back sample.confs for inputs.couchbase and outputs.groundwork. + +### Dependency Updates + +- [#11295](https://github.com/influxdata/telegraf/pull/11295) Bump cloud.google.com/go/monitoring from 1.2.0 to 1.5.0 +- [#11297](https://github.com/influxdata/telegraf/pull/11297) Bump github.com/aws/aws-sdk-go-v2/credentials from 1.12.2 to 1.12.5 +- [#11318](https://github.com/influxdata/telegraf/pull/11318) Bump google.golang.org/grpc from 1.46.2 to 1.47.0 +- [#11223](https://github.com/influxdata/telegraf/pull/11223) Bump k8s.io/client-go from 0.23.3 to 0.24.1 +- [#11299](https://github.com/influxdata/telegraf/pull/11299) Bump github.com/go-logfmt/logfmt from 0.5.0 to 0.5.1 +- [#11328](https://github.com/influxdata/telegraf/pull/11328) Bump github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.15.3 to 1.15.7 +- [#11320](https://github.com/influxdata/telegraf/pull/11320) Bump go.mongodb.org/mongo-driver from 1.9.0 to 1.9.1 +- [#11321](https://github.com/influxdata/telegraf/pull/11321) Bump github.com/gophercloud/gophercloud from 0.24.0 to 0.25.0 +- [#11338](https://github.com/influxdata/telegraf/pull/11338) Bump google.golang.org/api from 0.74.0 to 0.84.0 +- [#11340](https://github.com/influxdata/telegraf/pull/11340) Bump github.com/fatih/color from 1.10.0 to 1.13.0 +- [#11322](https://github.com/influxdata/telegraf/pull/11322) Bump github.com/aws/aws-sdk-go-v2/service/timestreamwrite from 1.3.2 to 1.13.6 +- [#11319](https://github.com/influxdata/telegraf/pull/11319) Bump github.com/Shopify/sarama from 1.32.0 to 1.34.1 +- [#11342](https://github.com/influxdata/telegraf/pull/11342) Bump github.com/dynatrace-oss/dynatrace-metric-utils-go from 0.3.0 to 0.5.0 +- [#11339](https://github.com/influxdata/telegraf/pull/11339) Bump github.com/nats-io/nats.go from 1.15.0 to 1.16.0 +- [#11349](https://github.com/influxdata/telegraf/pull/11349) Bump cloud.google.com/go/pubsub from 1.18.0 to 1.22.2 +- [#11369](https://github.com/influxdata/telegraf/pull/11369) Bump go.opentelemetry.io/collector/pdata from 0.52.0 to 0.54.0 +- [#11346](https://github.com/influxdata/telegraf/pull/11346) Bump github.com/jackc/pgx/v4 from 4.15.0 to 4.16.1 +- [#11379](https://github.com/influxdata/telegraf/pull/11379) Bump cloud.google.com/go/bigquery from 1.8.0 to 1.33.0 +- [#11378](https://github.com/influxdata/telegraf/pull/11378) Bump github.com/Azure/azure-kusto-go from 0.6.0 to 0.7.0 +- [#11394](https://github.com/influxdata/telegraf/pull/11394) Bump cloud.google.com/go/pubsub from 1.22.2 to 1.23.0 +- [#11380](https://github.com/influxdata/telegraf/pull/11380) Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.13.0 to 1.15.7 +- [#11382](https://github.com/influxdata/telegraf/pull/11382) Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.1.0 to 1.46.0 +- [#11395](https://github.com/influxdata/telegraf/pull/11395) Bump github.com/golang-jwt/jwt/v4 from 4.4.1 to 4.4.2 +- [#11396](https://github.com/influxdata/telegraf/pull/11396) Bump github.com/vmware/govmomi from 0.27.3 to 
0.28.0 +- [#11415](https://github.com/influxdata/telegraf/pull/11415) Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.15.4 to 1.15.8 +- [#11416](https://github.com/influxdata/telegraf/pull/11416) Bump github.com/influxdata/influxdb-observability/otel2influx from 0.2.21 to 0.2.22 +- [#11434](https://github.com/influxdata/telegraf/pull/11434) Bump k8s.io/api from 0.24.1 to 0.24.2 +- [#11437](https://github.com/influxdata/telegraf/pull/11437) Bump github.com/prometheus/client_golang from 1.12.1 to 1.12.2 + +## v1.23.0 [2022-06-13] + +### Bugfixes + +- [#11272](https://github.com/influxdata/telegraf/pull/11272) Add missing build constraints for sqlite +- [#11253](https://github.com/influxdata/telegraf/pull/11253) Always build README-embedder for host-architecture +- [#11140](https://github.com/influxdata/telegraf/pull/11140) Avoid calling sadc with invalid 0 interval +- [#11093](https://github.com/influxdata/telegraf/pull/11093) Check net.Listen() error in tests +- [#11181](https://github.com/influxdata/telegraf/pull/11181) Convert slab plugin to new sample.conf. +- [#10979](https://github.com/influxdata/telegraf/pull/10979) Datadog count metrics +- [#11044](https://github.com/influxdata/telegraf/pull/11044) Deprecate useless database config option +- [#11150](https://github.com/influxdata/telegraf/pull/11150) Doc interval setting for internet speed plugin +- [#11120](https://github.com/influxdata/telegraf/pull/11120) Elasticsearch output float handling test +- [#11151](https://github.com/influxdata/telegraf/pull/11151) Improve slab testing without sudo. +- [#10995](https://github.com/influxdata/telegraf/pull/10995) Log instance name in skip warnings +- [#11069](https://github.com/influxdata/telegraf/pull/11069) Output erroneous namespace and continue instead of error out +- [#11237](https://github.com/influxdata/telegraf/pull/11237) Re-add event to splunk serializer +- [#11143](https://github.com/influxdata/telegraf/pull/11143) Redis plugin goroutine leak triggered by auto reload config mechanism +- [#11082](https://github.com/influxdata/telegraf/pull/11082) Remove any content type from prometheus accept header +- [#11261](https://github.com/influxdata/telegraf/pull/11261) Remove full access permissions +- [#11179](https://github.com/influxdata/telegraf/pull/11179) Search services file in /etc/services and fall back to /usr/etc/services +- [#11217](https://github.com/influxdata/telegraf/pull/11217) Update sample.conf for prometheus +- [#11241](https://github.com/influxdata/telegraf/pull/11241) Upgrade xpath and fix code +- [#11083](https://github.com/influxdata/telegraf/pull/11083) Use readers over closers in http input +- [#11149](https://github.com/influxdata/telegraf/pull/11149) `inputs.burrow` Move Dialer to variable and run `make fmt` +- [#10812](https://github.com/influxdata/telegraf/pull/10812) `outputs.sql` Table existence cache + +### Features + +- [#10880](https://github.com/influxdata/telegraf/pull/10880) Add ANSI color filter for tail input plugin +- [#11188](https://github.com/influxdata/telegraf/pull/11188) Add constant 'algorithm' to the mock plugin +- [#11159](https://github.com/influxdata/telegraf/pull/11159) Add external huebridge input plugin +- [#11076](https://github.com/influxdata/telegraf/pull/11076) Add field key option to set event partition key +- [#10818](https://github.com/influxdata/telegraf/pull/10818) Add fritzbox as external plugin +- [#11037](https://github.com/influxdata/telegraf/pull/11037) Add influx semantic commits checker, checks only 
last commit. +- [#11039](https://github.com/influxdata/telegraf/pull/11039) Add mount option filtering to disk plugin +- [#11075](https://github.com/influxdata/telegraf/pull/11075) Add slab metrics input plugin +- [#11056](https://github.com/influxdata/telegraf/pull/11056) Allow other fluentd metrics apart from retry_count, buffer_queu… +- [#10918](https://github.com/influxdata/telegraf/pull/10918) Artifactory Webhook Receiver +- [#11000](https://github.com/influxdata/telegraf/pull/11000) Create and push nightly docker images to quay.io +- [#11102](https://github.com/influxdata/telegraf/pull/11102) Do not error if no nodes found for current config with xpath parser +- [#10886](https://github.com/influxdata/telegraf/pull/10886) Generate the plugins sample config +- [#11084](https://github.com/influxdata/telegraf/pull/11084) Google API Auth +- [#10607](https://github.com/influxdata/telegraf/pull/10607) In Lustre input plugin, support collecting per-client stats. +- [#10912](https://github.com/influxdata/telegraf/pull/10912) Migrate aggregator plugins to new sample config format +- [#10924](https://github.com/influxdata/telegraf/pull/10924) Migrate input plugins to new sample config format (A-L) +- [#10926](https://github.com/influxdata/telegraf/pull/10926) Migrate input plugins to new sample config format (M-Z) +- [#10910](https://github.com/influxdata/telegraf/pull/10910) Migrate output plugins to new sample config format +- [#10913](https://github.com/influxdata/telegraf/pull/10913) Migrate processor plugins to new sample config format +- [#11218](https://github.com/influxdata/telegraf/pull/11218) Migrate xpath parser to new style +- [#10885](https://github.com/influxdata/telegraf/pull/10885) Update etc/telegraf.conf and etc/telegraf_windows.conf +- [#6948](https://github.com/influxdata/telegraf/pull/6948) `inputs.burrow` fill more http transport parameters +- [#11141](https://github.com/influxdata/telegraf/pull/11141) `inputs.cpu` Add tags with core id or physical id to cpus +- [#7896](https://github.com/influxdata/telegraf/pull/7896) `inputs.mongodb` Add metrics about files currently open and currently active data handles +- [#10448](https://github.com/influxdata/telegraf/pull/10448) `inputs.nginx_plus_api` Gather slab metrics +- [#11216](https://github.com/influxdata/telegraf/pull/11216) `inputs.sqlserver` Update query store and latch performance counters +- [#10574](https://github.com/influxdata/telegraf/pull/10574) `inputs.vsphere` Collect resource pools metrics and add resource pool tag in VM metrics +- [#11035](https://github.com/influxdata/telegraf/pull/11035) `inputs.intel_powerstat` Add Max Turbo Frequency and introduce improvements +- [#11254](https://github.com/influxdata/telegraf/pull/11254) `inputs.intel_powerstat` Add uncore frequency metrics +- [#10954](https://github.com/influxdata/telegraf/pull/10954) `outputs.http` Support configuration of `MaxIdleConns` and `MaxIdleConnsPerHost` +- [#10853](https://github.com/influxdata/telegraf/pull/10853) `outputs.elasticsearch` Add healthcheck timeout + +### Dependency Updates + +- [#10970](https://github.com/influxdata/telegraf/pull/10970) Update github.com/wavefronthq/wavefront-sdk-go from 0.9.10 to 0.9.11 +- [#11166](https://github.com/influxdata/telegraf/pull/11166) Update github.com/aws/aws-sdk-go-v2/config from 1.15.3 to 1.15.7 +- [#11021](https://github.com/influxdata/telegraf/pull/11021) Update github.com/sensu/sensu-go/api/core/v2 from 2.13.0 to 2.14.0 +- [#11088](https://github.com/influxdata/telegraf/pull/11088) Update 
go.opentelemetry.io/otel/metric from 0.28.0 to 0.30.0 +- [#11221](https://github.com/influxdata/telegraf/pull/11221) Update github.com/nats-io/nats-server/v2 from 2.7.4 to 2.8.4 +- [#11191](https://github.com/influxdata/telegraf/pull/11191) Update golangci-lint from v1.45.2 to v1.46.2 +- [#11107](https://github.com/influxdata/telegraf/pull/11107) Update gopsutil from v3.22.3 to v3.22.4 to allow for HOST_PROC_MOUNTINFO. +- [#11242](https://github.com/influxdata/telegraf/pull/11242) Update moby/ipvs dependency from v1.0.1 to v1.0.2 +- [#11260](https://github.com/influxdata/telegraf/pull/11260) Update modernc.org/sqlite from v1.10.8 to v1.17.3 +- [#11266](https://github.com/influxdata/telegraf/pull/11266) Update github.com/containerd/containerd from v1.5.11 to v1.5.13 +- [#11264](https://github.com/influxdata/telegraf/pull/11264) Update github.com/tidwall/gjson from 1.10.2 to 1.14.1 + +## v1.22.4 [2022-05-16] + +### Bugfixes + +- [#11045](https://github.com/influxdata/telegraf/pull/11045) `inputs.couchbase` Do not assume metrics will all be of the same length +- [#11043](https://github.com/influxdata/telegraf/pull/11043) `inputs.statsd` Do not error when closing statsd network connection +- [#11030](https://github.com/influxdata/telegraf/pull/11030) `outputs.azure_monitor` Re-init azure monitor http client on context deadline error +- [#11078](https://github.com/influxdata/telegraf/pull/11078) `outputs.wavefront` If no "host" tag is provided do not add "telegraf.host" tag +- [#11042](https://github.com/influxdata/telegraf/pull/11042) Have telegraf service wait for network up in systemd packaging + +### Dependency Updates + +- [#10722](https://github.com/influxdata/telegraf/pull/10722) `inputs.internet_speed` Update github.com/showwin/speedtest-go from 1.1.4 to 1.1.5 +- [#11085](https://github.com/influxdata/telegraf/pull/11085) Update OpenTelemetry plugins to v0.51.0 + +## v1.22.3 [2022-04-28] + +### Bugfixes + +- [#10961](https://github.com/influxdata/telegraf/pull/10961) Update Go to 1.18.1 +- [#10976](https://github.com/influxdata/telegraf/pull/10976) `inputs.influxdb_listener` Remove duplicate influxdb listener writes with upstream parser +- [#11024](https://github.com/influxdata/telegraf/pull/11024) `inputs.gnmi` Use external xpath parser for gnmi +- [#10925](https://github.com/influxdata/telegraf/pull/10925) `inputs.system` Reduce log level in disk plugin back to original level + +## v1.22.2 [2022-04-25] + +### Bugfixes + +- [#11008](https://github.com/influxdata/telegraf/pull/11008) `inputs.gnmi` Add mutex to gnmi lookup map +- [#11010](https://github.com/influxdata/telegraf/pull/11010) `inputs.gnmi` Use sprint to cast to strings in gnmi +- [#11001](https://github.com/influxdata/telegraf/pull/11001) `inputs.consul_agent` Use correct auth token with consul_agent +- [#10486](https://github.com/influxdata/telegraf/pull/10486) `inputs.mysql` Add mariadb_dialect to address the MariaDB differences in INNODB_METRICS +- [#10923](https://github.com/influxdata/telegraf/pull/10923) `inputs.smart` Correctly parse various numeric forms +- [#10850](https://github.com/influxdata/telegraf/pull/10850) `inputs.aliyuncms` Ensure aliyuncms metrics accept array, fix discovery +- [#10930](https://github.com/influxdata/telegraf/pull/10930) `inputs.aerospike` Statistics query bug +- [#10947](https://github.com/influxdata/telegraf/pull/10947) `inputs.cisco_telemetry_mdt` Align the default value for msg size +- [#10959](https://github.com/influxdata/telegraf/pull/10959) `inputs.cisco_telemetry_mdt` Remove 
overly verbose info message from cisco mdt +- [#10958](https://github.com/influxdata/telegraf/pull/10958) `outputs.influxdb_v2` Improve influxdb_v2 error message +- [#10932](https://github.com/influxdata/telegraf/pull/10932) `inputs.prometheus` Moved from watcher to informer +- [#11013](https://github.com/influxdata/telegraf/pull/11013) Also allow 0 outputs when using test-wait parameter +- [#11015](https://github.com/influxdata/telegraf/pull/11015) Allow Makefile to work on Windows + +### Dependency Updates + +- [#10966](https://github.com/influxdata/telegraf/pull/10966) Update github.com/Azure/azure-kusto-go from 0.5.0 to 0.60 +- [#10963](https://github.com/influxdata/telegraf/pull/10963) Update opentelemetry from v0.2.10 to v0.2.17 +- [#10984](https://github.com/influxdata/telegraf/pull/10984) Update go.opentelemetry.io/collector/pdata from v0.48.0 to v0.49.0 +- [#10998](https://github.com/influxdata/telegraf/pull/10998) Update github.com/aws/aws-sdk-go-v2/config from 1.13.1 to 1.15.3 +- [#10997](https://github.com/influxdata/telegraf/pull/10997) Update github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs +- [#10975](https://github.com/influxdata/telegraf/pull/10975) Update github.com/aws/aws-sdk-go-v2/credentials from 1.8.0 to 1.11.2 +- [#10981](https://github.com/influxdata/telegraf/pull/10981) Update github.com/containerd/containerd from v1.5.9 to v1.5.11 +- [#10973](https://github.com/influxdata/telegraf/pull/10973) Update github.com/miekg/dns from 1.1.46 to 1.1.48 +- [#10974](https://github.com/influxdata/telegraf/pull/10974) Update github.com/gopcua/opcua from v0.3.1 to v0.3.3 +- [#10972](https://github.com/influxdata/telegraf/pull/10972) Update github.com/aws/aws-sdk-go-v2/service/dynamodb +- [#10773](https://github.com/influxdata/telegraf/pull/10773) Update github.com/xdg/scram from 1.0.3 to 1.0.5 +- [#10971](https://github.com/influxdata/telegraf/pull/10971) Update go.mongodb.org/mongo-driver from 1.8.3 to 1.9.0 +- [#10940](https://github.com/influxdata/telegraf/pull/10940) Update starlark 7a1108eaa012->d1966c6b9fcd + +## v1.22.1 [2022-04-06] + +### Bugfixes + +- [#10937](https://github.com/influxdata/telegraf/pull/10937) Update gonum.org/v1/gonum from 0.9.3 to 0.11.0 +- [#10906](https://github.com/influxdata/telegraf/pull/10906) Update github.com/golang-jwt/jwt/v4 from 4.2.0 to 4.4.1 +- [#10931](https://github.com/influxdata/telegraf/pull/10931) Update gopsutil and associated dependencies for improved OpenBSD support +- [#10553](https://github.com/influxdata/telegraf/pull/10553) `inputs.sqlserver` Fix inconsistencies in sql*Requests queries +- [#10883](https://github.com/influxdata/telegraf/pull/10883) `agent` Fix default value for logfile rotation interval +- [#10871](https://github.com/influxdata/telegraf/pull/10871) `inputs.zfs` Fix redundant zfs pool tag +- [#10903](https://github.com/influxdata/telegraf/pull/10903) `inputs.vsphere` Update vsphere info message to debug +- [#10866](https://github.com/influxdata/telegraf/pull/10866) `outputs.azure_monitor` Include body in error message +- [#10830](https://github.com/influxdata/telegraf/pull/10830) `processors.topk` Clarify the k and fields topk params +- [#10858](https://github.com/influxdata/telegraf/pull/10858) `outputs.http` Switch HTTP 100 test case values +- [#10859](https://github.com/influxdata/telegraf/pull/10859) `inputs.intel_pmu` Fix slow running intel-pmu test +- [#10860](https://github.com/influxdata/telegraf/pull/10860) `inputs.cloud_pubsub` Skip longer/integration tests on -short mode +- 
[#10861](https://github.com/influxdata/telegraf/pull/10861) `inputs.cloud_pubsub_push` Reduce timeouts and sleeps + +### New External Plugins + +- [#10462](https://github.com/influxdata/telegraf/pull/10462) `external.psi` Add psi plugin + +## v1.22.0 + +### Influx Line Protocol Parser + +There is an option to use a faster, more memory-efficient +implementation of the Influx Line Protocol parser. + +### SNMP Translator + +This version introduces an agent setting to select the method of +translating SNMP objects. The agent setting "snmp_translator" can be +"netsnmp" which translates by calling external programs snmptranslate +and snmptable, or "gosmi" which translates using the built-in gosmi +library. + +Before version 1.21.0, Telegraf only used the netsnmp method. Versions +1.21.0 through 1.21.4 only used the gosmi method. Since the +translation method is now configurable and "netsnmp" is the default, +users who wish to continue using "gosmi" must add `snmp_translator = "gosmi"` in the agent section of their config file. See +[#10802](https://github.com/influxdata/telegraf/pull/10802).
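For reference, a minimal sketch of the agent section showing only this setting (all other agent options omitted; values taken from the description above):

```toml
[agent]
  ## Method used to translate SNMP objects:
  ##   "netsnmp" (default) calls the external snmptranslate and snmptable programs
  ##   "gosmi" uses the built-in gosmi library (the 1.21.0 through 1.21.4 behavior)
  snmp_translator = "gosmi"
```

Leaving the setting out keeps the default "netsnmp" method.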
### New Input Plugins + +- [#3649](https://github.com/influxdata/telegraf/pull/3649) `inputs.socketstat` Add socketstat input plugin +- [#9697](https://github.com/influxdata/telegraf/pull/9697) `inputs.xtremio` Add xtremio input +- [#9782](https://github.com/influxdata/telegraf/pull/9782) `inputs.mock` Add mock input plugin +- [#10042](https://github.com/influxdata/telegraf/pull/10042) `inputs.redis_sentinel` Add redis sentinel input plugin +- [#10106](https://github.com/influxdata/telegraf/pull/10106) `inputs.nomad` Add nomad input plugin +- [#10198](https://github.com/influxdata/telegraf/pull/10198) `inputs.vault` Add vault input plugin +- [#10258](https://github.com/influxdata/telegraf/pull/10258) `inputs.consul_agent` Add consul agent input plugin +- [#10763](https://github.com/influxdata/telegraf/pull/10763) `inputs.hugepages` Add hugepages input plugin + +### New Processor Plugins + +- [#10057](https://github.com/influxdata/telegraf/pull/10057) `processors.noise` Add noise processor plugin + +### Features + +- [#9332](https://github.com/influxdata/telegraf/pull/9332) `agent` HTTP basic auth for webhooks +- [#10307](https://github.com/influxdata/telegraf/pull/10307) `agent` Improve error logging on plugin initialization +- [#10341](https://github.com/influxdata/telegraf/pull/10341) `agent` Check TLSConfig early to catch missing certificates +- [#10404](https://github.com/influxdata/telegraf/pull/10404) `agent` Support headers for http plugin with cookie auth +- [#10545](https://github.com/influxdata/telegraf/pull/10545) `agent` Add a collection offset implementation +- [#10559](https://github.com/influxdata/telegraf/pull/10559) `agent` Add autorestart and restartdelay flags to Windows service +- [#10515](https://github.com/influxdata/telegraf/pull/10515) `aggregators.histogram` Add config option to push only updated values +- [#10520](https://github.com/influxdata/telegraf/pull/10520) `aggregators.histogram` Add expiration option +- [#10137](https://github.com/influxdata/telegraf/pull/10137) `inputs.bond` Add additional stats to bond collector +- [#10382](https://github.com/influxdata/telegraf/pull/10382) `inputs.docker` Update docker client API version +- [#10575](https://github.com/influxdata/telegraf/pull/10575) `inputs.file` Allow for stateful parser handling +- [#7484](https://github.com/influxdata/telegraf/pull/7484) `inputs.gnmi` add dynamic tagging to gnmi plugin +- [#10220](https://github.com/influxdata/telegraf/pull/10220) `inputs.graylog` Add timeout setting option +- [#10530](https://github.com/influxdata/telegraf/pull/10530) `inputs.internet_speed` Add caching to internet_speed +- [#10243](https://github.com/influxdata/telegraf/pull/10243) `inputs.kibana` Add heap_size_limit field +- [#10641](https://github.com/influxdata/telegraf/pull/10641) `inputs.memcached` gather additional stats from memcached +- [#10642](https://github.com/influxdata/telegraf/pull/10642) `inputs.memcached` Support client TLS origination +- [#9279](https://github.com/influxdata/telegraf/pull/9279) `inputs.modbus` Support multiple slaves with gateway +- [#10231](https://github.com/influxdata/telegraf/pull/10231) `inputs.modbus` Add per-request tags +- [#10625](https://github.com/influxdata/telegraf/pull/10625) `inputs.mongodb` Add FsTotalSize and FsUsedSize fields +- [#10787](https://github.com/influxdata/telegraf/pull/10787) `inputs.nfsclient` Add new rtt per op field +- [#10705](https://github.com/influxdata/telegraf/pull/10705) `inputs.openweathermap` Add feels_like field +- [#9710](https://github.com/influxdata/telegraf/pull/9710) `inputs.postgresql` Add option to disable prepared statements for PostgreSQL +- [#10339](https://github.com/influxdata/telegraf/pull/10339) `inputs.snmp_trap` Deprecate unused snmp_trap timeout configuration option +- [#9671](https://github.com/influxdata/telegraf/pull/9671) `inputs.sql` Add ClickHouse driver to sql inputs/outputs plugins +- [#10466](https://github.com/influxdata/telegraf/pull/10466) `inputs.statsd` Add option to sanitize collected metric names +- [#9432](https://github.com/influxdata/telegraf/pull/9432) `inputs.varnish` Create option to reduce potentially high cardinality +- [#6501](https://github.com/influxdata/telegraf/pull/6501) `inputs.win_perf_counters` Implemented support for reading raw values, added tests and doc +- [#10535](https://github.com/influxdata/telegraf/pull/10535) `inputs.win_perf_counters` Allow errors to be ignored +- [#9822](https://github.com/influxdata/telegraf/pull/9822) `inputs.x509_cert` Add exclude_root_certs option to x509_cert plugin +- [#9963](https://github.com/influxdata/telegraf/pull/9963) `outputs.datadog` Add the option to use compression +- [#10505](https://github.com/influxdata/telegraf/pull/10505) `outputs.elasticsearch` Add elastic pipeline flags +- [#10499](https://github.com/influxdata/telegraf/pull/10499) `outputs.groundwork` Process group tags +- [#10186](https://github.com/influxdata/telegraf/pull/10186) `outputs.http` Add optional list of non retryable http status codes +- [#10202](https://github.com/influxdata/telegraf/pull/10202) `outputs.http` Support AWS managed service for prometheus +- [#8192](https://github.com/influxdata/telegraf/pull/8192) `outputs.kafka` Add socks5 proxy support +- [#10673](https://github.com/influxdata/telegraf/pull/10673) `outputs.sql` Add unsigned style config option +- [#10672](https://github.com/influxdata/telegraf/pull/10672) `outputs.websocket` Add socks5 proxy support +- [#10267](https://github.com/influxdata/telegraf/pull/10267) `parsers.csv` Add option to skip errors during parsing +- [#10749](https://github.com/influxdata/telegraf/pull/10749) `parsers.influx` Add new influx line protocol parser via feature flag +- [#10585](https://github.com/influxdata/telegraf/pull/10585) `parsers.xpath` Add tag batch-processing to XPath parser +- [#10316](https://github.com/influxdata/telegraf/pull/10316) `processors.template` Add more functionality to 
template processor +- [#10252](https://github.com/influxdata/telegraf/pull/10252) `serializers.wavefront` Add option to disable Wavefront prefix conversion + +### Bugfixes + +- [#10803](https://github.com/influxdata/telegraf/pull/10803) `agent` Update parsing logic of config.Duration to correctly require time and duration +- [#10814](https://github.com/influxdata/telegraf/pull/10814) `agent` Update the precision parameter default value +- [#10872](https://github.com/influxdata/telegraf/pull/10872) `agent` Change name of agent snmp translator setting +- [#10876](https://github.com/influxdata/telegraf/pull/10876) `inputs.consul_agent` Rename consul_metrics -> consul_agent +- [#10711](https://github.com/influxdata/telegraf/pull/10711) `inputs.docker` Keep data type of tasks_desired field consistent +- [#10083](https://github.com/influxdata/telegraf/pull/10083) `inputs.http` Add metadata support to CSV parser plugin +- [#10701](https://github.com/influxdata/telegraf/pull/10701) `inputs.mdstat` Fix parsing output when sync is less than 10% +- [#10385](https://github.com/influxdata/telegraf/pull/10385) `inputs.modbus` Re-enable OpenBSD modbus support +- [#10790](https://github.com/influxdata/telegraf/pull/10790) `inputs.ntpq` Correctly read ntpq long poll output with extra characters +- [#10384](https://github.com/influxdata/telegraf/pull/10384) `inputs.opcua` Accept non-standard OPC UA OK status by implementing a configurable workaround +- [#10465](https://github.com/influxdata/telegraf/pull/10465) `inputs.opcua` Add additional data to error messages +- [#10735](https://github.com/influxdata/telegraf/pull/10735) `inputs.snmp` Log err when loading mibs +- [#10748](https://github.com/influxdata/telegraf/pull/10748) `inputs.snmp` Use the correct path when evaluating symlink +- [#10802](https://github.com/influxdata/telegraf/pull/10802) `inputs.snmp` Add option to select translator +- [#10527](https://github.com/influxdata/telegraf/pull/10527) `inputs.system` Remove verbose logging from disk input plugin +- [#10706](https://github.com/influxdata/telegraf/pull/10706) `outputs.influxdb_v2` Include influxdb bucket name in error messages +- [#10623](https://github.com/influxdata/telegraf/pull/10623) `outputs.groundwork` Set NextCheckTime to LastCheckTime to avoid GroundWork inventing a value +- [#10749](https://github.com/influxdata/telegraf/pull/10749) `parsers.influx` Add new influx line protocol parser via feature flag +- [#10777](https://github.com/influxdata/telegraf/pull/10777) `parsers.json_v2` Allow multiple optional objects +- [#10799](https://github.com/influxdata/telegraf/pull/10799) `parsers.json_v2` Check if gpath exists and support optional in fields/tags +- [#10798](https://github.com/influxdata/telegraf/pull/10798) `parsers.xpath` Correctly handle imports in protocol-buffer definitions +- [#10602](https://github.com/influxdata/telegraf/pull/10602) Update github.com/aws/aws-sdk-go-v2/service/sts from 1.7.2 to 1.14.0 +- [#10604](https://github.com/influxdata/telegraf/pull/10604) Update github.com/aerospike/aerospike-client-go from 1.27.0 to 5.7.0 +- [#10686](https://github.com/influxdata/telegraf/pull/10686) Update github.com/sleepinggenius2/gosmi from v0.4.3 to v0.4.4 +- [#10692](https://github.com/influxdata/telegraf/pull/10692) Update github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.5.0 to 1.13.0 +- [#10693](https://github.com/influxdata/telegraf/pull/10693) Update github.com/gophercloud/gophercloud from 0.16.0 to 0.24.0 +- 
[#10702](https://github.com/influxdata/telegraf/pull/10702) Update github.com/jackc/pgx/v4 from 4.14.1 to 4.15.0 +- [#10704](https://github.com/influxdata/telegraf/pull/10704) Update github.com/sensu/sensu-go/api/core/v2 from 2.12.0 to 2.13.0 +- [#10713](https://github.com/influxdata/telegraf/pull/10713) Update k8s.io/api from 0.23.3 to 0.23.4 +- [#10714](https://github.com/influxdata/telegraf/pull/10714) Update cloud.google.com/go/pubsub from 1.17.1 to 1.18.0 +- [#10715](https://github.com/influxdata/telegraf/pull/10715) Update github.com/newrelic/newrelic-telemetry-sdk-go from 0.5.1 to 0.8.1 +- [#10717](https://github.com/influxdata/telegraf/pull/10717) Update github.com/ClickHouse/clickhouse-go from 1.5.1 to 1.5.4 +- [#10718](https://github.com/influxdata/telegraf/pull/10718) Update github.com/wavefronthq/wavefront-sdk-go from 0.9.9 to 0.9.10 +- [#10719](https://github.com/influxdata/telegraf/pull/10719) Update github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.12.0 to 1.13.0 +- [#10720](https://github.com/influxdata/telegraf/pull/10720) Update github.com/aws/aws-sdk-go-v2/config from 1.8.3 to 1.13.1 +- [#10721](https://github.com/influxdata/telegraf/pull/10721) Update github.com/aws/aws-sdk-go-v2/feature/ec2/imds from 1.6.0 to 1.10.0 +- [#10728](https://github.com/influxdata/telegraf/pull/10728) Update github.com/testcontainers/testcontainers-go from 0.11.1 to 0.12.0 +- [#10751](https://github.com/influxdata/telegraf/pull/10751) Update github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.13.0 to 1.14.0 +- [#10752](https://github.com/influxdata/telegraf/pull/10752) Update github.com/nats-io/nats-server/v2 from 2.7.2 to 2.7.3 +- [#10757](https://github.com/influxdata/telegraf/pull/10757) Update github.com/miekg/dns from 1.1.43 to 1.1.46 +- [#10758](https://github.com/influxdata/telegraf/pull/10758) Update github.com/shirou/gopsutil/v3 from 3.21.12 to 3.22.2 +- [#10759](https://github.com/influxdata/telegraf/pull/10759) Update github.com/aws/aws-sdk-go-v2/feature/ec2/imds from 1.10.0 to 1.11.0 +- [#10772](https://github.com/influxdata/telegraf/pull/10772) Update github.com/Shopify/sarama from 1.29.1 to 1.32.0 +- [#10807](https://github.com/influxdata/telegraf/pull/10807) Update github.com/nats-io/nats-server/v2 from 2.7.3 to 2.7.4 + +## v1.21.4 [2022-02-16] + +### Bugfixes + +- [#10491](https://github.com/influxdata/telegraf/pull/10491) `inputs.docker` Update docker memory usage calculation +- [#10636](https://github.com/influxdata/telegraf/pull/10636) `inputs.ecs` Use current time as timestamp +- [#10551](https://github.com/influxdata/telegraf/pull/10551) `inputs.snmp` Ensure folders do not get loaded more than once +- [#10579](https://github.com/influxdata/telegraf/pull/10579) `inputs.win_perf_counters` Add deprecated warning and version to win_perf_counters option +- [#10635](https://github.com/influxdata/telegraf/pull/10635) `outputs.amqp` Check for nil client before closing in amqp +- [#10179](https://github.com/influxdata/telegraf/pull/10179) `outputs.azure_data_explorer` Lower RAM usage +- [#10513](https://github.com/influxdata/telegraf/pull/10513) `outputs.elasticsearch` Add scheme to fix error in sniffing option +- [#10657](https://github.com/influxdata/telegraf/pull/10657) `parsers.json_v2` Fix timestamp change during execution of json_v2 parser +- [#10618](https://github.com/influxdata/telegraf/pull/10618) `parsers.json_v2` Fix incorrect handling of json_v2 timestamp_path +- [#10468](https://github.com/influxdata/telegraf/pull/10468) `parsers.json_v2` Allow optional 
paths and handle wrong paths correctly +- [#10547](https://github.com/influxdata/telegraf/pull/10547) `serializers.prometheusremotewrite` Use the correct timestamp unit +- [#10647](https://github.com/influxdata/telegraf/pull/10647) Update all go.opentelemetry.io from 0.24.0 to 0.27.0 +- [#10652](https://github.com/influxdata/telegraf/pull/10652) Update github.com/signalfx/golib/v3 from 3.3.38 to 3.3.43 +- [#10653](https://github.com/influxdata/telegraf/pull/10653) Update github.com/aliyun/alibaba-cloud-sdk-go from 1.61.1004 to 1.61.1483 +- [#10503](https://github.com/influxdata/telegraf/pull/10503) Update github.com/denisenkom/go-mssqldb from 0.10.0 to 0.12.0 +- [#10626](https://github.com/influxdata/telegraf/pull/10626) Update github.com/gopcua/opcua from 0.2.3 to 0.3.1 +- [#10638](https://github.com/influxdata/telegraf/pull/10638) Update github.com/nats-io/nats-server/v2 from 2.6.5 to 2.7.2 +- [#10589](https://github.com/influxdata/telegraf/pull/10589) Update k8s.io/client-go from 0.22.2 to 0.23.3 +- [#10601](https://github.com/influxdata/telegraf/pull/10601) Update github.com/aws/aws-sdk-go-v2/service/kinesis from 1.6.0 to 1.13.0 +- [#10588](https://github.com/influxdata/telegraf/pull/10588) Update github.com/benbjohnson/clock from 1.1.0 to 1.3.0 +- [#10598](https://github.com/influxdata/telegraf/pull/10598) Update github.com/Azure/azure-kusto-go from 0.5.0 to 0.5.2 +- [#10571](https://github.com/influxdata/telegraf/pull/10571) Update github.com/vmware/govmomi from 0.27.2 to 0.27.3 +- [#10572](https://github.com/influxdata/telegraf/pull/10572) Update github.com/prometheus/client_golang from 1.11.0 to 1.12.1 +- [#10564](https://github.com/influxdata/telegraf/pull/10564) Update go.mongodb.org/mongo-driver from 1.7.3 to 1.8.3 +- [#10563](https://github.com/influxdata/telegraf/pull/10563) Update github.com/google/go-cmp from 0.5.6 to 0.5.7 +- [#10562](https://github.com/influxdata/telegraf/pull/10562) Update go.opentelemetry.io/collector/model from 0.39.0 to 0.43.2 +- [#10538](https://github.com/influxdata/telegraf/pull/10538) Update github.com/multiplay/go-ts3 from 1.0.0 to 1.0.1 +- [#10454](https://github.com/influxdata/telegraf/pull/10454) Update cloud.google.com/go/monitoring from 0.2.0 to 1.2.0 +- [#10536](https://github.com/influxdata/telegraf/pull/10536) Update github.com/vmware/govmomi from 0.26.0 to 0.27.2 + +### New External Plugins + +- [apt](https://github.com/x70b1/telegraf-apt) - contributed by @x70b1 +- [knot](https://github.com/x70b1/telegraf-knot) - contributed by @x70b1 + +## v1.21.3 [2022-01-27] + +### Bugfixes + +- [#10430](https://github.com/influxdata/telegraf/pull/10430) `inputs.snmp_trap` Fix translation of partially resolved OIDs +- [#10529](https://github.com/influxdata/telegraf/pull/10529) Update deprecation notices +- [#10525](https://github.com/influxdata/telegraf/pull/10525) Update grpc module to v1.44.0 +- [#10434](https://github.com/influxdata/telegraf/pull/10434) Update google.golang.org/api module from 0.54.0 to 0.65.0 +- [#10507](https://github.com/influxdata/telegraf/pull/10507) Update antchfx/xmlquery module from 1.3.6 to 1.3.9 +- [#10521](https://github.com/influxdata/telegraf/pull/10521) Update nsqio/go-nsq module from 1.0.8 to 1.1.0 +- [#10506](https://github.com/influxdata/telegraf/pull/10506) Update prometheus/common module from 0.31.1 to 0.32.1 +- [#10474](https://github.com/influxdata/telegraf/pull/10474) `inputs.ipset` Fix panic when command not found +- [#10504](https://github.com/influxdata/telegraf/pull/10504) Update cloud.google.com/go/pubsub 
module from 1.17.0 to 1.17.1 +- [#10432](https://github.com/influxdata/telegraf/pull/10432) Update influxdata/influxdb-observability/influx2otel module from 0.2.8 to 0.2.10 +- [#10478](https://github.com/influxdata/telegraf/pull/10478) `inputs.opcua` Remove duplicate fields +- [#10473](https://github.com/influxdata/telegraf/pull/10473) `parsers.nagios` Log correct errors when executing commands +- [#10463](https://github.com/influxdata/telegraf/pull/10463) `inputs.execd` Add newline in execd for prometheus parsing +- [#10451](https://github.com/influxdata/telegraf/pull/10451) Update shirou/gopsutil/v3 module from 3.21.10 to 3.21.12 +- [#10453](https://github.com/influxdata/telegraf/pull/10453) Update jackc/pgx/v4 module from 4.6.0 to 4.14.1 +- [#10449](https://github.com/influxdata/telegraf/pull/10449) Update Azure/azure-event-hubs-go/v3 module from 3.3.13 to 3.3.17 +- [#10450](https://github.com/influxdata/telegraf/pull/10450) Update gosnmp/gosnmp module from 1.33.0 to 1.34.0 +- [#10442](https://github.com/influxdata/telegraf/pull/10442) `parsers.wavefront` Add missing setting wavefront_disable_prefix_conversion +- [#10435](https://github.com/influxdata/telegraf/pull/10435) Update hashicorp/consul/api module from 1.9.1 to 1.12.0 +- [#10436](https://github.com/influxdata/telegraf/pull/10436) Update antchfx/xpath module from 1.1.11 to 1.2.0 +- [#10433](https://github.com/influxdata/telegraf/pull/10433) Update antchfx/jsonquery module from 1.1.4 to 1.1.5 +- [#10414](https://github.com/influxdata/telegraf/pull/10414) Update prometheus/procfs module from 0.6.0 to 0.7.3 +- [#10354](https://github.com/influxdata/telegraf/pull/10354) `inputs.snmp` Fix panic when mibs folder doesn't exist (#10346) +- [#10393](https://github.com/influxdata/telegraf/pull/10393) `outputs.syslog` Correctly set ASCII trailer for syslog output +- [#10415](https://github.com/influxdata/telegraf/pull/10415) Update aws/aws-sdk-go-v2/service/cloudwatchlogs module from 1.5.2 to 1.12.0 +- [#10416](https://github.com/influxdata/telegraf/pull/10416) Update kardianos/service module from 1.0.0 to 1.2.1 +- [#10396](https://github.com/influxdata/telegraf/pull/10396) `inputs.http` Allow empty http body +- [#10417](https://github.com/influxdata/telegraf/pull/10417) Update couchbase/go-couchbase module from 0.1.0 to 0.1.1 +- [#10413](https://github.com/influxdata/telegraf/pull/10413) `parsers.json_v2` Fix timestamp precision when using unix_ns format +- [#10418](https://github.com/influxdata/telegraf/pull/10418) Update pion/dtls/v2 module from 2.0.9 to 2.0.13 +- [#10402](https://github.com/influxdata/telegraf/pull/10402) Update containerd/containerd module to 1.5.9 +- [#8947](https://github.com/influxdata/telegraf/pull/8947) `outputs.timestream` Fix batching logic with write records and introduce concurrent requests +- [#10360](https://github.com/influxdata/telegraf/pull/10360) `outputs.amqp` Avoid connection leak when writing error +- [#10097](https://github.com/influxdata/telegraf/pull/10097) `outputs.stackdriver` Send correct interval start times for counters + +## v1.21.2 [2022-01-05] + +### Release Notes + +Happy New Year! 
+ +### Features + +- Added arm64 macOS builds +- Added riscv64 Linux builds +- Numerous changes to CircleCI config to ensure more timely completion and clearer execution flow + +### Bugfixes + +- [#10318](https://github.com/influxdata/telegraf/pull/10318) `inputs.disk` Fix missing storage in containers +- [#10324](https://github.com/influxdata/telegraf/pull/10324) `inputs.dpdk` Add note about dpdk and socket availability +- [#10296](https://github.com/influxdata/telegraf/pull/10296) `inputs.logparser` Resolve panic in logparser due to missing Log +- [#10322](https://github.com/influxdata/telegraf/pull/10322) `inputs.snmp` Ensure module load order to avoid snmp marshal error +- [#10321](https://github.com/influxdata/telegraf/pull/10321) `inputs.snmp` Do not require networking during tests +- [#10303](https://github.com/influxdata/telegraf/pull/10303) `inputs.snmp` Resolve SNMP panic due to no gosmi module +- [#10295](https://github.com/influxdata/telegraf/pull/10295) `inputs.snmp` Grab MIB table columns more accurately +- [#10299](https://github.com/influxdata/telegraf/pull/10299) `inputs.snmp` Check index before assignment when floating :: exists to avoid panic +- [#10301](https://github.com/influxdata/telegraf/pull/10301) `inputs.snmp` Fix panic if no mibs folder is found +- [#10373](https://github.com/influxdata/telegraf/pull/10373) `inputs.snmp_trap` Document deprecation of timeout parameter +- [#10377](https://github.com/influxdata/telegraf/pull/10377) `parsers.csv` Add empty import of tzdata for Windows binaries to correctly set timezone +- [#10332](https://github.com/influxdata/telegraf/pull/10332) Update github.com/djherbis/times module from v1.2.0 to v1.5.0 +- [#10343](https://github.com/influxdata/telegraf/pull/10343) Update github.com/go-ldap/ldap/v3 module from v3.1.0 to v3.4.1 +- [#10255](https://github.com/influxdata/telegraf/pull/10255) Update github.com/gwos/tcg/sdk module from v0.0.0-20211130162655-32ad77586ccf to v0.0.0-20211223101342-35fbd1ae683c and improve logging + +## v1.21.1 [2021-12-16] + +### Bugfixes + +- [#10288](https://github.com/influxdata/telegraf/pull/10288) Fix panic in parsers due to missing Log for all plugins using SetParserFunc +- [#10247](https://github.com/influxdata/telegraf/pull/10247) Update go-sensu module to v2.12.0 +- [#10284](https://github.com/influxdata/telegraf/pull/10284) `inputs.openstack` Fix typo in openstack neutron input plugin (newtron) + +### Features + +- [#10239](https://github.com/influxdata/telegraf/pull/10239) Enable Darwin arm64 build +- [#10150](https://github.com/influxdata/telegraf/pull/10150) `inputs.smart` Add SMART plugin concurrency configuration option, nvme-cli v1.14+ support and lint fixes + +## v1.21.0 [2021-12-15] + +### Release Notes + +The signing for RPM digest has changed to use sha256 to improve security. Please see the pull request for more details: [#10272](https://github.com/influxdata/telegraf/pull/10272). + +Thank you to @zak-pawel for lots of linter fixes!
+ +### Bugfixes + +- [#10268](https://github.com/influxdata/telegraf/pull/10268) `inputs.snmp` Update snmp plugin to respect number of retries configured +- [#10225](https://github.com/influxdata/telegraf/pull/10225) `outputs.wavefront` Flush wavefront output sender on error to clean up broken connections +- [#9970](https://github.com/influxdata/telegraf/pull/9970) Restart Telegraf service if it is already running and upgraded via RPM +- [#10188](https://github.com/influxdata/telegraf/pull/10188) `parsers.xpath` Handle duplicate registration of protocol-buffer files gracefully +- [#10132](https://github.com/influxdata/telegraf/pull/10132) `inputs.http_listener_v2` Fix panic on close to check that Telegraf is closing +- [#10196](https://github.com/influxdata/telegraf/pull/10196) `outputs.elasticsearch` Implement NaN and inf handling for elasticsearch output +- [#10205](https://github.com/influxdata/telegraf/pull/10205) Print loaded plugins and deprecations for once and test flags +- [#10214](https://github.com/influxdata/telegraf/pull/10214) `processors.ifname` Eliminate MIB dependency for ifname processor +- [#10206](https://github.com/influxdata/telegraf/pull/10206) `inputs.snmp` Optimize locking for SNMP MIBs loading +- [#9975](https://github.com/influxdata/telegraf/pull/9975) `inputs.kube_inventory` Set TLS server name config properly +- [#10230](https://github.com/influxdata/telegraf/pull/10230) Sudden close of Telegraf caused by OPC UA input plugin +- [#9913](https://github.com/influxdata/telegraf/pull/9913) Update eclipse/paho.mqtt.golang module from 1.3.0 to 1.3.5 +- [#10221](https://github.com/influxdata/telegraf/pull/10221) `parsers.json_v2` Parser timestamp setting order +- [#10209](https://github.com/influxdata/telegraf/pull/10209) `outputs.graylog` Ensure graylog spec fields are not prefixed with _ +- [#10099](https://github.com/influxdata/telegraf/pull/10099) `inputs.zfs` Pool detection and metrics gathering for ZFS >= 2.1.x +- [#10007](https://github.com/influxdata/telegraf/pull/10007) `processors.ifname` Parallelism fix for ifname processor +- [#10208](https://github.com/influxdata/telegraf/pull/10208) `inputs.mqtt_consumer` MQTT topic extraction no longer requires all three fields +- [#9616](https://github.com/influxdata/telegraf/pull/9616) Windows Service - graceful shutdown of telegraf +- [#10203](https://github.com/influxdata/telegraf/pull/10203) Revert unintended corruption of the Makefile +- [#10112](https://github.com/influxdata/telegraf/pull/10112) `inputs.cloudwatch` Cloudwatch metrics collection +- [#10178](https://github.com/influxdata/telegraf/pull/10178) `outputs.all` Register bigquery to output plugins +- [#10165](https://github.com/influxdata/telegraf/pull/10165) `inputs.sysstat` Sysstat to use unique temp file vs hard-coded +- [#10046](https://github.com/influxdata/telegraf/pull/10046) Update nats-server to support openbsd +- [#10091](https://github.com/influxdata/telegraf/pull/10091) `inputs.prometheus` Check error before defer in prometheus k8s +- [#10101](https://github.com/influxdata/telegraf/pull/10101) `inputs.win_perf_counters` Add setting to win_perf_counters input to ignore localization +- [#10136](https://github.com/influxdata/telegraf/pull/10136) `inputs.snmp_trap` Remove snmptranslate from readme and fix default path +- [#10116](https://github.com/influxdata/telegraf/pull/10116) `inputs.statsd` Input plugin statsd parse error +- [#10131](https://github.com/influxdata/telegraf/pull/10131) Skip knxlistener when writing the sample config +- 
[#10119](https://github.com/influxdata/telegraf/pull/10119) `inputs.cpu` Update shirou/gopsutil from v2 to v3 +- [#10074](https://github.com/influxdata/telegraf/pull/10074) `outputs.graylog` Failing test due to port already in use +- [#9865](https://github.com/influxdata/telegraf/pull/9865) `inputs.directory_monitor` Directory monitor input plugin when data format is CSV and csv_skip_rows>0 and csv_header_row_count>=1 +- [#9862](https://github.com/influxdata/telegraf/pull/9862) `outputs.graylog` Graylog plugin TLS support and message format +- [#9908](https://github.com/influxdata/telegraf/pull/9908) `parsers.json_v2` Remove dead code +- [#9881](https://github.com/influxdata/telegraf/pull/9881) `outputs.graylog` Mute graylog UDP/TCP tests by marking them as integration +- [#9751](https://github.com/influxdata/telegraf/pull/9751) Update google.golang.org/grpc module from 1.39.1 to 1.40.0 + +### Features + +- [#10200](https://github.com/influxdata/telegraf/pull/10200) `aggregators.deprecations.go` Implement deprecation infrastructure +- [#9518](https://github.com/influxdata/telegraf/pull/9518) `inputs.snmp` Snmp to use gosmi +- [#10130](https://github.com/influxdata/telegraf/pull/10130) `outputs.influxdb_v2` Add retry to 413 errors with InfluxDB output +- [#10144](https://github.com/influxdata/telegraf/pull/10144) `inputs.win_services` Add exclude filter +- [#9995](https://github.com/influxdata/telegraf/pull/9995) `inputs.mqtt_consumer` Enable extracting tag values from MQTT topics +- [#9419](https://github.com/influxdata/telegraf/pull/9419) `aggregators.all` Add support of aggregator as Starlark script +- [#9561](https://github.com/influxdata/telegraf/pull/9561) `processors.regex` Extend regexp processor to allow renaming of measurements, tags and fields +- [#8184](https://github.com/influxdata/telegraf/pull/8184) `outputs.http` Add use_batch_format for HTTP output plugin +- [#9988](https://github.com/influxdata/telegraf/pull/9988) `inputs.kafka_consumer` Add max_processing_time config to Kafka Consumer input +- [#9841](https://github.com/influxdata/telegraf/pull/9841) `inputs.sqlserver` Add additional metrics to support elastic pool (sqlserver plugin) +- [#9910](https://github.com/influxdata/telegraf/pull/9910) `common.tls` Filter client certificates by DNS names +- [#9942](https://github.com/influxdata/telegraf/pull/9942) `outputs.azure_data_explorer` Add option to skip table creation in azure data explorer output +- [#9984](https://github.com/influxdata/telegraf/pull/9984) `processors.ifname` Add more details to log messages +- [#9833](https://github.com/influxdata/telegraf/pull/9833) `common.kafka` Add metadata full to config +- [#9876](https://github.com/influxdata/telegraf/pull/9876) Update etc/telegraf.conf and etc/telegraf_windows.conf +- [#9256](https://github.com/influxdata/telegraf/pull/9256) `inputs.modbus` Modbus connection settings (serial) +- [#9860](https://github.com/influxdata/telegraf/pull/9860) `inputs.directory_monitor` Add the ability to create and name a tag containing the filename using the directory monitor input plugin +- [#9740](https://github.com/influxdata/telegraf/pull/9740) `inputs.prometheus` Add ignore_timestamp option +- [#9513](https://github.com/influxdata/telegraf/pull/9513) `processors.starlark` Starlark processor example for processing sparkplug_b messages +- [#9449](https://github.com/influxdata/telegraf/pull/9449) `parsers.json_v2` Support defining field/tag tables within an object table +- [#9827](https://github.com/influxdata/telegraf/pull/9827) 
`inputs.elasticsearch_query` Add debug query output to elasticsearch_query +- [#9241](https://github.com/influxdata/telegraf/pull/9241) `inputs.snmp` Telegraf to merge tables with different indexes +- [#9013](https://github.com/influxdata/telegraf/pull/9013) `inputs.opcua` Allow user to select the source for the metric timestamp. +- [#9706](https://github.com/influxdata/telegraf/pull/9706) `inputs.puppetagent` Add measurements from puppet 5 +- [#9644](https://github.com/influxdata/telegraf/pull/9644) `outputs.graylog` Add graylog plugin TCP support +- [#8229](https://github.com/influxdata/telegraf/pull/8229) `outputs.azure_data_explorer` Add json_timestamp_layout option + +### New Input Plugins + +- [#9724](https://github.com/influxdata/telegraf/pull/9724) Add intel_pmu plugin +- [#9771](https://github.com/influxdata/telegraf/pull/9771) Add Linux Volume Manager input plugin +- [#9236](https://github.com/influxdata/telegraf/pull/9236) Openstack input plugin + +### New Output Plugins + +- [#9891](https://github.com/influxdata/telegraf/pull/9891) Add new groundwork output plugin +- [#9923](https://github.com/influxdata/telegraf/pull/9923) Add mongodb output plugin +- [#9346](https://github.com/influxdata/telegraf/pull/9346) Azure Event Hubs output plugin + +## v1.20.4 [2021-11-17] + +### Release Notes + +- [#10073](https://github.com/influxdata/telegraf/pull/10073) Update go version from 1.17.2 to 1.17.3 +- [#10100](https://github.com/influxdata/telegraf/pull/10100) Update deprecated plugin READMEs to better indicate deprecation + +Thank you to @zak-pawel for lots of linter fixes! + +- [#9986](https://github.com/influxdata/telegraf/pull/9986) Linter fixes for plugins/inputs/[h-j]* +- [#9999](https://github.com/influxdata/telegraf/pull/9999) Linter fixes for plugins/inputs/[k-l]* +- [#10006](https://github.com/influxdata/telegraf/pull/10006) Linter fixes for plugins/inputs/m* +- [#10011](https://github.com/influxdata/telegraf/pull/10011) Linter fixes for plugins/inputs/[n-o]* + +### Bugfixes + +- [#10089](https://github.com/influxdata/telegraf/pull/10089) Update BurntSushi/toml from 0.3.1 to 0.4.1 +- [#10075](https://github.com/influxdata/telegraf/pull/10075) `inputs.mongodb` Update readme with correct connection URI +- [#10076](https://github.com/influxdata/telegraf/pull/10076) Update gosnmp module from 1.32 to 1.33 +- [#9966](https://github.com/influxdata/telegraf/pull/9966) `inputs.mysql` Fix type conversion follow-up +- [#10068](https://github.com/influxdata/telegraf/pull/10068) `inputs.proxmox` Changed VM ID from string to int +- [#10047](https://github.com/influxdata/telegraf/pull/10047) `inputs.modbus` Do not build modbus on openbsd +- [#10019](https://github.com/influxdata/telegraf/pull/10019) `inputs.cisco_telemetry_mdt` Move to new protobuf library +- [#10001](https://github.com/influxdata/telegraf/pull/10001) `outputs.loki` Add metric name with label "__name" +- [#9980](https://github.com/influxdata/telegraf/pull/9980) `inputs.nvidia_smi` Set the default path correctly +- [#10010](https://github.com/influxdata/telegraf/pull/10010) Update go.opentelemetry.io/otel from v0.23.0 to v0.24.0 +- [#10044](https://github.com/influxdata/telegraf/pull/10044) `inputs.sqlserver` Add elastic pool in supported versions in sqlserver +- [#10029](https://github.com/influxdata/telegraf/pull/10029) `inputs.influxdb` Update influxdb input schema docs +- [#10026](https://github.com/influxdata/telegraf/pull/10026) `inputs.intel_rdt` Correct timezone handling + +## v1.20.3 [2021-10-27] + +### Release Notes 
+ +- [#9873](https://github.com/influxdata/telegraf/pull/9873) Update Go to 1.17.2 + +### Bugfixes + +- [#9948](https://github.com/influxdata/telegraf/pull/9948) Update github.com/aws/aws-sdk-go-v2/config module from 1.8.2 to 1.8.3 +- [#9997](https://github.com/influxdata/telegraf/pull/9997) `inputs.ipmi_sensor` Redact IPMI password in logs +- [#9978](https://github.com/influxdata/telegraf/pull/9978) `inputs.kube_inventory` Do not skip resources with zero s/ns timestamps +- [#9998](https://github.com/influxdata/telegraf/pull/9998) Update gjson module to v1.10.2 +- [#9973](https://github.com/influxdata/telegraf/pull/9973) `inputs.procstat` Revert and fix tag creation +- [#9943](https://github.com/influxdata/telegraf/pull/9943) `inputs.sqlserver` Add sqlserver plugin integration tests +- [#9647](https://github.com/influxdata/telegraf/pull/9647) `inputs.cloudwatch` Use the AWS SDK v2 library +- [#9954](https://github.com/influxdata/telegraf/pull/9954) `processors.starlark` Starlark pop operation for non-existing keys +- [#9956](https://github.com/influxdata/telegraf/pull/9956) `inputs.zfs` Check return code of zfs command for FreeBSD +- [#9585](https://github.com/influxdata/telegraf/pull/9585) `inputs.kube_inventory` Fix segfault in ingress, persistentvolumeclaim, statefulset in kube_inventory +- [#9901](https://github.com/influxdata/telegraf/pull/9901) `inputs.ethtool` Add normalization of tags for ethtool input plugin +- [#9957](https://github.com/influxdata/telegraf/pull/9957) `inputs.internet_speed` Resolve missing latency field +- [#9662](https://github.com/influxdata/telegraf/pull/9662) `inputs.prometheus` Decode Prometheus scrape path from Kubernetes labels +- [#9933](https://github.com/influxdata/telegraf/pull/9933) `inputs.procstat` Correct conversion of int with specific bit size +- [#9940](https://github.com/influxdata/telegraf/pull/9940) `inputs.webhooks` Provide more fields for papertrail event webhook +- [#9892](https://github.com/influxdata/telegraf/pull/9892) `inputs.mongodb` Solve compatibility issue for mongodb inputs when using 5.x replicaset +- [#9768](https://github.com/influxdata/telegraf/pull/9768) Update github.com/Azure/azure-kusto-go module from 0.3.2 to 0.4.0 +- [#9904](https://github.com/influxdata/telegraf/pull/9904) Update github.com/golang-jwt/jwt/v4 module from 4.0.0 to 4.1.0 +- [#9921](https://github.com/influxdata/telegraf/pull/9921) Update github.com/apache/thrift module from 0.14.2 to 0.15.0 +- [#9403](https://github.com/influxdata/telegraf/pull/9403) `inputs.mysql` Fix inconsistent metric types in mysql +- [#9905](https://github.com/influxdata/telegraf/pull/9905) Update github.com/docker/docker module from 20.10.7+incompatible to 20.10.9+incompatible +- [#9920](https://github.com/influxdata/telegraf/pull/9920) `inputs.prometheus` Move err check to correct place +- [#9869](https://github.com/influxdata/telegraf/pull/9869) Update github.com/prometheus/common module from 0.26.0 to 0.31.1 +- [#9866](https://github.com/influxdata/telegraf/pull/9866) Update snowflake database driver module to 1.6.2 +- [#9527](https://github.com/influxdata/telegraf/pull/9527) `inputs.intel_rdt` Allow sudo usage +- [#9893](https://github.com/influxdata/telegraf/pull/9893) Update github.com/jaegertracing/jaeger module from 1.15.1 to 1.26.0 + +### New External Plugins + +- [IBM DB2](https://github.com/bonitoo-io/telegraf-input-db2) - contributed by @sranka +- [Oracle Database](https://github.com/bonitoo-io/telegraf-input-oracle) - contributed by @sranka + +## v1.20.2 [2021-10-07] + 
+### Bugfixes + +- [#9878](https://github.com/influxdata/telegraf/pull/9878) `inputs.cloudwatch` Use new session API +- [#9872](https://github.com/influxdata/telegraf/pull/9872) `parsers.json_v2` Duplicate line_protocol when using object and fields +- [#9787](https://github.com/influxdata/telegraf/pull/9787) `parsers.influx` Fix memory leak in influx parser +- [#9880](https://github.com/influxdata/telegraf/pull/9880) `inputs.stackdriver` Migrate to cloud.google.com/go/monitoring/apiv3/v2 +- [#9887](https://github.com/influxdata/telegraf/pull/9887) Fix makefile typo that prevented i386 tar and rpm packages from being built + +## v1.20.1 [2021-10-06] + +### Bugfixes + +- [#9776](https://github.com/influxdata/telegraf/pull/9776) Update k8s.io/apimachinery module from 0.21.1 to 0.22.2 +- [#9864](https://github.com/influxdata/telegraf/pull/9864) Update containerd module to v1.5.7 +- [#9863](https://github.com/influxdata/telegraf/pull/9863) Update consul module to v1.11.0 +- [#9846](https://github.com/influxdata/telegraf/pull/9846) `inputs.mongodb` Fix panic due to nil dereference +- [#9850](https://github.com/influxdata/telegraf/pull/9850) `inputs.intel_rdt` Prevent timeout when logging +- [#9848](https://github.com/influxdata/telegraf/pull/9848) `outputs.loki` Update http_headers setting to match sample config +- [#9808](https://github.com/influxdata/telegraf/pull/9808) `inputs.procstat` Add missing tags +- [#9803](https://github.com/influxdata/telegraf/pull/9803) `outputs.mqtt` Add keep alive config option and documentation around issue with eclipse/mosquitto version +- [#9800](https://github.com/influxdata/telegraf/pull/9800) Fix output buffer never completely flushing +- [#9458](https://github.com/influxdata/telegraf/pull/9458) `inputs.couchbase` Fix insecure certificate validation +- [#9797](https://github.com/influxdata/telegraf/pull/9797) `inputs.opentelemetry` Fix error returned to OpenTelemetry client +- [#9789](https://github.com/influxdata/telegraf/pull/9789) Update github.com/testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 +- [#9791](https://github.com/influxdata/telegraf/pull/9791) Update github.com/Azure/go-autorest/autorest/adal module +- [#9678](https://github.com/influxdata/telegraf/pull/9678) Update github.com/Azure/go-autorest/autorest/azure/auth module from 0.5.6 to 0.5.8 +- [#9769](https://github.com/influxdata/telegraf/pull/9769) Update cloud.google.com/go/pubsub module from 1.15.0 to 1.17.0 +- [#9770](https://github.com/influxdata/telegraf/pull/9770) Update github.com/aws/smithy-go module from 1.3.1 to 1.8.0 + +### Features + +- [#9838](https://github.com/influxdata/telegraf/pull/9838) `inputs.elasticsearch_query` Add custom time/date format field + +## v1.20.0 [2021-09-17] + +### Release Notes + +- [#9642](https://github.com/influxdata/telegraf/pull/9642) Build with Golang 1.17 + +### Bugfixes + +- [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing to 0.4.5 +- [#9587](https://github.com/influxdata/telegraf/pull/9587) `outputs.opentelemetry` Use headers config in grpc requests +- [#9713](https://github.com/influxdata/telegraf/pull/9713) Update runc module to v1.0.0-rc95 to address CVE-2021-30465 +- [#9699](https://github.com/influxdata/telegraf/pull/9699) Migrate dgrijalva/jwt-go to golang-jwt/jwt/v4 +- [#9139](https://github.com/influxdata/telegraf/pull/9139) `serializers.prometheus` Update timestamps and expiration time as new data arrives +- 
[#9625](https://github.com/influxdata/telegraf/pull/9625) `outputs.graylog` Output timestamp with fractional seconds +- [#9655](https://github.com/influxdata/telegraf/pull/9655) Update cloud.google.com/go/pubsub module from 1.2.0 to 1.15.0 +- [#9674](https://github.com/influxdata/telegraf/pull/9674) `inputs.mongodb` Change command based on server version +- [#9676](https://github.com/influxdata/telegraf/pull/9676) `outputs.dynatrace` Remove hardcoded int value +- [#9619](https://github.com/influxdata/telegraf/pull/9619) `outputs.influxdb_v2` Increase accepted retry-after header values. +- [#9652](https://github.com/influxdata/telegraf/pull/9652) Update tinylib/msgp module from 1.1.5 to 1.1.6 +- [#9471](https://github.com/influxdata/telegraf/pull/9471) `inputs.sql` Make timeout apply to single query +- [#9760](https://github.com/influxdata/telegraf/pull/9760) Update shirou/gopsutil module to 3.21.8 +- [#9707](https://github.com/influxdata/telegraf/pull/9707) `inputs.logstash` Add additional logstash output plugin stats +- [#9656](https://github.com/influxdata/telegraf/pull/9656) Update miekg/dns module from 1.1.31 to 1.1.43 +- [#9750](https://github.com/influxdata/telegraf/pull/9750) Update antchfx/xmlquery module from 1.3.5 to 1.3.6 +- [#9757](https://github.com/influxdata/telegraf/pull/9757) `parsers.registry.go` Fix panic for non-existing metric names +- [#9677](https://github.com/influxdata/telegraf/pull/9677) Update Azure/azure-event-hubs-go/v3 module from 3.2.0 to 3.3.13 +- [#9653](https://github.com/influxdata/telegraf/pull/9653) Update prometheus/client_golang module from 1.7.1 to 1.11.0 +- [#9693](https://github.com/influxdata/telegraf/pull/9693) `inputs.cloudwatch` Fix pagination error +- [#9727](https://github.com/influxdata/telegraf/pull/9727) `outputs.http` Add error message logging +- [#9718](https://github.com/influxdata/telegraf/pull/9718) Update influxdata/influxdb-observability module from 0.2.4 to 0.2.7 +- [#9560](https://github.com/influxdata/telegraf/pull/9560) Update gopcua/opcua module +- [#9544](https://github.com/influxdata/telegraf/pull/9544) `inputs.couchbase` Fix memory leak +- [#9588](https://github.com/influxdata/telegraf/pull/9588) `outputs.opentelemetry` Use attributes setting + +### Features + +- [#9665](https://github.com/influxdata/telegraf/pull/9665) `inputs.systemd_units` feat(plugins/inputs/systemd_units): add pattern support +- [#9598](https://github.com/influxdata/telegraf/pull/9598) `outputs.sql` Add bool datatype +- [#9386](https://github.com/influxdata/telegraf/pull/9386) `inputs.cloudwatch` Pull metrics from multiple AWS CloudWatch namespaces +- [#9411](https://github.com/influxdata/telegraf/pull/9411) `inputs.cloudwatch` Support AWS Web Identity Provider +- [#9570](https://github.com/influxdata/telegraf/pull/9570) `inputs.modbus` Add support for RTU over TCP +- [#9488](https://github.com/influxdata/telegraf/pull/9488) `inputs.procstat` Support cgroup globs and include systemd unit children +- [#9322](https://github.com/influxdata/telegraf/pull/9322) `inputs.suricata` Support alert event type +- [#5464](https://github.com/influxdata/telegraf/pull/5464) `inputs.prometheus` Add ability to query Consul Service catalog +- [#8641](https://github.com/influxdata/telegraf/pull/8641) `outputs.prometheus_client` Add Landing page +- [#9529](https://github.com/influxdata/telegraf/pull/9529) `inputs.http_listener_v2` Allows multiple paths and add path_tag +- [#9395](https://github.com/influxdata/telegraf/pull/9395) Add cookie authentication to HTTP input and 
output plugins +- [#8454](https://github.com/influxdata/telegraf/pull/8454) `inputs.syslog` Add RFC3164 support +- [#9351](https://github.com/influxdata/telegraf/pull/9351) `inputs.jenkins` Add option to include nodes by name +- [#9277](https://github.com/influxdata/telegraf/pull/9277) Add JSON, MessagePack, and Protocol-buffers format support to the XPath parser +- [#9343](https://github.com/influxdata/telegraf/pull/9343) `inputs.snmp_trap` Improve MIB lookup performance +- [#9342](https://github.com/influxdata/telegraf/pull/9342) `outputs.newrelic` Add option to override metric_url +- [#9306](https://github.com/influxdata/telegraf/pull/9306) `inputs.smart` Add power mode status +- [#9762](https://github.com/influxdata/telegraf/pull/9762) `inputs.bond` Add count of bonded slaves (for easier alerting) +- [#9675](https://github.com/influxdata/telegraf/pull/9675) `outputs.dynatrace` Remove special handling from counters and update dynatrace-oss/dynatrace-metric-utils-go module to 0.3.0 + +### New Input Plugins + +- [#9602](https://github.com/influxdata/telegraf/pull/9602) Add rocm_smi input to monitor AMD GPUs +- [#9101](https://github.com/influxdata/telegraf/pull/9101) Add mdstat input to gather from /proc/mdstat collection +- [#3536](https://github.com/influxdata/telegraf/pull/3536) Add Elasticsearch query input +- [#9623](https://github.com/influxdata/telegraf/pull/9623) Add internet Speed Monitor Input Plugin + +### New Output Plugins + +- [#9228](https://github.com/influxdata/telegraf/pull/9228) Add OpenTelemetry output +- [#9426](https://github.com/influxdata/telegraf/pull/9426) Add Azure Data Explorer(ADX) output + +## v1.19.3 [2021-08-18] + +### Bugfixes + +- [#9639](https://github.com/influxdata/telegraf/pull/9639) Update sirupsen/logrus module from 1.7.0 to 1.8.1 +- [#9638](https://github.com/influxdata/telegraf/pull/9638) Update testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 +- [#9637](https://github.com/influxdata/telegraf/pull/9637) Update golang/snappy module from 0.0.3 to 0.0.4 +- [#9636](https://github.com/influxdata/telegraf/pull/9636) Update aws/aws-sdk-go-v2 module from 1.3.2 to 1.8.0 +- [#9605](https://github.com/influxdata/telegraf/pull/9605) `inputs.prometheus` Fix prometheus kubernetes pod discovery +- [#9606](https://github.com/influxdata/telegraf/pull/9606) `inputs.redis` Improve redis commands documentation +- [#9566](https://github.com/influxdata/telegraf/pull/9566) `outputs.cratedb` Replace dots in tag keys with underscores +- [#9401](https://github.com/influxdata/telegraf/pull/9401) `inputs.clickhouse` Fix panic, improve handling empty result set +- [#9583](https://github.com/influxdata/telegraf/pull/9583) `inputs.opcua` Avoid closing session on a closed connection +- [#9576](https://github.com/influxdata/telegraf/pull/9576) `processors.aws` Refactor ec2 init for config-api +- [#9571](https://github.com/influxdata/telegraf/pull/9571) `outputs.loki` Sort logs by timestamp before writing to Loki +- [#9524](https://github.com/influxdata/telegraf/pull/9524) `inputs.opcua` Fix reconnection regression introduced in 1.19.1 +- [#9581](https://github.com/influxdata/telegraf/pull/9581) `inputs.kube_inventory` Fix k8s nodes and pods parsing error +- [#9577](https://github.com/influxdata/telegraf/pull/9577) Update sensu/go module to v2.9.0 +- [#9554](https://github.com/influxdata/telegraf/pull/9554) `inputs.postgresql` Normalize unix socket path +- [#9565](https://github.com/influxdata/telegraf/pull/9565) Update hashicorp/consul/api module to 1.9.1 +- 
[#9552](https://github.com/influxdata/telegraf/pull/9552) `inputs.vsphere` Update vmware/govmomi module to v0.26.0 in order to support vSphere 7.0 +- [#9550](https://github.com/influxdata/telegraf/pull/9550) `inputs.opcua` Do not skip good quality nodes after a bad quality node is encountered + +## v1.19.2 [2021-07-28] + +### Release Notes + +- [#9542](https://github.com/influxdata/telegraf/pull/9542) Update Go to v1.16.6 + +### Bugfixes + +- [#9363](https://github.com/influxdata/telegraf/pull/9363) `outputs.dynatrace` Update dynatrace output to allow optional default dimensions +- [#9526](https://github.com/influxdata/telegraf/pull/9526) `outputs.influxdb` Fix metrics reported as written but not actually written +- [#9549](https://github.com/influxdata/telegraf/pull/9549) `inputs.kube_inventory` Prevent segfault in persistent volume claims +- [#9503](https://github.com/influxdata/telegraf/pull/9503) `inputs.nsq_consumer` Fix connection error when not using server setting +- [#9540](https://github.com/influxdata/telegraf/pull/9540) `inputs.sql` Fix handling bool column +- [#9387](https://github.com/influxdata/telegraf/pull/9387) Linter fixes for plugins/inputs/[fg]* +- [#9438](https://github.com/influxdata/telegraf/pull/9438) `inputs.kubernetes` Attach the pod labels to kubernetes_pod_volume and kubernetes_pod_network metrics +- [#9519](https://github.com/influxdata/telegraf/pull/9519) `processors.ifname` Fix SNMP empty metric name +- [#8587](https://github.com/influxdata/telegraf/pull/8587) `inputs.sqlserver` Add tempdb troubleshooting stats and missing V2 query metrics +- [#9323](https://github.com/influxdata/telegraf/pull/9323) `inputs.x509_cert` Prevent x509_cert from hanging on UDP connection +- [#9504](https://github.com/influxdata/telegraf/pull/9504) `parsers.json_v2` Simplify how nesting is handled +- [#9493](https://github.com/influxdata/telegraf/pull/9493) `inputs.mongodb` Switch to official mongo-go-driver module to fix SSL auth failure +- [#9491](https://github.com/influxdata/telegraf/pull/9491) `outputs.dynatrace` Fix panic caused by uninitialized loggedMetrics map +- [#9497](https://github.com/influxdata/telegraf/pull/9497) `inputs.prometheus` Fix prometheus cadvisor authentication +- [#9520](https://github.com/influxdata/telegraf/pull/9520) `parsers.json_v2` Add support for large uint64 and int64 numbers +- [#9447](https://github.com/influxdata/telegraf/pull/9447) `inputs.statsd` Fix regression that didn't allow integer percentiles +- [#9466](https://github.com/influxdata/telegraf/pull/9466) `inputs.sqlserver` Provide detailed error message in telegraf log +- [#9399](https://github.com/influxdata/telegraf/pull/9399) Update dynatrace-metric-utils-go module to v0.2.0 +- [#8108](https://github.com/influxdata/telegraf/pull/8108) `inputs.cgroup` Allow multiple keys when parsing cgroups +- [#9479](https://github.com/influxdata/telegraf/pull/9479) `parsers.json_v2` Fix json_v2 parser to handle nested objects in arrays properly + +### Features + +- [#9485](https://github.com/influxdata/telegraf/pull/9485) Add option to automatically reload settings when config file is modified + +## v1.19.1 [2021-07-07] + +### Bugfixes + +- [#9388](https://github.com/influxdata/telegraf/pull/9388) `inputs.sqlserver` Require authentication method to be specified +- [#9456](https://github.com/influxdata/telegraf/pull/9456) `inputs.kube_inventory` Fix segfault in kube_inventory +- [#9448](https://github.com/influxdata/telegraf/pull/9448) `inputs.couchbase` Fix panic +- 
[#9444](https://github.com/influxdata/telegraf/pull/9444) `inputs.knx_listener` Fix nil pointer panic +- [#9446](https://github.com/influxdata/telegraf/pull/9446) `inputs.procstat` Update gopsutil module to fix panic +- [#9443](https://github.com/influxdata/telegraf/pull/9443) `inputs.rabbitmq` Fix JSON unmarshall regression +- [#9369](https://github.com/influxdata/telegraf/pull/9369) Update nat-server module to v2.2.6 +- [#9429](https://github.com/influxdata/telegraf/pull/9429) `inputs.dovecot` Exclude read-timeout from being an error +- [#9423](https://github.com/influxdata/telegraf/pull/9423) `inputs.statsd` Don't stop parsing after parsing error +- [#9370](https://github.com/influxdata/telegraf/pull/9370) Update apimachinary module to v0.21.1 +- [#9373](https://github.com/influxdata/telegraf/pull/9373) Update jwt module to v1.2.2 and jwt-go module to v3.2.3 +- [#9412](https://github.com/influxdata/telegraf/pull/9412) Update couchbase Module to v0.1.0 +- [#9366](https://github.com/influxdata/telegraf/pull/9366) `inputs.snmp` Add a check for oid and name to prevent empty metrics +- [#9413](https://github.com/influxdata/telegraf/pull/9413) `outputs.http` Fix toml error when parsing insecure_skip_verify +- [#9400](https://github.com/influxdata/telegraf/pull/9400) `inputs.x509_cert` Fix 'source' tag for https +- [#9375](https://github.com/influxdata/telegraf/pull/9375) Update signalfx module to v3.3.34 +- [#9406](https://github.com/influxdata/telegraf/pull/9406) `parsers.json_v2` Don't require tags to be added to included_keys +- [#9289](https://github.com/influxdata/telegraf/pull/9289) `inputs.x509_cert` Fix SNI support +- [#9372](https://github.com/influxdata/telegraf/pull/9372) Update gjson module to v1.8.0 +- [#9379](https://github.com/influxdata/telegraf/pull/9379) Linter fixes for plugins/inputs/[de]* + +## v1.19.0 [2021-06-17] + +### Release Notes + +- Many linter fixes - thanks @zak-pawel and all! +- [#9331](https://github.com/influxdata/telegraf/pull/9331) Update Go to 1.16.5 + +### Bugfixes + +- [#9182](https://github.com/influxdata/telegraf/pull/9182) Update pgx to v4 +- [#9275](https://github.com/influxdata/telegraf/pull/9275) Fix reading config files starting with http: +- [#9196](https://github.com/influxdata/telegraf/pull/9196) `serializers.prometheusremotewrite` Update dependency and remove tags with empty values +- [#9051](https://github.com/influxdata/telegraf/pull/9051) `outputs.kafka` Don't prevent telegraf from starting when there's a connection error +- [#8795](https://github.com/influxdata/telegraf/pull/8795) `parsers.prometheusremotewrite` Update prometheus dependency to v2.21.0 +- [#9295](https://github.com/influxdata/telegraf/pull/9295) `outputs.dynatrace` Use dynatrace-metric-utils +- [#9368](https://github.com/influxdata/telegraf/pull/9368) `parsers.json_v2` Update json_v2 parser to handle null types +- [#9359](https://github.com/influxdata/telegraf/pull/9359) `inputs.sql` Fix import of sqlite and ignore it on all platforms that require CGO. 
+- [#9329](https://github.com/influxdata/telegraf/pull/9329) `inputs.kube_inventory` Fix connecting to the wrong url +- [#9358](https://github.com/influxdata/telegraf/pull/9358) upgrade denisenkom go-mssql to v0.10.0 +- [#9283](https://github.com/influxdata/telegraf/pull/9283) `processors.parser` Fix segfault +- [#9243](https://github.com/influxdata/telegraf/pull/9243) `inputs.docker` Close all idle connections +- [#9338](https://github.com/influxdata/telegraf/pull/9338) `inputs.suricata` Support new JSON format +- [#9296](https://github.com/influxdata/telegraf/pull/9296) `outputs.influxdb` Fix endless retries + +### Features + +- [#8987](https://github.com/influxdata/telegraf/pull/8987) Config file environment variable can be a URL +- [#9297](https://github.com/influxdata/telegraf/pull/9297) `outputs.datadog` Add HTTP proxy to datadog output +- [#9087](https://github.com/influxdata/telegraf/pull/9087) Add named timestamp formats +- [#9276](https://github.com/influxdata/telegraf/pull/9276) `inputs.vsphere` Add config option for the historical interval duration +- [#9274](https://github.com/influxdata/telegraf/pull/9274) `inputs.ping` Add an option to specify packet size +- [#9007](https://github.com/influxdata/telegraf/pull/9007) Allow multiple "--config" and "--config-directory" flags +- [#9249](https://github.com/influxdata/telegraf/pull/9249) `outputs.graphite` Allow more characters in graphite tags +- [#8351](https://github.com/influxdata/telegraf/pull/8351) `inputs.sqlserver` Added login_name +- [#9223](https://github.com/influxdata/telegraf/pull/9223) `inputs.dovecot` Add support for unix domain sockets +- [#9118](https://github.com/influxdata/telegraf/pull/9118) `processors.strings` Add UTF-8 sanitizer +- [#9156](https://github.com/influxdata/telegraf/pull/9156) `inputs.aliyuncms` Add config option list of regions to query +- [#9138](https://github.com/influxdata/telegraf/pull/9138) `common.http` Add OAuth2 to HTTP input +- [#8822](https://github.com/influxdata/telegraf/pull/8822) `inputs.sqlserver` Enable Azure Active Directory (AAD) authentication support +- [#9136](https://github.com/influxdata/telegraf/pull/9136) `inputs.cloudwatch` Add wildcard support in dimensions configuration +- [#5517](https://github.com/influxdata/telegraf/pull/5517) `inputs.mysql` Gather all mysql channels +- [#8911](https://github.com/influxdata/telegraf/pull/8911) `processors.enum` Support float64 +- [#9105](https://github.com/influxdata/telegraf/pull/9105) `processors.starlark` Support nanosecond resolution timestamp +- [#9080](https://github.com/influxdata/telegraf/pull/9080) `inputs.logstash` Add support for version 7 queue stats +- [#9074](https://github.com/influxdata/telegraf/pull/9074) `parsers.prometheusremotewrite` Add starlark script for renaming metrics +- [#9032](https://github.com/influxdata/telegraf/pull/9032) `inputs.couchbase` Add ~200 more Couchbase metrics via Buckets endpoint +- [#8596](https://github.com/influxdata/telegraf/pull/8596) `inputs.sqlserver` input/sqlserver: Add service and save connection pools +- [#9042](https://github.com/influxdata/telegraf/pull/9042) `processors.starlark` Add math module +- [#6952](https://github.com/influxdata/telegraf/pull/6952) `inputs.x509_cert` Wildcard support for cert filenames +- [#9004](https://github.com/influxdata/telegraf/pull/9004) `processors.starlark` Add time module +- [#8891](https://github.com/influxdata/telegraf/pull/8891) `inputs.kinesis_consumer` Add content_encoding option with gzip and zlib support +- 
[#8996](https://github.com/influxdata/telegraf/pull/8996) `processors.starlark` Add an example showing how to obtain IOPS from diskio input +- [#8966](https://github.com/influxdata/telegraf/pull/8966) `inputs.http_listener_v2` Add support for snappy compression +- [#8661](https://github.com/influxdata/telegraf/pull/8661) `inputs.cisco_telemetry_mdt` Add support for events and class based query +- [#8861](https://github.com/influxdata/telegraf/pull/8861) `inputs.mongodb` Optionally collect top stats +- [#8979](https://github.com/influxdata/telegraf/pull/8979) `parsers.value` Add custom field name config option +- [#8544](https://github.com/influxdata/telegraf/pull/8544) `inputs.sqlserver` Add an optional health metric + +### New Input Plugins + +- [Alibaba CloudMonitor Service (Aliyun)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aliyuncms) - contributed by @i-prudnikov +- [OpenTelemetry](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry) - contributed by @jacobmarble +- [Intel Data Plane Development Kit (DPDK)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dpdk) - contributed by @p-zak +- [KNX](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/knx_listener) - contributed by @DocLambda +- [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sql) - contributed by @srebhan + +### New Output Plugins + +- [Websocket](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/websocket) - contributed by @FZambia +- [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/sql) - contributed by @illuusio +- [AWS Cloudwatch logs](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch_logs) - contributed by @i-prudnikov + +### New Parser Plugins + +- [Prometheus Remote Write](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/prometheusremotewrite) - contributed by @helenosheaa +- [JSON V2](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/json_v2) - contributed by @sspaink + +### New External Plugins + +- [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - contributed by @falon +- [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - contributed by @jcgonnard +- [dnsmasq](https://github.com/machinly/dnsmasq-telegraf-plugin) - contributed by @machinly +- [Big Blue Button](https://github.com/bigblueswarm/bigbluebutton-telegraf-plugin) - contributed by @SLedunois + +## v1.18.3 [2021-05-20] + +### Release Notes + +- Added FreeBSD armv7 build + +### Bugfixes + +- [#9271](https://github.com/influxdata/telegraf/pull/9271) `inputs.prometheus` Set user agent when scraping prom metrics +- [#9203](https://github.com/influxdata/telegraf/pull/9203) Migrate from soniah/gosnmp to gosnmp/gosnmp and update to 1.32.0 +- [#9169](https://github.com/influxdata/telegraf/pull/9169) `inputs.kinesis_consumer` Fix repeating parser error +- [#9130](https://github.com/influxdata/telegraf/pull/9130) `inputs.sqlserver` Remove disallowed whitespace from sqlServerRingBufferCPU query +- [#9238](https://github.com/influxdata/telegraf/pull/9238) Update hashicorp/consul/api module to v1.8.1 +- [#9235](https://github.com/influxdata/telegraf/pull/9235) Migrate from docker/libnetwork/ipvs to moby/ipvs +- [#9224](https://github.com/influxdata/telegraf/pull/9224) Update shirou/gopsutil to 3.21.3 +- [#9209](https://github.com/influxdata/telegraf/pull/9209) Update microsoft/ApplicationInsights-Go to 0.4.4 +- 
[#9190](https://github.com/influxdata/telegraf/pull/9190) Update gogo/protobuf to 1.3.2 +- [#8746](https://github.com/influxdata/telegraf/pull/8746) Update Azure/go-autorest/autorest/azure/auth to 0.5.6 and Azure/go-autorest/autorest to 0.11.17 +- [#8745](https://github.com/influxdata/telegraf/pull/8745) Update collectd.org to 0.5.0 +- [#8716](https://github.com/influxdata/telegraf/pull/8716) Update nats-io/nats.go 1.10.0 +- [#9039](https://github.com/influxdata/telegraf/pull/9039) Update golang/protobuf to v1.5.1 +- [#8937](https://github.com/influxdata/telegraf/pull/8937) Migrate from ericchiang/k8s to kubernetes/client-go + +### Features + +- [#8913](https://github.com/influxdata/telegraf/pull/8913) `outputs.elasticsearch` Add ability to enable gzip compression + +## v1.18.2 [2021-04-28] + +### Bugfixes + +- [#9160](https://github.com/influxdata/telegraf/pull/9160) `processors.converter` Add support for large hexadecimal strings +- [#9195](https://github.com/influxdata/telegraf/pull/9195) `inputs.apcupsd` Fix apcupsd 'ALARMDEL' bug via forked repo +- [#9110](https://github.com/influxdata/telegraf/pull/9110) `parsers.json` Make JSON format compatible with nulls +- [#9128](https://github.com/influxdata/telegraf/pull/9128) `inputs.nfsclient` Fix nfsclient ops map to allow collection of metrics other than read and write +- [#8917](https://github.com/influxdata/telegraf/pull/8917) `inputs.snmp` Log snmpv3 auth failures +- [#8892](https://github.com/influxdata/telegraf/pull/8892) `common.shim` Accept larger inputs from scanner +- [#9045](https://github.com/influxdata/telegraf/pull/9045) `inputs.vsphere` Add MetricLookback setting to handle reporting delays in vCenter 6.7 and later +- [#9026](https://github.com/influxdata/telegraf/pull/9026) `outputs.sumologic` Carbon2 serializer: sanitize metric name +- [#9086](https://github.com/influxdata/telegraf/pull/9086) `inputs.opcua` Fix error handling + +## v1.18.1 [2021-04-07] + +### Bugfixes + +- [#9082](https://github.com/influxdata/telegraf/pull/9082) `inputs.mysql` Fix 'binary logs' query for MySQL 8 +- [#9069](https://github.com/influxdata/telegraf/pull/9069) `inputs.tail` Add configurable option for the 'path' tag override +- [#9067](https://github.com/influxdata/telegraf/pull/9067) `inputs.nfsclient` Fix integer overflow in fields from mountstat +- [#9050](https://github.com/influxdata/telegraf/pull/9050) `inputs.snmp` Fix init when no mibs are installed +- [#9072](https://github.com/influxdata/telegraf/pull/9072) `inputs.ping` Always call SetPrivileged(true) in native mode +- [#9043](https://github.com/influxdata/telegraf/pull/9043) `processors.ifname` Get interface name more efficiently +- [#9056](https://github.com/influxdata/telegraf/pull/9056) `outputs.yandex_cloud_monitoring` Use correct compute metadata URL to get folder-id +- [#9048](https://github.com/influxdata/telegraf/pull/9048) `outputs.azure_monitor` Handle error when initializing the auth object +- [#8549](https://github.com/influxdata/telegraf/pull/8549) `inputs.sqlserver` Fix sqlserver_process_cpu calculation +- [#9035](https://github.com/influxdata/telegraf/pull/9035) `inputs.ipmi_sensor` Fix panic +- [#9009](https://github.com/influxdata/telegraf/pull/9009) `inputs.docker` Fix panic when parsing container stats +- [#8333](https://github.com/influxdata/telegraf/pull/8333) `inputs.exec` Don't truncate messages in debug mode +- [#8769](https://github.com/influxdata/telegraf/pull/8769) `agent` Close running outputs when reloading + +## v1.18.0 [2021-03-17] + +### Release Notes
+ +- Support Go version 1.16.2 +- Added support for code signing in Windows + +### Bugfixes + +- [#7312](https://github.com/influxdata/telegraf/pull/7312) `inputs.docker` CPU stats respect perdevice +- [#8397](https://github.com/influxdata/telegraf/pull/8397) `outputs.dynatrace` Dynatrace Plugin: Make conversion to counters possible / Changed large bulk handling +- [#8655](https://github.com/influxdata/telegraf/pull/8655) `inputs.sqlserver` SqlServer - fix for default server list +- [#8703](https://github.com/influxdata/telegraf/pull/8703) `inputs.docker` Use consistent container name in docker input plugin +- [#8902](https://github.com/influxdata/telegraf/pull/8902) `inputs.snmp` Fix max_repetitions signedness issues +- [#8817](https://github.com/influxdata/telegraf/pull/8817) `outputs.kinesis` outputs.kinesis - log record error count +- [#8833](https://github.com/influxdata/telegraf/pull/8833) `inputs.sqlserver` Bug Fix - SQL Server HADR queries for SQL Versions +- [#8628](https://github.com/influxdata/telegraf/pull/8628) `inputs.modbus` fix: reading multiple holding registers in modbus input plugin +- [#8885](https://github.com/influxdata/telegraf/pull/8885) `inputs.statsd` Fix statsd concurrency bug +- [#8393](https://github.com/influxdata/telegraf/pull/8393) `inputs.sqlserver` SQL Perfmon counters - synced queries from v2 to all db types +- [#8873](https://github.com/influxdata/telegraf/pull/8873) `processors.ifname` Fix mutex locking around ifname cache +- [#8720](https://github.com/influxdata/telegraf/pull/8720) `parsers.influx` fix: remove ambiguity on '\v' from line-protocol parser +- [#8678](https://github.com/influxdata/telegraf/pull/8678) `inputs.redis` Fix Redis output field type inconsistencies +- [#8953](https://github.com/influxdata/telegraf/pull/8953) `agent` Reset the flush interval timer when flush is requested or batch is ready. +- [#8954](https://github.com/influxdata/telegraf/pull/8954) `common.kafka` Fix max open requests to one if idempotent writes is set to true +- [#8721](https://github.com/influxdata/telegraf/pull/8721) `inputs.kube_inventory` Set $HOSTIP in default URL +- [#8995](https://github.com/influxdata/telegraf/pull/8995) `inputs.sflow` fix segfaults in sflow plugin by checking if protocol headers are set +- [#8986](https://github.com/influxdata/telegraf/pull/8986) `outputs.nats` nats_output: use the configured credentials file + +### Features + +- [#8887](https://github.com/influxdata/telegraf/pull/8887) `inputs.procstat` Add PPID field to procstat input plugin +- [#8852](https://github.com/influxdata/telegraf/pull/8852) `processors.starlark` Add Starlark script for estimating Line Protocol cardinality +- [#8915](https://github.com/influxdata/telegraf/pull/8915) `inputs.cloudwatch` add proxy +- [#8910](https://github.com/influxdata/telegraf/pull/8910) `agent` Display error message on badly formatted config string array (eg. 
namepass) +- [#8785](https://github.com/influxdata/telegraf/pull/8785) `inputs.diskio` Non systemd support with unittest +- [#8850](https://github.com/influxdata/telegraf/pull/8850) `inputs.snmp` Support more snmpv3 authentication protocols +- [#8813](https://github.com/influxdata/telegraf/pull/8813) `inputs.redfish` added member_id as tag (as it is a unique value) for redfish plugin and added address of the server when the status is other than 200 for better debugging +- [#8613](https://github.com/influxdata/telegraf/pull/8613) `inputs.phpfpm` Support exclamation mark to create non-matching list in tail plugin +- [#8179](https://github.com/influxdata/telegraf/pull/8179) `inputs.statsd` Add support for datadog distributions metric +- [#8803](https://github.com/influxdata/telegraf/pull/8803) `agent` Add default retry for load config via url +- [#8816](https://github.com/influxdata/telegraf/pull/8816) Code Signing for Windows +- [#8772](https://github.com/influxdata/telegraf/pull/8772) `processors.starlark` Allow to provide constants to a starlark script +- [#8749](https://github.com/influxdata/telegraf/pull/8749) `outputs.newrelic` Add HTTP proxy setting to New Relic output plugin +- [#8543](https://github.com/influxdata/telegraf/pull/8543) `inputs.elasticsearch` Add configurable number of 'most recent' date-stamped indices to gather in Elasticsearch input +- [#8675](https://github.com/influxdata/telegraf/pull/8675) `processors.starlark` Add Starlark parsing example of nested JSON +- [#8762](https://github.com/influxdata/telegraf/pull/8762) `inputs.prometheus` Optimize for bigger kubernetes clusters (500+ pods) +- [#8950](https://github.com/influxdata/telegraf/pull/8950) `inputs.teamspeak` Teamspeak input plugin query clients +- [#8849](https://github.com/influxdata/telegraf/pull/8849) `inputs.sqlserver` Filter data out from system databases for Azure SQL DB only + +### New Inputs + +- [Beat Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/beat) - Contributed by @nferch +- [CS:GO Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/csgo) - Contributed by @oofdog +- [Directory Monitoring Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/directory_monitor) - Contributed by @InfluxData +- [RavenDB Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ravendb) - Contributed by @ml054 and @bartoncasey +- [NFS Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nfsclient) - Contributed by @pmoranga + +### New Outputs + +- [Grafana Loki Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) - Contributed by @Eraac +- [Google BigQuery Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/bigquery) - Contributed by @gkatzioura +- [Sensu Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/sensu) - Contributed by @calebhailey +- [SignalFX Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/signalfx) - Contributed by @keitwb + +### New Aggregators + +- [Derivative Aggregator Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/derivative) - Contributed by @KarstenSchnitter +- [Quantile Aggregator Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/quantile) - Contributed by @srebhan + +### New Processors + +- [AWS EC2 Metadata Processor 
Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/processors/aws/ec2) - Contributed by @pmalek-sumo + +### New Parsers + +- [XML Parser Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/xml) - Contributed by @srebhan + +### New Serializers + +- [MessagePack Serializer Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/serializers/msgpack) - Contributed by @dialogbox + +### New External Plugins + +- [GeoIP Processor Plugin](https://github.com/a-bali/telegraf-geoip) - Contributed by @a-bali +- [Plex Webhook Input Plugin](https://github.com/russorat/telegraf-webhooks-plex) - Contributed by @russorat +- [SMCIPMITool Input Plugin](https://github.com/jhpope/smc_ipmi) - Contributed by @jhpope + +## v1.17.3 [2021-02-17] + +### Bugfixes + +- [#7316](https://github.com/influxdata/telegraf/pull/7316) `inputs.filestat` plugins/filestat: Skip missing files +- [#8868](https://github.com/influxdata/telegraf/pull/8868) Update to Go 1.15.8 +- [#8744](https://github.com/influxdata/telegraf/pull/8744) Bump github.com/gopcua/opcua from 0.1.12 to 0.1.13 +- [#8657](https://github.com/influxdata/telegraf/pull/8657) `outputs.warp10` outputs/warp10: url encode comma in tags value +- [#8824](https://github.com/influxdata/telegraf/pull/8824) `inputs.x509_cert` inputs.x509_cert: Fix timeout issue +- [#8821](https://github.com/influxdata/telegraf/pull/8821) `inputs.mqtt_consumer` Fix reconnection issues mqtt +- [#8775](https://github.com/influxdata/telegraf/pull/8775) `outputs.influxdb` Validate the response from InfluxDB after writing/creating a database to avoid json parsing panics/errors +- [#8804](https://github.com/influxdata/telegraf/pull/8804) `inputs.snmp` Expose v4/v6-only connection-schemes through GosnmpWrapper +- [#8838](https://github.com/influxdata/telegraf/pull/8838) `agent` fix issue with reading flush_jitter output from config +- [#8839](https://github.com/influxdata/telegraf/pull/8839) `inputs.ping` fixes Sort and timeout around deadline +- [#8787](https://github.com/influxdata/telegraf/pull/8787) `inputs.ping` Update README for inputs.ping with correct cmd for native ping on Linux +- [#8771](https://github.com/influxdata/telegraf/pull/8771) Update go-ping to latest version + +## v1.17.2 [2021-01-28] + +### Bugfixes + +- [#8770](https://github.com/influxdata/telegraf/pull/8770) `inputs.ping` Set interface for native +- [#8764](https://github.com/influxdata/telegraf/pull/8764) `inputs.ping` Resolve regression, re-add missing function + +## v1.17.1 [2021-01-27] + +### Release Notes + +Included a few more changes that add configuration options to plugins as it's been while since the last release + +- [#8335](https://github.com/influxdata/telegraf/pull/8335) `inputs.ipmi_sensor` Add setting to enable caching in ipmitool +- [#8616](https://github.com/influxdata/telegraf/pull/8616) Add Event Log support for Windows +- [#8602](https://github.com/influxdata/telegraf/pull/8602) `inputs.postgresql_extensible` Add timestamp column support to postgresql_extensible +- [#8627](https://github.com/influxdata/telegraf/pull/8627) `parsers.csv` Added ability to define skip values in csv parser +- [#8055](https://github.com/influxdata/telegraf/pull/8055) `outputs.http` outputs/http: add option to control idle connection timeout +- [#7897](https://github.com/influxdata/telegraf/pull/7897) `common.tls` common/tls: Allow specifying SNI hostnames +- [#8541](https://github.com/influxdata/telegraf/pull/8541) `inputs.snmp` Extended the internal snmp wrapper to support 
AES192, AES192C, AES256, and AES256C +- [#6165](https://github.com/influxdata/telegraf/pull/6165) `inputs.procstat` Provide method to include core count when reporting cpu_usage in procstat input +- [#8287](https://github.com/influxdata/telegraf/pull/8287) `inputs.jenkins` Add support for an inclusive job list in Jenkins plugin +- [#8524](https://github.com/influxdata/telegraf/pull/8524) `inputs.ipmi_sensor` Add hex_key parameter for IPMI input plugin connection + +### Bugfixes + +- [#8662](https://github.com/influxdata/telegraf/pull/8662) `outputs.influxdb_v2` [outputs.influxdb_v2] add exponential backoff, and respect client error responses +- [#8748](https://github.com/influxdata/telegraf/pull/8748) `outputs.elasticsearch` Fix issue with elasticsearch output being really noisy about some errors +- [#7533](https://github.com/influxdata/telegraf/pull/7533) `inputs.zookeeper` improve mntr regex to match user specific keys. +- [#7967](https://github.com/influxdata/telegraf/pull/7967) `inputs.lustre2` Fix crash in lustre2 input plugin, when field name and value +- [#8673](https://github.com/influxdata/telegraf/pull/8673) Update grok-library to v1.0.1 with dots and dash-patterns fixed. +- [#8679](https://github.com/influxdata/telegraf/pull/8679) `inputs.ping` Use go-ping for "native" execution in Ping plugin +- [#8741](https://github.com/influxdata/telegraf/pull/8741) `inputs.x509_cert` fix x509 cert timeout issue +- [#8714](https://github.com/influxdata/telegraf/pull/8714) Bump github.com/nsqio/go-nsq from 1.0.7 to 1.0.8 +- [#8715](https://github.com/influxdata/telegraf/pull/8715) Bump github.com/Shopify/sarama from 1.27.1 to 1.27.2 +- [#8712](https://github.com/influxdata/telegraf/pull/8712) Bump github.com/newrelic/newrelic-telemetry-sdk-go from 0.2.0 to 0.5.1 +- [#8659](https://github.com/influxdata/telegraf/pull/8659) `inputs.gnmi` GNMI plugin should not take off the first character of field keys when no 'alias path' exists. +- [#8609](https://github.com/influxdata/telegraf/pull/8609) `inputs.webhooks` Use the 'measurement' json field from the particle webhook as the measurment name, or if it's blank, use the 'name' field of the event's json. +- [#8658](https://github.com/influxdata/telegraf/pull/8658) `inputs.procstat` Procstat input plugin should use the same timestamp in all metrics in the same Gather() cycle. +- [#8391](https://github.com/influxdata/telegraf/pull/8391) `aggregators.merge` Optimize SeriesGrouper & aggregators.merge +- [#8545](https://github.com/influxdata/telegraf/pull/8545) `inputs.prometheus` Using mime-type in prometheus parser to handle protocol-buffer responses +- [#8588](https://github.com/influxdata/telegraf/pull/8588) `inputs.snmp` Input SNMP plugin - upgrade gosnmp library to version 1.29.0 +- [#8502](https://github.com/influxdata/telegraf/pull/8502) `inputs.http_listener_v2` Fix Stop() bug when plugin fails to start + +### New External Plugins + +- [#8646](https://github.com/influxdata/telegraf/pull/8646) [Open Hardware Monitoring](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) Input Plugin + +## v1.17.0 [2020-12-18] + +### Release Notes + +- Starlark plugins can now store state between runs using a global state variable. This lets you make custom aggregators as well as custom processors that are state-aware. 
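As a minimal sketch of what the shared `state` variable described in the v1.17.0 release note above enables: the plugin options, field names, and the `state.get`/`state[...]` usage below are illustrative assumptions based on that note, not text taken from the release itself.

```toml
[[processors.starlark]]
  ## Inline Starlark source. 'state' is the shared global dict that persists
  ## between calls to apply(), which is what makes state-aware processors
  ## (and custom aggregations) possible.
  source = '''
def apply(metric):
    previous = state.get("last_value")
    current = metric.fields.get("value")
    # Emit the change since the previous metric, once we have seen one.
    if previous != None and current != None:
        metric.fields["delta"] = current - previous
    state["last_value"] = current
    return metric
'''
```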
+- New input plugins: Riemann-Protobuff Listener, Intel PowerStat +- New output plugins: Yandex.Cloud monitoring, Logz.io +- New parser plugin: Prometheus +- New serializer: Prometheus remote write + +### Bugfixes + +- [#8505](https://github.com/influxdata/telegraf/pull/8505) `inputs.vsphere` Fixed misspelled check for datacenter +- [#8499](https://github.com/influxdata/telegraf/pull/8499) `processors.execd` Adding support for new lines in influx line protocol fields. +- [#8254](https://github.com/influxdata/telegraf/pull/8254) `serializers.carbon2` Fix carbon2 tests +- [#8498](https://github.com/influxdata/telegraf/pull/8498) `inputs.http_response` fixed network test +- [#8414](https://github.com/influxdata/telegraf/pull/8414) `inputs.bcache` Fix tests for Windows - part 1 +- [#8577](https://github.com/influxdata/telegraf/pull/8577) `inputs.ping` fix potential issue with race condition +- [#8562](https://github.com/influxdata/telegraf/pull/8562) `inputs.mqtt_consumer` fix issue with mqtt concurrent map write +- [#8574](https://github.com/influxdata/telegraf/pull/8574) `inputs.ecs` Remove duplicated field "revision" from ecs_task because it's already defined as a tag there +- [#8551](https://github.com/influxdata/telegraf/pull/8551) `inputs.socket_listener` fix crash when socket_listener receiving invalid data +- [#8564](https://github.com/influxdata/telegraf/pull/8564) `parsers.graphite` Graphite tags parser +- [#8472](https://github.com/influxdata/telegraf/pull/8472) `inputs.kube_inventory` Fixing issue with missing metrics when pod has only pending containers +- [#8542](https://github.com/influxdata/telegraf/pull/8542) `inputs.aerospike` fix edge case in aerospike plugin where an expected hex string was converted to integer if all digits +- [#8512](https://github.com/influxdata/telegraf/pull/8512) `inputs.kube_inventory` Update string parsing of allocatable cpu cores in kube_inventory + +### Features + +- [#8038](https://github.com/influxdata/telegraf/pull/8038) `inputs.jenkins` feat: add build number field to jenkins_job measurement +- [#7345](https://github.com/influxdata/telegraf/pull/7345) `inputs.ping` Add percentiles to the ping plugin +- [#8369](https://github.com/influxdata/telegraf/pull/8369) `inputs.sqlserver` Added tags for monitoring readable secondaries for Azure SQL MI +- [#8379](https://github.com/influxdata/telegraf/pull/8379) `inputs.sqlserver` SQL Server HA/DR Availability Group queries +- [#8520](https://github.com/influxdata/telegraf/pull/8520) Add initialization example to mock-plugin. 
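A hedged configuration sketch for the percentiles feature added to the ping plugin in #7345 above; the `percentiles` option name and the sample values are assumptions inferred from the feature description rather than copied from the release.

```toml
[[inputs.ping]]
  urls = ["example.org"]
  ## Report round-trip-time percentiles in addition to min/avg/max
  ## (option name assumed from the #7345 feature description).
  percentiles = [50, 95, 99]
```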
+- [#8426](https://github.com/influxdata/telegraf/pull/8426) `inputs.snmp` Add support to convert snmp hex strings to integers +- [#8509](https://github.com/influxdata/telegraf/pull/8509) `inputs.statsd` Add configurable Max TTL duration for statsd input plugin entries +- [#8508](https://github.com/influxdata/telegraf/pull/8508) `inputs.bind` Add configurable timeout to bind input plugin http call +- [#8368](https://github.com/influxdata/telegraf/pull/8368) `inputs.sqlserver` Added is_primary_replica for monitoring readable secondaries for Azure SQL DB +- [#8462](https://github.com/influxdata/telegraf/pull/8462) `inputs.sqlserver` sqlAzureMIRequests - remove duplicate column [session_db_name] +- [#8464](https://github.com/influxdata/telegraf/pull/8464) `inputs.sqlserver` Add column measurement_db_type to output of all queries if not empty +- [#8389](https://github.com/influxdata/telegraf/pull/8389) `inputs.opcua` Add node groups to opcua input plugin +- [#8432](https://github.com/influxdata/telegraf/pull/8432) add support for linux/ppc64le +- [#8474](https://github.com/influxdata/telegraf/pull/8474) `inputs.modbus` Add FLOAT64-IEEE support to inputs.modbus (#8361) (by @Nemecsek) +- [#8447](https://github.com/influxdata/telegraf/pull/8447) `processors.starlark` Add the shared state to the global scope to get previous data +- [#8383](https://github.com/influxdata/telegraf/pull/8383) `inputs.zfs` Add dataset metrics to zfs input +- [#8429](https://github.com/influxdata/telegraf/pull/8429) `outputs.nats` Added "name" parameter to NATS output plugin +- [#8477](https://github.com/influxdata/telegraf/pull/8477) `inputs.http` proxy support for http input +- [#8466](https://github.com/influxdata/telegraf/pull/8466) `inputs.snmp` Translate snmp field values +- [#8435](https://github.com/influxdata/telegraf/pull/8435) `common.kafka` Enable kafka zstd compression and idempotent writes +- [#8056](https://github.com/influxdata/telegraf/pull/8056) `inputs.monit` Add response_time to monit plugin +- [#8446](https://github.com/influxdata/telegraf/pull/8446) update to go 1.15.5 +- [#8428](https://github.com/influxdata/telegraf/pull/8428) `aggregators.basicstats` Add rate and interval to the basicstats aggregator plugin +- [#8575](https://github.com/influxdata/telegraf/pull/8575) `inputs.win_services` Added Glob pattern matching for "Windows Services" plugin +- [#6132](https://github.com/influxdata/telegraf/pull/6132) `inputs.mysql` Add per user metrics to mysql input +- [#8500](https://github.com/influxdata/telegraf/pull/8500) `inputs.github` [inputs.github] Add query of pull-request statistics +- [#8598](https://github.com/influxdata/telegraf/pull/8598) `processors.enum` Allow globs (wildcards) in config for tags/fields in enum processor +- [#8590](https://github.com/influxdata/telegraf/pull/8590) `inputs.ethtool` [ethtool] interface_up field added +- [#8579](https://github.com/influxdata/telegraf/pull/8579) `parsers.json` Add wildcard tags json parser support + +### New Parser Plugins + +- [#7778](https://github.com/influxdata/telegraf/pull/7778) `parsers.prometheus` Add a parser plugin for prometheus + +### New Serializer Plugins + +- [#8360](https://github.com/influxdata/telegraf/pull/8360) `serializers.prometheusremotewrite` Add prometheus remote write serializer + +### New Input Plugins + +- [#8163](https://github.com/influxdata/telegraf/pull/8163) `inputs.riemann` Support Riemann-Protobuff Listener +- [#8488](https://github.com/influxdata/telegraf/pull/8488) `inputs.intel_powerstat` New Intel 
PowerStat input plugin + +### New Output Plugins + +- [#8296](https://github.com/influxdata/telegraf/pull/8296) `outputs.yandex_cloud_monitoring` #8295 Initial Yandex.Cloud monitoring +- [#8202](https://github.com/influxdata/telegraf/pull/8202) `outputs.logzio` A new Logz.io output plugin + ## v1.16.3 [2020-12-01] -#### Bugfixes - - - [#8483](https://github.com/influxdata/telegraf/pull/8483) `inputs.gnmi` Log SubscribeResponse_Error message and code. #8482 - - [#7987](https://github.com/influxdata/telegraf/pull/7987) update godirwalk to v1.16.1 - - [#8438](https://github.com/influxdata/telegraf/pull/8438) `processors.starlark` Starlark example dropbytype - - [#8468](https://github.com/influxdata/telegraf/pull/8468) `inputs.sqlserver` Fix typo in column name - - [#8461](https://github.com/influxdata/telegraf/pull/8461) `inputs.phpfpm` [php-fpm] Fix possible "index out of range" - - [#8444](https://github.com/influxdata/telegraf/pull/8444) `inputs.apcupsd` Update mdlayher/apcupsd dependency - - [#8439](https://github.com/influxdata/telegraf/pull/8439) `processors.starlark` Show how to return a custom error with the Starlark processor - - [#8440](https://github.com/influxdata/telegraf/pull/8440) `parsers.csv` keep field name as is for csv timestamp column - - [#8436](https://github.com/influxdata/telegraf/pull/8436) `inputs.nvidia_smi` Add DriverVersion and CUDA Version to output - - [#8423](https://github.com/influxdata/telegraf/pull/8423) `processors.starlark` Show how to return several metrics with the Starlark processor - - [#8408](https://github.com/influxdata/telegraf/pull/8408) `processors.starlark` Support logging in starlark - - [#8315](https://github.com/influxdata/telegraf/pull/8315) add kinesis output to external plugins list - - [#8406](https://github.com/influxdata/telegraf/pull/8406) `outputs.wavefront` #8405 add non-retryable debug logging - - [#8404](https://github.com/influxdata/telegraf/pull/8404) `outputs.wavefront` Wavefront output should distinguish between retryable and non-retryable errors - - [#8401](https://github.com/influxdata/telegraf/pull/8401) `processors.starlark` Allow to catch errors that occur in the apply function +### Bugfixes + +- [#8483](https://github.com/influxdata/telegraf/pull/8483) `inputs.gnmi` Log SubscribeResponse_Error message and code. 
#8482 +- [#7987](https://github.com/influxdata/telegraf/pull/7987) update godirwalk to v1.16.1 +- [#8438](https://github.com/influxdata/telegraf/pull/8438) `processors.starlark` Starlark example dropbytype +- [#8468](https://github.com/influxdata/telegraf/pull/8468) `inputs.sqlserver` Fix typo in column name +- [#8461](https://github.com/influxdata/telegraf/pull/8461) `inputs.phpfpm` [php-fpm] Fix possible "index out of range" +- [#8444](https://github.com/influxdata/telegraf/pull/8444) `inputs.apcupsd` Update mdlayher/apcupsd dependency +- [#8439](https://github.com/influxdata/telegraf/pull/8439) `processors.starlark` Show how to return a custom error with the Starlark processor +- [#8440](https://github.com/influxdata/telegraf/pull/8440) `parsers.csv` keep field name as is for csv timestamp column +- [#8436](https://github.com/influxdata/telegraf/pull/8436) `inputs.nvidia_smi` Add DriverVersion and CUDA Version to output +- [#8423](https://github.com/influxdata/telegraf/pull/8423) `processors.starlark` Show how to return several metrics with the Starlark processor +- [#8408](https://github.com/influxdata/telegraf/pull/8408) `processors.starlark` Support logging in starlark +- [#8315](https://github.com/influxdata/telegraf/pull/8315) add kinesis output to external plugins list +- [#8406](https://github.com/influxdata/telegraf/pull/8406) `outputs.wavefront` #8405 add non-retryable debug logging +- [#8404](https://github.com/influxdata/telegraf/pull/8404) `outputs.wavefront` Wavefront output should distinguish between retryable and non-retryable errors +- [#8401](https://github.com/influxdata/telegraf/pull/8401) `processors.starlark` Allow to catch errors that occur in the apply function ## v1.16.2 [2020-11-13] -#### Bugfixes - - - [#8400](https://github.com/influxdata/telegraf/pull/8400) `parsers.csv` Fix parsing of multiple files with different headers (#6318). 
- - [#8326](https://github.com/influxdata/telegraf/pull/8326) `inputs.proxmox` proxmox: ignore QEMU templates and iron out a few bugs - - [#7991](https://github.com/influxdata/telegraf/pull/7991) `inputs.systemd_units` systemd_units: add --plain to command invocation (#7990) - - [#8307](https://github.com/influxdata/telegraf/pull/8307) fix links in external plugins readme - - [#8370](https://github.com/influxdata/telegraf/pull/8370) `inputs.redis` Fix minor typos in readmes - - [#8374](https://github.com/influxdata/telegraf/pull/8374) `inputs.smart` Fix SMART plugin to recognize all devices from config - - [#8288](https://github.com/influxdata/telegraf/pull/8288) `inputs.redfish` Add OData-Version header to requests - - [#8357](https://github.com/influxdata/telegraf/pull/8357) `inputs.vsphere` Prydin issue 8169 - - [#8356](https://github.com/influxdata/telegraf/pull/8356) `inputs.sqlserver` On-prem fix for #8324 - - [#8165](https://github.com/influxdata/telegraf/pull/8165) `outputs.wavefront` [output.wavefront] Introduced "immediate_flush" flag - - [#7938](https://github.com/influxdata/telegraf/pull/7938) `inputs.gnmi` added support for bytes encoding - - [#8337](https://github.com/influxdata/telegraf/pull/8337) `inputs.dcos` Update jwt-go module to address CVE-2020-26160 - - [#8350](https://github.com/influxdata/telegraf/pull/8350) `inputs.ras` fix plugins/input/ras test - - [#8329](https://github.com/influxdata/telegraf/pull/8329) `outputs.dynatrace` #8328 Fixed a bug with the state map in Dynatrace Plugin +### Bugfixes + +- [#8400](https://github.com/influxdata/telegraf/pull/8400) `parsers.csv` Fix parsing of multiple files with different headers (#6318). +- [#8326](https://github.com/influxdata/telegraf/pull/8326) `inputs.proxmox` proxmox: ignore QEMU templates and iron out a few bugs +- [#7991](https://github.com/influxdata/telegraf/pull/7991) `inputs.systemd_units` systemd_units: add --plain to command invocation (#7990) +- [#8307](https://github.com/influxdata/telegraf/pull/8307) fix links in external plugins readme +- [#8370](https://github.com/influxdata/telegraf/pull/8370) `inputs.redis` Fix minor typos in readmes +- [#8374](https://github.com/influxdata/telegraf/pull/8374) `inputs.smart` Fix SMART plugin to recognize all devices from config +- [#8288](https://github.com/influxdata/telegraf/pull/8288) `inputs.redfish` Add OData-Version header to requests +- [#8357](https://github.com/influxdata/telegraf/pull/8357) `inputs.vsphere` Prydin issue 8169 +- [#8356](https://github.com/influxdata/telegraf/pull/8356) `inputs.sqlserver` On-prem fix for #8324 +- [#8165](https://github.com/influxdata/telegraf/pull/8165) `outputs.wavefront` [output.wavefront] Introduced "immediate_flush" flag +- [#7938](https://github.com/influxdata/telegraf/pull/7938) `inputs.gnmi` added support for bytes encoding +- [#8337](https://github.com/influxdata/telegraf/pull/8337) `inputs.dcos` Update jwt-go module to address CVE-2020-26160 +- [#8350](https://github.com/influxdata/telegraf/pull/8350) `inputs.ras` fix plugins/input/ras test +- [#8329](https://github.com/influxdata/telegraf/pull/8329) `outputs.dynatrace` #8328 Fixed a bug with the state map in Dynatrace Plugin ## v1.16.1 [2020-10-28] -#### Release Notes - - - [#8318](https://github.com/influxdata/telegraf/pull/8318) `common.kafka` kafka sasl-mechanism auth support for SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +### Release Notes -#### Bugfixes +- [#8318](https://github.com/influxdata/telegraf/pull/8318) `common.kafka` kafka sasl-mechanism auth support for 
SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI - - [#8331](https://github.com/influxdata/telegraf/pull/8331) `inputs.sqlserver` SQL Server Azure PerfCounters Fix - - [#8325](https://github.com/influxdata/telegraf/pull/8325) `inputs.sqlserver` SQL Server - PerformanceCounters - removed synthetic counters - - [#8324](https://github.com/influxdata/telegraf/pull/8324) `inputs.sqlserver` SQL Server - server_properties added sql_version_desc - - [#8317](https://github.com/influxdata/telegraf/pull/8317) `inputs.ras` Disable RAS input plugin on specific Linux architectures: mips64, mips64le, ppc64le, riscv64 - - [#8309](https://github.com/influxdata/telegraf/pull/8309) `inputs.processes` processes: fix issue with stat no such file/dir - - [#8308](https://github.com/influxdata/telegraf/pull/8308) `inputs.win_perf_counters` fix issue with PDH_CALC_NEGATIVE_DENOMINATOR error - - [#8306](https://github.com/influxdata/telegraf/pull/8306) `inputs.ras` RAS plugin - fix for too many open files handlers +### Bugfixes +- [#8331](https://github.com/influxdata/telegraf/pull/8331) `inputs.sqlserver` SQL Server Azure PerfCounters Fix +- [#8325](https://github.com/influxdata/telegraf/pull/8325) `inputs.sqlserver` SQL Server - PerformanceCounters - removed synthetic counters +- [#8324](https://github.com/influxdata/telegraf/pull/8324) `inputs.sqlserver` SQL Server - server_properties added sql_version_desc +- [#8317](https://github.com/influxdata/telegraf/pull/8317) `inputs.ras` Disable RAS input plugin on specific Linux architectures: mips64, mips64le, ppc64le, riscv64 +- [#8309](https://github.com/influxdata/telegraf/pull/8309) `inputs.processes` processes: fix issue with stat no such file/dir +- [#8308](https://github.com/influxdata/telegraf/pull/8308) `inputs.win_perf_counters` fix issue with PDH_CALC_NEGATIVE_DENOMINATOR error +- [#8306](https://github.com/influxdata/telegraf/pull/8306) `inputs.ras` RAS plugin - fix for too many open files handlers ## v1.16.0 [2020-10-21] -#### Release Notes - - - New [code examples](/plugins/processors/starlark/testdata) for the [Starlark processor](/plugins/processors/starlark/README.md) - - [#7920](https://github.com/influxdata/telegraf/pull/7920) `inputs.rabbitmq` remove deprecated healthcheck - - [#7953](https://github.com/influxdata/telegraf/pull/7953) Add details to connect to InfluxDB OSS 2 and Cloud 2 - - [#8054](https://github.com/influxdata/telegraf/pull/8054) add guidelines run to external plugins with execd - - [#8198](https://github.com/influxdata/telegraf/pull/8198) `inputs.influxdb_v2_listener` change default influxdb port from 9999 to 8086 to match OSS 2.0 release - - [starlark](https://github.com/influxdata/telegraf/tree/release-1.16/plugins/processors/starlark/testdata) `processors.starlark` add various code exampels for the Starlark processor - -#### Features - - - [#7814](https://github.com/influxdata/telegraf/pull/7814) `agent` Send metrics in FIFO order - - [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input - - [#7870](https://github.com/influxdata/telegraf/pull/7870) `inputs.mongodb` Added new metric "pages written from cache" - - [#7875](https://github.com/influxdata/telegraf/pull/7875) `inputs.consul` input consul - added metric_version flag - - [#7894](https://github.com/influxdata/telegraf/pull/7894) `inputs.cloudwatch` Implement AWS CloudWatch Input Plugin ListMetrics API calls to use Active Metric Filter - - [#7904](https://github.com/influxdata/telegraf/pull/7904) `inputs.clickhouse` 
add additional metrics to clickhouse input plugin - - [#7934](https://github.com/influxdata/telegraf/pull/7934) `inputs.sqlserver` Database_type config to Split up sql queries by engine type - - [#8018](https://github.com/influxdata/telegraf/pull/8018) `processors.ifname` Add addTag debugging in ifname plugin - - [#8019](https://github.com/influxdata/telegraf/pull/8019) `outputs.elasticsearch` added force_document_id option to ES output enable resend data and avoiding duplicated ES documents - - [#8025](https://github.com/influxdata/telegraf/pull/8025) `inputs.aerospike` Add set, and histogram reporting to aerospike telegraf plugin - - [#8082](https://github.com/influxdata/telegraf/pull/8082) `inputs.snmp` Add agent host tag configuration option - - [#8113](https://github.com/influxdata/telegraf/pull/8113) `inputs.smart` Add more missing NVMe attributes to smart plugin - - [#8120](https://github.com/influxdata/telegraf/pull/8120) `inputs.sqlserver` Added more performance counters to SqlServer input plugin - - [#8127](https://github.com/influxdata/telegraf/pull/8127) `agent` Sort plugin name lists for output - - [#8132](https://github.com/influxdata/telegraf/pull/8132) `outputs.sumologic` Sumo Logic output plugin: carbon2 default to include field in metric - - [#8133](https://github.com/influxdata/telegraf/pull/8133) `inputs.influxdb_v2_listener` influxdb_v2_listener - add /ready route - - [#8168](https://github.com/influxdata/telegraf/pull/8168) `processors.starlark` add json parsing support to starlark - - [#8186](https://github.com/influxdata/telegraf/pull/8186) `inputs.sqlserver` New sql server queries (Azure) - - [#8189](https://github.com/influxdata/telegraf/pull/8189) `inputs.snmp_trap` If the community string is available, add it as a tag - - [#8190](https://github.com/influxdata/telegraf/pull/8190) `inputs.tail` Semigroupoid multiline (#8167) - - [#8196](https://github.com/influxdata/telegraf/pull/8196) `inputs.redis` add functionality to get values from redis commands - - [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to Go 1.15 - - [#8032](https://github.com/influxdata/telegraf/pull/8032) `inputs.http_response` http_response: match on status code - - [#8172](https://github.com/influxdata/telegraf/pull/8172) `inputs.sqlserver` New sql server queries (on-prem) - refactoring and formatting - - [#8216](https://github.com/influxdata/telegraf/pull/8216) `inputs.win_eventlog` Use TimeCreated timestamp from the event log as the metric timestamp (#8216) - - [#8157](https://github.com/influxdata/telegraf/pull/8157) `processors.port_name` Add support for fields and protocol lookups (#8157) - - [#8269](https://github.com/influxdata/telegraf/pull/8269) `outputs.influxdb` Default to gzip content encoding (#8269) - -#### Bugfixes - - - [#7816](https://github.com/influxdata/telegraf/pull/7816) `shim` fix bug with loading plugins in shim with no config - - [#7818](https://github.com/influxdata/telegraf/pull/7818) `build` Fix darwin package build flags - - [#7819](https://github.com/influxdata/telegraf/pull/7819) `inputs.tail` Close file to ensure it has been flushed - - [#7853](https://github.com/influxdata/telegraf/pull/7853) Initialize aggregation processors - - [#7865](https://github.com/influxdata/telegraf/pull/7865) `common.shim` shim logger improvements - - [#7867](https://github.com/influxdata/telegraf/pull/7867) `inputs.execd` fix issue with execd restart_delay being ignored - - [#7872](https://github.com/influxdata/telegraf/pull/7872) `inputs.gnmi` Recv next 
message after send returns EOF - - [#7877](https://github.com/influxdata/telegraf/pull/7877) Fix arch name in deb/rpm builds - - [#7909](https://github.com/influxdata/telegraf/pull/7909) fixes issue with rpm /var/log/telegraf permissions - - [#7918](https://github.com/influxdata/telegraf/pull/7918) `inputs.net` fix broken link to proc.c - - [#7927](https://github.com/influxdata/telegraf/pull/7927) `inputs.tail` Fix tail following on EOF - - [#8005](https://github.com/influxdata/telegraf/pull/8005) Fix docker-image make target - - [#8039](https://github.com/influxdata/telegraf/pull/8039) `serializers.splunkmetric` Remove Event field as it is causing issues with pre-trained source types - - [#8048](https://github.com/influxdata/telegraf/pull/8048) `inputs.jenkins` Multiple escaping occurs on Jenkins URLs at certain folder depth - - [#8071](https://github.com/influxdata/telegraf/pull/8071) `inputs.kubernetes` add missing error check for HTTP req failure - - [#8145](https://github.com/influxdata/telegraf/pull/8145) `processors.execd` Increased the maximum serialized metric size in line protocol - - [#8159](https://github.com/influxdata/telegraf/pull/8159) `outputs.dynatrace` Dynatrace Output: change handling of monotonic counters - - [#8176](https://github.com/influxdata/telegraf/pull/8176) fix panic on streaming processers using logging - - [#8177](https://github.com/influxdata/telegraf/pull/8177) `parsers.influx` fix: plugins/parsers/influx: avoid ParseError.Error panic - - [#8199](https://github.com/influxdata/telegraf/pull/8199) `inputs.docker` Fix vulnerabilities found in BDBA scan - - [#8200](https://github.com/influxdata/telegraf/pull/8200) `inputs.sqlserver` Fixed Query mapping - - [#8201](https://github.com/influxdata/telegraf/pull/8201) `outputs.sumologic` Fix carbon2 serializer not falling through to field separate when carbon2_format field is unset - - [#8210](https://github.com/influxdata/telegraf/pull/8210) update gopsutil: fix procstat performance regression - - [#8162](https://github.com/influxdata/telegraf/pull/8162) Fix bool serialization when using carbon2 - - [#8240](https://github.com/influxdata/telegraf/pull/8240) Fix bugs found by LGTM analysis platform - - [#8251](https://github.com/influxdata/telegraf/pull/8251) `outputs.dynatrace` Dynatrace Output Plugin: Fixed behaviour when state map is cleared - - [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd - - [#8282](https://github.com/influxdata/telegraf/pull/8282) `outputs.azure_monitor` Fix using empty string as the namespace prefix (#8282) - - [#7263](https://github.com/influxdata/telegraf/issues/7263) `inputs.kafka_consumer` Fix using old partition leader during Kafka cluster node failure (#7263) - - [#1906](https://github.com/influxdata/telegraf/issues/1906) `inputs.phpfpm` Fix handling URLs with question mark (#1906) - - [#8290](https://github.com/influxdata/telegraf/pull/8290) `inputs.proxmox` Fix wrong memory measurements of containers and vms (#8290) - -#### New Input Plugins - - - [influxdb_v2_listener](/plugins/inputs/influxdb_v2_listener/README.md) Influxdb v2 listener - Contributed by @magichair - - [intel_rdt](/plugins/inputs/intel_rdt/README.md) New input plugin for Intel RDT (Intel Resource Director Technology) - Contributed by @p-zak - - [nsd](/plugins/inputs/nsd/README.md) add nsd input plugin - Contributed by @gearnode - - [opcua](/plugins/inputs/opcua/README.md) Add OPC UA input plugin - Contributed by InfluxData - - 
[proxmox](/plugins/inputs/proxmox/README.md) Proxmox plugin - Contributed by @effitient - - [ras](/plugins/inputs/ras/README.md) New input plugin for RAS (Reliability, Availability and Serviceability) - Contributed by @p-zak - - [win_eventlog](/plugins/inputs/win_eventlog/README.md) Windows eventlog input plugin - Contributed by @simnv - -#### New Output Plugins - - - [dynatrace](/plugins/outputs/dynatrace/README.md) Dynatrace output plugin - Contributed by @thschue - - [sumologic](/plugins/outputs/sumologic/README.md) Sumo Logic output plugin - Contributed by @pmalek-sumo - - [timestream](/plugins/outputs/timestream) Timestream Output Plugin - Contributed by @piotrwest - -#### New External Plugins - - See [EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) for a full list of external plugins - - - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - - [youtube-telegraf-plugin](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather view and subscriber stats from your youtube videos - - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. - - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. +### Release Notes -## v1.15.3 [2020-09-11] +- New [code examples](/plugins/processors/starlark/testdata) for the [Starlark processor](/plugins/processors/starlark/README.md) +- [#7920](https://github.com/influxdata/telegraf/pull/7920) `inputs.rabbitmq` remove deprecated healthcheck +- [#7953](https://github.com/influxdata/telegraf/pull/7953) Add details to connect to InfluxDB OSS 2 and Cloud 2 +- [#8054](https://github.com/influxdata/telegraf/pull/8054) add guidelines run to external plugins with execd +- [#8198](https://github.com/influxdata/telegraf/pull/8198) `inputs.influxdb_v2_listener` change default influxdb port from 9999 to 8086 to match OSS 2.0 release +- [starlark](https://github.com/influxdata/telegraf/tree/release-1.16/plugins/processors/starlark/testdata) `processors.starlark` add various code exampels for the Starlark processor -#### Release Notes +### Features + +- [#7814](https://github.com/influxdata/telegraf/pull/7814) `agent` Send metrics in FIFO order +- [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input +- [#7870](https://github.com/influxdata/telegraf/pull/7870) `inputs.mongodb` Added new metric "pages written from cache" +- [#7875](https://github.com/influxdata/telegraf/pull/7875) `inputs.consul` input consul - added metric_version flag +- [#7894](https://github.com/influxdata/telegraf/pull/7894) `inputs.cloudwatch` Implement AWS CloudWatch Input Plugin ListMetrics API calls to use Active Metric Filter +- [#7904](https://github.com/influxdata/telegraf/pull/7904) `inputs.clickhouse` add additional metrics to clickhouse input plugin +- [#7934](https://github.com/influxdata/telegraf/pull/7934) `inputs.sqlserver` Database_type config to Split up sql queries by engine type +- [#8018](https://github.com/influxdata/telegraf/pull/8018) `processors.ifname` Add addTag debugging in ifname plugin +- [#8019](https://github.com/influxdata/telegraf/pull/8019) `outputs.elasticsearch` added force_document_id option to ES output enable resend data and avoiding duplicated ES documents +- [#8025](https://github.com/influxdata/telegraf/pull/8025) `inputs.aerospike` Add set, and histogram reporting to aerospike telegraf plugin 
+- [#8082](https://github.com/influxdata/telegraf/pull/8082) `inputs.snmp` Add agent host tag configuration option +- [#8113](https://github.com/influxdata/telegraf/pull/8113) `inputs.smart` Add more missing NVMe attributes to smart plugin +- [#8120](https://github.com/influxdata/telegraf/pull/8120) `inputs.sqlserver` Added more performance counters to SqlServer input plugin +- [#8127](https://github.com/influxdata/telegraf/pull/8127) `agent` Sort plugin name lists for output +- [#8132](https://github.com/influxdata/telegraf/pull/8132) `outputs.sumologic` Sumo Logic output plugin: carbon2 default to include field in metric +- [#8133](https://github.com/influxdata/telegraf/pull/8133) `inputs.influxdb_v2_listener` influxdb_v2_listener - add /ready route +- [#8168](https://github.com/influxdata/telegraf/pull/8168) `processors.starlark` add json parsing support to starlark +- [#8186](https://github.com/influxdata/telegraf/pull/8186) `inputs.sqlserver` New sql server queries (Azure) +- [#8189](https://github.com/influxdata/telegraf/pull/8189) `inputs.snmp_trap` If the community string is available, add it as a tag +- [#8190](https://github.com/influxdata/telegraf/pull/8190) `inputs.tail` Semigroupoid multiline (#8167) +- [#8196](https://github.com/influxdata/telegraf/pull/8196) `inputs.redis` add functionality to get values from redis commands +- [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to Go 1.15 +- [#8032](https://github.com/influxdata/telegraf/pull/8032) `inputs.http_response` http_response: match on status code +- [#8172](https://github.com/influxdata/telegraf/pull/8172) `inputs.sqlserver` New sql server queries (on-prem) - refactoring and formatting + +### Bugfixes + +- [#7816](https://github.com/influxdata/telegraf/pull/7816) `shim` fix bug with loading plugins in shim with no config +- [#7818](https://github.com/influxdata/telegraf/pull/7818) `build` Fix darwin package build flags +- [#7819](https://github.com/influxdata/telegraf/pull/7819) `inputs.tail` Close file to ensure it has been flushed +- [#7853](https://github.com/influxdata/telegraf/pull/7853) Initialize aggregation processors +- [#7865](https://github.com/influxdata/telegraf/pull/7865) `common.shim` shim logger improvements +- [#7867](https://github.com/influxdata/telegraf/pull/7867) `inputs.execd` fix issue with execd restart_delay being ignored +- [#7872](https://github.com/influxdata/telegraf/pull/7872) `inputs.gnmi` Recv next message after send returns EOF +- [#7877](https://github.com/influxdata/telegraf/pull/7877) Fix arch name in deb/rpm builds +- [#7909](https://github.com/influxdata/telegraf/pull/7909) fixes issue with rpm /var/log/telegraf permissions +- [#7918](https://github.com/influxdata/telegraf/pull/7918) `inputs.net` fix broken link to proc.c +- [#7927](https://github.com/influxdata/telegraf/pull/7927) `inputs.tail` Fix tail following on EOF +- [#8005](https://github.com/influxdata/telegraf/pull/8005) Fix docker-image make target +- [#8039](https://github.com/influxdata/telegraf/pull/8039) `serializers.splunkmetric` Remove Event field as it is causing issues with pre-trained source types +- [#8048](https://github.com/influxdata/telegraf/pull/8048) `inputs.jenkins` Multiple escaping occurs on Jenkins URLs at certain folder depth +- [#8071](https://github.com/influxdata/telegraf/pull/8071) `inputs.kubernetes` add missing error check for HTTP req failure +- [#8145](https://github.com/influxdata/telegraf/pull/8145) `processors.execd` Increased the maximum serialized metric size in 
line protocol +- [#8159](https://github.com/influxdata/telegraf/pull/8159) `outputs.dynatrace` Dynatrace Output: change handling of monotonic counters +- [#8176](https://github.com/influxdata/telegraf/pull/8176) fix panic on streaming processers using logging +- [#8177](https://github.com/influxdata/telegraf/pull/8177) `parsers.influx` fix: plugins/parsers/influx: avoid ParseError.Error panic +- [#8199](https://github.com/influxdata/telegraf/pull/8199) `inputs.docker` Fix vulnerabilities found in BDBA scan +- [#8200](https://github.com/influxdata/telegraf/pull/8200) `inputs.sqlserver` Fixed Query mapping +- [#8201](https://github.com/influxdata/telegraf/pull/8201) `outputs.sumologic` Fix carbon2 serializer not falling through to field separate when carbon2_format field is unset +- [#8210](https://github.com/influxdata/telegraf/pull/8210) update gopsutil: fix procstat performance regression +- [#8162](https://github.com/influxdata/telegraf/pull/8162) Fix bool serialization when using carbon2 +- [#8240](https://github.com/influxdata/telegraf/pull/8240) Fix bugs found by LGTM analysis platform +- [#8251](https://github.com/influxdata/telegraf/pull/8251) `outputs.dynatrace` Dynatrace Output Plugin: Fixed behaviour when state map is cleared +- [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd + +### New Input Plugins + +- [influxdb_v2_listener](/plugins/inputs/influxdb_v2_listener/README.md) Influxdb v2 listener - Contributed by @magichair +- [intel_rdt](/plugins/inputs/intel_rdt/README.md) New input plugin for Intel RDT (Intel Resource Director Technology) - Contributed by @p-zak +- [nsd](/plugins/inputs/nsd/README.md) add nsd input plugin - Contributed by @gearnode +- [opcua](/plugins/inputs/opcua/README.md) Add OPC UA input plugin - Contributed by InfluxData +- [proxmox](/plugins/inputs/proxmox/README.md) Proxmox plugin - Contributed by @effitient +- [ras](/plugins/inputs/ras/README.md) New input plugin for RAS (Reliability, Availability and Serviceability) - Contributed by @p-zak +- [win_eventlog](/plugins/inputs/win_eventlog/README.md) Windows eventlog input plugin - Contributed by @simnv + +### New Output Plugins + +- [dynatrace](/plugins/outputs/dynatrace/README.md) Dynatrace output plugin - Contributed by @thschue +- [sumologic](/plugins/outputs/sumologic/README.md) Sumo Logic output plugin - Contributed by @pmalek-sumo +- [timestream](/plugins/outputs/timestream) Timestream Output Plugin - Contributed by @piotrwest + +### New External Plugins + +See [EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) for a full list of external plugins + +- [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. +- [youtube-telegraf-plugin](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather view and subscriber stats from your youtube videos +- [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. +- [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. 
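+For reference on the listener port change called out in the release notes above ([#8198](https://github.com/influxdata/telegraf/pull/8198)), a minimal configuration sketch is shown below. The `service_address` option name is an assumption here; see the [influxdb_v2_listener README](/plugins/inputs/influxdb_v2_listener/README.md) for the authoritative settings.
+
+```toml
+# Minimal sketch only; verify option names against the plugin README.
+[[inputs.influxdb_v2_listener]]
+  ## Default port changed from 9999 to 8086 to match the InfluxDB OSS 2.0 release.
+  service_address = ":8086"
+```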
+ +## v1.15.4 [2020-10-20] + +### Bugfixes + +- [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd +- [#8176](https://github.com/influxdata/telegraf/pull/8176) `agent` fix panic on streaming processers using logging - - Many documentation updates - - New [code examples](https://github.com/influxdata/telegraf/tree/master/plugins/processors/starlark/testdata) for the [Starlark processor](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/README.md) +## v1.15.3 [2020-09-11] -#### Bugfixes +### Release Notes - - [#7999](https://github.com/influxdata/telegraf/pull/7999) `agent` fix minor agent error message race condition - - [#8051](https://github.com/influxdata/telegraf/pull/8051) `build` fix docker build. update dockerfiles to Go 1.14 - - [#8052](https://github.com/influxdata/telegraf/pull/8052) `shim` fix bug in shim logger affecting AddError - - [#7996](https://github.com/influxdata/telegraf/pull/7996) `shim` fix issue with shim use of config.Duration - - [#8006](https://github.com/influxdata/telegraf/pull/8006) `inputs.eventhub_consumer` Fix string to int conversion in eventhub consumer - - [#7986](https://github.com/influxdata/telegraf/pull/7986) `inputs.http_listener_v2` make http header tags case insensitive - - [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input - - [#7861](https://github.com/influxdata/telegraf/pull/7861) `inputs.ping` Fix Ping Input plugin for FreeBSD's ping6 - - [#7808](https://github.com/influxdata/telegraf/pull/7808) `inputs.sqlserver` added new counter - Lock Timeouts (timeout > 0)/sec - - [#8026](https://github.com/influxdata/telegraf/pull/8026) `inputs.vsphere` vSphere Fixed missing clustername issue 7878 - - [#8020](https://github.com/influxdata/telegraf/pull/8020) `processors.starlark` improve the quality of starlark docs by executing them as tests - - [#7976](https://github.com/influxdata/telegraf/pull/7976) `processors.starlark` add pivot example for starlark processor - - [#7134](https://github.com/influxdata/telegraf/pull/7134) `outputs.application_insights` Added the ability to set the endpoint url - - [#7908](https://github.com/influxdata/telegraf/pull/7908) `outputs.opentsdb` fix JSON handling of values NaN and Inf +- Many documentation updates +- New [code examples](https://github.com/influxdata/telegraf/tree/master/plugins/processors/starlark/testdata) for the [Starlark processor](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/README.md) + +### Bugfixes + +- [#7999](https://github.com/influxdata/telegraf/pull/7999) `agent` fix minor agent error message race condition +- [#8051](https://github.com/influxdata/telegraf/pull/8051) `build` fix docker build. 
update dockerfiles to Go 1.14 +- [#8052](https://github.com/influxdata/telegraf/pull/8052) `shim` fix bug in shim logger affecting AddError +- [#7996](https://github.com/influxdata/telegraf/pull/7996) `shim` fix issue with shim use of config.Duration +- [#8006](https://github.com/influxdata/telegraf/pull/8006) `inputs.eventhub_consumer` Fix string to int conversion in eventhub consumer +- [#7986](https://github.com/influxdata/telegraf/pull/7986) `inputs.http_listener_v2` make http header tags case insensitive +- [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input +- [#7861](https://github.com/influxdata/telegraf/pull/7861) `inputs.ping` Fix Ping Input plugin for FreeBSD's ping6 +- [#7808](https://github.com/influxdata/telegraf/pull/7808) `inputs.sqlserver` added new counter - Lock Timeouts (timeout > 0)/sec +- [#8026](https://github.com/influxdata/telegraf/pull/8026) `inputs.vsphere` vSphere Fixed missing clustername issue 7878 +- [#8020](https://github.com/influxdata/telegraf/pull/8020) `processors.starlark` improve the quality of starlark docs by executing them as tests +- [#7976](https://github.com/influxdata/telegraf/pull/7976) `processors.starlark` add pivot example for starlark processor +- [#7134](https://github.com/influxdata/telegraf/pull/7134) `outputs.application_insights` Added the ability to set the endpoint url +- [#7908](https://github.com/influxdata/telegraf/pull/7908) `outputs.opentsdb` fix JSON handling of values NaN and Inf ## v1.15.2 [2020-07-31] -#### Bug Fixes +### Bug Fixes - [#7905](https://github.com/influxdata/telegraf/issues/7905): Fix RPM /var/log/telegraf permissions - [#7880](https://github.com/influxdata/telegraf/issues/7880): Fix tail following on EOF ## v1.15.1 [2020-07-22] -#### Bug Fixes +### Bug Fixes - [#7877](https://github.com/influxdata/telegraf/pull/7877): Fix architecture in non-amd64 deb and rpm packages. ## v1.15.0 [2020-07-22] -#### Release Notes +### Release Notes - The `logparser` input is deprecated, use the `tail` input with `data_format = "grok"` as a replacement. @@ -215,12 +1896,12 @@ `/etc/telegraf/telegraf.conf.sample`. The tar and zip packages now include the version in the top level directory. -#### New Inputs +### New Inputs - [nginx_sts](/plugins/inputs/nginx_sts/README.md) - Contributed by @zdmytriv - [redfish](/plugins/inputs/redfish/README.md) - Contributed by @sarvanikonda -#### New Processors +### New Processors - [defaults](/plugins/processors/defaults/README.md) - Contributed by @jregistr - [execd](/plugins/processors/execd/README.md) - Contributed by @influxdata @@ -230,12 +1911,12 @@ - [reverse_dns](/plugins/processors/reverse_dns/README.md) - Contributed by @influxdata - [starlark](/plugins/processors/starlark/README.md) - Contributed by @influxdata -#### New Outputs +### New Outputs - [newrelic](/plugins/outputs/newrelic/README.md) - Contributed by @hsinghkalsi - [execd](/plugins/outputs/execd/README.md) - Contributed by @influxdata -#### Features +### Features - [#7634](https://github.com/influxdata/telegraf/pull/7634): Add support for streaming processors. - [#6905](https://github.com/influxdata/telegraf/pull/6905): Add commands stats to mongodb input plugin. @@ -283,7 +1964,7 @@ - [#7154](https://github.com/influxdata/telegraf/pull/7154): Add v3 metadata support to ecs input. - [#7792](https://github.com/influxdata/telegraf/pull/7792): Support utf-16 in file and tail inputs. 
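+The v1.15.0 release notes above deprecate the `logparser` input in favor of the `tail` input with `data_format = "grok"`. A minimal migration sketch follows; the log path and grok pattern are illustrative assumptions rather than values taken from the release notes.
+
+```toml
+# Sketch of the suggested logparser replacement; adjust files and patterns to your logs.
+[[inputs.tail]]
+  files = ["/var/log/example.log"]
+  data_format = "grok"
+  grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
+```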
-#### Bug Fixes +### Bug Fixes - [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. - [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. @@ -302,7 +1983,7 @@ ## v1.14.5 [2020-06-30] -#### Bug Fixes +### Bug Fixes - [#7686](https://github.com/influxdata/telegraf/pull/7686): Improve the performance of the procstat input. - [#7658](https://github.com/influxdata/telegraf/pull/7658): Fix ping exit code handling on non-Linux. @@ -314,7 +1995,7 @@ ## v1.14.4 [2020-06-09] -#### Bug Fixes +### Bug Fixes - [#7325](https://github.com/influxdata/telegraf/issues/7325): Fix "cannot insert the value NULL error" with PerformanceCounters query. - [#7579](https://github.com/influxdata/telegraf/pull/7579): Fix numeric to bool conversion in converter processor. @@ -323,7 +2004,7 @@ ## v1.14.3 [2020-05-19] -#### Bug Fixes +### Bug Fixes - [#7412](https://github.com/influxdata/telegraf/pull/7412): Use same timestamp for all objects in arrays in the json parser. - [#7343](https://github.com/influxdata/telegraf/issues/7343): Handle multiple metrics with the same timestamp in dedup processor. @@ -332,7 +2013,7 @@ ## v1.14.2 [2020-04-28] -#### Bug Fixes +### Bug Fixes - [#7241](https://github.com/influxdata/telegraf/issues/7241): Trim whitespace from instance tag in sqlserver input. - [#7322](https://github.com/influxdata/telegraf/issues/7322): Use increased AWS Cloudwatch GetMetricData limit of 500 metrics per call. @@ -346,7 +2027,7 @@ ## v1.14.1 [2020-04-14] -#### Bug Fixes +### Bug Fixes - [#7236](https://github.com/influxdata/telegraf/issues/7236): Fix PerformanceCounter query performance degradation in sqlserver input. - [#7257](https://github.com/influxdata/telegraf/issues/7257): Fix error when using the Name field in template processor. @@ -356,7 +2037,7 @@ ## v1.14 [2020-03-26] -#### Release Notes +### Release Notes - In the `sqlserver` input, the `sqlserver_azurestats` measurement has been renamed to `sqlserver_azure_db_resource_stats` due to an issue where numeric @@ -365,7 +2046,7 @@ - The `date` processor now uses the UTC timezone when creating its tag. In previous versions the local time was used. -#### New Inputs +### New Inputs - [clickhouse](/plugins/inputs/clickhouse/README.md) - Contributed by @kshvakov - [execd](/plugins/inputs/execd/README.md) - Contributed by @jgraichen @@ -377,17 +2058,17 @@ - [sflow](/plugins/inputs/sflow/README.md) - Contributed by @influxdata - [wireguard](/plugins/inputs/wireguard/README.md) - Contributed by @LINKIWI -#### New Processors +### New Processors - [dedup](/plugins/processors/dedup/README.md) - Contributed by @igomura - [template](/plugins/processors/template/README.md) - Contributed by @RobMalvern - [s2geo](/plugins/processors/s2geo/README.md) - Contributed by @alespour -#### New Outputs +### New Outputs - [warp10](/plugins/outputs/warp10/README.md) - Contributed by @aurrelhebert -#### Features +### Features - [#6730](https://github.com/influxdata/telegraf/pull/6730): Add page_faults for mongodb wired tiger. - [#6798](https://github.com/influxdata/telegraf/pull/6798): Add use_sudo option to ipmi_sensor input. @@ -426,7 +2107,7 @@ - [#7150](https://github.com/influxdata/telegraf/pull/7150): Add option for explicitly including queries in sqlserver input. - [#7173](https://github.com/influxdata/telegraf/pull/7173): Add support for GNMI DecimalVal type to cisco_telemetry_gnmi. 
-#### Bug Fixes +### Bug Fixes - [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input. - [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. @@ -442,11 +2123,11 @@ ## v1.13.4 [2020-02-25] -#### Release Notes +### Release Notes - Official packages now built with Go 1.13.8. -#### Bug Fixes +### Bug Fixes - [#6988](https://github.com/influxdata/telegraf/issues/6988): Parse NaN values from summary types in prometheus input. - [#6820](https://github.com/influxdata/telegraf/issues/6820): Fix pgbouncer input when used with newer pgbouncer versions. @@ -458,7 +2139,7 @@ ## v1.13.3 [2020-02-04] -#### Bug Fixes +### Bug Fixes - [#5744](https://github.com/influxdata/telegraf/issues/5744): Fix kibana input with Kibana versions greater than 6.4. - [#6960](https://github.com/influxdata/telegraf/issues/6960): Fix duplicate TrackingIDs can be returned in queue consumer plugins. @@ -467,7 +2148,7 @@ ## v1.13.2 [2020-01-21] -#### Bug Fixes +### Bug Fixes - [#2652](https://github.com/influxdata/telegraf/issues/2652): Warn without error when processes input is started on Windows. - [#6890](https://github.com/influxdata/telegraf/issues/6890): Only parse certificate blocks in x509_cert input. @@ -479,7 +2160,7 @@ ## v1.13.1 [2020-01-08] -#### Bug Fixes +### Bug Fixes - [#6788](https://github.com/influxdata/telegraf/issues/6788): Fix ServerProperty query stops working on Azure after failover. - [#6803](https://github.com/influxdata/telegraf/pull/6803): Add leading period to OID in SNMP v1 generic traps. @@ -494,7 +2175,7 @@ ## v1.13 [2019-12-12] -#### Release Notes +### Release Notes - Official packages built with Go 1.13.5. This affects the minimum supported version on several platforms, most notably requiring Windows 7 (2008 R2) or @@ -506,7 +2187,7 @@ passthrough metrics will be unchanged. Refer to the `prometheus` input for details about the mapping. -#### New Inputs +### New Inputs - [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn - [ethtool](/plugins/inputs/ethtool/README.md) - Contributed by @philippreston @@ -515,15 +2196,15 @@ - [synproxy](/plugins/inputs/synproxy/README.md) - Contributed by @rfrenayworldstream - [systemd_units](/plugins/inputs/systemd_units/README.md) - Contributed by @benschweizer -#### New Processors +### New Processors - [clone](/plugins/processors/clone/README.md) - Contributed by @adrianlzt -#### New Aggregators +### New Aggregators - [merge](/plugins/aggregators/merge/README.md) - Contributed by @influxdata -#### Features +### Features - [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input. - [#6361](https://github.com/influxdata/telegraf/pull/6361): Add ability to read query from file to postgresql_extensible input. @@ -565,7 +2246,7 @@ - [#6740](https://github.com/influxdata/telegraf/pull/6740): Add base64decode operation to string processor. - [#6790](https://github.com/influxdata/telegraf/pull/6790): Add option to control collecting global variables to mysql input. -#### Bug Fixes +### Bug Fixes - [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config. - [#6583](https://github.com/influxdata/telegraf/issues/6583): Use 1h or 3h rain values as appropriate in openweathermap input. 
@@ -582,7 +2263,7 @@ ## v1.12.6 [2019-11-19] -#### Bug Fixes +### Bug Fixes - [#6666](https://github.com/influxdata/telegraf/issues/6666): Fix many plugin errors are logged at debug logging level. - [#6652](https://github.com/influxdata/telegraf/issues/6652): Use nanosecond precision in docker_log input. @@ -591,7 +2272,7 @@ ## v1.12.5 [2019-11-12] -#### Bug Fixes +### Bug Fixes - [#6576](https://github.com/influxdata/telegraf/issues/6576): Fix incorrect results in ping input plugin. - [#6610](https://github.com/influxdata/telegraf/pull/6610): Add missing character replacement to sql_instance tag. @@ -605,11 +2286,11 @@ ## v1.12.4 [2019-10-23] -#### Release Notes +### Release Notes - Official packages built with Go 1.12.12. -#### Bug Fixes +### Bug Fixes - [#6521](https://github.com/influxdata/telegraf/issues/6521): Fix metric generation with ping input native method. - [#6541](https://github.com/influxdata/telegraf/issues/6541): Exclude alias tag if unset from plugin internal stats. @@ -617,7 +2298,7 @@ ## v1.12.3 [2019-10-07] -#### Bug Fixes +### Bug Fixes - [#6445](https://github.com/influxdata/telegraf/issues/6445): Use batch serialization format in exec output. - [#6455](https://github.com/influxdata/telegraf/issues/6455): Build official packages with Go 1.12.10. @@ -629,7 +2310,7 @@ ## v1.12.2 [2019-09-24] -#### Bug Fixes +### Bug Fixes - [#6386](https://github.com/influxdata/telegraf/issues/6386): Fix detection of layout timestamps in csv and json parser. - [#6394](https://github.com/influxdata/telegraf/issues/6394): Fix parsing of BATTDATE in apcupsd input. @@ -639,7 +2320,7 @@ ## v1.12.1 [2019-09-10] -#### Bug Fixes +### Bug Fixes - [#6344](https://github.com/influxdata/telegraf/issues/6344): Fix depends on GLIBC_2.14 symbol version. - [#6329](https://github.com/influxdata/telegraf/issues/6329): Fix filecount for paths with trailing slash. @@ -652,14 +2333,14 @@ ## v1.12 [2019-09-03] -#### Release Notes +### Release Notes - The cluster health related fields in the elasticsearch input have been split out from the `elasticsearch_indices` measurement into the new `elasticsearch_cluster_health_indices` measurement as they were originally combined by error. -#### New Inputs +### New Inputs - [apcupsd](/plugins/inputs/apcupsd/README.md) - Contributed by @jonaz - [docker_log](/plugins/inputs/docker_log/README.md) - Contributed by @prashanthjbabu @@ -669,22 +2350,22 @@ - [openntpd](/plugins/inputs/openntpd/README.md) - Contributed by @aromeyer - [uwsgi](/plugins/inputs/uwsgi/README.md) - Contributed by @blaggacao -#### New Parsers +### New Parsers - [form_urlencoded](/plugins/parsers/form_urlencoded/README.md) - Contributed by @byonchev -#### New Processors +### New Processors - [date](/plugins/processors/date/README.md) - Contributed by @influxdata - [pivot](/plugins/processors/pivot/README.md) - Contributed by @influxdata - [tag_limit](/plugins/processors/tag_limit/README.md) - Contributed by @memory - [unpivot](/plugins/processors/unpivot/README.md) - Contributed by @influxdata -#### New Outputs +### New Outputs - [exec](/plugins/outputs/exec/README.md) - Contributed by @Jaeyo -#### Features +### Features - [#5842](https://github.com/influxdata/telegraf/pull/5842): Improve performance of wavefront serializer. - [#5863](https://github.com/influxdata/telegraf/pull/5863): Allow regex processor to append tag values. @@ -736,7 +2417,7 @@ - [#6207](https://github.com/influxdata/telegraf/pull/6207): Add ability to label inputs for logging. 
- [#6300](https://github.com/influxdata/telegraf/pull/6300): Add TLS support to nginx_plus, nginx_plus_api and nginx_vts. -#### Bug Fixes +### Bug Fixes - [#5692](https://github.com/influxdata/telegraf/issues/5692): Fix sensor read error stops reporting of all sensors in temp input. - [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input. @@ -753,7 +2434,7 @@ ## v1.11.5 [2019-08-27] -#### Bug Fixes +### Bug Fixes - [#6250](https://github.com/influxdata/telegraf/pull/6250): Update go-sql-driver/mysql driver to 1.4.1 to address auth issues. - [#6279](https://github.com/influxdata/telegraf/issues/6279): Return error status from --test if input plugins produce an error. @@ -766,7 +2447,7 @@ ## v1.11.4 [2019-08-06] -#### Bug Fixes +### Bug Fixes - [#6200](https://github.com/influxdata/telegraf/pull/6200): Correct typo in kubernetes logsfs_available_bytes field. - [#6191](https://github.com/influxdata/telegraf/issues/6191): Skip floats that are NaN or Inf in Datadog output. @@ -774,7 +2455,7 @@ ## v1.11.3 [2019-07-23] -#### Bug Fixes +### Bug Fixes - [#6054](https://github.com/influxdata/telegraf/issues/6054): Fix unable to reconnect after vCenter reboot in vsphere input. - [#6073](https://github.com/influxdata/telegraf/issues/6073): Handle unknown error in nvidia-smi output. @@ -787,7 +2468,7 @@ ## v1.11.2 [2019-07-09] -#### Bug Fixes +### Bug Fixes - [#6056](https://github.com/influxdata/telegraf/pull/6056): Fix source address ping flag on BSD. - [#6059](https://github.com/influxdata/telegraf/issues/6059): Fix value out of range error on 32-bit systems in bind input. @@ -798,7 +2479,7 @@ ## v1.11.1 [2019-06-25] -#### Bug Fixes +### Bug Fixes - [#5980](https://github.com/influxdata/telegraf/issues/5980): Cannot set mount_points option in disk input. - [#5983](https://github.com/influxdata/telegraf/issues/5983): Omit keys when creating measurement names for GNMI telemetry. @@ -812,7 +2493,7 @@ ## v1.11 [2019-06-11] -#### Release Notes +### Release Notes - The `uptime_format` field in the system input has been deprecated, use the `uptime` field instead. @@ -820,7 +2501,7 @@ requires `GetMetricData` permissions instead of `GetMetricStatistics`. The `units` tag is not available from this API and is no longer collected. -#### New Inputs +### New Inputs - [bind](/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek - [cisco_telemetry_gnmi](/plugins/inputs/cisco_telemetry_gnmi/README.md) - Contributed by @sbyx @@ -830,20 +2511,20 @@ - [openweathermap](/plugins/inputs/openweathermap/README.md) - Contributed by @regel - [powerdns_recursor](/plugins/inputs/powerdns_recursor/README.md) - Contributed by @dupondje -#### New Aggregators +### New Aggregators - [final](/plugins/aggregators/final/README.md) - Contributed by @oplehto -#### New Outputs +### New Outputs - [syslog](/plugins/outputs/syslog/README.md) - Contributed by @javicrespo - [health](/plugins/outputs/health/README.md) - Contributed by @influxdata -#### New Serializers +### New Serializers - [wavefront](/plugins/serializers/wavefront/README.md) - Contributed by @puckpuck -#### Features +### Features - [#5556](https://github.com/influxdata/telegraf/pull/5556): Add TTL field to ping input. - [#5569](https://github.com/influxdata/telegraf/pull/5569): Add hexadecimal string to integer conversion to converter processor. @@ -875,7 +2556,7 @@ - [#5547](https://github.com/influxdata/telegraf/pull/5547): Add file rotation support to the file output. 
- [#5955](https://github.com/influxdata/telegraf/pull/5955): Add source tag to hddtemp plugin. -#### Bug Fixes +### Bug Fixes - [#5692](https://github.com/influxdata/telegraf/pull/5692): Temperature input plugin stops working when WiFi is turned off. - [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service manager. @@ -901,7 +2582,7 @@ ## v1.10.4 [2019-05-14] -#### Bug Fixes +### Bug Fixes - [#5764](https://github.com/influxdata/telegraf/pull/5764): Fix race condition in the Wavefront parser. - [#5783](https://github.com/influxdata/telegraf/pull/5783): Create telegraf user in pre-install rpm scriptlet. @@ -915,20 +2596,20 @@ ## v1.10.3 [2019-04-16] -#### Bug Fixes +### Bug Fixes - [#5680](https://github.com/influxdata/telegraf/pull/5680): Allow colons in metric names in prometheus_client output. - [#5716](https://github.com/influxdata/telegraf/pull/5716): Set log directory attributes in rpm spec. ## v1.10.2 [2019-04-02] -#### Release Notes +### Release Notes - String fields no longer have leading and trailing quotation marks removed in the grok parser. If you are capturing quoted strings you may need to update the patterns. -#### Bug Fixes +### Bug Fixes - [#5612](https://github.com/influxdata/telegraf/pull/5612): Fix deadlock when Telegraf is aligning aggregators. - [#5523](https://github.com/influxdata/telegraf/issues/5523): Fix missing cluster stats in ceph input. @@ -948,7 +2629,7 @@ ## v1.10.1 [2019-03-19] -#### Bug Fixes +### Bug Fixes - [#5448](https://github.com/influxdata/telegraf/issues/5448): Show error when TLS configuration cannot be loaded. - [#5543](https://github.com/influxdata/telegraf/pull/5543): Add Base64-encoding/decoding for Google Cloud PubSub plugins. @@ -960,7 +2641,7 @@ ## v1.10 [2019-03-05] -#### New Inputs +### New Inputs - [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye - [cloud_pubsub_push](/plugins/inputs/cloud_pubsub_push/README.md) - Contributed by @influxdata @@ -971,16 +2652,16 @@ - [multifile](/plugins/inputs/multifile/README.md) - Contributed by @martin2250 - [stackdriver](/plugins/inputs/stackdriver/README.md) - Contributed by @WuHan0608 -#### New Outputs +### New Outputs - [cloud_pubsub](/plugins/outputs/cloud_pubsub/README.md) - Contributed by @emilymye -#### New Serializers +### New Serializers - [nowmetric](/plugins/serializers/nowmetric/README.md) - Contributed by @JefMuller - [carbon2](/plugins/serializers/carbon2/README.md) - Contributed by @frankreno -#### Features +### Features - [#4345](https://github.com/influxdata/telegraf/pull/4345): Allow for force gathering ES cluster stats. - [#5047](https://github.com/influxdata/telegraf/pull/5047): Add support for unix and unix_ms timestamps to csv parser. @@ -1018,7 +2699,7 @@ - [#5490](https://github.com/influxdata/telegraf/pull/5490): Add tag based routing in influxdb/influxdb_v2 outputs. - [#5533](https://github.com/influxdata/telegraf/pull/5533): Allow grok parser to produce metrics with no fields. -#### Bug Fixes +### Bug Fixes - [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process. - [#5320](https://github.com/influxdata/telegraf/pull/5320): Use datacenter option spelling in consul input. @@ -1034,7 +2715,7 @@ ## v1.9.5 [2019-02-26] -#### Bug Fixes +### Bug Fixes - [#5315](https://github.com/influxdata/telegraf/issues/5315): Skip string fields when writing to stackdriver output. 
- [#5364](https://github.com/influxdata/telegraf/issues/5364): Send metrics in ascending time order in stackdriver output. @@ -1048,7 +2729,7 @@ ## v1.9.4 [2019-02-05] -#### Bug Fixes +### Bug Fixes - [#5334](https://github.com/influxdata/telegraf/issues/5334): Fix skip_rows and skip_columns options in csv parser. - [#5181](https://github.com/influxdata/telegraf/issues/5181): Always send basic auth in jenkins input. @@ -1057,7 +2738,7 @@ ## v1.9.3 [2019-01-22] -#### Bug Fixes +### Bug Fixes - [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input. - [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails. @@ -1068,7 +2749,7 @@ ## v1.9.2 [2019-01-08] -#### Bug Fixes +### Bug Fixes - [#5130](https://github.com/influxdata/telegraf/pull/5130): Increase varnishstat timeout. - [#5135](https://github.com/influxdata/telegraf/pull/5135): Remove storage calculation for non Azure managed instances and add server version. @@ -1087,7 +2768,7 @@ ## v1.9.1 [2018-12-11] -#### Bug Fixes +### Bug Fixes - [#5006](https://github.com/influxdata/telegraf/issues/5006): Fix boolean handling in splunkmetric serializer. - [#5046](https://github.com/influxdata/telegraf/issues/5046): Set default config values in jenkins input. @@ -1102,7 +2783,7 @@ ## v1.9 [2018-11-20] -#### Release Notes +### Release Notes - The `http_listener` input plugin has been renamed to `influxdb_listener` and use of the original name is deprecated. The new name better describes the @@ -1120,7 +2801,7 @@ the new option `max_undelivered_messages` to limit the number of outstanding unwritten metrics. -#### New Inputs +### New Inputs - [http_listener_v2](/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5 - [ipvs](/plugins/inputs/ipvs/README.md) - Contributed by @amoghe @@ -1129,11 +2810,11 @@ - [nginx_vts](/plugins/inputs/nginx_vts/README.md) - Contributed by @monder - [wireless](/plugins/inputs/wireless/README.md) - Contributed by @jamesmaidment -#### New Outputs +### New Outputs - [stackdriver](/plugins/outputs/stackdriver/README.md) - Contributed by @jamesmaidment -#### Features +### Features - [#4686](https://github.com/influxdata/telegraf/pull/4686): Add replace function to strings processor. - [#4754](https://github.com/influxdata/telegraf/pull/4754): Query servers in parallel in dns_query input. @@ -1156,7 +2837,7 @@ - [#4920](https://github.com/influxdata/telegraf/pull/4920): Add scraping for Prometheus endpoint in Kubernetes. - [#4938](https://github.com/influxdata/telegraf/pull/4938): Add per output flush_interval, metric_buffer_limit and metric_batch_size. -#### Bug Fixes +### Bug Fixes - [#4950](https://github.com/influxdata/telegraf/pull/4950): Remove the time_key from the field values in JSON parser. - [#3968](https://github.com/influxdata/telegraf/issues/3968): Fix input time rounding when using a custom interval. @@ -1540,7 +3221,6 @@ - The new `http` input configured with `data_format = "json"` can perform the same task as the, now deprecated, `httpjson` input. - ### New Inputs - [http](./plugins/inputs/http/README.md) - Thanks to @grange74 @@ -1659,6 +3339,7 @@ ## v1.5 [2017-12-14] ### New Plugins + - [basicstats](./plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno - [bond](./plugins/inputs/bond/README.md) - Thanks to @ildarsv - [cratedb](./plugins/outputs/cratedb/README.md) - Thanks to @felixge @@ -1989,7 +3670,7 @@ machines. 
Telegraf < 1.3: -``` +```text # field_name value active+clean 123 active+clean+scrubbing 3 @@ -1997,7 +3678,7 @@ active+clean+scrubbing 3 Telegraf >= 1.3: -``` +```text # field_name value tag count 123 state=active+clean count 3 state=active+clean+scrubbing @@ -2307,7 +3988,7 @@ that pertain to node vs. namespace statistics. This means that the default github_webhooks config: -``` +```toml # A Github Webhook Event collector [[inputs.github_webhooks]] ## Address and port to host Webhook listener on @@ -2316,7 +3997,7 @@ This means that the default github_webhooks config: should now look like: -``` +```toml # A Webhooks Event collector [[inputs.webhooks]] ## Address and port to host Webhook listener on @@ -2371,7 +4052,7 @@ consistent with the behavior of `collection_jitter`. - [#1265](https://github.com/influxdata/telegraf/pull/1265): Make dns lookups for chrony configurable. Thanks @zbindenren! - [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats. - [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration. -- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified +- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL `http://localhost:15672` if not specified - [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second. - [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified - [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument. @@ -2469,8 +4150,8 @@ to "stdout". ### Release Notes -- **Breaking change** in jolokia plugin. See -https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md +- **Breaking change** in jolokia plugin. See the +[jolokia README](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md) for updated configuration. The plugin will now support proxy mode and will make POST requests. @@ -2575,14 +4256,16 @@ It is not included on the report path. This is necessary for reporting host disk ## v0.12.1 [2016-04-14] ### Release Notes + - Breaking change in the dovecot input plugin. See Features section below. -- Graphite output templates are now supported. See -https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +- Graphite output templates are now supported. See the +[Output Formats README](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite) - Possible breaking change for the librato and graphite outputs. Telegraf will no longer insert field names when the field is simply named `value`. This is because the `value` field is redundant in the graphite/librato context. ### Features + - [#1009](https://github.com/influxdata/telegraf/pull/1009): Cassandra input plugin. Thanks @subhachandrachandra! - [#976](https://github.com/influxdata/telegraf/pull/976): Reduce allocations in the UDP and statsd inputs. - [#979](https://github.com/influxdata/telegraf/pull/979): Reduce allocations in the TCP listener. 
@@ -2595,6 +4278,7 @@ because the `value` field is redundant in the graphite/librato context. - [#1008](https://github.com/influxdata/telegraf/pull/1008): Adding memstats metrics to the influxdb plugin. ### Bug Fixes + - [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name) - [#969](https://github.com/influxdata/telegraf/pull/969): ipmi_sensors: allow : in password. Thanks @awaw! - [#972](https://github.com/influxdata/telegraf/pull/972): dovecot: remove extra newline in dovecot command. Thanks @mrannanj! @@ -2603,6 +4287,7 @@ because the `value` field is redundant in the graphite/librato context. ## v0.12.0 [2016-04-05] ### Features + - [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file. - [#948](https://github.com/influxdata/telegraf/pull/948): Cleanup config file and make default package version include all plugins (but commented). - [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension @@ -2622,6 +4307,7 @@ because the `value` field is redundant in the graphite/librato context. - [#945](https://github.com/influxdata/telegraf/pull/945): KAFKA output: codec, acks, and retry configuration. Thanks @framiere! ### Bug Fixes + - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. - [#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write. - [#898](https://github.com/influxdata/telegraf/issues/898): Put database name in quotes, fixes special characters in the database name. @@ -2636,21 +4322,23 @@ because the `value` field is redundant in the graphite/librato context. ## v0.11.1 [2016-03-17] ### Release Notes + - Primarily this release was cut to fix [#859](https://github.com/influxdata/telegraf/issues/859) ### Features + - [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @PierreF! - [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou! ### Bug Fixes + - [#852](https://github.com/influxdata/telegraf/issues/852): Windows zip package fix - [#859](https://github.com/influxdata/telegraf/issues/859): httpjson plugin panic ## v0.11.0 [2016-03-15] -### Release Notes - ### Features + - [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies - [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugns. Thanks @PierreF! - [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin, thanks @whatyouhide! @@ -2668,6 +4356,7 @@ because the `value` field is redundant in the graphite/librato context. - [#847](https://github.com/influxdata/telegraf/pull/847): `ntpq`: Input plugin for running ntp query executable and gathering metrics. ### Bug Fixes + - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" - [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty! - [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert! @@ -2683,15 +4372,18 @@ because the `value` field is redundant in the graphite/librato context. ## v0.10.4.1 ### Release Notes + - Bug in the build script broke deb and rpm packages. 
### Bug Fixes + - [#750](https://github.com/influxdata/telegraf/issues/750): deb package broken - [#752](https://github.com/influxdata/telegraf/issues/752): rpm package broken ## v0.10.4 [2016-02-24] ### Release Notes + - The pass/drop parameters have been renamed to fielddrop/fieldpass parameters, to more accurately indicate their purpose. - There are also now namedrop/namepass parameters for passing/dropping based @@ -2699,6 +4391,7 @@ on the metric _name_. - Experimental windows builds now available. ### Features + - [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene! - [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion! - [#724](https://github.com/influxdata/telegraf/pull/724): username matching for procstat input, thanks @zorel! @@ -2706,12 +4399,14 @@ on the metric _name_. - [#737](https://github.com/influxdata/telegraf/pull/737): Support multiple fields for statsd input. Thanks @mattheath! ### Bug Fixes + - [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldnt print in quiet mode. - [#746](https://github.com/influxdata/telegraf/pull/746): httpjson plugin: Fix HTTP GET parameters. ## v0.10.3 [2016-02-18] ### Release Notes + - Users of the `exec` and `kafka_consumer` (and the new `nats_consumer` and `mqtt_consumer` plugins) can now specify the incoming data format that they would like to parse. Currently supports: "json", "influx", and @@ -2728,6 +4423,7 @@ points and only flushing on a set time interval. This will default to `true` and is in the `[agent]` config section. ### Features + - [#652](https://github.com/influxdata/telegraf/pull/652): CouchDB Input Plugin. Thanks @codehate! - [#655](https://github.com/influxdata/telegraf/pull/655): Support parsing arbitrary data formats. Currently limited to kafka_consumer and exec inputs. - [#671](https://github.com/influxdata/telegraf/pull/671): Dovecot input plugin. Thanks @mikif70! @@ -2742,6 +4438,7 @@ and is in the `[agent]` config section. - [#682](https://github.com/influxdata/telegraf/pull/682): Mesos input plugin. Thanks @tripledes! ### Bug Fixes + - [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux. - [#662](https://github.com/influxdata/telegraf/pull/667): Change `[tags]` to `[global_tags]` to fix multiple-plugin tags bug. - [#642](https://github.com/influxdata/telegraf/issues/642): Riemann output plugin issues. @@ -2751,6 +4448,7 @@ and is in the `[agent]` config section. ## v0.10.2 [2016-02-04] ### Release Notes + - Statsd timing measurements are now aggregated into a single measurement with fields. - Graphite output now inserts tags into the bucket in alphabetical order. @@ -2760,6 +4458,7 @@ doing the opposite of what it claimed to do (yikes). It's been replaced by `insecure_skip_verify` ### Features + - [#575](https://github.com/influxdata/telegraf/pull/575): Support for collecting Windows Performance Counters. Thanks @TheFlyingCorpse! - [#564](https://github.com/influxdata/telegraf/issues/564): features for plugin writing simplification. Internal metric data type. - [#603](https://github.com/influxdata/telegraf/pull/603): Aggregate statsd timing measurements into fields. Thanks @marcinbunsch! @@ -2769,6 +4468,7 @@ doing the opposite of what it claimed to do (yikes). 
It's been replaced by - [#628](https://github.com/influxdata/telegraf/pull/628): Windows perf counters: pre-vista support ### Bug Fixes + - [#595](https://github.com/influxdata/telegraf/issues/595): graphite output should include tags to separate duplicate measurements. - [#599](https://github.com/influxdata/telegraf/issues/599): datadog plugin tags not working. - [#600](https://github.com/influxdata/telegraf/issues/600): datadog measurement/field name parsing is wrong. @@ -2790,6 +4490,7 @@ for the latest measurements, fields, and tags. There is also now support for specifying a docker endpoint to get metrics from. ### Features + - [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261! - [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod! - [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert! @@ -2814,6 +4515,7 @@ specifying a docker endpoint to get metrics from. - [#471](https://github.com/influxdata/telegraf/pull/471): httpjson request headers. Thanks @asosso! ### Bug Fixes + - [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! - [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin - [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain! @@ -2826,6 +4528,7 @@ specifying a docker endpoint to get metrics from. ## v0.10.0 [2016-01-12] ### Release Notes + - Linux packages have been taken out of `opt`, the binary is now in `/usr/bin` and configuration files are in `/etc/telegraf` - **breaking change** `plugins` have been renamed to `inputs`. This was done because @@ -2846,13 +4549,14 @@ instead of only `cpu_` - The prometheus plugin schema has not been changed (measurements have not been aggregated). -### Packaging change note: +### Packaging change note RHEL/CentOS users upgrading from 0.2.x to 0.10.0 will probably have their configurations overwritten by the upgrade. There is a backup stored at /etc/telegraf/telegraf.conf.$(date +%s).backup. ### Features + - Plugin measurements aggregated into a single measurement. - Added ability to specify per-plugin tags - Added ability to specify per-plugin measurement suffix and prefix. @@ -2864,17 +4568,20 @@ configurations overwritten by the upgrade. There is a backup stored at ## v0.2.5 [unreleased] ### Features + - [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen! - [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot! - [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff ### Bug Fixes + - [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham! - [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham! ## v0.2.4 [2015-12-08] ### Features + - [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser! - [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain! 
- [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters @@ -2885,12 +4592,14 @@ configurations overwritten by the upgrade. There is a backup stored at - [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter! ### Bug Fixes + - [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue - [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement. ## v0.2.3 [2015-11-30] ### Release Notes + - **breaking change** The `kafka` plugin has been renamed to `kafka_consumer`. and most of the config option names have changed. This only affects the kafka consumer _plugin_ (not the @@ -2900,7 +4609,7 @@ functional. - Plugins can now be specified as a list, and multiple plugin instances of the same type can be specified, like this: -``` +```toml [[inputs.cpu]] percpu = false totalcpu = true @@ -2915,6 +4624,7 @@ same type can be specified, like this: - Aerospike plugin: tag changed from `host` -> `aerospike_host` ### Features + - [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj! - [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin. - [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras! @@ -2922,21 +4632,25 @@ same type can be specified, like this: - [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC! ### Bug Fixes + - [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning. - [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic ## v0.2.2 [2015-11-18] ### Release Notes + - 0.2.1 has a bug where all lists within plugins get duplicated, this includes lists of servers/URLs. 0.2.2 is being released solely to fix that bug ### Bug Fixes + - [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs. ## v0.2.1 [2015-11-16] ### Release Notes + - Telegraf will no longer use docker-compose for "long" unit test, it has been changed to just run docker commands in the Makefile. See `make docker-run` and `make docker-kill`. `make test` will still run all unit tests with docker. @@ -2949,6 +4663,7 @@ changed to just run docker commands in the Makefile. See `make docker-run` and same type. ### Features + - [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive! - [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter! - [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac! @@ -2961,6 +4676,7 @@ same type. - [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC! ### Bug Fixes + - [#331](https://github.com/influxdata/telegraf/pull/331): Dont overwrite host tag in redis plugin. - [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements. - [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes @@ -2969,6 +4685,7 @@ same type. ## v0.2.0 [2015-10-27] ### Release Notes + - The -test flag will now only output 2 collections for plugins that need it - There is a new agent configuration option: `flush_interval`. 
This option tells Telegraf how often to flush data to InfluxDB and other output sinks. For example, @@ -2985,6 +4702,7 @@ be controlled via the `round_interval` and `flush_jitter` config options. - Telegraf will now retry metric flushes twice ### Features + - [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info - [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini - [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin @@ -3009,6 +4727,7 @@ of metrics collected and from how many inputs. - [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham! ### Bug Fixes + - [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini! - [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime! - [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini! @@ -3021,6 +4740,7 @@ of metrics collected and from how many inputs. ## v0.1.9 [2015-09-22] ### Release Notes + - InfluxDB output config change: `url` is now `urls`, and is a list. Config files will still be backwards compatible if only `url` is specified. - The -test flag will now output two metric collections @@ -3042,6 +4762,7 @@ have been renamed for consistency. Some measurements have also been removed from re-added in a "verbose" mode if there is demand for it. ### Features + - [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support - [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye! - [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini! @@ -3052,6 +4773,7 @@ re-added in a "verbose" mode if there is demand for it. and filtering when specifying a config file. ### Bug Fixes + - [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support - [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics - [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug @@ -3067,10 +4789,12 @@ and filtering when specifying a config file. ## v0.1.8 [2015-09-04] ### Release Notes + - Telegraf will now write data in UTC at second precision by default - Now using Go 1.5 to build telegraf ### Features + - [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin - [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4 - [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes @@ -3084,6 +4808,7 @@ and filtering when specifying a config file. ## v0.1.7 [2015-08-28] ### Features + - [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer. - [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! - [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. @@ -3093,6 +4818,7 @@ and filtering when specifying a config file. - Indent the toml config file for readability ### Bug Fixes + - [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing. 
- [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix. - [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra! @@ -3101,11 +4827,13 @@ and filtering when specifying a config file. ## v0.1.6 [2015-08-20] ### Features + - [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham! - [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies - [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales! ### Bug Fixes + - [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility - [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser! - [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser! @@ -3114,6 +4842,7 @@ and filtering when specifying a config file. ## v0.1.5 [2015-08-13] ### Features + - [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham! - [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar! - [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain! @@ -3132,6 +4861,7 @@ and filtering when specifying a config file. - [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay! ### Bug Fixes + - [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users - [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes - [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama @@ -3141,31 +4871,37 @@ and filtering when specifying a config file. ## v0.1.4 [2015-07-09] ### Features + - [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS! ### Bug Fixes + - [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff! - [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb! ## v0.1.3 [2015-07-05] ### Features + - [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS! - [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham! ### Bug Fixes + - [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz! - [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils! ## v0.1.2 [2015-07-01] ### Features + - [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit! - [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to. - [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham! - [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki! ### Bug Fixes + - [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script. - [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain! 
- [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros! diff --git a/vendor/github.com/influxdata/telegraf/CODE_OF_CONDUCT.md b/vendor/github.com/influxdata/telegraf/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..94ab4b571dca --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/CODE_OF_CONDUCT.md @@ -0,0 +1,77 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +community@influxdata.com. + +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +[version 2.1][v2.1]. 
+ +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html diff --git a/vendor/github.com/influxdata/telegraf/CONTRIBUTING.md b/vendor/github.com/influxdata/telegraf/CONTRIBUTING.md index 897ac1377e6e..822291238560 100644 --- a/vendor/github.com/influxdata/telegraf/CONTRIBUTING.md +++ b/vendor/github.com/influxdata/telegraf/CONTRIBUTING.md @@ -1,39 +1,64 @@ -### Contributing +# Contributing to Telegraf + +There are many ways to get involved in the Telegraf project! From opening issues, creating pull requests, to joining the conversation in Slack. We would love to see you contribute your expertise and join our community. To get started review this document to learn best practices. + +![tiger](assets/GopherAndTiger.png "tiger") + +## Opening Issues + +### Bug reports + +Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please ensure you include all the requested details (e.g. Telegraf config and logs, platform, etc.) + +Please note that issues are not the place to file general support requests such as "How do I use the mongoDB plugin?" Questions of this nature should be sent to the [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/), not filed as issues. + +### Feature requests + +We really like to receive feature requests as it helps us prioritize our work. Before you file a feature request, please search existing issues, you can filter issues that have the label `feature request`. Please be clear about your requirements and goals, help us to understand what you would like to see added to Telegraf with examples and the reasons why it is important to you. If you find your feature request already exists as a Github issue please indicate your support for that feature by using the "thumbs up" reaction. + +### Support questions + +We recommend posting support questions in our [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/), we have a lot of talented community members there who could help answer your question more quickly. + +## Contributing code + +### Creating a pull request 1. [Sign the CLA][cla]. -1. Open a [new issue][] to discuss the changes you would like to make. This is +2. Open a [new issue][] to discuss the changes you would like to make. This is not strictly required but it may help reduce the amount of rework you need to do later. -1. Make changes or write plugin using the guidelines in the following +3. Make changes or write plugin using the guidelines in the following documents: - [Input Plugins][inputs] - [Processor Plugins][processors] - [Aggregator Plugins][aggregators] - [Output Plugins][outputs] -1. Ensure you have added proper unit tests and documentation. -1. Open a new [pull request][]. +4. Ensure you have added proper unit tests and documentation. +5. Open a new [pull request][]. +6. The pull request title needs to follow [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) -#### Contributing an External Plugin *(experimental)* -Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](plugins/inputs/execd), [Execd Output](/plugins/inputs/execd), and [Execd Processor](plugins/processors/execd) Plugins without having to change the plugin code. 
+**Note:** If you have a pull request with only one commit, then that commit needs to follow the conventional commit format or the `Semantic Pull Request` check will fail. This is because github will use the pull request title if there are multiple commits, but if there is only one commit it will use it instead. -Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. -Check out our [guidelines](docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`. +### When will your contribution get released? +We have two kinds of releases: patch releases, which happen every few weeks, and feature releases, which happen once a quarter. If your fix is a bug fix, it will be released in the next patch release after it is merged to master. If your release is a new plugin or other feature, it will be released in the next quarterly release after it is merged to master. Quarterly releases are on the third Wednesday of March, June, September, and December. -#### Security Vulnerability Reporting -InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our -open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about -security vulnerability reporting, -including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). +### Contributing an External Plugin -### GoDoc +Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](/plugins/inputs/execd), [Execd Output](/plugins/outputs/execd), and [Execd Processor](/plugins/processors/execd) Plugins without having to change the plugin code. -Public interfaces for inputs, outputs, processors, aggregators, metrics, -and the accumulator can be found in the GoDoc: +Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. +Check out our [guidelines](/docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`. -[![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf) +## Security Vulnerability Reporting + +InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our +open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about +security vulnerability reporting, +including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). -### Common development tasks +## Common development tasks **Adding a dependency:** @@ -43,31 +68,42 @@ Telegraf uses Go modules. Assuming you can already build the project, run this i **Unit Tests:** -Before opening a pull request you should run the linter checks and -the short tests. +Before opening a pull request you should run the following checks to make sure the CI will pass. -``` +```shell make check +make check-deps make test +make docs ``` **Execute integration tests:** (Optional) -Running the integration tests requires several docker containers to be -running. 
You can start the containers with: -``` -docker-compose up -``` +To run only the integration tests use: -And run the full test suite with: +```shell +make test-integration ``` + +To run the full test suite use: + +```shell make test-all ``` -Use `make docker-kill` to stop the containers. +### For more developer resources +- [Code Style][codestyle] +- [Deprecation][deprecation] +- [Logging][logging] +- [Metric Format Changes][metricformat] +- [Packaging][packaging] +- [Profiling][profiling] +- [Reviews][reviews] +- [Sample Config][sample config] +- [Code of Conduct][code of conduct] [cla]: https://www.influxdata.com/legal/cla/ [new issue]: https://github.com/influxdata/telegraf/issues/new/choose @@ -76,3 +112,12 @@ Use `make docker-kill` to stop the containers. [processors]: /docs/PROCESSORS.md [aggregators]: /docs/AGGREGATORS.md [outputs]: /docs/OUTPUTS.md +[codestyle]: /docs/developers/CODE_STYLE.md +[deprecation]: /docs/developers/DEPRECATION.md +[logging]: /docs/developers/LOGGING.md +[metricformat]: /docs/developers/METRIC_FORMAT_CHANGES.md +[packaging]: /docs/developers/PACKAGING.md +[profiling]: /docs/developers/PROFILING.md +[reviews]: /docs/developers/REVIEWS.md +[sample config]: /docs/developers/SAMPLE_CONFIG.md +[code of conduct]: /CODE_OF_CONDUCT.md diff --git a/vendor/github.com/influxdata/telegraf/EXTERNAL_PLUGINS.md b/vendor/github.com/influxdata/telegraf/EXTERNAL_PLUGINS.md index bdbd244ca3ec..60db4b40631d 100644 --- a/vendor/github.com/influxdata/telegraf/EXTERNAL_PLUGINS.md +++ b/vendor/github.com/influxdata/telegraf/EXTERNAL_PLUGINS.md @@ -1,18 +1,46 @@ # External Plugins -This is a list of plugins that can be compiled outside of Telegraf and used via the `execd` [input](plugins/inputs/execd), [output](plugins/outputs/execd), or [processor](plugins/processors/execd). -Check out the [external plugin documentation](/docs/EXTERNAL_PLUGINS.md) for more information on writing and contributing a plugin. +This is a list of plugins that can be compiled outside of Telegraf and used via the `execd` [input](plugins/inputs/execd), [output](plugins/outputs/execd), or [processor](plugins/processors/execd). +Check out the [external plugin documentation](/docs/EXTERNAL_PLUGINS.md) for more information on writing and contributing a plugin. Pull requests welcome. - ## Inputs -- [rand](https://github.com/ssoroka/rand) - Generate random numbers -- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts -- [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels + - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. +- [opcda](https://github.com/lpc921/telegraf-execd-opcda) - Gather data from [OPC Fundation's Data Access (DA)](https://opcfoundation.org/about/opc-technologies/opc-classic/) protocol for industrial automation. +- [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensors data provided by [Open Hardware Monitor](http://openhardwaremonitor.org) +- [plex](https://github.com/russorat/telegraf-webhooks-plex) - Listens for events from Plex Media Server [Webhooks](https://support.plex.tv/articles/115002267687-webhooks/). 
+- [rand](https://github.com/ssoroka/rand) - Generate random numbers +- [SMCIPMITool](https://github.com/jhpope/smc_ipmi) - Python script to parse the output of [SMCIPMITool](https://www.supermicro.com/en/solutions/management-software/ipmi-utilities) into [InfluxDB line protocol](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/). - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. +- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts +- [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels +- [Big Blue Button](https://github.com/bigblueswarm/bigbluebutton-telegraf-plugin) - Gather meetings information from [Big Blue Button](https://bigbluebutton.org/) server +- [dnsmasq](https://github.com/machinly/dnsmasq-telegraf-plugin) - Gather dnsmasq statistics from dnsmasq +- [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - Gather statistics from 389ds and from LDAP trees. +- [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - Gather information from your X509 CRL files +- [s7comm](https://github.com/nicolasme/s7comm) - Gather information from Siemens PLC +- [net_irtt](https://github.com/iAnatoly/telegraf-input-net_irtt) - Gather information from IRTT network test +- [dht_sensor](https://github.com/iAnatoly/telegraf-input-dht_sensor) - Gather temperature and humidity from DHTXX sensors +- [oracle](https://github.com/bonitoo-io/telegraf-input-oracle) - Gather the statistic data from Oracle RDBMS +- [db2](https://github.com/bonitoo-io/telegraf-input-db2) - Gather the statistic data from DB2 RDBMS +- [apt](https://github.com/x70b1/telegraf-apt) - Check Debian for package updates. +- [knot](https://github.com/x70b1/telegraf-knot) - Collect stats from Knot DNS. +- [fritzbox](https://github.com/hdecarne-github/fritzbox-telegraf-plugin) - Gather statistics from [FRITZ!Box](https://avm.de/produkte/fritzbox/) router and repeater +- [linux-psi-telegraf-plugin](https://github.com/gridscale/linux-psi-telegraf-plugin) - Gather pressure stall information ([PSI](https://facebookmicrosites.github.io/psi/)) from the Linux Kernel +- [huebridge](https://github.com/hdecarne-github/huebridge-telegraf-plugin) - Gather smart home statistics from [Hue Bridge](https://www.philips-hue.com/) devices +- [nsdp](https://github.com/hdecarne-github/nsdp-telegraf-plugin) - Gather switch network statistics via [Netgear Switch Discovery Protocol](https://en.wikipedia.org/wiki/Netgear_Switch_Discovery_Protocol) +- [hwinfo](https://github.com/zachstence/hwinfo-telegraf-plugin) - Gather Windows system hardware information from [HWiNFO](https://www.hwinfo.com/) +- [libvirt](https://gitlab.com/warrenio/tools/telegraf-input-libvirt) - Gather libvirt domain stats, based on a historical Telegraf implementation (libvirt)[https://libvirt.org/] ## Outputs + - [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. +- [firehose](https://github.com/muhlba91/telegraf-output-kinesis-data-firehose) - Sends metrics in batches to Amazon Kinesis Data Firehose. + +## Processors + +- [geoip](https://github.com/a-bali/telegraf-geoip) - Add GeoIP information to IP addresses. +- [metadata](https://github.com/lawdt/metadata) - Appends metadata gathered from Openstack to metrics. 
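The plugins listed above are compiled outside of Telegraf and wired in through the `execd` plugins. Below is a minimal sketch of that workflow for an input plugin, assuming the plugin builds as a single `main` package with a plain `go build` and emits InfluxDB line protocol on stdout; the install path and config file location are illustrative and not part of this patch:

```shell
# Hypothetical walk-through: compile the "rand" external plugin listed above
# and run it as a long-lived child process via the execd input.
git clone https://github.com/ssoroka/rand
cd rand && go build -o /usr/local/bin/telegraf-rand .

# Wire the compiled binary into Telegraf through the execd input plugin.
cat >> /etc/telegraf/telegraf.conf <<'EOF'
[[inputs.execd]]
  ## externally compiled plugin to run as a daemon (path is illustrative)
  command = ["/usr/local/bin/telegraf-rand"]
  ## how the process is signaled each collection interval ("none", "STDIN", "SIGHUP", ...)
  signal = "none"
  ## restart the process if it exits unexpectedly
  restart_delay = "10s"
  ## assumes the plugin writes InfluxDB line protocol to stdout
  data_format = "influx"
EOF
```

Keeping the plugin out of the Telegraf binary this way lets it be built, versioned, and shipped independently, which is the main reason the external plugin list above exists.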
diff --git a/vendor/github.com/influxdata/telegraf/Makefile b/vendor/github.com/influxdata/telegraf/Makefile index eebd15c30bff..9ed5f354b713 100644 --- a/vendor/github.com/influxdata/telegraf/Makefile +++ b/vendor/github.com/influxdata/telegraf/Makefile @@ -1,5 +1,10 @@ -next_version := $(shell cat build_version.txt) -tag := $(shell git describe --exact-match --tags 2>git_describe_error.tmp; rm -f git_describe_error.tmp) +ifneq (,$(filter $(OS),Windows_NT Windows)) + EXEEXT=.exe +endif + +next_version := $(file < build_version.txt) +tag := $(shell git describe --exact-match --tags 2>/dev/null) + branch := $(shell git rev-parse --abbrev-ref HEAD) commit := $(shell git rev-parse --short=8 HEAD) glibc_version := 2.17 @@ -40,10 +45,12 @@ MAKEFLAGS += --no-print-directory GOOS ?= $(shell go env GOOS) GOARCH ?= $(shell go env GOARCH) HOSTGO := env -u GOOS -u GOARCH -u GOARM -- go - -LDFLAGS := $(LDFLAGS) -X main.commit=$(commit) -X main.branch=$(branch) -X main.goos=$(GOOS) -X main.goarch=$(GOARCH) +INTERNAL_PKG=github.com/influxdata/telegraf/internal +LDFLAGS := $(LDFLAGS) -X $(INTERNAL_PKG).Commit=$(commit) -X $(INTERNAL_PKG).Branch=$(branch) ifneq ($(tag),) - LDFLAGS += -X main.version=$(version) + LDFLAGS += -X $(INTERNAL_PKG).Version=$(version) +else + LDFLAGS += -X $(INTERNAL_PKG).Version=$(version)-$(commit) endif # Go built-in race detector works only for 64 bits architectures. @@ -62,32 +69,63 @@ localstatedir ?= $(prefix)/var pkgdir ?= build/dist .PHONY: all -all: - @$(MAKE) deps - @$(MAKE) telegraf +all: deps docs telegraf .PHONY: help help: @echo 'Targets:' - @echo ' all - download dependencies and compile telegraf binary' - @echo ' deps - download dependencies' - @echo ' telegraf - compile telegraf binary' - @echo ' test - run short unit tests' - @echo ' fmt - format source files' - @echo ' tidy - tidy go modules' - @echo ' check-deps - check docs/LICENSE_OF_DEPENDENCIES.md' - @echo ' clean - delete build artifacts' + @echo ' all - download dependencies and compile telegraf binary' + @echo ' deps - download dependencies' + @echo ' docs - embed sample-configurations into READMEs' + @echo ' telegraf - compile telegraf binary' + @echo ' test - run short unit tests' + @echo ' fmt - format source files' + @echo ' tidy - tidy go modules' + @echo ' lint - run linter' + @echo ' lint-branch - run linter on changes in current branch since master' + @echo ' lint-install - install linter' + @echo ' check-deps - check docs/LICENSE_OF_DEPENDENCIES.md' + @echo ' clean - delete build artifacts' + @echo ' package - build all supported packages, override include_packages to only build a subset' + @echo ' e.g.: make package include_packages="amd64.deb"' @echo '' - @echo 'Package Targets:' - @$(foreach dist,$(dists),echo " $(dist)";) + @echo 'Possible values for include_packages variable' + @$(foreach package,$(include_packages),echo " $(package)";) + @echo '' + @echo 'Resulting package name format (where arch will be the arch of the package):' + @echo ' telegraf_$(deb_version)_arch.deb' + @echo ' telegraf-$(rpm_version).arch.rpm' + @echo ' telegraf-$(tar_version)_arch.tar.gz' + @echo ' telegraf-$(tar_version)_arch.zip' + .PHONY: deps deps: - go mod download + go mod download -x + +.PHONY: version +version: + @echo $(version)-$(commit) + +build_tools: + $(HOSTGO) build -o ./tools/custom_builder/custom_builder$(EXEEXT) ./tools/custom_builder + $(HOSTGO) build -o ./tools/license_checker/license_checker$(EXEEXT) ./tools/license_checker + $(HOSTGO) build -o 
./tools/readme_config_includer/generator$(EXEEXT) ./tools/readme_config_includer/generator.go + +embed_readme_%: + GOOS=linux go generate -run="readme_config_includer/generator$$" ./plugins/$*/... + GOOS=windows go generate -run="readme_config_includer/generator$$" ./plugins/$*/... + GOOS=darwin go generate -run="readme_config_includer/generator$$" ./plugins/$*/... + +.PHONY: docs +docs: build_tools embed_readme_inputs embed_readme_outputs embed_readme_processors embed_readme_aggregators embed_readme_secretstores + +.PHONY: build +build: + go build -tags "$(BUILDTAGS)" -ldflags "$(LDFLAGS)" ./cmd/telegraf .PHONY: telegraf -telegraf: - go build -ldflags "$(LDFLAGS)" ./cmd/telegraf +telegraf: build # Used by dockerfile builds .PHONY: go-install @@ -98,6 +136,10 @@ go-install: test: go test -short $(race_detector) ./... +.PHONY: test-integration +test-integration: + go test -run Integration $(race_detector) ./... + .PHONY: fmt fmt: @gofmt -s -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)) @@ -112,15 +154,6 @@ fmtcheck: exit 1 ;\ fi -.PHONY: test-windows -test-windows: - go test -short $(race_detector) ./plugins/inputs/ping/... - go test -short $(race_detector) ./plugins/inputs/win_perf_counters/... - go test -short $(race_detector) ./plugins/inputs/win_services/... - go test -short $(race_detector) ./plugins/inputs/procstat/... - go test -short $(race_detector) ./plugins/inputs/ntpq/... - go test -short $(race_detector) ./plugins/processors/port_name/... - .PHONY: vet vet: @echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)' @@ -131,18 +164,47 @@ vet: exit 1; \ fi +.PHONY: lint-install +lint-install: + @echo "Installing golangci-lint" + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.0 + + @echo "Installing markdownlint" + npm install -g markdownlint-cli + +.PHONY: lint +lint: + @which golangci-lint >/dev/null 2>&1 || { \ + echo "golangci-lint not found, please run: make lint-install"; \ + exit 1; \ + } + golangci-lint run + + @which markdownlint >/dev/null 2>&1 || { \ + echo "markdownlint not found, please run: make lint-install"; \ + exit 1; \ + } + markdownlint . + +.PHONY: lint-branch +lint-branch: + @which golangci-lint >/dev/null 2>&1 || { \ + echo "golangci-lint not found, please run: make lint-install"; \ + exit 1; \ + } + golangci-lint run + .PHONY: tidy tidy: go mod verify go mod tidy @if ! git diff --quiet go.mod go.sum; then \ - echo "please run go mod tidy and check in changes"; \ + echo "please run go mod tidy and check in changes, you might have to use the same version of Go as the CI"; \ exit 1; \ fi .PHONY: check check: fmtcheck vet - @$(MAKE) --no-print-directory tidy .PHONY: test-all test-all: fmtcheck vet @@ -157,6 +219,16 @@ clean: rm -f telegraf rm -f telegraf.exe rm -rf build + rm -rf cmd/telegraf/resource.syso + rm -rf cmd/telegraf/versioninfo.json + rm -rf tools/custom_builder/custom_builder + rm -rf tools/custom_builder/custom_builder.exe + rm -rf tools/readme_config_includer/generator + rm -rf tools/readme_config_includer/generator.exe + rm -rf tools/package_lxd_test/package_lxd_test + rm -rf tools/package_lxd_test/package_lxd_test.exe + rm -rf tools/license_checker/license_checker + rm -rf tools/license_checker/license_checker.exe .PHONY: docker-image docker-image: @@ -165,20 +237,10 @@ docker-image: plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl ragel -Z -G2 $^ -o $@ -.PHONY: plugin-% -plugin-%: - @echo "Starting dev environment for $${$(@)} input plugin..." 
- @docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up - -.PHONY: ci-1.15 -ci-1.15: - docker build -t quay.io/influxdb/telegraf-ci:1.15.2 - < scripts/ci-1.15.docker - docker push quay.io/influxdb/telegraf-ci:1.15.2 - -.PHONY: ci-1.14 -ci-1.14: - docker build -t quay.io/influxdb/telegraf-ci:1.14.9 - < scripts/ci-1.14.docker - docker push quay.io/influxdb/telegraf-ci:1.14.9 +.PHONY: ci +ci: + docker build -t quay.io/influxdb/telegraf-ci:1.20 - < scripts/ci.docker + docker push quay.io/influxdb/telegraf-ci:1.20 .PHONY: install install: $(buildbin) @@ -201,190 +263,205 @@ install: $(buildbin) # the bin between deb/rpm/tar packages over building directly into the package # directory. $(buildbin): + echo $(GOOS) @mkdir -pv $(dir $@) go build -o $(dir $@) -ldflags "$(LDFLAGS)" ./cmd/telegraf -debs := telegraf_$(deb_version)_amd64.deb -debs += telegraf_$(deb_version)_arm64.deb -debs += telegraf_$(deb_version)_armel.deb -debs += telegraf_$(deb_version)_armhf.deb -debs += telegraf_$(deb_version)_i386.deb -debs += telegraf_$(deb_version)_mips.deb -debs += telegraf_$(deb_version)_mipsel.deb -debs += telegraf_$(deb_version)_s390x.deb - -rpms += telegraf-$(rpm_version).aarch64.rpm -rpms += telegraf-$(rpm_version).armel.rpm -rpms += telegraf-$(rpm_version).armv6hl.rpm -rpms += telegraf-$(rpm_version).i386.rpm -rpms += telegraf-$(rpm_version).s390x.rpm -rpms += telegraf-$(rpm_version).x86_64.rpm - -tars += telegraf-$(tar_version)_darwin_amd64.tar.gz -tars += telegraf-$(tar_version)_freebsd_amd64.tar.gz -tars += telegraf-$(tar_version)_freebsd_i386.tar.gz -tars += telegraf-$(tar_version)_linux_amd64.tar.gz -tars += telegraf-$(tar_version)_linux_arm64.tar.gz -tars += telegraf-$(tar_version)_linux_armel.tar.gz -tars += telegraf-$(tar_version)_linux_armhf.tar.gz -tars += telegraf-$(tar_version)_linux_i386.tar.gz -tars += telegraf-$(tar_version)_linux_mips.tar.gz -tars += telegraf-$(tar_version)_linux_mipsel.tar.gz -tars += telegraf-$(tar_version)_linux_s390x.tar.gz -tars += telegraf-$(tar_version)_static_linux_amd64.tar.gz - -zips += telegraf-$(tar_version)_windows_amd64.zip -zips += telegraf-$(tar_version)_windows_i386.zip - -dists := $(debs) $(rpms) $(tars) $(zips) +# Define packages Telegraf supports, organized by architecture with a rule to echo the list to limit include_packages +# e.g. 
make package include_packages="$(make amd64)" +mips += linux_mips.tar.gz mips.deb +.PHONY: mips +mips: + @ echo $(mips) +mipsel += mipsel.deb linux_mipsel.tar.gz +.PHONY: mipsel +mipsel: + @ echo $(mipsel) +arm64 += linux_arm64.tar.gz arm64.deb aarch64.rpm +.PHONY: arm64 +arm64: + @ echo $(arm64) +amd64 += freebsd_amd64.tar.gz linux_amd64.tar.gz amd64.deb x86_64.rpm +.PHONY: amd64 +amd64: + @ echo $(amd64) +static += static_linux_amd64.tar.gz +.PHONY: static +static: + @ echo $(static) +armel += linux_armel.tar.gz armel.rpm armel.deb +.PHONY: armel +armel: + @ echo $(armel) +armhf += linux_armhf.tar.gz freebsd_armv7.tar.gz armhf.deb armv6hl.rpm +.PHONY: armhf +armhf: + @ echo $(armhf) +s390x += linux_s390x.tar.gz s390x.deb s390x.rpm +.PHONY: riscv64 +riscv64: + @ echo $(riscv64) +riscv64 += linux_riscv64.tar.gz riscv64.rpm riscv64.deb +.PHONY: s390x +s390x: + @ echo $(s390x) +ppc64le += linux_ppc64le.tar.gz ppc64le.rpm ppc64el.deb +.PHONY: ppc64le +ppc64le: + @ echo $(ppc64le) +i386 += freebsd_i386.tar.gz i386.deb linux_i386.tar.gz i386.rpm +.PHONY: i386 +i386: + @ echo $(i386) +windows += windows_i386.zip windows_amd64.zip windows_arm64.zip +.PHONY: windows +windows: + @ echo $(windows) +darwin-amd64 += darwin_amd64.tar.gz +.PHONY: darwin-amd64 +darwin-amd64: + @ echo $(darwin-amd64) + +darwin-arm64 += darwin_arm64.tar.gz +.PHONY: darwin-arm64 +darwin-arm64: + @ echo $(darwin-arm64) + +include_packages := $(mips) $(mipsel) $(arm64) $(amd64) $(static) $(armel) $(armhf) $(riscv64) $(s390x) $(ppc64le) $(i386) $(windows) $(darwin-amd64) $(darwin-arm64) .PHONY: package -package: $(dists) - -rpm_amd64 := amd64 -rpm_386 := i386 -rpm_s390x := s390x -rpm_arm5 := armel -rpm_arm6 := armv6hl -rpm_arm647 := aarch64 -rpm_arch = $(rpm_$(GOARCH)$(GOARM)) - -.PHONY: $(rpms) -$(rpms): - @$(MAKE) install - @mkdir -p $(pkgdir) - fpm --force \ - --log info \ - --architecture $(rpm_arch) \ - --input-type dir \ - --output-type rpm \ - --vendor InfluxData \ - --url https://github.com/influxdata/telegraf \ - --license MIT \ - --maintainer support@influxdb.com \ - --config-files /etc/telegraf/telegraf.conf \ - --config-files /etc/logrotate.d/telegraf \ - --after-install scripts/rpm/post-install.sh \ - --before-install scripts/rpm/pre-install.sh \ - --after-remove scripts/rpm/post-remove.sh \ - --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ - --depends coreutils \ - --depends shadow-utils \ - --rpm-posttrans scripts/rpm/post-install.sh \ - --name telegraf \ - --version $(version) \ - --iteration $(rpm_iteration) \ - --chdir $(DESTDIR) \ - --package $(pkgdir)/$@ - -deb_amd64 := amd64 -deb_386 := i386 -deb_s390x := s390x -deb_arm5 := armel -deb_arm6 := armhf -deb_arm647 := arm64 -deb_mips := mips -deb_mipsle := mipsel -deb_arch = $(deb_$(GOARCH)$(GOARM)) - -.PHONY: $(debs) -$(debs): - @$(MAKE) install - @mkdir -pv $(pkgdir) - fpm --force \ - --log info \ - --architecture $(deb_arch) \ - --input-type dir \ - --output-type deb \ - --vendor InfluxData \ - --url https://github.com/influxdata/telegraf \ - --license MIT \ - --maintainer support@influxdb.com \ - --config-files /etc/telegraf/telegraf.conf.sample \ - --config-files /etc/logrotate.d/telegraf \ - --after-install scripts/deb/post-install.sh \ - --before-install scripts/deb/pre-install.sh \ - --after-remove scripts/deb/post-remove.sh \ - --before-remove scripts/deb/pre-remove.sh \ - --description "Plugin-driven server agent for reporting metrics into InfluxDB." 
\ - --name telegraf \ - --version $(version) \ - --iteration $(deb_iteration) \ - --chdir $(DESTDIR) \ - --package $(pkgdir)/$@ - -.PHONY: $(zips) -$(zips): - @$(MAKE) install - @mkdir -p $(pkgdir) - (cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/$@ +package: docs $(include_packages) + +.PHONY: $(include_packages) +$(include_packages): + if [ "$(suffix $@)" = ".zip" ]; then go generate cmd/telegraf/telegraf_windows.go; fi -.PHONY: $(tars) -$(tars): @$(MAKE) install @mkdir -p $(pkgdir) - tar --owner 0 --group 0 -czvf $(pkgdir)/$@ -C $(dir $(DESTDIR)) . -.PHONY: upload-nightly -upload-nightly: - aws s3 sync $(pkgdir) s3://dl.influxdata.com/telegraf/nightlies/ \ - --exclude "*" \ - --include "*.tar.gz" \ - --include "*.deb" \ - --include "*.rpm" \ - --include "*.zip" \ - --acl public-read + @if [ "$(suffix $@)" = ".rpm" ]; then \ + echo "# DO NOT EDIT OR REMOVE" > $(DESTDIR)$(sysconfdir)/telegraf/telegraf.d/.ignore; \ + echo "# This file prevents the rpm from changing permissions on this directory" >> $(DESTDIR)$(sysconfdir)/telegraf/telegraf.d/.ignore; \ + fpm --force \ + --log info \ + --architecture $(basename $@) \ + --input-type dir \ + --output-type rpm \ + --vendor InfluxData \ + --url https://github.com/influxdata/telegraf \ + --license MIT \ + --maintainer support@influxdb.com \ + --config-files /etc/telegraf/telegraf.conf \ + --config-files /etc/telegraf/telegraf.d/.ignore \ + --config-files /etc/logrotate.d/telegraf \ + --after-install scripts/rpm/post-install.sh \ + --before-install scripts/rpm/pre-install.sh \ + --after-remove scripts/rpm/post-remove.sh \ + --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ + --depends coreutils \ + --depends shadow-utils \ + --rpm-digest sha256 \ + --rpm-posttrans scripts/rpm/post-install.sh \ + --rpm-os ${GOOS} \ + --name telegraf \ + --version $(version) \ + --iteration $(rpm_iteration) \ + --chdir $(DESTDIR) \ + --package $(pkgdir)/telegraf-$(rpm_version).$@ ;\ + elif [ "$(suffix $@)" = ".deb" ]; then \ + fpm --force \ + --log info \ + --architecture $(basename $@) \ + --input-type dir \ + --output-type deb \ + --vendor InfluxData \ + --url https://github.com/influxdata/telegraf \ + --license MIT \ + --maintainer support@influxdb.com \ + --config-files /etc/telegraf/telegraf.conf.sample \ + --config-files /etc/logrotate.d/telegraf \ + --after-install scripts/deb/post-install.sh \ + --before-install scripts/deb/pre-install.sh \ + --after-remove scripts/deb/post-remove.sh \ + --before-remove scripts/deb/pre-remove.sh \ + --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ + --name telegraf \ + --version $(version) \ + --iteration $(deb_iteration) \ + --chdir $(DESTDIR) \ + --package $(pkgdir)/telegraf_$(deb_version)_$@ ;\ + elif [ "$(suffix $@)" = ".zip" ]; then \ + (cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/telegraf-$(tar_version)_$@ ;\ + elif [ "$(suffix $@)" = ".gz" ]; then \ + tar --owner 0 --group 0 -czvf $(pkgdir)/telegraf-$(tar_version)_$@ -C $(dir $(DESTDIR)) . 
;\ + fi + +amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOOS := linux +amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOARCH := amd64 + +static_linux_amd64.tar.gz: export cgo := -nocgo +static_linux_amd64.tar.gz: export CGO_ENABLED := 0 + +i386.deb i386.rpm linux_i386.tar.gz: export GOOS := linux +i386.deb i386.rpm linux_i386.tar.gz: export GOARCH := 386 + +armel.deb armel.rpm linux_armel.tar.gz: export GOOS := linux +armel.deb armel.rpm linux_armel.tar.gz: export GOARCH := arm +armel.deb armel.rpm linux_armel.tar.gz: export GOARM := 5 + +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOOS := linux +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARCH := arm +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARM := 6 -%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOOS := linux -%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOARCH := amd64 +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOOS := linux +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARCH := arm64 +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARM := 7 -%static_linux_amd64.tar.gz: export cgo := -nocgo -%static_linux_amd64.tar.gz: export CGO_ENABLED := 0 +mips.deb linux_mips.tar.gz: export GOOS := linux +mips.deb linux_mips.tar.gz: export GOARCH := mips -%i386.deb %i386.rpm %linux_i386.tar.gz: export GOOS := linux -%i386.deb %i386.rpm %linux_i386.tar.gz: export GOARCH := 386 +mipsel.deb linux_mipsel.tar.gz: export GOOS := linux +mipsel.deb linux_mipsel.tar.gz: export GOARCH := mipsle -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOOS := linux -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARCH := arm -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARM := 5 +riscv64.deb riscv64.rpm linux_riscv64.tar.gz: export GOOS := linux +riscv64.deb riscv64.rpm linux_riscv64.tar.gz: export GOARCH := riscv64 -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOOS := linux -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARCH := arm -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARM := 6 +s390x.deb s390x.rpm linux_s390x.tar.gz: export GOOS := linux +s390x.deb s390x.rpm linux_s390x.tar.gz: export GOARCH := s390x -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOOS := linux -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOARCH := arm64 -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOARM := 7 +ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOOS := linux +ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOARCH := ppc64le -%mips.deb %linux_mips.tar.gz: export GOOS := linux -%mips.deb %linux_mips.tar.gz: export GOARCH := mips +freebsd_amd64.tar.gz: export GOOS := freebsd +freebsd_amd64.tar.gz: export GOARCH := amd64 -%mipsel.deb %linux_mipsel.tar.gz: export GOOS := linux -%mipsel.deb %linux_mipsel.tar.gz: export GOARCH := mipsle +freebsd_i386.tar.gz: export GOOS := freebsd +freebsd_i386.tar.gz: export GOARCH := 386 -%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOOS := linux -%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOARCH := s390x +freebsd_armv7.tar.gz: export GOOS := freebsd +freebsd_armv7.tar.gz: export GOARCH := arm +freebsd_armv7.tar.gz: export GOARM := 7 -%freebsd_amd64.tar.gz: export GOOS := freebsd -%freebsd_amd64.tar.gz: export GOARCH := amd64 +windows_amd64.zip: export GOOS := windows +windows_amd64.zip: export GOARCH := amd64 -%freebsd_i386.tar.gz: export GOOS := freebsd -%freebsd_i386.tar.gz: export GOARCH := 386 +windows_arm64.zip: export GOOS := windows +windows_arm64.zip: export GOARCH := arm64 -%windows_amd64.zip: export GOOS := 
windows -%windows_amd64.zip: export GOARCH := amd64 +darwin_amd64.tar.gz: export GOOS := darwin +darwin_amd64.tar.gz: export GOARCH := amd64 -%darwin_amd64.tar.gz: export GOOS := darwin -%darwin_amd64.tar.gz: export GOARCH := amd64 +darwin_arm64.tar.gz: export GOOS := darwin +darwin_arm64.tar.gz: export GOARCH := arm64 -%windows_i386.zip: export GOOS := windows -%windows_i386.zip: export GOARCH := 386 +windows_i386.zip: export GOOS := windows +windows_i386.zip: export GOARCH := 386 -%windows_i386.zip %windows_amd64.zip: export prefix = -%windows_i386.zip %windows_amd64.zip: export bindir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export sysconfdir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export localstatedir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export EXEEXT := .exe +windows_i386.zip windows_amd64.zip windows_arm64.zip: export prefix = +windows_i386.zip windows_amd64.zip windows_arm64.zip: export bindir = $(prefix) +windows_i386.zip windows_amd64.zip windows_arm64.zip: export sysconfdir = $(prefix) +windows_i386.zip windows_amd64.zip windows_arm64.zip: export localstatedir = $(prefix) +windows_i386.zip windows_amd64.zip windows_arm64.zip: export EXEEXT := .exe %.deb: export pkg := deb %.deb: export prefix := /usr diff --git a/vendor/github.com/influxdata/telegraf/README.md b/vendor/github.com/influxdata/telegraf/README.md index 168db50fd6a2..206087e9ff4b 100644 --- a/vendor/github.com/influxdata/telegraf/README.md +++ b/vendor/github.com/influxdata/telegraf/README.md @@ -1,39 +1,27 @@ -# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) -[![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack) -Telegraf is an agent for collecting, processing, aggregating, and writing metrics. +# Telegraf -Design goals are to have a minimal memory footprint with a plugin system so -that developers in the community can easily add support for collecting -metrics. +![tiger](assets/TelegrafTiger.png "tiger") -Telegraf is plugin-driven and has the concept of 4 distinct plugin types: +[![Contribute](https://img.shields.io/badge/Contribute%20To%20Telegraf-orange.svg?logo=influx&style=for-the-badge)](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md) [![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=for-the-badge)](https://www.influxdata.com/slack) [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) [![Go Report Card](https://goreportcard.com/badge/github.com/influxdata/telegraf)](https://goreportcard.com/report/github.com/influxdata/telegraf) -1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs -2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics -3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) -4. 
[Output Plugins](#output-plugins) write metrics to various destinations +Telegraf is an agent for collecting, processing, aggregating, and writing metrics. Based on a +plugin system to enable developers in the community to easily add support for additional +metric collection. There are four distinct types of plugins: -New plugins are designed to be easy to contribute, pull requests are welcomed -and we work to incorporate as many pull requests as possible. +1. [Input Plugins](/docs/INPUTS.md) collect metrics from the system, services, or 3rd party APIs +2. [Processor Plugins](/docs/PROCESSORS.md) transform, decorate, and/or filter metrics +3. [Aggregator Plugins](/docs/AGGREGATORS.md) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) +4. [Output Plugins](/docs/OUTPUTS.md) write metrics to various destinations -## Try in Browser :rocket: - -You can try Telegraf right in your browser in the [Telegraf playground](https://rootnroll.com/d/telegraf/). - -## Contributing - -There are many ways to contribute: -- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new) -- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation) -- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) -- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) -- [Contribute plugins](CONTRIBUTING.md) -- [Contribute external plugins](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) *(experimental)* +New plugins are designed to be easy to contribute, pull requests are welcomed, and we work to +incorporate as many pull requests as possible. Consider looking at the +[list of external plugins](EXTERNAL_PLUGINS.md) as well. ## Minimum Requirements Telegraf shares the same [minimum requirements][] as Go: + - Linux kernel version 2.6.23 or later - Windows 7 or later - FreeBSD 11.2 or later @@ -41,408 +29,136 @@ Telegraf shares the same [minimum requirements][] as Go: [minimum requirements]: https://github.com/golang/go/wiki/MinimumRequirements#minimum-requirements -## Installation: +## Obtaining Telegraf + +View the [changelog](/CHANGELOG.md) for the latest updates and changes by version. + +### Binary Downloads + +Binary downloads are available from the [InfluxData downloads](https://www.influxdata.com/downloads) +page or from each [GitHub Releases](https://github.com/influxdata/telegraf/releases) page. + +### Package Repository -You can download the binaries directly from the [downloads](https://www.influxdata.com/downloads) page -or from the [releases](https://github.com/influxdata/telegraf/releases) section. +InfluxData also provides a package repo that contains both DEB and RPM downloads. -### Ansible Role: +For deb-based platforms (e.g. 
Ubuntu and Debian) run the following to add the +repo key and setup a new sources.list entry: -Ansible role: https://github.com/rossmcdonald/telegraf +```shell +# influxdata-archive_compat.key GPG fingerprint: +# 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E +wget -q https://repos.influxdata.com/influxdata-archive_compat.key +echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && cat influxdata-archive_compat.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null +echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list +sudo apt-get update && sudo apt-get install telegraf +``` + +For RPM-based platforms (e.g. RHEL, CentOS) use the following to create a repo +file and install telegraf: + +```shell +# influxdata-archive_compat.key GPG fingerprint: +# 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E +cat <=1.13 (1.15 recommended) +1. [Install Go](https://golang.org/doc/install) >=1.18 (1.18.0 recommended) 2. Clone the Telegraf repository: - ``` - cd ~/src + + ```shell git clone https://github.com/influxdata/telegraf.git ``` -3. Run `make` from the source directory - ``` - cd ~/src/telegraf - make - ``` -### Changelog +3. Run `make build` from the source directory -View the [changelog](/CHANGELOG.md) for the latest updates and changes by -version. + ```shell + cd telegraf + make build + ``` ### Nightly Builds -These builds are generated from the master branch: -- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) -- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) -- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) -- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm) -- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) -- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) -- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) -- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) -- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) -- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) -- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) -- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) -- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) -- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) -- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) -- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) -- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) -- 
[telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) -- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) -- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) -- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) -- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) -- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) -- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz) - -## How to use it: +[Nightly](/docs/NIGHTLIES.md) builds are available, generated from the master branch. + +### 3rd Party Builds + +Builds for other platforms or package formats are provided by members of theTelegraf community. +These packages are not built, tested, or supported by the Telegraf project or InfluxData. Please +get in touch with the package author if support is needed: + +- [Ansible Role](https://github.com/rossmcdonald/telegraf) +- [Chocolatey](https://chocolatey.org/packages/telegraf) by [ripclawffb](https://chocolatey.org/profiles/ripclawffb) +- [Scoop](https://github.com/ScoopInstaller/Main/blob/master/bucket/telegraf.json) +- [Snap](https://snapcraft.io/telegraf) by Laurent Sesquès (sajoupa) +- [Homebrew](https://formulae.brew.sh/formula/telegraf#default) + +## Getting Started See usage with: -``` +```shell telegraf --help ``` -#### Generate a telegraf config file: +### Generate a telegraf config file -``` +```shell telegraf config > telegraf.conf ``` -#### Generate config with only cpu input & influxdb output plugins defined: +### Generate config with only cpu input & influxdb output plugins defined -``` -telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config +```shell +telegraf config --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb ``` -#### Run a single telegraf collection, outputting metrics to stdout: +### Run a single telegraf collection, outputting metrics to stdout -``` +```shell telegraf --config telegraf.conf --test ``` -#### Run telegraf with all plugins defined in config file: +### Run telegraf with all plugins defined in config file -``` +```shell telegraf --config telegraf.conf ``` -#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins: +### Run telegraf, enabling the cpu & memory input, and influxdb output plugins -``` +```shell telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb ``` +## Contribute to the Project + +Telegraf is an MIT licensed open source project and we love our community. The fastest way to get something fixed is to open a PR. Check out our [contributing guide](CONTRIBUTING.md) if you're interested in helping out. Also, join us on our [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/) if you have questions or comments for our engineering teams. + +If your completely new to Telegraf and InfluxDB, you can also enroll for free at [InfluxDB university](https://www.influxdata.com/university/) to take courses to learn more. + ## Documentation -[Latest Release Documentation][release docs]. 
- -For documentation on the latest development code see the [documentation index][devel docs]. - -[release docs]: https://docs.influxdata.com/telegraf -[devel docs]: docs - -## Input Plugins - -* [activemq](./plugins/inputs/activemq) -* [aerospike](./plugins/inputs/aerospike) -* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq) -* [apache](./plugins/inputs/apache) -* [apcupsd](./plugins/inputs/apcupsd) -* [aurora](./plugins/inputs/aurora) -* [aws cloudwatch](./plugins/inputs/cloudwatch) (Amazon Cloudwatch) -* [azure_storage_queue](./plugins/inputs/azure_storage_queue) -* [bcache](./plugins/inputs/bcache) -* [beanstalkd](./plugins/inputs/beanstalkd) -* [bind](./plugins/inputs/bind) -* [bond](./plugins/inputs/bond) -* [burrow](./plugins/inputs/burrow) -* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) -* [ceph](./plugins/inputs/ceph) -* [cgroup](./plugins/inputs/cgroup) -* [chrony](./plugins/inputs/chrony) -* [cisco_telemetry_gnmi](./plugins/inputs/cisco_telemetry_gnmi) (deprecated, renamed to [gnmi](/plugins/inputs/gnmi)) -* [cisco_telemetry_mdt](./plugins/inputs/cisco_telemetry_mdt) -* [clickhouse](./plugins/inputs/clickhouse) -* [cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub -* [cloud_pubsub_push](./plugins/inputs/cloud_pubsub_push) Google Cloud Pub/Sub push endpoint -* [conntrack](./plugins/inputs/conntrack) -* [consul](./plugins/inputs/consul) -* [couchbase](./plugins/inputs/couchbase) -* [couchdb](./plugins/inputs/couchdb) -* [cpu](./plugins/inputs/cpu) -* [DC/OS](./plugins/inputs/dcos) -* [diskio](./plugins/inputs/diskio) -* [disk](./plugins/inputs/disk) -* [disque](./plugins/inputs/disque) -* [dmcache](./plugins/inputs/dmcache) -* [dns query time](./plugins/inputs/dns_query) -* [docker](./plugins/inputs/docker) -* [docker_log](./plugins/inputs/docker_log) -* [dovecot](./plugins/inputs/dovecot) -* [aws ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate) -* [elasticsearch](./plugins/inputs/elasticsearch) -* [ethtool](./plugins/inputs/ethtool) -* [eventhub_consumer](./plugins/inputs/eventhub_consumer) (Azure Event Hubs \& Azure IoT Hub) -* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) -* [execd](./plugins/inputs/execd) (generic executable "daemon" processes) -* [fail2ban](./plugins/inputs/fail2ban) -* [fibaro](./plugins/inputs/fibaro) -* [file](./plugins/inputs/file) -* [filestat](./plugins/inputs/filestat) -* [filecount](./plugins/inputs/filecount) -* [fireboard](/plugins/inputs/fireboard) -* [fluentd](./plugins/inputs/fluentd) -* [github](./plugins/inputs/github) -* [gnmi](./plugins/inputs/gnmi) -* [graylog](./plugins/inputs/graylog) -* [haproxy](./plugins/inputs/haproxy) -* [hddtemp](./plugins/inputs/hddtemp) -* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin) -* [http_listener](./plugins/inputs/influxdb_listener) (deprecated, renamed to [influxdb_listener](/plugins/inputs/influxdb_listener)) -* [http_listener_v2](./plugins/inputs/http_listener_v2) -* [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats) -* [http_response](./plugins/inputs/http_response) -* [icinga2](./plugins/inputs/icinga2) -* [infiniband](./plugins/inputs/infiniband) -* [influxdb](./plugins/inputs/influxdb) -* [influxdb_listener](./plugins/inputs/influxdb_listener) -* [influxdb_v2_listener](./plugins/inputs/influxdb_v2_listener) -* [intel_rdt](./plugins/inputs/intel_rdt) -* 
[internal](./plugins/inputs/internal) -* [interrupts](./plugins/inputs/interrupts) -* [ipmi_sensor](./plugins/inputs/ipmi_sensor) -* [ipset](./plugins/inputs/ipset) -* [iptables](./plugins/inputs/iptables) -* [ipvs](./plugins/inputs/ipvs) -* [jenkins](./plugins/inputs/jenkins) -* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka) -* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) -* [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) -* [kafka_consumer](./plugins/inputs/kafka_consumer) -* [kapacitor](./plugins/inputs/kapacitor) -* [aws kinesis](./plugins/inputs/kinesis_consumer) (Amazon Kinesis) -* [kernel](./plugins/inputs/kernel) -* [kernel_vmstat](./plugins/inputs/kernel_vmstat) -* [kibana](./plugins/inputs/kibana) -* [kubernetes](./plugins/inputs/kubernetes) -* [kube_inventory](./plugins/inputs/kube_inventory) -* [lanz](./plugins/inputs/lanz) -* [leofs](./plugins/inputs/leofs) -* [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs) -* [logparser](./plugins/inputs/logparser) (deprecated, use [tail](/plugins/inputs/tail)) -* [logstash](./plugins/inputs/logstash) -* [lustre2](./plugins/inputs/lustre2) -* [mailchimp](./plugins/inputs/mailchimp) -* [marklogic](./plugins/inputs/marklogic) -* [mcrouter](./plugins/inputs/mcrouter) -* [memcached](./plugins/inputs/memcached) -* [mem](./plugins/inputs/mem) -* [mesos](./plugins/inputs/mesos) -* [minecraft](./plugins/inputs/minecraft) -* [modbus](./plugins/inputs/modbus) -* [mongodb](./plugins/inputs/mongodb) -* [monit](./plugins/inputs/monit) -* [mqtt_consumer](./plugins/inputs/mqtt_consumer) -* [multifile](./plugins/inputs/multifile) -* [mysql](./plugins/inputs/mysql) -* [nats_consumer](./plugins/inputs/nats_consumer) -* [nats](./plugins/inputs/nats) -* [neptune_apex](./plugins/inputs/neptune_apex) -* [net](./plugins/inputs/net) -* [net_response](./plugins/inputs/net_response) -* [netstat](./plugins/inputs/net) -* [nginx](./plugins/inputs/nginx) -* [nginx_plus_api](./plugins/inputs/nginx_plus_api) -* [nginx_plus](./plugins/inputs/nginx_plus) -* [nginx_sts](./plugins/inputs/nginx_sts) -* [nginx_upstream_check](./plugins/inputs/nginx_upstream_check) -* [nginx_vts](./plugins/inputs/nginx_vts) -* [nsd](./plugins/inputs/nsd) -* [nsq_consumer](./plugins/inputs/nsq_consumer) -* [nsq](./plugins/inputs/nsq) -* [nstat](./plugins/inputs/nstat) -* [ntpq](./plugins/inputs/ntpq) -* [nvidia_smi](./plugins/inputs/nvidia_smi) -* [opcua](./plugins/inputs/opcua) -* [openldap](./plugins/inputs/openldap) -* [openntpd](./plugins/inputs/openntpd) -* [opensmtpd](./plugins/inputs/opensmtpd) -* [openweathermap](./plugins/inputs/openweathermap) -* [pf](./plugins/inputs/pf) -* [pgbouncer](./plugins/inputs/pgbouncer) -* [phpfpm](./plugins/inputs/phpfpm) -* [phusion passenger](./plugins/inputs/passenger) -* [ping](./plugins/inputs/ping) -* [postfix](./plugins/inputs/postfix) -* [postgresql_extensible](./plugins/inputs/postgresql_extensible) -* [postgresql](./plugins/inputs/postgresql) -* [powerdns](./plugins/inputs/powerdns) -* [powerdns_recursor](./plugins/inputs/powerdns_recursor) -* [processes](./plugins/inputs/processes) -* [procstat](./plugins/inputs/procstat) -* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server)) -* [proxmox](./plugins/inputs/proxmox) -* [puppetagent](./plugins/inputs/puppetagent) -* [rabbitmq](./plugins/inputs/rabbitmq) -* [raindrops](./plugins/inputs/raindrops) -* [ras](./plugins/inputs/ras) 
-* [redfish](./plugins/inputs/redfish) -* [redis](./plugins/inputs/redis) -* [rethinkdb](./plugins/inputs/rethinkdb) -* [riak](./plugins/inputs/riak) -* [salesforce](./plugins/inputs/salesforce) -* [sensors](./plugins/inputs/sensors) -* [sflow](./plugins/inputs/sflow) -* [smart](./plugins/inputs/smart) -* [snmp_legacy](./plugins/inputs/snmp_legacy) -* [snmp](./plugins/inputs/snmp) -* [snmp_trap](./plugins/inputs/snmp_trap) -* [socket_listener](./plugins/inputs/socket_listener) -* [solr](./plugins/inputs/solr) -* [sql server](./plugins/inputs/sqlserver) (microsoft) -* [stackdriver](./plugins/inputs/stackdriver) (Google Cloud Monitoring) -* [statsd](./plugins/inputs/statsd) -* [suricata](./plugins/inputs/suricata) -* [swap](./plugins/inputs/swap) -* [synproxy](./plugins/inputs/synproxy) -* [syslog](./plugins/inputs/syslog) -* [sysstat](./plugins/inputs/sysstat) -* [systemd_units](./plugins/inputs/systemd_units) -* [system](./plugins/inputs/system) -* [tail](./plugins/inputs/tail) -* [temp](./plugins/inputs/temp) -* [tcp_listener](./plugins/inputs/socket_listener) -* [teamspeak](./plugins/inputs/teamspeak) -* [tengine](./plugins/inputs/tengine) -* [tomcat](./plugins/inputs/tomcat) -* [twemproxy](./plugins/inputs/twemproxy) -* [udp_listener](./plugins/inputs/socket_listener) -* [unbound](./plugins/inputs/unbound) -* [uwsgi](./plugins/inputs/uwsgi) -* [varnish](./plugins/inputs/varnish) -* [vsphere](./plugins/inputs/vsphere) VMware vSphere -* [webhooks](./plugins/inputs/webhooks) - * [filestack](./plugins/inputs/webhooks/filestack) - * [github](./plugins/inputs/webhooks/github) - * [mandrill](./plugins/inputs/webhooks/mandrill) - * [papertrail](./plugins/inputs/webhooks/papertrail) - * [particle](./plugins/inputs/webhooks/particle) - * [rollbar](./plugins/inputs/webhooks/rollbar) -* [win_eventlog](./plugins/inputs/win_eventlog) -* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) -* [win_services](./plugins/inputs/win_services) -* [wireguard](./plugins/inputs/wireguard) -* [wireless](./plugins/inputs/wireless) -* [x509_cert](./plugins/inputs/x509_cert) -* [zfs](./plugins/inputs/zfs) -* [zipkin](./plugins/inputs/zipkin) -* [zookeeper](./plugins/inputs/zookeeper) - -## Parsers - -- [InfluxDB Line Protocol](/plugins/parsers/influx) -- [Collectd](/plugins/parsers/collectd) -- [CSV](/plugins/parsers/csv) -- [Dropwizard](/plugins/parsers/dropwizard) -- [FormUrlencoded](/plugins/parser/form_urlencoded) -- [Graphite](/plugins/parsers/graphite) -- [Grok](/plugins/parsers/grok) -- [JSON](/plugins/parsers/json) -- [Logfmt](/plugins/parsers/logfmt) -- [Nagios](/plugins/parsers/nagios) -- [Value](/plugins/parsers/value), ie: 45 or "booyah" -- [Wavefront](/plugins/parsers/wavefront) - -## Serializers - -- [InfluxDB Line Protocol](/plugins/serializers/influx) -- [JSON](/plugins/serializers/json) -- [Graphite](/plugins/serializers/graphite) -- [ServiceNow](/plugins/serializers/nowmetric) -- [SplunkMetric](/plugins/serializers/splunkmetric) -- [Carbon2](/plugins/serializers/carbon2) -- [Wavefront](/plugins/serializers/wavefront) - -## Processor Plugins - -* [clone](/plugins/processors/clone) -* [converter](/plugins/processors/converter) -* [date](/plugins/processors/date) -* [dedup](/plugins/processors/dedup) -* [defaults](/plugins/processors/defaults) -* [enum](/plugins/processors/enum) -* [execd](/plugins/processors/execd) -* [ifname](/plugins/processors/ifname) -* [filepath](/plugins/processors/filepath) -* [override](/plugins/processors/override) -* 
[parser](/plugins/processors/parser) -* [pivot](/plugins/processors/pivot) -* [port_name](/plugins/processors/port_name) -* [printer](/plugins/processors/printer) -* [regex](/plugins/processors/regex) -* [rename](/plugins/processors/rename) -* [reverse_dns](/plugins/processors/reverse_dns) -* [s2geo](/plugins/processors/s2geo) -* [starlark](/plugins/processors/starlark) -* [strings](/plugins/processors/strings) -* [tag_limit](/plugins/processors/tag_limit) -* [template](/plugins/processors/template) -* [topk](/plugins/processors/topk) -* [unpivot](/plugins/processors/unpivot) - -## Aggregator Plugins - -* [basicstats](./plugins/aggregators/basicstats) -* [final](./plugins/aggregators/final) -* [histogram](./plugins/aggregators/histogram) -* [merge](./plugins/aggregators/merge) -* [minmax](./plugins/aggregators/minmax) -* [valuecounter](./plugins/aggregators/valuecounter) - -## Output Plugins - -* [influxdb](./plugins/outputs/influxdb) (InfluxDB 1.x) -* [influxdb_v2](./plugins/outputs/influxdb_v2) ([InfluxDB 2.x](https://github.com/influxdata/influxdb)) -* [amon](./plugins/outputs/amon) -* [amqp](./plugins/outputs/amqp) (rabbitmq) -* [application_insights](./plugins/outputs/application_insights) -* [aws kinesis](./plugins/outputs/kinesis) -* [aws cloudwatch](./plugins/outputs/cloudwatch) -* [azure_monitor](./plugins/outputs/azure_monitor) -* [cloud_pubsub](./plugins/outputs/cloud_pubsub) Google Cloud Pub/Sub -* [cratedb](./plugins/outputs/cratedb) -* [datadog](./plugins/outputs/datadog) -* [discard](./plugins/outputs/discard) -* [dynatrace](./plugins/outputs/dynatrace) -* [elasticsearch](./plugins/outputs/elasticsearch) -* [exec](./plugins/outputs/exec) -* [execd](./plugins/outputs/execd) -* [file](./plugins/outputs/file) -* [graphite](./plugins/outputs/graphite) -* [graylog](./plugins/outputs/graylog) -* [health](./plugins/outputs/health) -* [http](./plugins/outputs/http) -* [instrumental](./plugins/outputs/instrumental) -* [kafka](./plugins/outputs/kafka) -* [librato](./plugins/outputs/librato) -* [mqtt](./plugins/outputs/mqtt) -* [nats](./plugins/outputs/nats) -* [newrelic](./plugins/outputs/newrelic) -* [nsq](./plugins/outputs/nsq) -* [opentsdb](./plugins/outputs/opentsdb) -* [prometheus](./plugins/outputs/prometheus_client) -* [riemann](./plugins/outputs/riemann) -* [riemann_legacy](./plugins/outputs/riemann_legacy) -* [socket_writer](./plugins/outputs/socket_writer) -* [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring) -* [syslog](./plugins/outputs/syslog) -* [tcp](./plugins/outputs/socket_writer) -* [udp](./plugins/outputs/socket_writer) -* [warp10](./plugins/outputs/warp10) -* [wavefront](./plugins/outputs/wavefront) -* [sumologic](./plugins/outputs/sumologic) +[Latest Release Documentation](https://docs.influxdata.com/telegraf/latest/) + +For documentation on the latest development code see the [documentation index](/docs). + +- [Input Plugins](/docs/INPUTS.md) +- [Output Plugins](/docs/OUTPUTS.md) +- [Processor Plugins](/docs/PROCESSORS.md) +- [Aggregator Plugins](/docs/AGGREGATORS.md) diff --git a/vendor/github.com/influxdata/telegraf/SECURITY.md b/vendor/github.com/influxdata/telegraf/SECURITY.md new file mode 100644 index 000000000000..5b72cf863446 --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/SECURITY.md @@ -0,0 +1,6 @@ +# Security Policy + +## Reporting a Vulnerability + +InfluxData takes security and our users' trust very seriously. 
If you believe you have found a security issue in any of our open source projects, +please responsibly disclose it by contacting security@influxdata.com. More details about security vulnerability reporting, including our GPG key, can be found [here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). diff --git a/vendor/github.com/influxdata/telegraf/accumulator.go b/vendor/github.com/influxdata/telegraf/accumulator.go index 1ea5737a84a9..53ce0642bed0 100644 --- a/vendor/github.com/influxdata/telegraf/accumulator.go +++ b/vendor/github.com/influxdata/telegraf/accumulator.go @@ -41,7 +41,7 @@ type Accumulator interface { // AddMetric adds an metric to the accumulator. AddMetric(Metric) - // SetPrecision sets the timestamp rounding precision. All metrics addeds + // SetPrecision sets the timestamp rounding precision. All metrics // added to the accumulator will have their timestamp rounded to the // nearest multiple of precision. SetPrecision(precision time.Duration) diff --git a/vendor/github.com/influxdata/telegraf/appveyor.yml b/vendor/github.com/influxdata/telegraf/appveyor.yml deleted file mode 100644 index b454c8dc8d9d..000000000000 --- a/vendor/github.com/influxdata/telegraf/appveyor.yml +++ /dev/null @@ -1,35 +0,0 @@ -version: "{build}" - -image: Visual Studio 2019 - -cache: - - C:\gopath\pkg\mod -> go.sum - - C:\ProgramData\chocolatey\bin -> appveyor.yml - - C:\ProgramData\chocolatey\lib -> appveyor.yml - -clone_folder: C:\gopath\src\github.com\influxdata\telegraf - -environment: - GOPATH: C:\gopath - -stack: go 1.14 - -platform: x64 - -install: - - choco install make - - cd "%GOPATH%\src\github.com\influxdata\telegraf" - - git config --system core.longpaths true - - go version - - go env - -build_script: - - make deps - - make telegraf - -test_script: - - make check - - make test-windows - -artifacts: - - path: telegraf.exe diff --git a/vendor/github.com/influxdata/telegraf/build_version.txt b/vendor/github.com/influxdata/telegraf/build_version.txt index c807441cfed7..2aa7a9648ce6 100644 --- a/vendor/github.com/influxdata/telegraf/build_version.txt +++ b/vendor/github.com/influxdata/telegraf/build_version.txt @@ -1 +1 @@ -1.16.3 +1.25.2 \ No newline at end of file diff --git a/vendor/github.com/influxdata/telegraf/docker-compose.yml b/vendor/github.com/influxdata/telegraf/docker-compose.yml deleted file mode 100644 index 4e94b8f012ea..000000000000 --- a/vendor/github.com/influxdata/telegraf/docker-compose.yml +++ /dev/null @@ -1,105 +0,0 @@ -version: '3' - -services: - aerospike: - image: aerospike/aerospike-server:4.9.0.11 - ports: - - "3000:3000" - - "3001:3001" - - "3002:3002" - - "3003:3003" - zookeeper: - image: wurstmeister/zookeeper - environment: - - JAVA_OPTS="-Xms256m -Xmx256m" - ports: - - "2181:2181" - kafka: - image: wurstmeister/kafka - environment: - - KAFKA_ADVERTISED_HOST_NAME=localhost - - KAFKA_ADVERTISED_PORT=9092 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 - - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 - - JAVA_OPTS="-Xms256m -Xmx256m" - ports: - - "9092:9092" - depends_on: - - zookeeper - elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0 - environment: - - "ES_JAVA_OPTS=-Xms256m -Xmx256m" - - discovery.type=single-node - - xpack.security.enabled=false - ports: - - "9200:9200" - mysql: - image: mysql - environment: - - MYSQL_ALLOW_EMPTY_PASSWORD=yes - ports: - - "3306:3306" - memcached: - image: memcached - ports: - - "11211:11211" - pgbouncer: - image: mbentley/ubuntu-pgbouncer - environment: - - 
PG_ENV_POSTGRESQL_USER=pgbouncer - - PG_ENV_POSTGRESQL_PASS=pgbouncer - ports: - - "6432:6432" - postgres: - image: postgres:alpine - environment: - - POSTGRES_HOST_AUTH_METHOD=trust - ports: - - "5432:5432" - rabbitmq: - image: rabbitmq:3-management - ports: - - "15672:15672" - - "5672:5672" - redis: - image: redis:alpine - ports: - - "6379:6379" - nsq: - image: nsqio/nsq - ports: - - "4150:4150" - command: "/nsqd" - mqtt: - image: ncarlier/mqtt - ports: - - "1883:1883" - riemann: - image: stealthly/docker-riemann - ports: - - "5555:5555" - nats: - image: nats - ports: - - "4222:4222" - openldap: - image: cobaugh/openldap-alpine - environment: - - SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" - - SLAPD_CONFIG_ROOTPW="secret" - ports: - - "389:389" - - "636:636" - crate: - image: crate/crate - ports: - - "4200:4200" - - "4230:4230" - - "6543:5432" - command: - - crate - - -Cnetwork.host=0.0.0.0 - - -Ctransport.host=localhost - environment: - - CRATE_HEAP_SIZE=128m diff --git a/vendor/github.com/influxdata/telegraf/info.plist b/vendor/github.com/influxdata/telegraf/info.plist new file mode 100644 index 000000000000..e1267df8c178 --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/info.plist @@ -0,0 +1,16 @@ + + + + + CFBundleExecutable + telegraf_entry_mac + CFBundleIconFile + icon.icns + CFBundleIdentifier + com.influxdata.telegraf + NSHighResolutionCapable + + LSUIElement + + + \ No newline at end of file diff --git a/vendor/github.com/influxdata/telegraf/metric.go b/vendor/github.com/influxdata/telegraf/metric.go index 6c7b1c6c5f75..f940350ea319 100644 --- a/vendor/github.com/influxdata/telegraf/metric.go +++ b/vendor/github.com/influxdata/telegraf/metric.go @@ -32,6 +32,8 @@ type Field struct { // Metric is the type of data that is processed by Telegraf. Input plugins, // and to a lesser degree, Processor and Aggregator plugins create new Metrics // and Output plugins write them. +// +//nolint:interfacebloat // conditionally allow to contain more methods type Metric interface { // Name is the primary identifier for the Metric and corresponds to the // measurement in the InfluxDB data model. @@ -57,9 +59,7 @@ type Metric interface { Time() time.Time // Type returns a general type for the entire metric that describes how you - // might interpret, aggregate the values. - // - // This method may be removed in the future and its use is discouraged. + // might interpret, aggregate the values. Used by prometheus and statsd. Type() ValueType // SetName sets the metric name. @@ -122,14 +122,4 @@ type Metric interface { // Drop marks the metric as processed successfully without being written // to any output. Drop() - - // SetAggregate indicates the metric is an aggregated value. - // - // This method may be removed in the future and its use is discouraged. - SetAggregate(bool) - - // IsAggregate returns true if the Metric is an aggregate. - // - // This method may be removed in the future and its use is discouraged. - IsAggregate() bool } diff --git a/vendor/github.com/influxdata/telegraf/parser.go b/vendor/github.com/influxdata/telegraf/parser.go new file mode 100644 index 000000000000..5d67de987e8c --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/parser.go @@ -0,0 +1,39 @@ +package telegraf + +// Parser is an interface defining functions that a parser plugin must satisfy. +type Parser interface { + // Parse takes a byte buffer separated by newlines + // ie, `cpu.usage.idle 90\ncpu.usage.busy 10` + // and parses it into telegraf metrics + // + // Must be thread-safe. 
+ Parse(buf []byte) ([]Metric, error) + + // ParseLine takes a single string metric + // ie, "cpu.usage.idle 90" + // and parses it into a telegraf metric. + // + // Must be thread-safe. + ParseLine(line string) (Metric, error) + + // SetDefaultTags tells the parser to add all of the given tags + // to each parsed metric. + // NOTE: do _not_ modify the map after you've passed it here!! + SetDefaultTags(tags map[string]string) +} + +type ParserFunc func() (Parser, error) + +// ParserPlugin is an interface for plugins that are able to parse +// arbitrary data formats. +type ParserPlugin interface { + // SetParser sets the parser function for the interface + SetParser(parser Parser) +} + +// ParserFuncPlugin is an interface for plugins that are able to parse +// arbitrary data formats. +type ParserFuncPlugin interface { + // SetParserFunc sets a factory function that returns a new parser. + SetParserFunc(fn ParserFunc) +} diff --git a/vendor/github.com/influxdata/telegraf/plugin.go b/vendor/github.com/influxdata/telegraf/plugin.go index 0793fbb06111..d20d057b51e5 100644 --- a/vendor/github.com/influxdata/telegraf/plugin.go +++ b/vendor/github.com/influxdata/telegraf/plugin.go @@ -1,5 +1,39 @@ package telegraf +var Debug bool + +// Escalation level for the plugin or option +type Escalation int + +func (e Escalation) String() string { + switch e { + case Warn: + return "WARN" + case Error: + return "ERROR" + } + return "NONE" +} + +const ( + // None means no deprecation + None Escalation = iota + // Warn means deprecated but still within the grace period + Warn + // Error means deprecated and beyond grace period + Error +) + +// DeprecationInfo contains information for marking a plugin deprecated. +type DeprecationInfo struct { + // Since specifies the version since when the plugin is deprecated + Since string + // RemovalIn optionally specifies the version when the plugin is scheduled for removal + RemovalIn string + // Notice for the user on suggested replacements etc. + Notice string +} + // Initializer is an interface that all plugin types: Inputs, Outputs, // Processors, and Aggregators can optionally implement to initialize the // plugin. @@ -14,14 +48,11 @@ type Initializer interface { // not part of the interface, but will receive an injected logger if it's set. // eg: Log telegraf.Logger `toml:"-"` type PluginDescriber interface { - // SampleConfig returns the default configuration of the Processor + // SampleConfig returns the default configuration of the Plugin SampleConfig() string - - // Description returns a one-sentence description on the Processor - Description() string } -// Logger defines an interface for logging. +// Logger defines a plugin-related interface for logging. type Logger interface { // Errorf logs an error message, patterned after log.Printf. Errorf(format string, args ...interface{}) diff --git a/vendor/github.com/influxdata/telegraf/plugins/inputs/EXAMPLE_README.md b/vendor/github.com/influxdata/telegraf/plugins/inputs/EXAMPLE_README.md deleted file mode 100644 index 6b86615b0e6a..000000000000 --- a/vendor/github.com/influxdata/telegraf/plugins/inputs/EXAMPLE_README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Example Input Plugin - -The `example` plugin gathers metrics about example things. This description -explains at a high level what the plugin does and provides links to where -additional information can be found. - -Telegraf minimum version: Telegraf x.x -Plugin minimum tested version: x.x - -### Configuration - -This section contains the default TOML to configure the plugin.
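The `Parser` interface defined in the new parser.go above is the contract every data-format parser must satisfy. As a rough illustration of how a caller is expected to drive it (the `parseLines` helper below is hypothetical and not part of this patch), default tags are installed once and individual lines are then fed through `ParseLine`:

```go
package example

import (
	"fmt"

	"github.com/influxdata/telegraf"
)

// parseLines is a hypothetical helper that drives any telegraf.Parser:
// it installs the default tags once, then parses each line into a metric.
func parseLines(p telegraf.Parser, lines []string, defaults map[string]string) ([]telegraf.Metric, error) {
	p.SetDefaultTags(defaults)

	metrics := make([]telegraf.Metric, 0, len(lines))
	for _, line := range lines {
		m, err := p.ParseLine(line)
		if err != nil {
			return nil, fmt.Errorf("parsing %q: %w", line, err)
		}
		metrics = append(metrics, m)
	}
	return metrics, nil
}
```

A plugin that consumes a parser receives it either directly through `SetParser` (ParserPlugin) or as a factory through `SetParserFunc` (ParserFuncPlugin), both defined above.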
You can -generate it using `telegraf --usage `. - -```toml -[[inputs.example]] - example_option = "example_value" -``` - -#### example_option - -A more in depth description of an option can be provided here, but only do so -if the option cannot be fully described in the sample config. - -### Metrics - -Here you should add an optional description and links to where the user can -get more information about the measurements. - -If the output is determined dynamically based on the input source, or there -are more metrics than can reasonably be listed, describe how the input is -mapped to the output. - -- measurement1 - - tags: - - tag1 (optional description) - - tag2 - - fields: - - field1 (type, unit) - - field2 (float, percent) - -+ measurement2 - - tags: - - tag3 - - fields: - - field3 (integer, bytes) - - field4 (integer, green=1 yellow=2 red=3) - - field5 (string) - - field6 (float) - - field7 (boolean) - -### Sample Queries - -This section can contain some useful InfluxDB queries that can be used to get -started with the plugin or to generate dashboards. For each query listed, -describe at a high level what data is returned. - -Get the max, mean, and min for the measurement in the last hour: -``` -SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag -``` - -### Troubleshooting - -This optional section can provide basic troubleshooting steps that a user can -perform. - -### Example Output - -This section shows example output in Line Protocol format. You can often use -`telegraf --input-filter --test` or use the `file` output to get -this information. - -``` -measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455 -measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455 -``` diff --git a/vendor/github.com/influxdata/telegraf/plugins/inputs/deprecations.go b/vendor/github.com/influxdata/telegraf/plugins/inputs/deprecations.go new file mode 100644 index 000000000000..9c9a05634463 --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/plugins/inputs/deprecations.go @@ -0,0 +1,56 @@ +package inputs + +import "github.com/influxdata/telegraf" + +// Deprecations lists the deprecated plugins +var Deprecations = map[string]telegraf.DeprecationInfo{ + "cassandra": { + Since: "1.7.0", + Notice: "use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead", + }, + "cisco_telemetry_gnmi": { + Since: "1.15.0", + Notice: "has been renamed to 'gnmi'", + }, + "http_listener": { + Since: "1.9.0", + Notice: "has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead", + }, + "httpjson": { + Since: "1.6.0", + Notice: "use 'inputs.http' instead", + }, + "io": { + Since: "0.10.0", + RemovalIn: "2.0.0", + Notice: "use 'inputs.diskio' instead", + }, + "jolokia": { + Since: "1.5.0", + Notice: "use 'inputs.jolokia2' instead", + }, + "kafka_consumer_legacy": { + Since: "1.4.0", + Notice: "use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+", + }, + "KNXListener": { + Since: "1.20.1", + Notice: "has been renamed to 'knx_listener'", + }, + "logparser": { + Since: "1.15.0", + Notice: "use 'inputs.tail' with 'grok' data format instead", + }, + "snmp_legacy": { + Since: "1.0.0", + Notice: "use 'inputs.snmp' instead", + }, + "tcp_listener": { + Since: "1.3.0", + Notice: "use 'inputs.socket_listener' instead", + }, + "udp_listener": { + Since: "1.3.0", + Notice: "use 'inputs.socket_listener' instead", + }, +} diff --git 
a/vendor/github.com/influxdata/telegraf/plugins/inputs/mock_Plugin.go b/vendor/github.com/influxdata/telegraf/plugins/inputs/mock_Plugin.go deleted file mode 100644 index 4dec121bc7b6..000000000000 --- a/vendor/github.com/influxdata/telegraf/plugins/inputs/mock_Plugin.go +++ /dev/null @@ -1,31 +0,0 @@ -package inputs - -import ( - "github.com/influxdata/telegraf" - - "github.com/stretchr/testify/mock" -) - -// MockPlugin struct should be named the same as the Plugin -type MockPlugin struct { - mock.Mock -} - -// Description will appear directly above the plugin definition in the config file -func (m *MockPlugin) Description() string { - return `This is an example plugin` -} - -// SampleConfig will populate the sample configuration portion of the plugin's configuration -func (m *MockPlugin) SampleConfig() string { - return ` sampleVar = 'foo'` -} - -// Gather defines what data the plugin will gather. -func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error { - ret := m.Called(_a0) - - r0 := ret.Error(0) - - return r0 -} diff --git a/vendor/github.com/influxdata/telegraf/processor.go b/vendor/github.com/influxdata/telegraf/processor.go index 128a426c394e..bccf8ca5c7cd 100644 --- a/vendor/github.com/influxdata/telegraf/processor.go +++ b/vendor/github.com/influxdata/telegraf/processor.go @@ -37,5 +37,5 @@ type StreamingProcessor interface { // before returning from Stop(). // When stop returns, you should no longer be writing metrics to the // accumulator. - Stop() error + Stop() } diff --git a/vendor/github.com/influxdata/telegraf/secretstore.go b/vendor/github.com/influxdata/telegraf/secretstore.go new file mode 100644 index 000000000000..69e92baff0bc --- /dev/null +++ b/vendor/github.com/influxdata/telegraf/secretstore.go @@ -0,0 +1,25 @@ +package telegraf + +// SecretStore is an interface defining functions that a secret-store plugin must satisfy. +type SecretStore interface { + Initializer + PluginDescriber + + // Get searches for the given key and return the secret + Get(key string) ([]byte, error) + + // Set sets the given secret for the given key + Set(key, value string) error + + // List lists all known secret keys + List() ([]string, error) + + // GetResolver returns a function to resolve the given key. + GetResolver(key string) (ResolveFunc, error) +} + +// ResolveFunc is a function to resolve the secret. +// The returned flag indicates if the resolver is static (false), i.e. +// the secret will not change over time, or dynamic (true) to handle +// secrets that change over time (e.g. TOTP). 
+type ResolveFunc func() ([]byte, bool, error) diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go index e4571ce86691..fd01342e68a0 100644 --- a/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go @@ -89,7 +89,12 @@ func (cl *Client) GetServiceTicket(spn string) (messages.Ticket, types.Encryptio return tkt, skey, nil } princ := types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn) - realm := cl.Config.ResolveRealm(princ.NameString[len(princ.NameString)-1]) + realm := cl.spnRealm(princ) + + // if we don't know the SPN's realm, ask the client realm's KDC + if realm == "" { + realm = cl.Credentials.Realm() + } tgt, skey, err := cl.sessionTGT(realm) if err != nil { diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/network.go b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go index 634f015c214a..688ad3a134e5 100644 --- a/vendor/github.com/jcmturner/gokrb5/v8/client/network.go +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go @@ -2,7 +2,6 @@ package client import ( "encoding/binary" - "errors" "fmt" "io" "net" @@ -173,7 +172,7 @@ func dialSendTCP(kdcs map[int]string, b []byte) ([]byte, error) { } return rb, nil } - return nil, errors.New("error in getting a TCP connection to any of the KDCs") + return nil, fmt.Errorf("error sending to a KDC: %s", strings.Join(errs, "; ")) } // sendTCP sends bytes to connection over TCP. diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/session.go b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go index f7654d0d07eb..7e1e65cfdb01 100644 --- a/vendor/github.com/jcmturner/gokrb5/v8/client/session.go +++ b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go @@ -41,7 +41,9 @@ func (s *sessions) update(sess *session) { // Cancel the one in the cache and add this one. i.mux.Lock() defer i.mux.Unlock() - i.cancel <- true + if i.cancel != nil { + i.cancel <- true + } s.Entries[sess.realm] = sess return } diff --git a/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go index a7638433d5fa..f448c8a4f5ef 100644 --- a/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go +++ b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go @@ -95,7 +95,7 @@ func newLibDefaults() LibDefaults { opts := asn1.BitString{} opts.Bytes, _ = hex.DecodeString("00000010") opts.BitLength = len(opts.Bytes) * 8 - return LibDefaults{ + l := LibDefaults{ CCacheType: 4, Clockskew: time.Duration(300) * time.Second, DefaultClientKeytabName: fmt.Sprintf("/usr/local/var/krb5/user/%s/client.keytab", uid), @@ -115,6 +115,10 @@ func newLibDefaults() LibDefaults { UDPPreferenceLimit: 1465, PreferredPreauthTypes: []int{17, 16, 15, 14}, } + l.DefaultTGSEnctypeIDs = parseETypes(l.DefaultTGSEnctypes, l.AllowWeakCrypto) + l.DefaultTktEnctypeIDs = parseETypes(l.DefaultTktEnctypes, l.AllowWeakCrypto) + l.PermittedEnctypeIDs = parseETypes(l.PermittedEnctypes, l.AllowWeakCrypto) + return l } // Parse the lines of the [libdefaults] section of the configuration into the LibDefaults struct. @@ -507,7 +511,7 @@ func (c *Config) ResolveRealm(domainName string) string { return r } } - return c.LibDefaults.DefaultRealm + return "" } // Load the KRB5 configuration from the specified file path. 
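The `SecretStore` and `ResolveFunc` types added in secretstore.go earlier in this patch define the contract for secret-store plugins, on top of the `Initializer` and `PluginDescriber` interfaces from plugin.go. As a rough, purely illustrative sketch (not part of this patch; the `memStore` name, its TOML section name, and the assumption that `Initializer` carries an `Init() error` method are mine), an in-memory store satisfying that contract could look like:

```go
package example

import (
	"fmt"
	"sort"

	"github.com/influxdata/telegraf"
)

// memStore is a hypothetical in-memory secret store used only to
// illustrate the telegraf.SecretStore contract shown above.
type memStore struct {
	secrets map[string][]byte
}

// Init satisfies telegraf.Initializer (assumed to be `Init() error`).
func (s *memStore) Init() error {
	s.secrets = make(map[string][]byte)
	return nil
}

// SampleConfig satisfies telegraf.PluginDescriber; the section name is invented.
func (s *memStore) SampleConfig() string { return "[[secretstores.mem]]\n" }

// Get searches for the given key and returns the secret.
func (s *memStore) Get(key string) ([]byte, error) {
	v, ok := s.secrets[key]
	if !ok {
		return nil, fmt.Errorf("unknown secret %q", key)
	}
	return v, nil
}

// Set stores the given secret under the given key.
func (s *memStore) Set(key, value string) error {
	s.secrets[key] = []byte(value)
	return nil
}

// List returns all known secret keys in a stable order.
func (s *memStore) List() ([]string, error) {
	keys := make([]string, 0, len(s.secrets))
	for k := range s.secrets {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys, nil
}

// GetResolver returns a static resolver: the middle return value is false
// because this secret never changes; a TOTP-style store would return true.
func (s *memStore) GetResolver(key string) (telegraf.ResolveFunc, error) {
	return func() ([]byte, bool, error) {
		v, err := s.Get(key)
		return v, false, err
	}, nil
}

// Compile-time check that the sketch really satisfies the interface.
var _ telegraf.SecretStore = (*memStore)(nil)
```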
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go index c3b35c77a12d..a021e4141f42 100644 --- a/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go +++ b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/binary" "errors" - "io/ioutil" + "os" "strings" "time" "unsafe" @@ -63,7 +63,7 @@ type Credential struct { // LoadCCache loads a credential cache file into a CCache type. func LoadCCache(cpath string) (*CCache, error) { c := new(CCache) - b, err := ioutil.ReadFile(cpath) + b, err := os.ReadFile(cpath) if err != nil { return c, err } diff --git a/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go index 5c2e9d79a1b9..cd1b8b1ef7de 100644 --- a/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go +++ b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go @@ -8,7 +8,7 @@ import ( "errors" "fmt" "io" - "io/ioutil" + "os" "strings" "time" "unsafe" @@ -93,7 +93,7 @@ func (kt *Keytab) GetEncryptionKey(princName types.PrincipalName, realm string, } } if len(key.KeyValue) < 1 { - return key, 0, fmt.Errorf("matching key not found in keytab. Looking for %v realm: %v kvno: %v etype: %v", princName.NameString, realm, kvno, etype) + return key, 0, fmt.Errorf("matching key not found in keytab. Looking for %q realm: %v kvno: %v etype: %v", princName.PrincipalNameString(), realm, kvno, etype) } return key, kv, nil } @@ -170,7 +170,7 @@ func newPrincipal() principal { // Load a Keytab file into a Keytab type. func Load(ktPath string) (*Keytab, error) { kt := new(Keytab) - b, err := ioutil.ReadFile(ktPath) + b, err := os.ReadFile(ktPath) if err != nil { return kt, err } diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go index 1fdba78a37d4..115a02ae31dc 100644 --- a/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go +++ b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go @@ -43,7 +43,7 @@ func NewAuthenticator(realm string, cname PrincipalName) (Authenticator, error) Cksum: Checksum{}, Cusec: int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)), CTime: t, - SeqNumber: seq.Int64(), + SeqNumber: seq.Int64() & 0x3fffffff, }, nil } @@ -53,7 +53,7 @@ func (a *Authenticator) GenerateSeqNumberAndSubKey(keyType int32, keySize int) e if err != nil { return err } - a.SeqNumber = seq.Int64() + a.SeqNumber = seq.Int64() & 0x3fffffff //Generate subkey value sk := make([]byte, keySize, keySize) rand.Read(sk) diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore index b35f8449bf28..d31b37815279 100644 --- a/vendor/github.com/klauspost/compress/.gitignore +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -23,3 +23,10 @@ _testmain.go *.test *.prof /s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 0af08e65e682..a2bf06e94f61 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,7 +3,7 @@ before: hooks: - ./gen.sh - - go install mvdan.cc/garble@latest + - go install mvdan.cc/garble@v0.7.2 builds: - diff --git a/vendor/github.com/klauspost/compress/README.md 
b/vendor/github.com/klauspost/compress/README.md index e8ff994f8bcb..63f2cd5b25f4 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -9,7 +9,6 @@ This package provides various compression algorithms. * [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. * [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. * [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. -* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. [![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) @@ -17,13 +16,159 @@ This package provides various compression algorithms. # changelog +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. 
https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more 
common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. 
+ +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + +Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. + +Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. + +While the release has been extensively tested, it is recommended to testing when upgrading. + +
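The v1.15.0 note above says both compression and decompression support "synchronous" stream operations when concurrency is set to 1, i.e. they run without spawning worker goroutines. A minimal sketch of opting into that mode (illustrative only, not part of the upstream changelog; it assumes the zstd package's `NewWriter` and `WithEncoderConcurrency` options):

```go
package main

import (
	"bytes"
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Concurrency 1 keeps all encoding work on the calling goroutine,
	// the "synchronous" mode described in the v1.15.0 notes above.
	enc, err := zstd.NewWriter(os.Stdout, zstd.WithEncoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(enc, bytes.NewReader([]byte("hello zstd"))); err != nil {
		log.Fatal(err)
	}
	// Close flushes and finishes the frame.
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
}
```

On the read side, `WithDecoderConcurrency(1)` is the corresponding decoder option.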
+ See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + * Jan 11, 2022 (v1.14.1) * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
+
+ See changes to v1.13.x + * Aug 30, 2021 (v1.13.5) * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) @@ -52,7 +197,12 @@ This package provides various compression algorithms. * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ +
+ See changes to v1.12.x + * May 25, 2021 (v1.12.3) * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) @@ -74,9 +224,10 @@ This package provides various compression algorithms. * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
- See changes prior to v1.12.1 + See changes to v1.11.x * Mar 26, 2021 (v1.11.13) * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) @@ -135,7 +286,7 @@ This package provides various compression algorithms.
- See changes prior to v1.11.0 + See changes to v1.10.x * July 8, 2020 (v1.10.11) * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) @@ -297,11 +448,6 @@ This package provides various compression algorithms. # deflate usage -* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/). -* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/). -* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). -* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) - The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: | old import | new import | Documentation @@ -323,6 +469,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range. If you expect to have a lot of concurrently allocated Writers consider using the stateless compress described below. +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index b27f5a93bc2b..82882961a012 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -10,9 +10,6 @@ import ( "fmt" "io" "math" - "math/bits" - - comp "github.com/klauspost/compress" ) const ( @@ -76,8 +73,8 @@ var levels = []compressionLevel{ {0, 0, 0, 0, 0, 6}, // Levels 7-9 use increasingly more lazy matching // and increasingly stringent conditions for "good enough". - {6, 10, 12, 16, skipNever, 7}, - {10, 24, 32, 64, skipNever, 8}, + {8, 12, 16, 24, skipNever, 7}, + {16, 30, 40, 64, skipNever, 8}, {32, 258, 258, 1024, skipNever, 9}, } @@ -87,29 +84,29 @@ type advancedState struct { length int offset int maxInsertIndex int + chainHead int + hashOffset int - // Input hash chains - // hashHead[hashValue] contains the largest inputIndex with the specified hash value - // If hashHead[hashValue] is within the current window, then - // hashPrev[hashHead[hashValue] & windowMask] contains the previous index - // with the same hash value. - chainHead int - hashHead [hashSize]uint32 - hashPrev [windowSize]uint32 - hashOffset int + ii uint16 // position of last match, intended to overflow to reset. // input window: unprocessed data is window[index:windowEnd] index int estBitsPerByte int hashMatch [maxMatchLength + minMatchLength]uint32 - hash uint32 - ii uint16 // position of last match, intended to overflow to reset. + // Input hash chains + // hashHead[hashValue] contains the largest inputIndex with the specified hash value + // If hashHead[hashValue] is within the current window, then + // hashPrev[hashHead[hashValue] & windowMask] contains the previous index + // with the same hash value. 
+ hashHead [hashSize]uint32 + hashPrev [windowSize]uint32 } type compressor struct { compressionLevel + h *huffmanEncoder w *huffmanBitWriter // compression algorithm @@ -134,7 +131,8 @@ func (d *compressor) fillDeflate(b []byte) int { s := d.state if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) { // shift the window by windowSize - copy(d.window[:], d.window[windowSize:2*windowSize]) + //copy(d.window[:], d.window[windowSize:2*windowSize]) + *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:]) s.index -= windowSize d.windowEnd -= windowSize if d.blockStart >= windowSize { @@ -261,7 +259,6 @@ func (d *compressor) fillWindow(b []byte) { // Set the head of the hash chain to us. s.hashHead[newH] = uint32(di + s.hashOffset) } - s.hash = newH } // Update window information. d.windowEnd += n @@ -271,7 +268,7 @@ func (d *compressor) fillWindow(b []byte) { // Try to find a match starting at index whose length is greater than prevSize. // We only look at chainCount possibilities before giving up. // pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead -func (d *compressor) findMatch(pos int, prevHead int, lookahead, bpb int) (length, offset int, ok bool) { +func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { minMatchLook := maxMatchLength if lookahead < minMatchLook { minMatchLook = lookahead @@ -297,14 +294,49 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead, bpb int) (lengt } offset = 0 + if d.chain < 100 { + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return + } + + // Minimum gain to accept a match. + cGain := 4 + + // Some like it higher (CSV), some like it lower (JSON) + const baseCost = 3 // Base is 4 bytes at with an additional cost. // Matches must be better than this. - cGain := minMatchLength*bpb - 12 + for i := prevHead; tries > 0; tries-- { if wEnd == win[i+length] { n := matchLen(win[i:i+minMatchLook], wPos) if n > length { - newGain := n*bpb - bits.Len32(uint32(pos-i)) + // Calculate gain. Estimate + newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) + + //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length) if newGain > cGain { length = n offset = pos - i @@ -345,6 +377,12 @@ func hash4(b []byte) uint32 { return hash4u(binary.LittleEndian.Uint32(b), hashBits) } +// hash4 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. 
+func hash4u(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> (32 - h) +} + // bulkHash4 will compute hashes using the same // algorithm as hash4 func bulkHash4(b []byte, dst []uint32) { @@ -373,7 +411,6 @@ func (d *compressor) initDeflate() { s.hashOffset = 1 s.length = minMatchLength - 1 s.offset = 0 - s.hash = 0 s.chainHead = -1 } @@ -389,16 +426,19 @@ func (d *compressor) deflateLazy() { if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { return } - s.estBitsPerByte = 8 - if !d.sync { - s.estBitsPerByte = comp.ShannonEntropyBits(d.window[s.index:d.windowEnd]) - s.estBitsPerByte = int(1 + float64(s.estBitsPerByte)/float64(d.windowEnd-s.index)) + if d.windowEnd != s.index && d.chain > 100 { + // Get literal huffman coder. + if d.h == nil { + d.h = newHuffmanEncoder(maxFlateBlockTokens) + } + var tmp [256]uint16 + for _, v := range d.window[s.index:d.windowEnd] { + tmp[v]++ + } + d.h.generate(tmp[:], 15) } s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) - if s.index < s.maxInsertIndex { - s.hash = hash4(d.window[s.index:]) - } for { if sanity && s.index > d.windowEnd { @@ -430,11 +470,11 @@ func (d *compressor) deflateLazy() { } if s.index < s.maxInsertIndex { // Update the hash - s.hash = hash4(d.window[s.index:]) - ch := s.hashHead[s.hash&hashMask] + hash := hash4(d.window[s.index:]) + ch := s.hashHead[hash] s.chainHead = int(ch) s.hashPrev[s.index&windowMask] = ch - s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset) + s.hashHead[hash] = uint32(s.index + s.hashOffset) } prevLength := s.length prevOffset := s.offset @@ -446,34 +486,110 @@ func (d *compressor) deflateLazy() { } if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { - if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead, s.estBitsPerByte); ok { + if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { s.length = newLength s.offset = newOffset } } if prevLength >= minMatchLength && s.length <= prevLength { - // Check for better match at end... + // No better match, but check for better match at end... // - // checkOff must be >=2 since we otherwise risk checking s.index - // Offset of 2 seems to yield best results. + // Skip forward a number of bytes. + // Offset of 2 seems to yield best results. 3 is sometimes better. const checkOff = 2 - prevIndex := s.index - 1 - if prevIndex+prevLength+checkOff < s.maxInsertIndex { - end := lookahead - if lookahead > maxMatchLength { - end = maxMatchLength - } - end += prevIndex - idx := prevIndex + prevLength - (4 - checkOff) - h := hash4(d.window[idx:]) - ch2 := int(s.hashHead[h&hashMask]) - s.hashOffset - prevLength + (4 - checkOff) - if ch2 > minIndex { - length := matchLen(d.window[prevIndex:end], d.window[ch2:]) - // It seems like a pure length metric is best. - if length > prevLength { - prevLength = length - prevOffset = prevIndex - ch2 + + // Check all, except full length + if prevLength < maxMatchLength-checkOff { + prevIndex := s.index - 1 + if prevIndex+prevLength < s.maxInsertIndex { + end := lookahead + if lookahead > maxMatchLength+checkOff { + end = maxMatchLength + checkOff + } + end += prevIndex + + // Hash at match end. + h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. 
+ if length > prevLength { + prevLength = length + prevOffset = prevIndex - ch2 + + // Extend back... + for i := checkOff - 1; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } else if false { + // Check one further ahead. + // Only rarely better, disabled for now. + prevIndex++ + h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength+checkOff { + prevLength = length + prevOffset = prevIndex - ch2 + prevIndex-- + + // Extend back... + for i := checkOff; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } + } + } } } } @@ -511,7 +627,6 @@ func (d *compressor) deflateLazy() { // Set the head of the hash chain to us. s.hashHead[newH] = uint32(di + s.hashOffset) } - s.hash = newH } s.index = newIndex @@ -757,7 +872,6 @@ func (d *compressor) reset(w io.Writer) { d.tokens.Reset() s.length = minMatchLength - 1 s.offset = 0 - s.hash = 0 s.ii = 0 s.maxInsertIndex = 0 } diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go index 71c75a065ea7..bb36351a5af3 100644 --- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -7,19 +7,19 @@ package flate // dictDecoder implements the LZ77 sliding dictionary as used in decompression. // LZ77 decompresses data through sequences of two forms of commands: // -// * Literal insertions: Runs of one or more symbols are inserted into the data -// stream as is. This is accomplished through the writeByte method for a -// single symbol, or combinations of writeSlice/writeMark for multiple symbols. -// Any valid stream must start with a literal insertion if no preset dictionary -// is used. +// - Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. 
// -// * Backward copies: Runs of one or more symbols are copied from previously -// emitted data. Backward copies come as the tuple (dist, length) where dist -// determines how far back in the stream to copy from and length determines how -// many bytes to copy. Note that it is valid for the length to be greater than -// the distance. Since LZ77 uses forward copies, that situation is used to -// perform a form of run-length encoding on repeated runs of symbols. -// The writeCopy and tryWriteCopy are used to implement this command. +// - Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. +// The writeCopy and tryWriteCopy are used to implement this command. // // For performance reasons, this implementation performs little to no sanity // checks about the arguments. As such, the invariants documented for each diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 0b2e54972cdd..24caf5f70b00 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -58,17 +58,6 @@ const ( prime8bytes = 0xcf1bbcdcb7a56463 ) -func load32(b []byte, i int) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - func load3232(b []byte, i int32) uint32 { return binary.LittleEndian.Uint32(b[i:]) } @@ -77,10 +66,6 @@ func load6432(b []byte, i int32) uint64 { return binary.LittleEndian.Uint64(b[i:]) } -func hash(u uint32) uint32 { - return (u * 0x1e35a7bd) >> tableShift -} - type tableEntry struct { offset int32 } @@ -104,7 +89,8 @@ func (e *fastGen) addBlock(src []byte) int32 { } // Move down offset := int32(len(e.hist)) - maxMatchOffset - copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + // copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:]) e.cur += offset e.hist = e.hist[:maxMatchOffset] } @@ -114,39 +100,36 @@ func (e *fastGen) addBlock(src []byte) int32 { return s } -// hash4 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4u(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> ((32 - h) & reg8SizeMask32) -} - type tableEntryPrev struct { Cur tableEntry Prev tableEntry } -// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4x64(u uint64, h uint8) uint32 { - return (uint32(u) * prime4bytes) >> ((32 - h) & reg8SizeMask32) -} - // hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <64. 
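Aside, illustrative only and not part of the patch: addBlock above replaces a copy() of the history window with a slice-to-array-pointer assignment (a Go 1.17+ construct), which compiles to a fixed-size move with a single bounds check. The window size below is a stand-in constant, not the package's maxMatchOffset.

package main

import "fmt"

const winSize = 8 // stand-in for maxMatchOffset; illustration only

func main() {
	hist := []byte("0123456789abcdef")
	offset := len(hist) - winSize

	dst := make([]byte, winSize)
	copy(dst, hist[offset:]) // what the commented-out copy did

	*(*[winSize]byte)(hist) = *(*[winSize]byte)(hist[offset:]) // the new fixed-size move
	fmt.Println(string(hist[:winSize]) == string(dst))         // true
}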
func hash7(u uint64, h uint8) uint32 { return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) } -// hash8 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash8(u uint64, h uint8) uint32 { - return uint32((u * prime8bytes) >> ((64 - h) & reg8SizeMask64)) -} - -// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash6(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & reg8SizeMask64)) +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } } // matchlen will return the match length between offsets and t in src. @@ -179,7 +162,7 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 { // matchlenLong will return the match length between offsets and t in src. // It is assumed that s > t, that t >=0 and s < len(src). func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { - if debugDecode { + if debugDeflate { if t >= s { panic(fmt.Sprint("t >=s:", t, s)) } diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index fb1701eeccea..89a5dd89f983 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "fmt" "io" + "math" ) const ( @@ -24,6 +25,10 @@ const ( codegenCodeCount = 19 badCode = 255 + // maxPredefinedTokens is the maximum number of tokens + // where we check if fixed size is smaller. + maxPredefinedTokens = 250 + // bufferFlushSize indicates the buffer size // after which bytes are flushed to the writer. // Should preferably be a multiple of 6, since @@ -36,8 +41,11 @@ const ( bufferSize = bufferFlushSize + 8 ) +// Minimum length code that emits bits. +const lengthExtraBitsMinCode = 8 + // The number of extra bits needed by length code X - LENGTH_CODES_START. -var lengthExtraBits = [32]int8{ +var lengthExtraBits = [32]uint8{ /* 257 */ 0, 0, 0, /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, @@ -51,19 +59,22 @@ var lengthBase = [32]uint8{ 64, 80, 96, 112, 128, 160, 192, 224, 255, } +// Minimum offset code that emits bits. +const offsetExtraBitsMinCode = 4 + // offset code word extra bits. 
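Aside, illustrative only and not part of the patch: the fast_encoder.go hunk above folds the per-size hash helpers into a single hashLen(u, length, mls). A minimal sketch of that dispatch is below; prime8bytes is the constant visible in the context above, while the 4-byte prime is an assumed placeholder for the example, and only two of the branches are reproduced.

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	prime4bytes uint32 = 2654435761         // assumed value, illustration only
	prime8bytes uint64 = 0xcf1bbcdcb7a56463 // as in the surrounding context
)

// hashLen hashes the lowest mls bytes of u into `length` output bits.
func hashLen(u uint64, length, mls uint8) uint32 {
	switch mls {
	case 8:
		return uint32((u * prime8bytes) >> (64 - length))
	default: // treat anything else as a 4-byte hash
		return (uint32(u) * prime4bytes) >> (32 - length)
	}
}

func main() {
	src := []byte("abcdefghijklmnop")
	u := binary.LittleEndian.Uint64(src) // equivalent to load6432(src, 0)
	fmt.Println(hashLen(u, 15, 8))       // index into a 1<<15 entry table
	fmt.Println(hashLen(u, 15, 4))       // same input, 4-byte hash
}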
-var offsetExtraBits = [64]int8{ +var offsetExtraBits = [32]int8{ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, /* extended window */ - 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, + 14, 14, } var offsetCombined = [32]uint32{} func init() { - var offsetBase = [64]uint32{ + var offsetBase = [32]uint32{ /* normal deflate */ 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, @@ -73,17 +84,15 @@ func init() { 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, /* extended window */ - 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, - 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, - 0x100000, 0x180000, 0x200000, 0x300000, + 0x008000, 0x00c000, } for i := range offsetCombined[:] { // Don't use extended window values... - if offsetBase[i] > 0x006000 { + if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { continue } - offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i]) + offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) } } @@ -99,7 +108,7 @@ type huffmanBitWriter struct { // Data waiting to be written is bytes[0:nbytes] // and then the low nbits of bits. bits uint64 - nbits uint16 + nbits uint8 nbytes uint8 lastHuffMan bool literalEncoding *huffmanEncoder @@ -160,7 +169,7 @@ func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { b := w.offsetEncoding.codes b = b[:len(a)] for i, v := range a { - if v != 0 && b[i].len == 0 { + if v != 0 && b[i].zero() { return false } } @@ -169,7 +178,7 @@ func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { b = w.literalEncoding.codes[256:literalCount] b = b[:len(a)] for i, v := range a { - if v != 0 && b[i].len == 0 { + if v != 0 && b[i].zero() { return false } } @@ -177,7 +186,7 @@ func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { a = t.litHist[:256] b = w.literalEncoding.codes[:len(a)] for i, v := range a { - if v != 0 && b[i].len == 0 { + if v != 0 && b[i].zero() { return false } } @@ -217,7 +226,7 @@ func (w *huffmanBitWriter) write(b []byte) { _, w.err = w.writer.Write(b) } -func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { +func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { w.bits |= uint64(b) << (w.nbits & 63) w.nbits += nb if w.nbits >= 48 { @@ -256,9 +265,9 @@ func (w *huffmanBitWriter) writeBytes(bytes []byte) { // Codes 0-15 are single byte codes. Codes 16-18 are followed by additional // information. Code badCode is an end marker // -// numLiterals The number of literals in literalEncoding -// numOffsets The number of offsets in offsetEncoding -// litenc, offenc The literal and offset encoder to use +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +// litenc, offenc The literal and offset encoder to use func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { for i := range w.codegenFreq { w.codegenFreq[i] = 0 @@ -271,12 +280,12 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE // Copy the concatenated code sizes to codegen. Put a marker at the end. 
cgnl := codegen[:numLiterals] for i := range cgnl { - cgnl[i] = uint8(litEnc.codes[i].len) + cgnl[i] = litEnc.codes[i].len() } cgnl = codegen[numLiterals : numLiterals+numOffsets] for i := range cgnl { - cgnl[i] = uint8(offEnc.codes[i].len) + cgnl[i] = offEnc.codes[i].len() } codegen[numLiterals+numOffsets] = badCode @@ -419,8 +428,8 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { func (w *huffmanBitWriter) writeCode(c hcode) { // The function does not get inlined if we "& 63" the shift. - w.bits |= uint64(c.code) << (w.nbits & 63) - w.nbits += c.len + w.bits |= c.code64() << (w.nbits & 63) + w.nbits += c.len() if w.nbits >= 48 { w.writeOutBits() } @@ -451,9 +460,9 @@ func (w *huffmanBitWriter) writeOutBits() { // Write the header of a dynamic Huffman block to the output stream. // -// numLiterals The number of literals specified in codegen -// numOffsets The number of offsets specified in codegen -// numCodegens The number of codegens used in codegen +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { if w.err != nil { return @@ -468,7 +477,7 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n w.writeBits(int32(numCodegens-4), 4) for i := 0; i < numCodegens; i++ { - value := uint(w.codegenEncoding.codes[codegenOrder[i]].len) + value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) w.writeBits(int32(value), 3) } @@ -573,7 +582,10 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { // Fixed Huffman baseline. var literalEncoding = fixedLiteralEncoding var offsetEncoding = fixedOffsetEncoding - var size = w.fixedSize(extraBits) + var size = math.MaxInt32 + if tokens.n < maxPredefinedTokens { + size = w.fixedSize(extraBits) + } // Dynamic Huffman? var numCodegens int @@ -658,7 +670,7 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b // Estimate size for using a new table. // Use the previous header size as the best estimate. newSize := w.lastHeader + tokens.EstimatedBits() - newSize += int(w.literalEncoding.codes[endBlockMarker].len) + newSize>>w.logNewTablePenalty + newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty // The estimated size is calculated as an optimal table. // We add a penalty to make it more realistic and re-use a bit more. @@ -674,19 +686,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b size = reuseSize } - if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { - // Check if we get a reasonable size decrease. - if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) return } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return } // Check if we get a reasonable size decrease. 
if storable && ssize <= size { @@ -719,19 +733,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) // Store predefined, if we don't get a reasonable improvement. - if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { - // Store bytes, if we don't get an improvement. - if storable && ssize <= preSize { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { + // Store bytes, if we don't get an improvement. + if storable && ssize <= preSize { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) return } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return } if storable && ssize <= size { @@ -774,9 +790,11 @@ func (w *huffmanBitWriter) fillTokens() { // and offsetEncoding. // The number of literal and offset tokens is returned. func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { - copy(w.literalFreq[:], t.litHist[:]) - copy(w.literalFreq[256:], t.extraHist[:]) - copy(w.offsetFreq[:], t.offHist[:offsetCodeCount]) + //copy(w.literalFreq[:], t.litHist[:]) + *(*[256]uint16)(w.literalFreq[:]) = t.litHist + //copy(w.literalFreq[256:], t.extraHist[:]) + *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist + w.offsetFreq = t.offHist if t.n == 0 { return @@ -835,11 +853,11 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits, nbits, nbytes := w.bits, w.nbits, w.nbytes for _, t := range tokens { - if t < matchType { + if t < 256 { //w.writeCode(lits[t.literal()]) - c := lits[t.literal()] - bits |= uint64(c.code) << (nbits & 63) - nbits += c.len + c := lits[t] + bits |= c.code64() << (nbits & 63) + nbits += c.len() if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -860,14 +878,14 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) // Write the length length := t.length() - lengthCode := lengthCode(length) + lengthCode := lengthCode(length) & 31 if false { - w.writeCode(lengths[lengthCode&31]) + w.writeCode(lengths[lengthCode]) } else { // inlined - c := lengths[lengthCode&31] - bits |= uint64(c.code) << (nbits & 63) - nbits += c.len + c := lengths[lengthCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -885,10 +903,10 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } - extraLengthBits := uint16(lengthExtraBits[lengthCode&31]) - if extraLengthBits > 0 { + if lengthCode >= lengthExtraBitsMinCode { + extraLengthBits := lengthExtraBits[lengthCode] //w.writeBits(extraLength, extraLengthBits) - extraLength := int32(length - lengthBase[lengthCode&31]) + extraLength := int32(length - lengthBase[lengthCode]) bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { @@ -909,15 +927,14 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } // Write the offset offset := t.offset() - 
offsetCode := offset >> 16 - offset &= matchOffsetOnlyMask + offsetCode := (offset >> 16) & 31 if false { - w.writeCode(offs[offsetCode&31]) + w.writeCode(offs[offsetCode]) } else { // inlined c := offs[offsetCode] - bits |= uint64(c.code) << (nbits & 63) - nbits += c.len + bits |= c.code64() << (nbits & 63) + nbits += c.len() if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -934,11 +951,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } } - offsetComb := offsetCombined[offsetCode] - if offsetComb > 1<<16 { + + if offsetCode >= offsetExtraBitsMinCode { + offsetComb := offsetCombined[offsetCode] //w.writeBits(extraOffset, extraOffsetBits) - bits |= uint64(offset-(offsetComb&0xffff)) << (nbits & 63) - nbits += uint16(offsetComb >> 16) + bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) + nbits += uint8(offsetComb) if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -993,8 +1011,6 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { } } - // Fill is rarely better... - const fill = false const numLiterals = endBlockMarker + 1 const numOffsets = 1 @@ -1003,26 +1019,46 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Assume header is around 70 bytes: // https://stackoverflow.com/a/25454430 const guessHeaderSizeBits = 70 * 8 - histogram(input, w.literalFreq[:numLiterals], fill) - w.literalFreq[endBlockMarker] = 1 - w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) - if fill { - // Clear fill... - for i := range w.literalFreq[:numLiterals] { - w.literalFreq[i] = 0 + histogram(input, w.literalFreq[:numLiterals]) + ssize, storable := w.storedSize(input) + if storable && len(input) > 1024 { + // Quick check for incompressible content. + abs := float64(0) + avg := float64(len(input)) / 256 + max := float64(len(input) * 2) + for _, v := range w.literalFreq[:256] { + diff := float64(v) - avg + abs += diff * diff + if abs > max { + break + } + } + if abs < max { + if debugDeflate { + fmt.Println("stored", abs, "<", max) + } + // No chance we can compress this... + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return } - histogram(input, w.literalFreq[:numLiterals], false) } + w.literalFreq[endBlockMarker] = 1 + w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) - estBits += w.lastHeader - if w.lastHeader == 0 { - estBits += guessHeaderSizeBits + if estBits < math.MaxInt32 { + estBits += w.lastHeader + if w.lastHeader == 0 { + estBits += guessHeaderSizeBits + } + estBits += estBits >> w.logNewTablePenalty } - estBits += estBits >> w.logNewTablePenalty // Store bytes, if we don't get a reasonable improvement. 
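Aside, illustrative only and not part of the patch: the huffman_bit_writer.go hunks above repack offsetCombined so the extra-bit count sits in the low 8 bits and the base offset in the remaining high bits, letting writeTokens recover both from one table load. A minimal pack/unpack sketch follows; the values for code 6 are read off the tables in the init() hunk above.

package main

import "fmt"

func pack(extraBits uint8, base uint32) uint32 {
	return uint32(extraBits) | base<<8
}

func main() {
	// Offset code 6: base 8, 2 extra bits.
	comb := pack(2, 8)

	offset := uint32(10)       // an offset that maps to code 6 (8..11)
	nExtra := uint8(comb)      // number of extra bits to emit
	extra := offset - comb>>8  // value of those extra bits
	fmt.Println(nExtra, extra) // 2 2
}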
- ssize, storable := w.storedSize(input) if storable && ssize <= estBits { + if debugDeflate { + fmt.Println("stored,", ssize, "<=", estBits) + } w.writeStoredHeader(len(input), eof) w.writeBytes(input) return @@ -1033,7 +1069,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { if estBits < reuseSize { if debugDeflate { - //fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") } // We owe an EOB w.writeCode(w.literalEncoding.codes[endBlockMarker]) @@ -1067,6 +1103,9 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Go 1.16 LOVES having these on stack. At least 1.5x the speed. bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + if debugDeflate { + count -= int(nbytes)*8 + int(nbits) + } // Unroll, write 3 codes/loop. // Fastest number of unrolls. for len(input) > 3 { @@ -1076,35 +1115,31 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) bits >>= (n * 8) & 63 nbits -= n * 8 - nbytes += uint8(n) + nbytes += n } if nbytes >= bufferFlushSize { if w.err != nil { nbytes = 0 return } + if debugDeflate { + count += int(nbytes) * 8 + } _, w.err = w.writer.Write(w.bytes[:nbytes]) nbytes = 0 } a, b := encoding[input[0]], encoding[input[1]] - bits |= uint64(a.code) << (nbits & 63) - bits |= uint64(b.code) << ((nbits + a.len) & 63) + bits |= a.code64() << (nbits & 63) + bits |= b.code64() << ((nbits + a.len()) & 63) c := encoding[input[2]] - nbits += b.len + a.len - bits |= uint64(c.code) << (nbits & 63) - nbits += c.len + nbits += b.len() + a.len() + bits |= c.code64() << (nbits & 63) + nbits += c.len() input = input[3:] } // Remaining... for _, t := range input { - // Bitwriting inlined, ~30% speedup - c := encoding[t] - bits |= uint64(c.code) << (nbits & 63) - nbits += c.len - if debugDeflate { - count += int(c.len) - } if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -1116,17 +1151,34 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { nbytes = 0 return } + if debugDeflate { + count += int(nbytes) * 8 + } _, w.err = w.writer.Write(w.bytes[:nbytes]) nbytes = 0 } } + // Bitwriting inlined, ~30% speedup + c := encoding[t] + bits |= c.code64() << (nbits & 63) + + nbits += c.len() + if debugDeflate { + count += int(c.len()) + } } // Restore... w.bits, w.nbits, w.nbytes = bits, nbits, nbytes if debugDeflate { - fmt.Println("wrote", count/8, "bytes") + nb := count + int(nbytes)*8 + int(nbits) + fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") + } + // Flush if needed to have space. + if w.nbits >= 48 { + w.writeOutBits() } + if eof || sync { w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index 67b2b3872843..be7b58b473f9 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -16,8 +16,18 @@ const ( ) // hcode is a huffman code with a bit code and bit length. 
-type hcode struct { - code, len uint16 +type hcode uint32 + +func (h hcode) len() uint8 { + return uint8(h) +} + +func (h hcode) code64() uint64 { + return uint64(h >> 8) +} + +func (h hcode) zero() bool { + return h == 0 } type huffmanEncoder struct { @@ -56,9 +66,12 @@ type levelInfo struct { } // set sets the code and length of an hcode. -func (h *hcode) set(code uint16, length uint16) { - h.len = length - h.code = code +func (h *hcode) set(code uint16, length uint8) { + *h = hcode(length) | (hcode(code) << 8) +} + +func newhcode(code uint16, length uint8) hcode { + return hcode(length) | (hcode(code) << 8) } func reverseBits(number uint16, bitLength byte) uint16 { @@ -80,7 +93,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { var ch uint16 for ch = 0; ch < literalCount; ch++ { var bits uint16 - var size uint16 + var size uint8 switch { case ch < 144: // size 8, 000110000 .. 10111111 @@ -99,7 +112,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { bits = ch + 192 - 280 size = 8 } - codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} + codes[ch] = newhcode(reverseBits(bits, size), size) } return h } @@ -108,7 +121,7 @@ func generateFixedOffsetEncoding() *huffmanEncoder { h := newHuffmanEncoder(30) codes := h.codes for ch := range codes { - codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5} + codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5) } return h } @@ -120,7 +133,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int { var total int for i, f := range freq { if f != 0 { - total += int(f) * int(h.codes[i].len) + total += int(f) * int(h.codes[i].len()) } } return total @@ -129,9 +142,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int { func (h *huffmanEncoder) bitLengthRaw(b []byte) int { var total int for _, f := range b { - if f != 0 { - total += int(h.codes[f].len) - } + total += int(h.codes[f].len()) } return total } @@ -142,10 +153,10 @@ func (h *huffmanEncoder) canReuseBits(freq []uint16) int { for i, f := range freq { if f != 0 { code := h.codes[i] - if code.len == 0 { + if code.zero() { return math.MaxInt32 } - total += int(f) * int(code.len) + total += int(f) * int(code.len()) } } return total @@ -157,13 +168,18 @@ func (h *huffmanEncoder) canReuseBits(freq []uint16) int { // The cases of 0, 1, and 2 literals are handled by special case code. // // list An array of the literals with non-zero frequencies -// and their associated frequencies. The array is in order of increasing -// frequency, and has as its last element a special element with frequency -// MaxInt32 +// +// and their associated frequencies. The array is in order of increasing +// frequency, and has as its last element a special element with frequency +// MaxInt32 +// // maxBits The maximum number of bits that should be used to encode any literal. -// Must be less than 16. +// +// Must be less than 16. +// // return An integer array in which array[i] indicates the number of literals -// that should be encoded in i bits. +// +// that should be encoded in i bits. func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { if maxBits >= maxBitsLimit { panic("flate: maxBits too large") @@ -189,14 +205,19 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // of the level j ancestor. var leafCounts [maxBitsLimit][maxBitsLimit]int32 + // Descending to only have 1 bounds check. 
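Aside, illustrative only and not part of the patch: hcode is now a single uint32 with the bit length packed into the low 8 bits and the code itself into the upper bits, so len() and code64() are one load plus a shift. A minimal round-trip sketch, mirroring the accessors above:

package main

import "fmt"

type hcode uint32

func newhcode(code uint16, length uint8) hcode { return hcode(length) | hcode(code)<<8 }

func (h hcode) len() uint8     { return uint8(h) }
func (h hcode) code64() uint64 { return uint64(h >> 8) }

func main() {
	h := newhcode(0x1ab, 9)          // 9-bit code 0x1ab
	fmt.Println(h.len(), h.code64()) // 9 427
}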
+ l2f := int32(list[2].freq) + l1f := int32(list[1].freq) + l0f := int32(list[0].freq) + int32(list[1].freq) + for level := int32(1); level <= maxBits; level++ { // For every level, the first two items are the first two characters. // We initialize the levels as if we had already figured this out. levels[level] = levelInfo{ level: level, - lastFreq: int32(list[1].freq), - nextCharFreq: int32(list[2].freq), - nextPairFreq: int32(list[0].freq) + int32(list[1].freq), + lastFreq: l1f, + nextCharFreq: l2f, + nextPairFreq: l0f, } leafCounts[level][level] = 2 if level == 1 { @@ -207,8 +228,8 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // We need a total of 2*n - 2 items at top level and have already generated 2. levels[maxBits].needed = 2*n - 4 - level := maxBits - for { + level := uint32(maxBits) + for level < 16 { l := &levels[level] if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { // We've run out of both leafs and pairs. @@ -240,7 +261,13 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // more values in the level below l.lastFreq = l.nextPairFreq // Take leaf counts from the lower level, except counts[level] remains the same. - copy(leafCounts[level][:level], leafCounts[level-1][:level]) + if true { + save := leafCounts[level][level] + leafCounts[level] = leafCounts[level-1] + leafCounts[level][level] = save + } else { + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + } levels[l.level-1].needed = 2 } @@ -298,7 +325,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN sortByLiteral(chunk) for _, node := range chunk { - h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)} + h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n)) code++ } list = list[0 : len(list)-int(bits)] @@ -311,6 +338,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN // maxBits The maximum number of bits to use for any literal. func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list := h.freqcache[:len(freq)+1] + codes := h.codes[:len(freq)] // Number of non-zero literals count := 0 // Set list to be the set of all non-zero literals and their frequencies @@ -319,11 +347,10 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list[count] = literalNode{uint16(i), f} count++ } else { - list[count] = literalNode{} - h.codes[i].len = 0 + codes[i] = 0 } } - list[len(freq)] = literalNode{} + list[count] = literalNode{} list = list[:count] if count <= 2 { @@ -354,21 +381,37 @@ func atLeastOne(v float32) float32 { return v } -// Unassigned values are assigned '1' in the histogram. -func fillHist(b []uint16) { - for i, v := range b { - if v == 0 { - b[i] = 1 +func histogram(b []byte, h []uint16) { + if true && len(b) >= 8<<10 { + // Split for bigger inputs + histogramSplit(b, h) + } else { + h = h[:256] + for _, t := range b { + h[t]++ } } } -func histogram(b []byte, h []uint16, fill bool) { +func histogramSplit(b []byte, h []uint16) { + // Tested, and slightly faster than 2-way. + // Writing to separate arrays and combining is also slightly slower. 
h = h[:256] - for _, t := range b { - h[t]++ + for len(b)&3 != 0 { + h[b[0]]++ + b = b[1:] } - if fill { - fillHist(h) + n := len(b) / 4 + x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:] + y, z, w = y[:len(x)], z[:len(x)], w[:len(x)] + for i, t := range x { + v0 := &h[t] + v1 := &h[y[i]] + v3 := &h[w[i]] + v2 := &h[z[i]] + *v0++ + *v1++ + *v2++ + *v3++ } } diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index d5f62f6a2ca4..414c0bea9fa9 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -36,6 +36,13 @@ type lengthExtra struct { var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + // Initialize the fixedHuffmanDecoder only once upon first use. var fixedOnce sync.Once var fixedHuffmanDecoder huffmanDecoder @@ -559,221 +566,6 @@ func (f *decompressor) readHuffman() error { return nil } -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBlockGeneric() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
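Aside, illustrative only and not part of the patch: the histogramSplit added in the huffman_code.go hunk above counts the byte histogram in four interleaved passes over equal quarters of the input, giving the CPU four independent dependency chains. A self-contained sketch, with a check that it matches the plain single-pass histogram:

package main

import "fmt"

func histogramSimple(b []byte, h []uint16) {
	for _, t := range b {
		h[t]++
	}
}

func histogramSplit(b []byte, h []uint16) {
	h = h[:256]
	for len(b)&3 != 0 { // peel until the length is a multiple of 4
		h[b[0]]++
		b = b[1:]
	}
	n := len(b) / 4
	x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
	y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
	for i, t := range x {
		h[t]++
		h[y[i]]++
		h[z[i]]++
		h[w[i]]++
	}
}

func main() {
	data := []byte("the quick brown fox jumps over the lazy dog")
	var a, b [256]uint16
	histogramSimple(data, a[:])
	histogramSplit(data, b[:])
	fmt.Println(a == b) // true
}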
- nb, b := f.nb, f.b - for { - for nb < n { - c, err := f.r.ReadByte() - if err != nil { - f.b = b - f.nb = nb - f.err = noEOF(err) - return - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var n uint // number of bits extra - var length int - var err error - switch { - case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlockGeneric - f.stepState = stateInit - return - } - goto readLiteral - case v == 256: - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - n = 0 - case v < 269: - length = v*2 - (265*2 - 11) - n = 1 - case v < 273: - length = v*4 - (269*4 - 19) - n = 2 - case v < 277: - length = v*8 - (273*8 - 35) - n = 3 - case v < 281: - length = v*16 - (277*16 - 67) - n = 4 - case v < 285: - length = v*32 - (281*32 - 131) - n = 5 - case v < maxNumLit: - length = 258 - n = 0 - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - return - } - if n > 0 { - for f.nb < n { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n - } - - var dist uint32 - if f.hd == nil { - for f.nb < 5 { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 - } else { - sym, err := f.huffSym(f.hd) - if err != nil { - if debugDecode { - fmt.Println("huffsym:", err) - } - f.err = err - return - } - dist = uint32(sym) - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - default: - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { - if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. 
- { - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work - f.stepState = stateDict - return - } - goto readLiteral - } -} - // Copy a single uncompressed data block from input to output. func (f *decompressor) dataBlock() { // Uncompressed. diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go index cc6db27925ce..61342b6b88fb 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -21,6 +21,11 @@ func (f *decompressor) huffmanBytesBuffer() { ) fr := f.r.(*bytes.Buffer) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + switch f.stepState { case stateInit: goto readLiteral @@ -39,41 +44,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -83,15 +82,17 @@ readLiteral: var length int switch { case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBytesBuffer f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -101,9 +102,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -111,25 +113,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - 
f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -137,12 +141,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -152,38 +156,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -197,9 +198,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -223,9 +227,10 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) } f.err = CorruptInputError(f.roffset) return @@ -238,20 +243,22 @@ readLiteral: copyHistory: // Perform a backwards copy according to RFC section 3.2.3. 
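Aside, illustrative only and not part of the patch: the decoder hunks above replace the on-the-fly mask (1<<n)-1 with a lookup into the bitMask32 table added in inflate.go, so taking the low n bits of the bit buffer is a single table load. A tiny equivalence check, with the table copied from the hunk above:

package main

import "fmt"

var bitMask32 = [32]uint32{
	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
	0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
	0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
	0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
}

func main() {
	fb := uint32(0b1101_0110)
	n := uint(5)
	fmt.Println(fb&bitMask32[n] == fb&((1<<n)-1)) // true: low 5 bits either way
}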
{ - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + cnt = dict.writeCopy(f.copyDist, f.copyLen) } f.copyLen -= cnt - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -265,6 +272,11 @@ func (f *decompressor) huffmanBytesReader() { ) fr := f.r.(*bytes.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + switch f.stepState { case stateInit: goto readLiteral @@ -283,41 +295,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -327,15 +333,17 @@ readLiteral: var length int switch { case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBytesReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -345,9 +353,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -355,25 +364,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) 
+ f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -381,12 +392,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -396,38 +407,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -441,9 +449,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -467,9 +478,10 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) } f.err = CorruptInputError(f.roffset) return @@ -482,20 +494,22 @@ readLiteral: copyHistory: // Perform a backwards copy according to RFC section 3.2.3. 
{ - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + cnt = dict.writeCopy(f.copyDist, f.copyLen) } f.copyLen -= cnt - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBytesReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -509,6 +523,11 @@ func (f *decompressor) huffmanBufioReader() { ) fr := f.r.(*bufio.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + switch f.stepState { case stateInit: goto readLiteral @@ -527,41 +546,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -571,15 +584,17 @@ readLiteral: var length int switch { case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBufioReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -589,9 +604,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -599,25 +615,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) 
+ f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -625,12 +643,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -640,38 +658,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -685,9 +700,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -711,9 +729,10 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) } f.err = CorruptInputError(f.roffset) return @@ -726,20 +745,22 @@ readLiteral: copyHistory: // Perform a backwards copy according to RFC section 3.2.3. 
{ - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + cnt = dict.writeCopy(f.copyDist, f.copyLen) } f.copyLen -= cnt - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBufioReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -753,6 +774,11 @@ func (f *decompressor) huffmanStringsReader() { ) fr := f.r.(*strings.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + switch f.stepState { case stateInit: goto readLiteral @@ -771,41 +797,286 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanStringsReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
+ n := uint(f.hd.maxRead) // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanGenericReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(Reader) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. 
+ { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -815,15 +1086,17 @@ readLiteral: var length int switch { case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanStringsReader + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanGenericReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -833,9 +1106,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -843,25 +1117,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -869,12 +1145,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -884,38 +1160,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. 
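// Illustrative sketch, not the vendored implementation: the comment above
// describes the pattern used throughout these rewritten decoders -- copy the
// hot struct fields (f.b, f.nb) into locals once, work only on the locals,
// and write them back on every exit path. Minimal self-contained version of
// the same idea; all names below are hypothetical:
package main

import "fmt"

type bitSource struct {
	b  uint32 // bit buffer
	nb uint   // valid bits in b
}

// drain consumes the buffer three bits at a time. The locals b and nb are the
// register-friendly copies; they are stored back exactly once, before return.
func (s *bitSource) drain() (out uint32) {
	b, nb := s.b, s.nb // cache fields in locals (likely registers)
	for nb >= 3 {
		out = out<<3 | (b & 7)
		b >>= 3
		nb -= 3
	}
	s.b, s.nb = b, nb // reassign back to the struct on return
	return out
}

func main() {
	s := &bitSource{b: 0b101_110_011, nb: 9}
	fmt.Println(s.drain(), s.nb) // 245 0
}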
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -929,9 +1202,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -955,9 +1231,10 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) } f.err = CorruptInputError(f.roffset) return @@ -970,20 +1247,22 @@ readLiteral: copyHistory: // Perform a backwards copy according to RFC section 3.2.3. { - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + cnt = dict.writeCopy(f.copyDist, f.copyLen) } f.copyLen -= cnt - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanStringsReader // We need to continue this work + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanGenericReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } func (f *decompressor) huffmanBlockDecoder() func() { @@ -996,7 +1275,9 @@ func (f *decompressor) huffmanBlockDecoder() func() { return f.huffmanBufioReader case *strings.Reader: return f.huffmanStringsReader + case Reader: + return f.huffmanGenericReader default: - return f.huffmanBlockGeneric + return f.huffmanGenericReader } } diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go index 1e5eea3968aa..703b9a89aa39 100644 --- a/vendor/github.com/klauspost/compress/flate/level1.go +++ b/vendor/github.com/klauspost/compress/flate/level1.go @@ -1,6 +1,10 @@ package flate -import "fmt" +import ( + "encoding/binary" + "fmt" + "math/bits" +) // fastGen maintains the table for matches, // and the previous byte block for level 2. 
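// Illustrative sketch, not the vendored implementation: the hunks that follow
// replace the 32-bit hash(cv) / hash4u(cv, bits) calls with
// hashLen(cv, tableBits, hashBytes), i.e. a hash over the low hashBytes bytes
// of a single 64-bit load. A plausible shape of such a hash for hashBytes = 5
// is shown below; the multiplier and shifts are assumptions, not necessarily
// the library's exact constants:
package main

import "fmt"

// hashLen5 mixes the low 5 bytes of v and keeps the top tableBits bits,
// giving an index into a table of size 1<<tableBits.
func hashLen5(v uint64, tableBits uint8) uint32 {
	const prime5 = 889523592379 // large odd multiplier (assumed value)
	return uint32(((v << (64 - 40)) * prime5) >> (64 - tableBits))
}

func main() {
	fmt.Println(hashLen5(0x0102030405060708, 15)) // some index in [0, 1<<15)
}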
@@ -15,6 +19,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { const ( inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + hashBytes = 5 ) if debugDeflate && e.cur < 0 { panic(fmt.Sprint("e.cur < 0: ", e.cur)) @@ -64,7 +69,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { sLimit := int32(len(src) - inputMargin) // nextEmit is where in src the next emitLiteral should start from. - cv := load3232(src, s) + cv := load6432(src, s) for { const skipLog = 5 @@ -73,7 +78,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { nextS := s var candidate tableEntry for { - nextHash := hash(cv) + nextHash := hashLen(cv, tableBits, hashBytes) candidate = e.table[nextHash] nextS = s + doEvery + (s-nextEmit)>>skipLog if nextS > sLimit { @@ -82,16 +87,16 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { now := load6432(src, nextS) e.table[nextHash] = tableEntry{offset: s + e.cur} - nextHash = hash(uint32(now)) + nextHash = hashLen(now, tableBits, hashBytes) offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } // Do one right away... - cv = uint32(now) + cv = now s = nextS nextS++ candidate = e.table[nextHash] @@ -99,11 +104,11 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { e.table[nextHash] = tableEntry{offset: s + e.cur} offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } - cv = uint32(now) + cv = now s = nextS } @@ -116,7 +121,32 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible. t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 + var l = int32(4) + if false { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else { + // inlined: + a := src[s+4:] + b := src[t+4:] + for len(a) >= 8 { + if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { + l += int32(bits.TrailingZeros64(diff) >> 3) + break + } + l += 8 + a = a[8:] + b = b[8:] + } + if len(a) < 8 { + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + break + } + l++ + } + } + } // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { @@ -125,11 +155,43 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } // Save the match found - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + if false { + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + } else { + // Inlined... + xoffset := uint32(s - t - baseMatchOffset) + xlength := l + oc := offsetCode(xoffset) + xoffset |= oc << 16 + for xlength > 0 { + xl := xlength + if xl > 258 { + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } + } + xlength -= xl + xl -= baseMatchLength + dst.extraHist[lengthCodes1[uint8(xl)]]++ + dst.offHist[oc]++ + dst.tokens[dst.n] = token(matchType | uint32(xl)<= s { @@ -137,9 +199,9 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { } if s >= sLimit { // Index first pair after match end. 
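// Illustrative sketch, not the vendored implementation: the block above
// inlines matchlenLong -- compare eight bytes at a time with one XOR and use
// TrailingZeros64 on the first non-zero difference (little-endian load, so
// the lowest byte is the earliest one). Standalone version of the same loop:
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen reports how many leading bytes of a and b are equal.
func matchLen(a, b []byte) int {
	n := 0
	for len(a) >= 8 && len(b) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3 // >>3: bits -> bytes
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	for i := 0; i < len(a) && i < len(b) && a[i] == b[i]; i++ {
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("gopher gopher!"), []byte("gopher gophers"))) // 13
}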
- if int(s+l+4) < len(src) { - cv := load3232(src, s) - e.table[hash(cv)] = tableEntry{offset: s + e.cur} + if int(s+l+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur} } goto emitRemainder } @@ -152,16 +214,16 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { // three load32 calls. x := load6432(src, s-2) o := e.cur + s - 2 - prevHash := hash(uint32(x)) + prevHash := hashLen(x, tableBits, hashBytes) e.table[prevHash] = tableEntry{offset: o} x >>= 16 - currHash := hash(uint32(x)) + currHash := hashLen(x, tableBits, hashBytes) candidate = e.table[currHash] e.table[currHash] = tableEntry{offset: o + 2} offset := s - (candidate.offset - e.cur) if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { - cv = uint32(x >> 8) + cv = x >> 8 s++ break } diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go index 234c4389ab39..876dfbe30543 100644 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -16,6 +16,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { const ( inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + hashBytes = 5 ) if debugDeflate && e.cur < 0 { @@ -66,7 +67,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { sLimit := int32(len(src) - inputMargin) // nextEmit is where in src the next emitLiteral should start from. - cv := load3232(src, s) + cv := load6432(src, s) for { // When should we start skipping if we haven't found matches in a long while. const skipLog = 5 @@ -75,7 +76,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { nextS := s var candidate tableEntry for { - nextHash := hash4u(cv, bTableBits) + nextHash := hashLen(cv, bTableBits, hashBytes) s = nextS nextS = s + doEvery + (s-nextEmit)>>skipLog if nextS > sLimit { @@ -84,16 +85,16 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { candidate = e.table[nextHash] now := load6432(src, nextS) e.table[nextHash] = tableEntry{offset: s + e.cur} - nextHash = hash4u(uint32(now), bTableBits) + nextHash = hashLen(now, bTableBits, hashBytes) offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } // Do one right away... - cv = uint32(now) + cv = now s = nextS nextS++ candidate = e.table[nextHash] @@ -101,10 +102,10 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { e.table[nextHash] = tableEntry{offset: s + e.cur} offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { break } - cv = uint32(now) + cv = now } // A 4-byte match has been found. We'll later see if more than 4 bytes @@ -134,7 +135,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) @@ -146,9 +155,9 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { if s >= sLimit { // Index first pair after match end. 
- if int(s+l+4) < len(src) { - cv := load3232(src, s) - e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur} + if int(s+l+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur} } goto emitRemainder } @@ -156,15 +165,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { // Store every second hash in-between, but offset by 1. for i := s - l + 2; i < s-5; i += 7 { x := load6432(src, i) - nextHash := hash4u(uint32(x), bTableBits) + nextHash := hashLen(x, bTableBits, hashBytes) e.table[nextHash] = tableEntry{offset: e.cur + i} // Skip one x >>= 16 - nextHash = hash4u(uint32(x), bTableBits) + nextHash = hashLen(x, bTableBits, hashBytes) e.table[nextHash] = tableEntry{offset: e.cur + i + 2} // Skip one x >>= 16 - nextHash = hash4u(uint32(x), bTableBits) + nextHash = hashLen(x, bTableBits, hashBytes) e.table[nextHash] = tableEntry{offset: e.cur + i + 4} } @@ -176,17 +185,17 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { // three load32 calls. x := load6432(src, s-2) o := e.cur + s - 2 - prevHash := hash4u(uint32(x), bTableBits) - prevHash2 := hash4u(uint32(x>>8), bTableBits) + prevHash := hashLen(x, bTableBits, hashBytes) + prevHash2 := hashLen(x>>8, bTableBits, hashBytes) e.table[prevHash] = tableEntry{offset: o} e.table[prevHash2] = tableEntry{offset: o + 1} - currHash := hash4u(uint32(x>>16), bTableBits) + currHash := hashLen(x>>16, bTableBits, hashBytes) candidate = e.table[currHash] e.table[currHash] = tableEntry{offset: o + 2} offset := s - (candidate.offset - e.cur) if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { - cv = uint32(x >> 24) + cv = x >> 24 s++ break } diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go index c22b4244a5c0..7aa2b72a129f 100644 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -5,14 +5,17 @@ import "fmt" // fastEncL3 type fastEncL3 struct { fastGen - table [tableSize]tableEntryPrev + table [1 << 16]tableEntryPrev } // Encode uses a similar algorithm to level 2, will check up to two candidates. func (e *fastEncL3) Encode(dst *tokens, src []byte) { const ( - inputMargin = 8 - 1 + inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + tableBits = 16 + tableSize = 1 << tableBits + hashBytes = 5 ) if debugDeflate && e.cur < 0 { @@ -67,20 +70,20 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { sLimit := int32(len(src) - inputMargin) // nextEmit is where in src the next emitLiteral should start from. - cv := load3232(src, s) + cv := load6432(src, s) for { - const skipLog = 6 + const skipLog = 7 nextS := s var candidate tableEntry for { - nextHash := hash(cv) + nextHash := hashLen(cv, tableBits, hashBytes) s = nextS nextS = s + 1 + (s-nextEmit)>>skipLog if nextS > sLimit { goto emitRemainder } candidates := e.table[nextHash] - now := load3232(src, nextS) + now := load6432(src, nextS) // Safe offset distance until s + 4... 
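// Illustrative sketch, not the vendored implementation: level 3 widens its
// table to tableEntryPrev buckets -- each slot keeps the current and the
// previous candidate, so inserting a new offset demotes Cur to Prev instead
// of discarding it, and lookups (as in the surrounding hunks) can try both.
package main

import "fmt"

type tableEntry struct{ offset int32 }

type tableEntryPrev struct {
	Cur  tableEntry
	Prev tableEntry
}

// insert records a new candidate at bucket h, keeping the old one as Prev.
func insert(tbl []tableEntryPrev, h uint32, off int32) {
	tbl[h] = tableEntryPrev{Prev: tbl[h].Cur, Cur: tableEntry{offset: off}}
}

func main() {
	tbl := make([]tableEntryPrev, 1<<4)
	insert(tbl, 3, 100)
	insert(tbl, 3, 200)
	fmt.Println(tbl[3].Cur.offset, tbl[3].Prev.offset) // 200 100
}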
minOffset := e.cur + s - (maxMatchOffset - 4) @@ -94,8 +97,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { continue } - if cv == load3232(src, candidate.offset-e.cur) { - if candidates.Prev.offset < minOffset || cv != load3232(src, candidates.Prev.offset-e.cur) { + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) { break } // Both match and are valid, pick longest. @@ -110,7 +113,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { // We only check if value mismatches. // Offset will always be invalid in other cases. candidate = candidates.Prev - if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { break } } @@ -141,7 +144,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) @@ -154,9 +165,9 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { if s >= sLimit { t += l // Index first pair after match end. - if int(t+4) < len(src) && t > 0 { - cv := load3232(src, t) - nextHash := hash(cv) + if int(t+8) < len(src) && t > 0 { + cv = load6432(src, t) + nextHash := hashLen(cv, tableBits, hashBytes) e.table[nextHash] = tableEntryPrev{ Prev: e.table[nextHash].Cur, Cur: tableEntry{offset: e.cur + t}, @@ -165,32 +176,33 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { goto emitRemainder } - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-3 to s. - x := load6432(src, s-3) - prevHash := hash(uint32(x)) - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 3}, + // Store every 5th hash in-between. + for i := s - l + 2; i < s-5; i += 6 { + nextHash := hashLen(load6432(src, i), tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + i}} } - x >>= 8 - prevHash = hash(uint32(x)) + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. + x := load6432(src, s-2) + prevHash := hashLen(x, tableBits, hashBytes) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 2}, } x >>= 8 - prevHash = hash(uint32(x)) + prevHash = hashLen(x, tableBits, hashBytes) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 1}, } x >>= 8 - currHash := hash(uint32(x)) + currHash := hashLen(x, tableBits, hashBytes) candidates := e.table[currHash] - cv = uint32(x) + cv = x e.table[currHash] = tableEntryPrev{ Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}, @@ -200,18 +212,18 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { candidate = candidates.Cur minOffset := e.cur + s - (maxMatchOffset - 4) - if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) { - // We only check if value mismatches. - // Offset will always be invalid in other cases. + if candidate.offset > minOffset { + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Found a match... 
+ continue + } candidate = candidates.Prev - if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - continue - } + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Match at prev... + continue } } - cv = uint32(x >> 8) + cv = x >> 8 s++ break } diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go index e62f0c02b1e7..23c08b325cf7 100644 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -12,6 +12,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { const ( inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 ) if debugDeflate && e.cur < 0 { panic(fmt.Sprint("e.cur < 0: ", e.cur)) @@ -80,7 +81,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { nextS := s var t int32 for { - nextHashS := hash4x64(cv, tableBits) + nextHashS := hashLen(cv, tableBits, hashShortBytes) nextHashL := hash7(cv, tableBits) s = nextS @@ -135,7 +136,15 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { @@ -160,7 +169,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { // Index first pair after match end. if int(s+8) < len(src) { cv := load6432(src, s) - e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur} e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} } goto emitRemainder @@ -175,7 +184,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { t2 := tableEntry{offset: t.offset + 1} e.bTable[hash7(cv, tableBits)] = t e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hash4u(uint32(cv>>8), tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 i += 3 for ; i < s-1; i += 3 { @@ -184,7 +193,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { t2 := tableEntry{offset: t.offset + 1} e.bTable[hash7(cv, tableBits)] = t e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hash4u(uint32(cv>>8), tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 } } } @@ -193,7 +202,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { // compression we first update the hash table at s-1 and at s. 
x := load6432(src, s-1) o := e.cur + s - 1 - prevHashS := hash4x64(x, tableBits) + prevHashS := hashLen(x, tableBits, hashShortBytes) prevHashL := hash7(x, tableBits) e.table[prevHashS] = tableEntry{offset: o} e.bTable[prevHashL] = tableEntry{offset: o} diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 293a3a320b7c..83ef50ba45f0 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -12,6 +12,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { const ( inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 ) if debugDeflate && e.cur < 0 { panic(fmt.Sprint("e.cur < 0: ", e.cur)) @@ -88,7 +89,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { var l int32 var t int32 for { - nextHashS := hash4x64(cv, tableBits) + nextHashS := hashLen(cv, tableBits, hashShortBytes) nextHashL := hash7(cv, tableBits) s = nextS @@ -105,7 +106,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { eLong := &e.bTable[nextHashL] eLong.Cur, eLong.Prev = entry, eLong.Cur - nextHashS = hash4x64(next, tableBits) + nextHashS = hashLen(next, tableBits, hashShortBytes) nextHashL = hash7(next, tableBits) t = lCandidate.Cur.offset - e.cur @@ -191,14 +192,21 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { // Try to locate a better match by checking the end of best match... if sAt := s + l; l < 30 && sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. 
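// Illustrative sketch, not the vendored implementation: the comment above and
// the `const skipBeginning = 2` that follows shift the end-of-match probe
// forward by two bytes; if those bytes happen to match after all, the later
// "Extend backwards" loop reclaims them. The shifted positions keep the same
// candidate distance, as this isolated view of the arithmetic shows (names
// are hypothetical):
package main

import "fmt"

const skipBeginning = 2

// probe returns the shifted source/target positions to compare, given the
// current match start s, its length l, a candidate's stored offset
// candidateOff, and the window base cur.
func probe(s, l, candidateOff, cur int32) (s2, t2 int32) {
	t2 = candidateOff - cur - l + skipBeginning
	s2 = s + skipBeginning
	return s2, t2
}

func main() {
	s2, t2 := probe(100, 20, 5000, 4950)
	fmt.Println(s2, t2, s2-t2) // 102 32 70 -- same distance as without the shift
}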
+ const skipBeginning = 2 eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset - // Test current - t2 := eLong - e.cur - l - off := s - t2 + t2 := eLong - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 if t2 >= 0 && off < maxMatchOffset && off > 0 { - if l2 := e.matchlenLong(s, t2, src); l2 > l { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { t = t2 l = l2 + s = s2 } } } @@ -210,7 +218,15 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { @@ -242,7 +258,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { if i < s-1 { cv := load6432(src, i) t := tableEntry{offset: i + e.cur} - e.table[hash4x64(cv, tableBits)] = t + e.table[hashLen(cv, tableBits, hashShortBytes)] = t eLong := &e.bTable[hash7(cv, tableBits)] eLong.Cur, eLong.Prev = t, eLong.Cur @@ -255,7 +271,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { // We only have enough bits for a short entry at i+2 cv >>= 8 t = tableEntry{offset: t.offset + 1} - e.table[hash4x64(cv, tableBits)] = t + e.table[hashLen(cv, tableBits, hashShortBytes)] = t // Skip one - otherwise we risk hitting 's' i += 4 @@ -265,7 +281,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { t2 := tableEntry{offset: t.offset + 1} eLong := &e.bTable[hash7(cv, tableBits)] eLong.Cur, eLong.Prev = t, eLong.Cur - e.table[hash4u(uint32(cv>>8), tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 } } } @@ -274,7 +290,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { // compression we first update the hash table at s-1 and at s. x := load6432(src, s-1) o := e.cur + s - 1 - prevHashS := hash4x64(x, tableBits) + prevHashS := hashLen(x, tableBits, hashShortBytes) prevHashL := hash7(x, tableBits) e.table[prevHashS] = tableEntry{offset: o} eLong := &e.bTable[prevHashL] diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go index a709977ec49a..f1e9d98fa505 100644 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -12,6 +12,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { const ( inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 ) if debugDeflate && e.cur < 0 { panic(fmt.Sprint("e.cur < 0: ", e.cur)) @@ -90,7 +91,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { var l int32 var t int32 for { - nextHashS := hash4x64(cv, tableBits) + nextHashS := hashLen(cv, tableBits, hashShortBytes) nextHashL := hash7(cv, tableBits) s = nextS nextS = s + doEvery + (s-nextEmit)>>skipLog @@ -107,7 +108,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { eLong.Cur, eLong.Prev = entry, eLong.Cur // Calculate hashes of 'next' - nextHashS = hash4x64(next, tableBits) + nextHashS = hashLen(next, tableBits, hashShortBytes) nextHashL = hash7(next, tableBits) t = lCandidate.Cur.offset - e.cur @@ -213,24 +214,33 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // Try to locate a better match by checking the end-of-match... if sAt := s + l; sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. 
+ // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)] // Test current - t2 := eLong.Cur.offset - e.cur - l - off := s - t2 + t2 := eLong.Cur.offset - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 if off < maxMatchOffset { if off > 0 && t2 >= 0 { - if l2 := e.matchlenLong(s, t2, src); l2 > l { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { t = t2 l = l2 + s = s2 } } // Test next: - t2 = eLong.Prev.offset - e.cur - l - off := s - t2 + t2 = eLong.Prev.offset - e.cur - l + skipBeginning + off := s2 - t2 if off > 0 && off < maxMatchOffset && t2 >= 0 { - if l2 := e.matchlenLong(s, t2, src); l2 > l { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { t = t2 l = l2 + s = s2 } } } @@ -243,7 +253,15 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if false { if t >= s { @@ -269,7 +287,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // Index after match end. for i := nextS + 1; i < int32(len(src))-8; i += 2 { cv := load6432(src, i) - e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur} eLong := &e.bTable[hash7(cv, tableBits)] eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur } @@ -284,7 +302,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { t2 := tableEntry{offset: t.offset + 1} eLong := &e.bTable[hash7(cv, tableBits)] eLong2 := &e.bTable[hash7(cv>>8, tableBits)] - e.table[hash4x64(cv, tableBits)] = t + e.table[hashLen(cv, tableBits, hashShortBytes)] = t eLong.Cur, eLong.Prev = t, eLong.Cur eLong2.Cur, eLong2.Prev = t2, eLong2.Cur } diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 53e899124639..f3d4139ef362 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -59,9 +59,9 @@ var bitWriterPool = sync.Pool{ }, } -// StatelessDeflate allows to compress directly to a Writer without retaining state. +// StatelessDeflate allows compressing directly to a Writer without retaining state. // When returning everything will be flushed. -// Up to 8KB of an optional dictionary can be given which is presumed to presumed to precede the block. +// Up to 8KB of an optional dictionary can be given which is presumed to precede the block. // Longer dictionaries will be truncated and will still produce valid output. // Sending nil dictionary is perfectly fine. func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { @@ -86,11 +86,19 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { dict = dict[len(dict)-maxStatelessDict:] } + // For subsequent loops, keep shallow dict reference to avoid alloc+copy. 
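// Illustrative usage sketch, not part of the library: the StatelessDeflate
// signature shown above is (out io.Writer, in []byte, eof bool, dict []byte).
// With eof=true one complete, final DEFLATE stream is written and no encoder
// state is retained; the round trip below assumes flate.NewReader accepts
// that stream. Error handling is kept minimal.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/flate"
)

func main() {
	payload := bytes.Repeat([]byte("stateless deflate "), 64)

	var buf bytes.Buffer
	if err := flate.StatelessDeflate(&buf, payload, true, nil); err != nil {
		panic(err)
	}
	compressed := buf.Len()

	r := flate.NewReader(&buf)
	out, err := io.ReadAll(r)
	r.Close()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(payload), "->", compressed, "bytes; round trip ok:", bytes.Equal(out, payload))
}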
+ var inDict []byte + for len(in) > 0 { todo := in - if len(todo) > maxStatelessBlock-len(dict) { + if len(inDict) > 0 { + if len(todo) > maxStatelessBlock-maxStatelessDict { + todo = todo[:maxStatelessBlock-maxStatelessDict] + } + } else if len(todo) > maxStatelessBlock-len(dict) { todo = todo[:maxStatelessBlock-len(dict)] } + inOrg := in in = in[len(todo):] uncompressed := todo if len(dict) > 0 { @@ -102,7 +110,11 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { todo = combined } // Compress - statelessEnc(&dst, todo, int16(len(dict))) + if len(inDict) == 0 { + statelessEnc(&dst, todo, int16(len(dict))) + } else { + statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) + } isEof := eof && len(in) == 0 if dst.n == 0 { @@ -119,7 +131,8 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { } if len(in) > 0 { // Retain a dict if we have more - dict = todo[len(todo)-maxStatelessDict:] + inDict = inOrg[len(uncompressed)-maxStatelessDict:] + dict = nil dst.Reset() } if bw.err != nil { @@ -249,7 +262,15 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } // Save the match found diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go index eb862d7a920d..d818790c1323 100644 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -13,11 +13,10 @@ import ( ) const ( - // From top - // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused - // 8 bits: xlength = length - MIN_MATCH_LENGTH - // 5 bits offsetcode - // 16 bits xoffset = offset - MIN_OFFSET_SIZE, or literal + // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits + // bits 16-22 offsetcode - 5 bits + // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits + // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits lengthShift = 22 offsetMask = 1<maxnumlit offHist [32]uint16 // offset codes litHist [256]uint16 // codes 0->255 - n uint16 // Must be able to contain maxStoreBlockSize + nFilled int + n uint16 // Must be able to contain maxStoreBlockSize tokens [maxStoreBlockSize + 1]token } @@ -142,7 +141,7 @@ func (t *tokens) Reset() { return } t.n = 0 - t.nLits = 0 + t.nFilled = 0 for i := range t.litHist[:] { t.litHist[i] = 0 } @@ -161,12 +160,12 @@ func (t *tokens) Fill() { for i, v := range t.litHist[:] { if v == 0 { t.litHist[i] = 1 - t.nLits++ + t.nFilled++ } } for i, v := range t.extraHist[:literalCount-256] { if v == 0 { - t.nLits++ + t.nFilled++ t.extraHist[i] = 1 } } @@ -196,20 +195,17 @@ func (t *tokens) indexTokens(in []token) { // emitLiteral writes a literal chunk and returns the number of bytes written. 
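// Illustrative sketch, not the vendored implementation: a worked example of
// the token layout documented above -- the low 16 bits hold offset-1 (or a
// literal byte), a 5-bit offset code sits at bit 16, length-3 starts at bit 22
// (lengthShift), and the top two bits carry the token type. Only the shifts
// and masks mirror the comment; the helper below is hypothetical and leaves
// the type bits out.
package main

import "fmt"

const lengthShift = 22

func packMatch(length, offset, offsetCode uint32) uint32 {
	xlength := length - 3                     // minus baseMatchLength
	xoffset := (offset - 1) | offsetCode<<16  // minus baseMatchOffset, plus the offset code
	return xlength<<lengthShift | xoffset
}

func main() {
	// Distance 300 falls in DEFLATE distance code 16.
	tok := packMatch(10, 300, 16)
	length := (tok >> lengthShift) + 3
	offset := (tok & 0xFFFF) + 1
	oCode := (tok >> 16) & 31
	fmt.Println(length, offset, oCode) // 10 300 16
}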
func emitLiteral(dst *tokens, lit []byte) { - ol := int(dst.n) - for i, v := range lit { - dst.tokens[(i+ol)&maxStoreBlockSize] = token(v) + for _, v := range lit { + dst.tokens[dst.n] = token(v) dst.litHist[v]++ + dst.n++ } - dst.n += uint16(len(lit)) - dst.nLits += len(lit) } func (t *tokens) AddLiteral(lit byte) { t.tokens[t.n] = token(lit) t.litHist[lit]++ t.n++ - t.nLits++ } // from https://stackoverflow.com/a/28730362 @@ -230,8 +226,9 @@ func (t *tokens) EstimatedBits() int { shannon := float32(0) bits := int(0) nMatches := 0 - if t.nLits > 0 { - invTotal := 1.0 / float32(t.nLits) + total := int(t.n) + t.nFilled + if total > 0 { + invTotal := 1.0 / float32(total) for _, v := range t.litHist[:] { if v > 0 { n := float32(v) @@ -275,10 +272,9 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { } oCode := offsetCode(xoffset) xoffset |= oCode << 16 - t.nLits++ t.extraHist[lengthCodes1[uint8(xlength)]]++ - t.offHist[oCode]++ + t.offHist[oCode&31]++ t.tokens[t.n] = token(matchType | xlength< 258 { // We need to have at least baseMatchLength left over for next loop. - xl = 258 - baseMatchLength + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } } xlength -= xl xl -= baseMatchLength - t.nLits++ t.extraHist[lengthCodes1[uint8(xl)]]++ - t.offHist[oc]++ + t.offHist[oc&31]++ t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) } -// The code is never more than 8 bits, but is returned as uint32 for convenience. -func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) } +// Convert length to code. +func lengthCode(len uint8) uint8 { return lengthCodes[len] } // Returns the offset code corresponding to a specific offset func offsetCode(off uint32) uint32 { diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 6f341914c67f..dac97e58a2db 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error { c1.encodeZero(tt[src[ip-2]]) ip -= 2 } + src = src[:ip] // Main compression loop. switch { case !s.zeroBits && s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush. // We do not need to check if any output is 0 bits. - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case !s.zeroBits: // We do not need to check if any output is 0 bits. 
- for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) s.bw.flush32() c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } default: - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) s.bw.flush32() c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } } @@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) { for _, v := range in { s.count[v]++ } - m := uint32(0) + m, symlen := uint32(0), s.symbolLen for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + symlen = uint16(i) + 1 } + s.symbolLen = symlen return int(m) } diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go index 4d7018913e29..66fe5ddf72ce 100644 --- a/vendor/github.com/klauspost/compress/gzip/gunzip.go +++ b/vendor/github.com/klauspost/compress/gzip/gunzip.go @@ -252,42 +252,40 @@ func (z *Reader) Read(p []byte) (n int, err error) { return 0, z.err } - n, z.err = z.decompressor.Read(p) - z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) - z.size += uint32(n) - if z.err != io.EOF { - // In the normal case we return here. - return n, z.err - } + for n == 0 { + n, z.err = z.decompressor.Read(p) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) + z.size += uint32(n) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } - // Finished file; check checksum and size. - if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil { - z.err = noEOF(err) - return n, z.err - } - digest := le.Uint32(z.buf[:4]) - size := le.Uint32(z.buf[4:8]) - if digest != z.digest || size != z.size { - z.err = ErrChecksum - return n, z.err - } - z.digest, z.size = 0, 0 + // Finished file; check checksum and size. + if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil { + z.err = noEOF(err) + return n, z.err + } + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return n, z.err + } + z.digest, z.size = 0, 0 - // File is ok; check if there is another. - if !z.multistream { - return n, io.EOF - } - z.err = nil // Remove io.EOF + // File is ok; check if there is another. + if !z.multistream { + return n, io.EOF + } + z.err = nil // Remove io.EOF - if _, z.err = z.readHeader(); z.err != nil { - return n, z.err + if _, z.err = z.readHeader(); z.err != nil { + return n, z.err + } } - // Read from next file, if necessary. - if n > 0 { - return n, nil - } - return z.Read(p) + return n, nil } // Support the io.WriteTo interface for io.Copy and friends. 
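// Illustrative usage sketch, not part of the library: the rewritten Read above
// replaces the tail recursion with a loop, so it keeps reading until it has
// produced bytes or hit a real error, continuing into the next gzip member
// when multistream mode (the default) is on. From the caller's side, using
// this package as a drop-in for compress/gzip:
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/gzip"
)

func main() {
	// Two gzip members written back to back.
	var buf bytes.Buffer
	for _, part := range []string{"hello ", "world"} {
		w := gzip.NewWriter(&buf)
		w.Write([]byte(part))
		w.Close()
	}

	r, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer r.Close()
	all, _ := io.ReadAll(r) // multistream default: both members are read
	fmt.Println(string(all)) // "hello world"
}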
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index a4979e8868a5..e36d9742f943 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -8,115 +8,10 @@ package huff0 import ( "encoding/binary" "errors" + "fmt" "io" ) -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBit32(uint32(v))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) peekBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -func (b *bitReader) advance(n uint8) { - b.bitsRead += n -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - // bitReader reads a bitstream in reverse. // The last set bit indicates the start of the stream and is used // for aligning the input. @@ -172,7 +67,6 @@ func (b *bitReaderBytes) fillFast() { // 2 bounds checks. 
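// Illustrative sketch, not the vendored implementation: the bit readers in
// this file share the convention described above -- the stream is read in
// reverse, and the last set bit of the final byte marks the true end, with
// zero padding above it to reach a byte boundary. Given that final byte, the
// number of non-payload bits to skip is the padding plus the marker itself:
package main

import (
	"fmt"
	"math/bits"
)

// padBits returns how many bits of the last byte are not payload,
// or -1 if the mandatory marker bit is missing (corrupt stream).
func padBits(last byte) int {
	if last == 0 {
		return -1
	}
	return 9 - bits.Len8(last) // (zero padding above the marker) + 1 marker bit
}

func main() {
	fmt.Println(padBits(0x80), padBits(0x01), padBits(0x00)) // 1 8 -1
}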
v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -193,8 +87,7 @@ func (b *bitReaderBytes) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -213,10 +106,17 @@ func (b *bitReaderBytes) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderBytes) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } @@ -277,7 +177,6 @@ func (b *bitReaderShifted) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 @@ -298,8 +197,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 @@ -313,15 +211,17 @@ func (b *bitReaderShifted) fill() { } } -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderShifted) finished() bool { - return b.off == 0 && b.bitsRead >= 64 +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) } // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderShifted) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 6bce4e87d4ff..ec71f7a349a1 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -5,8 +5,6 @@ package huff0 -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF} /* up to 16 bits */ -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { b.nBits += encA.nBits + encB.nBits } -// addBits16ZeroNC will add up to 16 bits. 
-// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. -func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - return - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - b.bitContainer >>= 1 << 3 - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - b.bitContainer >>= 2 << 3 - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - b.bitContainer >>= 3 << 3 - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - b.bitContainer >>= 4 << 3 - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - b.bitContainer >>= 5 << 3 - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - b.bitContainer >>= 6 << 3 - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - b.bitContainer >>= 7 << 3 - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - b.bitContainer = 0 - b.nBits = 0 - return - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { @@ -201,10 +93,3 @@ func (b *bitWriter) close() error { b.flushAlign() return nil } - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go index 50bcdf6ea99c..4dcab8d23277 100644 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) { b.off = 0 } -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - // Int32 returns a little endian int32 starting at current offset. func (b byteReader) Int32() int32 { v3 := int32(b.b[b.off+3]) @@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 { return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 } -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - // remain will return the number of bytes remaining. 
func (b byteReader) remain() int { return len(b.b) - b.off diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 8323dc053890..cdc94856f2cb 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -2,6 +2,7 @@ package huff0 import ( "fmt" + "math" "runtime" "sync" ) @@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { if err != nil { return nil, err } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { return nil, errs[i] } o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -356,29 +365,29 @@ func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { m := uint32(0) if len(s.prevTable) > 0 { for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else { - if s.prevTable[i].nBits == 0 { - reuse = false - } - } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false } } return int(m), reuse } for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + s.symbolLen = uint16(i) + 1 } return int(m), false } @@ -395,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool { return true } +//lint:ignore U1000 used for debugging func (s *Scratch) validateTable(c cTable) bool { if len(c) < int(s.symbolLen) { return false @@ -474,34 +484,35 @@ func (s *Scratch) buildCTable() error { // Different from reference implementation. 
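// Illustrative sketch, not the vendored implementation: context for the
// math.MaxUint16 checks added to compress4X/compress4Xp above. The four-stream
// layout stores the sizes of the first three sub-streams as little-endian
// uint16 values in a 6-byte jump table ahead of the data (the fourth size is
// implied), so a sub-stream larger than 64 KiB cannot be represented and the
// block is reported as incompressible instead. Hypothetical reader side:
package main

import (
	"encoding/binary"
	"fmt"
)

// splitStreams slices a 4X block into its four sub-streams via the jump table.
func splitStreams(block []byte) (s [4][]byte, ok bool) {
	if len(block) < 6 {
		return s, false
	}
	l1 := int(binary.LittleEndian.Uint16(block[0:2]))
	l2 := int(binary.LittleEndian.Uint16(block[2:4]))
	l3 := int(binary.LittleEndian.Uint16(block[4:6]))
	rest := block[6:]
	if l1+l2+l3 > len(rest) {
		return s, false
	}
	s[0], s[1], s[2], s[3] = rest[:l1], rest[l1:l1+l2], rest[l1+l2:l1+l2+l3], rest[l1+l2+l3:]
	return s, true
}

func main() {
	block := []byte{2, 0, 1, 0, 3, 0, 'a', 'a', 'b', 'c', 'c', 'c', 'd'}
	s, ok := splitStreams(block)
	fmt.Println(ok, string(s[0]), string(s[1]), string(s[2]), string(s[3])) // true aa b ccc d
}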
huffNode0 := s.nodes[0 : huffNodesLen+1] - for huffNode[nonNullRank].count == 0 { + for huffNode[nonNullRank].count() == 0 { nonNullRank-- } lowS := int16(nonNullRank) nodeRoot := nodeNb + lowS - 1 lowN := nodeNb - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count - huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) nodeNb++ lowS -= 2 for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].count = 1 << 30 + huffNode[n].setCount(1 << 30) } // fake entry, strong barrier - huffNode0[0].count = 1 << 31 + huffNode0[0].setCount(1 << 31) // create parents for nodeNb <= nodeRoot { var n1, n2 int16 - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n1 = lowS lowS-- } else { n1 = lowN lowN++ } - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n2 = lowS lowS-- } else { @@ -509,18 +520,19 @@ func (s *Scratch) buildCTable() error { lowN++ } - huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count - huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) nodeNb++ } // distribute weights (unlimited tree height) - huffNode[nodeRoot].nbBits = 0 + huffNode[nodeRoot].setNbBits(0) for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } s.actualTableLog = s.setMaxHeight(int(nonNullRank)) maxNbBits := s.actualTableLog @@ -532,7 +544,7 @@ func (s *Scratch) buildCTable() error { var nbPerRank [tableLogMax + 1]uint16 var valPerRank [16]uint16 for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits]++ + nbPerRank[v.nbBits()]++ } // determine stating value per rank { @@ -547,7 +559,7 @@ func (s *Scratch) buildCTable() error { // push nbBits per symbol, symbol order for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol].nBits = v.nbBits + s.cTable[v.symbol()].nBits = v.nbBits() } // assign value within rank, symbol order @@ -593,12 +605,12 @@ func (s *Scratch) huffSort() { pos := rank[r].current rank[r].current++ prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { + for pos > rank[r].base && c > prev.count() { nodes[pos&huffNodesMask] = prev pos-- prev = nodes[(pos-1)&huffNodesMask] } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) } } @@ -607,7 +619,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { huffNode := s.nodes[1 : huffNodesLen+1] //huffNode = huffNode[: huffNodesLen] - largestBits := huffNode[lastNonNull].nbBits + largestBits := huffNode[lastNonNull].nbBits() // early exit : no elt > maxNbBits if largestBits <= maxNbBits { @@ -617,14 +629,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { baseCost := int(1) << (largestBits - maxNbBits) n := uint32(lastNonNull) - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - 
huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) n-- } // n stops at huffNode[n].nbBits <= maxNbBits - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } // n end at index of smallest symbol using < maxNbBits @@ -645,10 +657,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { { currentNbBits := maxNbBits for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { + if huffNode[pos].nbBits() >= currentNbBits { continue } - currentNbBits = huffNode[pos].nbBits // < maxNbBits + currentNbBits = huffNode[pos].nbBits() // < maxNbBits rankLast[maxNbBits-currentNbBits] = uint32(pos) } } @@ -665,8 +677,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { if lowPos == noSymbol { break } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() if highTotal <= lowTotal { break } @@ -682,13 +694,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { // this rank is no longer empty rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] } - huffNode[rankLast[nBitsToDecrease]].nbBits++ + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) if rankLast[nBitsToDecrease] == 0 { /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol } else { rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ } } @@ -696,15 +709,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { for totalCost < 0 { /* Sometimes, cost correction overshoot */ if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } - huffNode[n+1].nbBits-- + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) rankLast[1] = n + 1 totalCost++ continue } - huffNode[rankLast[1]+1].nbBits-- + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) rankLast[1]++ totalCost++ } @@ -712,9 +725,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { return maxNbBits } -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. 
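In the packed representation the four logical fields occupy fixed bit ranges of a single uint64, as implied by the accessor methods that follow: count in bits 0-31, parent in bits 32-47, symbol in bits 48-55, nbBits in bits 56-63, so a whole node is read or written with one load or store. A quick illustrative round-trip over that layout, independent of the package:

package main

import "fmt"

// packed mirrors the layout implied by the nodeElt accessors:
// count uint32 in bits 0-31, parent uint16 in bits 32-47,
// symbol byte in bits 48-55, nbBits uint8 in bits 56-63.
type packed uint64

func pack(count uint32, parent uint16, symbol byte, nbBits uint8) packed {
	return packed(count) |
		packed(parent)<<32 |
		packed(symbol)<<48 |
		packed(nbBits)<<56
}

func main() {
	p := pack(123456, 77, 'a', 11)
	// Unpack each field by shifting its range back down and truncating.
	fmt.Println(uint32(p), uint16(p>>32), byte(p>>48), uint8(p>>56))
	// 123456 77 97 11 -- one 64-bit word holds the whole node.
}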
+type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 } + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 2a06bd1a7e52..42a237eac4ab 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -4,13 +4,13 @@ import ( "errors" "fmt" "io" + "sync" "github.com/klauspost/compress/fse" ) type dTable struct { single []dEntrySingle - double []dEntryDouble } // single-symbols decoding @@ -18,13 +18,6 @@ type dEntrySingle struct { entry uint16 } -// double-symbols decoding -type dEntryDouble struct { - seq [4]byte - nBits uint8 - len uint8 -} - // Uses special code for all tables that are < 8 bits. const use8BitTables = true @@ -34,7 +27,7 @@ const use8BitTables = true // If no Scratch is provided a new one is allocated. // The returned Scratch can be used for encoding or decoding input using this table. func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(in) + s, err = s.prepare(nil) if err != nil { return s, nil, err } @@ -216,6 +209,7 @@ func (s *Scratch) Decoder() *Decoder { return &Decoder{ dt: s.dt, actualTableLog: s.actualTableLog, + bufs: &s.decPool, } } @@ -223,103 +217,15 @@ func (s *Scratch) Decoder() *Decoder { type Decoder struct { dt dTable actualTableLog uint8 + bufs *sync.Pool } -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - - if len(dst)+int(off) > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf } - return dst, br.close() + return &[4][256]byte{} } // decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. @@ -341,7 +247,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 switch d.actualTableLog { @@ -369,6 +276,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) @@ -398,6 +306,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) 
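The repeated d.bufs.Put(bufs) calls added above all follow one discipline: the [4][256]byte scratch block comes out of a sync.Pool at the start of a decode and must go back on every exit path, including error returns, so concurrent decodes can reuse the allocation instead of growing the heap. A minimal sketch of that pattern with illustrative names, not the package's API:

package main

import (
	"fmt"
	"sync"
)

var scratchPool sync.Pool // holds *[4][256]byte scratch blocks

func getScratch() *[4][256]byte {
	if b, ok := scratchPool.Get().(*[4][256]byte); ok {
		return b
	}
	return &[4][256]byte{} // pool empty: allocate a fresh block
}

func decodeSomething(fail bool) error {
	buf := getScratch()
	defer scratchPool.Put(buf) // one deferred Put covers every return path
	if fail {
		return fmt.Errorf("simulated decode error")
	}
	buf[0][0] = 0xff // use the scratch space
	return nil
}

func main() {
	fmt.Println(decodeSomething(false), decodeSomething(true))
}

The vendored code writes the Put out by hand at each return instead of deferring it, presumably to keep defer overhead out of the hot decode loop.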
@@ -426,6 +335,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -455,6 +365,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -484,6 +395,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -513,6 +425,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -542,6 +455,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -571,6 +485,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -578,10 +493,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } } default: + d.bufs.Put(bufs) return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -601,6 +518,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } if len(dst) >= maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } v := dt[br.peekByteFast()>>shift] @@ -609,6 +527,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -628,7 +547,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 const shift = 56 @@ -655,6 +575,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -663,6 +584,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -679,6 +601,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -688,195 +611,10 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. 
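Both the removed Decompress4X and its replacements split the destination the same way: stream i writes into out[dstEvery*i:], with dstEvery = (dstSize + 3) / 4 rounding up so the first three quarters are equal and the last one absorbs whatever is left. A small sketch of that split, assuming only the arithmetic visible in the surrounding code:

package main

import "fmt"

// streamRegions returns the [start, end) byte range each of the four
// interleaved streams decodes into, for a destination of dstSize bytes.
func streamRegions(dstSize int) [4][2]int {
	dstEvery := (dstSize + 3) / 4 // round up; the last region may be shorter
	var r [4][2]int
	for i := 0; i < 4; i++ {
		start := dstEvery * i
		end := start + dstEvery
		if end > dstSize {
			end = dstSize
		}
		r[i] = [2]int{start, end}
	}
	return r
}

func main() {
	fmt.Println(streamRegions(10)) // [[0 3] [3 6] [6 9] [9 10]]
}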
- if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := br.off*8 + uint(64-br.bitsRead) - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - // Decompress4X will decompress a 4X encoded stream. // The length of the supplied input must match the end of a block exactly. // The *capacity* of the dst slice must match the destination size of @@ -916,12 +654,12 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. 
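In the loop that follows, the 8-bit path looks up the next symbol with single[uint8(br.value>>shift)] where shift is 56: with a table log of at most 8, the whole code always sits in the top byte of the 64-bit bit buffer, and each dEntrySingle packs the number of bits to consume in the low byte of entry and the decoded symbol in the high byte. A toy decode step over that layout, with a hand-built two-symbol table rather than a real huff0 one:

package main

import "fmt"

// mkEntry packs a decode step the way dEntrySingle does:
// low byte = bits consumed, high byte = decoded symbol.
func mkEntry(nBits uint8, sym byte) uint16 { return uint16(nBits) | uint16(sym)<<8 }

func main() {
	// Toy 1-bit prefix code: leading bit 0 -> 'A', leading bit 1 -> 'B'.
	// With tablelog <= 8 the top byte of the bit buffer indexes the table.
	var table [256]uint16
	for i := range table {
		if i < 128 {
			table[i] = mkEntry(1, 'A')
		} else {
			table[i] = mkEntry(1, 'B')
		}
	}

	value := uint64(0b101) << 61 // bits 1,0,1 left-aligned in a 64-bit buffer
	const shift = 56
	for i := 0; i < 3; i++ {
		v := table[uint8(value>>shift)]
		fmt.Printf("%c", v>>8)  // decoded symbol from the high byte
		value <<= uint8(v) & 63 // consume the low-byte bit count
	}
	fmt.Println() // BAB
}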
- const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -942,8 +680,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -951,8 +689,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -960,8 +698,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -969,8 +707,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - buf[off+bufoff*stream+3] = uint8(v >> 8) + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { @@ -987,8 +725,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -996,8 +734,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -1005,8 +743,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -1014,49 +752,61 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - buf[off+bufoff*stream+3] = uint8(v >> 8) + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out 
= out[bufoff:] - decoded += 256 // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 } } if off > 0 { ioff := int(off) if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1076,7 +826,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } @@ -1084,16 +835,22 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { v := single[uint8(br.value>>shift)].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } @@ -1131,16 +888,15 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { const shift = 56 const tlSize = 1 << 8 - const tlMask = tlSize - 1 single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -1150,106 +906,116 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { // Interleave 2 decodes. 
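The *(*[bufoff]byte)(out) = buf[i] assignments above replace copy calls with a slice-to-array-pointer conversion, available since Go 1.17: the conversion panics if the slice is shorter than the array, which the preceding len(out)-bufoff check rules out, and the assignment then moves a fixed 256 bytes with no per-call length bookkeeping. A tiny self-contained illustration of the conversion:

package main

import "fmt"

func main() {
	src := [4]byte{1, 2, 3, 4}
	dst := make([]byte, 10)

	// Slice-to-array-pointer conversion (Go 1.17+): panics if len(dst) < 4,
	// otherwise the assignment copies exactly 4 bytes in one fixed-size move.
	*(*[4]byte)(dst) = src

	fmt.Println(dst) // [1 2 3 4 0 0 0 0 0 0]
}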
const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[uint8(br[stream].value>>shift)].entry - v2 := single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+3] = uint8(v >> 8) - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[uint8(br[stream].value>>shift)].entry - v2 := single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 
8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+3] = uint8(v >> 8) - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 } } if off > 0 { @@ -1257,21 +1023,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { if len(out) < dstEvery*3+ioff { return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1291,7 +1063,8 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } @@ -1299,16 +1072,23 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { v := single[br.peekByteFast()].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go new file mode 100644 index 000000000000..ba7e8e6b0276 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -0,0 +1,226 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// This file contains the specialisation of Decoder.Decompress4X +// and Decoder.Decompress1X that use an asm implementation of thir main loops. +package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. 
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 000000000000..c4c7ab2d1fe3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,830 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. + +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ (R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 + + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 48(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 + + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 
:= table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 96(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 + + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 144(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 + + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 + + // exhausted 
+= (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 + + // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 + + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 
+ + // val1 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 + + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + 
+bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 000000000000..908c17de63fc --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. 
+// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. 
+ if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 3ee00ecb470a..e8ad17ad08ef 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "math/bits" + "sync" "github.com/klauspost/compress/fse" ) @@ -116,6 +117,7 @@ type Scratch struct { nodes []nodeElt tmpOut [4][]byte fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. huffWeight [maxSymbolValue + 1]byte } diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 000000000000..3954c51219b2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. +func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 000000000000..e802579c4f96 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 000000000000..4465fbe9e905 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. 
determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 511bba65db8f..298c4f8e97da 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -18,6 +18,7 @@ func load64(b []byte, i int) uint64 { // emitLiteral writes a literal chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= len(lit) && len(lit) <= 65536 func emitLiteral(dst, lit []byte) int { @@ -42,6 +43,7 @@ func emitLiteral(dst, lit []byte) int { // emitCopy writes a copy chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= offset && offset <= 65535 // 4 <= length && length <= 65535 @@ -89,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int { // src[i:i+k-j] and src[j:k] have the same contents. // // It assumes that: +// // 0 <= i && i < j && j <= len(src) func extendMatch(src []byte, i, j int) int { for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { @@ -105,8 +108,9 @@ func hash(u, shift uint32) uint32 { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlock(dst, src []byte) (d int) { // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. // The table element type is uint16, as s < sLimit and sLimit < len(src) diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index c8f0f16fc1ec..65b38abed805 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard licen Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + ## Installation Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. @@ -78,6 +80,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is in the future. So if you want to limit concurrency for future updates, specify the concurrency you would like. +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined compression settings can be specified. 
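Below is a minimal sketch of the synchronous encoding path described above, combining `WithEncoderConcurrency(1)` with `WithEncoderLevel()`. The helper name, the level choice, and the stdin/stdout plumbing are illustrative assumptions; any of the pre-defined levels works the same way.

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

// compress streams src into dst without spawning async encoder goroutines:
// with WithEncoderConcurrency(1), each block is compressed and written as it
// completes, blocking on the underlying writer.
func compress(dst io.Writer, src io.Reader) error {
	enc, err := zstd.NewWriter(dst,
		zstd.WithEncoderConcurrency(1),
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression), // one of the pre-defined levels
	)
	if err != nil {
		return err
	}
	if _, err := io.Copy(enc, src); err != nil {
		enc.Close()
		return err
	}
	// Close flushes the final block and writes the frame footer.
	return enc.Close()
}

func main() {
	if err := compress(os.Stdout, os.Stdin); err != nil {
		os.Exit(1)
	}
}
```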
@@ -104,7 +109,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. `EncodeAll` will encode all input in src and append it to dst. -This function can be called concurrently, but each call will only run on a single goroutine. +This function can be called concurrently. +Each call will only run on a same goroutine as the caller. Encoded blocks can be concatenated and the result will be the combined input stream. Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. @@ -149,10 +155,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip This package: file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73101992 643 313.87 -silesia.tar zskp 2 211947520 67504318 969 208.38 -silesia.tar zskp 3 211947520 64595893 2007 100.68 -silesia.tar zskp 4 211947520 60995370 8825 22.90 +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 cgo zstd: silesia.tar zstd 1 211947520 73605392 543 371.56 @@ -161,94 +167,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66 silesia.tar zstd 9 211947520 60212393 5063 39.92 gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1654 122.21 -silesia.tar gzkp 1 211947520 80136201 1152 175.45 +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 GOB stream of binary data. Highly compressible. https://files.klauspost.com/compress/gob-stream.7z file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 235022249 3088 590.30 -gob-stream zskp 2 1911399616 205669791 3786 481.34 -gob-stream zskp 3 1911399616 175034659 9636 189.17 -gob-stream zskp 4 1911399616 165609838 50369 36.19 +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 gob-stream zstd 1 1911399616 249810424 2637 691.26 gob-stream zstd 3 1911399616 208192146 3490 522.31 gob-stream zstd 6 1911399616 193632038 6687 272.56 gob-stream zstd 9 1911399616 177620386 16175 112.70 -gob-stream gzstd 1 1911399616 357382641 10251 177.82 -gob-stream gzkp 1 1911399616 359753026 5438 335.20 +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 The test data for the Large Text Compression Benchmark is the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. 
http://mattmahoney.net/dc/textdata.html file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343848582 3609 264.18 -enwik9 zskp 2 1000000000 317276632 5746 165.97 -enwik9 zskp 3 1000000000 292243069 12162 78.41 -enwik9 zskp 4 1000000000 262183768 82837 11.51 +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 enwik9 zstd 1 1000000000 358072021 3110 306.65 enwik9 zstd 3 1000000000 313734672 4784 199.35 enwik9 zstd 6 1000000000 295138875 10290 92.68 enwik9 zstd 9 1000000000 278348700 28549 33.40 -enwik9 gzstd 1 1000000000 382578136 9604 99.30 -enwik9 gzkp 1 1000000000 383825945 6544 145.73 +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 Highly compressible JSON file. https://files.klauspost.com/compress/github-june-2days-2019.json.zst file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 -github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 -github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75 -github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16 +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 -github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 -github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61 +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 VM Image, Linux mint with a few installed applications: https://files.klauspost.com/compress/rawstudio-mint14.7z file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 -rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 -rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08 -rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52 +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 -rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 -rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92 +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 CSV data: https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 -nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 -nyc-taxi-data-10M.csv zskp 
3 3325605752 530289687 25239 125.66 -nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33 +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 -nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 -nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00 +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 ``` ## Decompressor @@ -283,8 +289,13 @@ func Decompress(in io.Reader, out io.Writer) error { } ``` -It is important to use the "Close" function when you no longer need the Reader to stop running goroutines. -See "Allocation-less operation" below. +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, +when running with default settings. +Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream. + +Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput. +However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data +as it is being requested only. For decoding buffers, it could look something like this: @@ -293,7 +304,7 @@ import "github.com/klauspost/compress/zstd" // Create a reader that caches decompressors. // For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil) +var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0)) // Decompress a buffer. We don't supply a destination buffer, // so it will be allocated by the decoder. @@ -303,9 +314,12 @@ func Decompress(src []byte) ([]byte, error) { ``` Both of these cases should provide the functionality needed. -The decoder can be used for *concurrent* decompression of multiple buffers. +The decoder can be used for *concurrent* decompression of multiple buffers. +By default 4 decompressors will be created. + It will only allow a certain number of concurrent operations to run. -To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders. ### Dictionaries @@ -357,62 +371,48 @@ In this case no unneeded allocations should be made. The buffer decoder does everything on the same goroutine and does nothing concurrently. It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. -The stream decoder operates on +The stream decoder will create goroutines that: -* One goroutine reads input and splits the input to several block decoders. -* A number of decoders will decode blocks. -* A goroutine coordinates these blocks and sends history from one to the next. +1) Reads input and splits the input into blocks. +2) Decompression of literals. +3) Decompression of sequences. +4) Reconstruction of output stream. So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. 
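To complement the goroutine stages listed above, here is a minimal sketch of synchronous stream decompression with `WithDecoderConcurrency(1)`, which keeps all decoding on the calling goroutine; the package and helper names are illustrative assumptions.

```go
package zstdexample

import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// decompressSync streams zstd frames from src into dst on a single goroutine,
// avoiding the asynchronous read-ahead stages described above.
func decompressSync(dst io.Writer, src io.Reader) error {
	dec, err := zstd.NewReader(src, zstd.WithDecoderConcurrency(1))
	if err != nil {
		return err
	}
	// Close releases the decoder and stops any goroutines it has started.
	defer dec.Close()
	_, err = dec.WriteTo(dst)
	return err
}
```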
+The concurrency level will, for streams, determine how many blocks ahead the compression will start. + Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. -In practice this means that concurrency is often limited to utilizing about 2 cores effectively. - - +In practice this means that concurrency is often limited to utilizing about 3 cores effectively. + ### Benchmarks -These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). - The first two are streaming decodes and the last are smaller inputs. - + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. + ``` -BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op -BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op - -BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op -BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op - -Concurrent performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op - -BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op 
-BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op ``` -This reflects the performance around May 2020, but this may be out of date. +This reflects the performance around May 2022, but this may be out of date. ## Zstd inside ZIP files diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 753d17df634c..97299d499cf0 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -7,6 +7,7 @@ package zstd import ( "encoding/binary" "errors" + "fmt" "io" "math/bits" ) @@ -62,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 { return v } -func (b *bitReader) get16BitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - // fillFast() will make sure at least 32 bits are available. // There must be at least 4 bytes available. func (b *bitReader) fillFast() { @@ -132,6 +126,9 @@ func (b *bitReader) remain() uint { func (b *bitReader) close() error { // Release reference. b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index b36618285095..78b3c61be3ec 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -5,8 +5,6 @@ package zstd -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { b.nBits += bits } -// flush will flush all pending full bytes. 
-// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. -func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 8a98c4562e01..2445bb4fe5b4 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,9 +5,13 @@ package zstd import ( + "bytes" + "encoding/binary" "errors" "fmt" "io" + "os" + "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -38,14 +42,14 @@ const ( // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) maxCompressedBlockSize = 128 << 10 + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + // Maximum possible block size (all Raw+Uncompressed). maxBlockSize = (1 << 21) - 1 - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header - maxCompressedLiteralSize = 1 << 18 - maxRLELiteralSize = 1 << 20 - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff // We support slightly less than the reference decoder to be able to // use ints on 32 bit archs. @@ -76,20 +80,28 @@ type blockDec struct { // Window size of the block. WindowSize uint64 - history chan *history - input chan struct{} - result chan decodeOutput - sequenceBuf []seq - err error - decWG sync.WaitGroup + err error + + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool // Frame to use for singlethreaded decoding. // Should not be used by the decoder itself since parent may be another frame. localFrame *frameDec + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + // Block is RLE, this is the size. 
RLESize uint32 - tmp [4]byte Type blockType @@ -109,13 +121,8 @@ func (b *blockDec) String() string { func newBlockDec(lowMem bool) *blockDec { b := blockDec{ - lowMem: lowMem, - result: make(chan decodeOutput, 1), - input: make(chan struct{}, 1), - history: make(chan *history, 1), + lowMem: lowMem, } - b.decWG.Add(1) - go b.startDecoder() return &b } @@ -133,11 +140,17 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { b.Type = blockType((bh >> 1) & 3) // find size. cSize := int(bh >> 3) - maxSize := maxBlockSize + maxSize := maxCompressedBlockSizeAlloc switch b.Type { case blockTypeReserved: return ErrReservedBlockType case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } b.RLESize = uint32(cSize) if b.lowMem { maxSize = cSize @@ -148,9 +161,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { println("Data size on stream:", cSize) } b.RLESize = 0 - maxSize = maxCompressedBlockSize + maxSize = maxCompressedBlockSizeAlloc if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + maxSize = int(windowSize) + compressedBlockOverAlloc } if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { if debugDecoder { @@ -158,7 +171,19 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return ErrCompressedSizeTooBig } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = 0 // We do not need a destination for raw blocks. maxSize = -1 @@ -167,16 +192,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } // Read block data. - if cap(b.dataStorage) < cSize { + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize) + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSize) + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { if debugDecoder { @@ -185,6 +208,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return err } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } return nil } @@ -193,85 +219,14 @@ func (b *blockDec) sendErr(err error) { b.Last = true b.Type = blockTypeReserved b.err = err - b.input <- struct{}{} } // Close will release resources. // Closed blockDec cannot be reset. func (b *blockDec) Close() { - close(b.input) - close(b.history) - close(b.result) - b.decWG.Wait() -} - -// decodeAsync will prepare decoding the block when it receives input. -// This will separate output and history. 
-func (b *blockDec) startDecoder() { - defer b.decWG.Done() - for range b.input { - //println("blockDec: Got block input") - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - o := decodeOutput{ - d: b, - b: b.dst[:b.RLESize], - err: nil, - } - v := b.data[0] - for i := range o.b { - o.b[i] = v - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeRaw: - o := decodeOutput{ - d: b, - b: b.data, - err: nil, - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeCompressed: - b.dst = b.dst[:0] - err := b.decodeCompressed(nil) - o := decodeOutput{ - d: b, - b: b.dst, - err: err, - } - if debugDecoder { - println("Decompressed to", len(b.dst), "bytes, error:", err) - } - b.result <- o - case blockTypeReserved: - // Used for returning errors. - <-b.history - b.result <- decodeOutput{ - d: b, - b: nil, - err: b.err, - } - default: - panic("Invalid block type") - } - if debugDecoder { - println("blockDec: Finished block") - } - } } -// decodeAsync will prepare decoding the block when it receives the history. -// If history is provided, it will not fetch it from the channel. +// decodeBuf func (b *blockDec) decodeBuf(hist *history) error { switch b.Type { case blockTypeRLE: @@ -279,7 +234,7 @@ func (b *blockDec) decodeBuf(hist *history) error { if b.lowMem { b.dst = make([]byte, b.RLESize) } else { - b.dst = make([]byte, maxBlockSize) + b.dst = make([]byte, maxCompressedBlockSize) } } b.dst = b.dst[:b.RLESize] @@ -294,14 +249,23 @@ func (b *blockDec) decodeBuf(hist *history) error { return nil case blockTypeCompressed: saved := b.dst - b.dst = hist.b - hist.b = nil + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } err := b.decodeCompressed(hist) if debugDecoder { println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) } - hist.b = b.dst - b.dst = saved + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } return err case blockTypeReserved: // Used for returning errors. @@ -311,30 +275,18 @@ func (b *blockDec) decodeBuf(hist *history) error { } } -// decodeCompressed will start decompressing a block. -// If no history is supplied the decoder will decodeAsync as much as possible -// before fetching from blockDec.history -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - delayedHistory := hist == nil - - if delayedHistory { - // We must always grab history. - defer func() { - if hist == nil { - <-b.history - } - }() - } +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header if len(in) < 2 { - return ErrBlockTooSmall + return in, ErrBlockTooSmall } + litType := literalsBlockType(in[0] & 3) var litRegenSize int var litCompSize int sizeFormat := (in[0] >> 2) & 3 var fourStreams bool + var literals []byte switch litType { case literalsBlockRaw, literalsBlockRLE: switch sizeFormat { @@ -350,7 +302,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. 
if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) in = in[3:] @@ -361,7 +313,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) litRegenSize = int(n & 1023) @@ -372,7 +324,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 4 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) litRegenSize = int(n & 16383) @@ -382,7 +334,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 5 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) litRegenSize = int(n & 262143) @@ -393,13 +345,15 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) } - var literals []byte - var huff *huff0.Scratch + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + switch litType { case literalsBlockRaw: if len(in) < litRegenSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litRegenSize] in = in[litRegenSize:] @@ -407,19 +361,13 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockRLE: if len(in) < 1 { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, litRegenSize) + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) } else { - if litRegenSize > maxCompressedLiteralSize { - // Exceptional - b.literalBuf = make([]byte, litRegenSize) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - - } + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) } } literals = b.literalBuf[:litRegenSize] @@ -434,7 +382,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockTreeless: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } // Store compressed literals, so we defer decoding until we get history. 
literals = in[:litCompSize] @@ -442,31 +390,65 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { printf("Found %d compressed literals\n", litCompSize) } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. + huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + case literalsBlockCompressed: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litCompSize] in = in[litCompSize:] - huff = huffDecoderPool.Get().(*huff0.Scratch) - var err error // Ensure we have space to store it. if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) } } - if huff == nil { - huff = &huff0.Scratch{} + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } } + var err error huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) - return err + return in, err } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize // Use our out buffer. if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) @@ -475,27 +457,63 @@ func (b *blockDec) decodeCompressed(hist *history) error { } if err != nil { println("decoding compressed literals:", err) - return err + return in, err } // Make sure we don't leak our literals buffer if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] if debugDecoder { printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) } } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } // Decode Sequences // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section if len(in) < 1 { return ErrBlockTooSmall } + var nSeqs int seqHeader := in[0] - nSeqs := 0 switch { - case seqHeader == 0: - in = in[1:] case seqHeader < 128: nSeqs = int(seqHeader) in = in[1:] @@ -512,19 +530,16 @@ func (b *blockDec) decodeCompressed(hist *history) error { nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) in = in[3:] } - // Allocate sequences - if cap(b.sequenceBuf) < nSeqs { - if b.lowMem { - b.sequenceBuf = make([]seq, nSeqs) - } else { - // Allocate max - b.sequenceBuf = make([]seq, nSeqs, maxSequences) + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) } - } else { - // Reuse buffer - b.sequenceBuf = b.sequenceBuf[:nSeqs] + return ErrUnexpectedBlockSize } - var seqs = &sequenceDecs{} + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs if nSeqs > 0 { if len(in) < 1 { return ErrBlockTooSmall @@ -553,6 +568,9 @@ func (b *blockDec) decodeCompressed(hist *history) error { } switch mode { case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } seq.fse = &fsePredef[i] case compModeRLE: if br.remain() < 1 { @@ -560,34 +578,36 @@ func (b *blockDec) decodeCompressed(hist *history) error { } v := br.Uint8() br.advance(1) - dec := fseDecoderPool.Get().(*fseDecoder) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } symb, err := decSymbolValue(v, symbolTableX[i]) if err != nil { printf("RLE Transform table (%v) error: %v", tableIndex(i), err) return err } - dec.setRLE(symb) - seq.fse = dec + seq.fse.setRLE(symb) if debugDecoder { printf("RLE set to %+v, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) - dec := fseDecoderPool.Get().(*fseDecoder) - err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) if err != nil { println("Read table error:", err) return err } - err = dec.transform(symbolTableX[i]) + err = seq.fse.transform(symbolTableX[i]) if err != nil { println("Transform table error:", err) return err } if debugDecoder { - println("Read table ok", "symbolLen:", dec.symbolLen) + println("Read table ok", "symbolLen:", seq.fse.symbolLen) } - seq.fse = dec case compModeRepeat: seq.repeat = true } @@ -597,140 +617,106 @@ func (b *blockDec) decodeCompressed(hist *history) error { } in = br.unread() } - - // Wait for history. - // All time spent after this is critical since it is strictly sequential. - if hist == nil { - hist = <-b.history - if hist.error { - return ErrDecoderClosed - } - } - - // Decode treeless literal block. 
- if litType == literalsBlockTreeless { - // TODO: We could send the history early WITHOUT the stream history. - // This would allow decoding treeless literals before the byte history is available. - // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. - // So not much obvious gain here. - - if hist.huffTree == nil { - return errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - var err error - // Use our out buffer. - huff = hist.huffTree - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return err - } - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - } else { - if hist.huffTree != nil && huff != nil { - if hist.dict == nil || hist.dict.litEnc != hist.huffTree { - huffDecoderPool.Put(hist.huffTree) - } - hist.huffTree = nil - } - } - if huff != nil { - hist.huffTree = huff - } if debugDecoder { - println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") } if nSeqs == 0 { - // Decompressed content is defined entirely as Literals Section content. - b.dst = append(b.dst, literals...) - if delayedHistory { - hist.append(literals) + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] } return nil } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + } - seqs, err := seqs.mergeHistory(&hist.decoders) - if err != nil { + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) return err } - if debugDecoder { - println("History merged ok") + // Extract blocks... 
+ if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) } - br := &bitReader{} - if err := br.init(in); err != nil { - return err + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets - // TODO: Investigate if sending history without decoders are faster. - // This would allow the sequences to be decoded async and only have to construct stream history. - // If only recent offsets were not transferred, this would be an obvious win. - // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} +func (b *blockDec) executeSequences(hist *history) error { hbytes := hist.b if len(hbytes) > hist.windowSize { hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history any more. + // We do not need history anymore. if hist.dict != nil { hist.dict.content = nil } } - - if err := seqs.initialize(br, hist, literals, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - - err = seqs.decode(nSeqs, br, hbytes) + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) if err != nil { return err } - if !br.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) - } + return b.updateHistory(hist) +} - err = br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } +func (b *blockDec) updateHistory(hist *history) error { if len(b.data) > maxCompressedBlockSize { return fmt.Errorf("compressed block size too large (%d)", len(b.data)) } // Set output and release references. - b.dst = seqs.out - seqs.out, seqs.literals, seqs.hist = nil, nil, nil + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset - if !delayedHistory { - // If we don't have delayed history, no need to update. - hist.recentOffsets = seqs.prevOffset - return nil - } if b.Last { // if last block we don't care about history. println("Last block, no history returned") hist.b = hist.b[:0] return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. 
Added", len(b.dst), "to history, now length", len(hist.b)) + } } - hist.append(b.dst) - hist.recentOffsets = seqs.prevOffset - if debugDecoder { - println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") - } + hist.decoders.out, hist.decoders.literals = nil, nil return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index aab71c6cf851..176788f25976 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -7,7 +7,6 @@ package zstd import ( "fmt" "io" - "io/ioutil" ) type byteBuffer interface { @@ -23,7 +22,7 @@ type byteBuffer interface { readByte() (byte, error) // Skip n bytes. - skipN(n int) error + skipN(n int64) error } // in-memory buffer @@ -52,10 +51,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { return r, nil } -func (b *byteBuf) remain() []byte { - return *b -} - func (b *byteBuf) readByte() (byte, error) { bb := *b if len(bb) < 1 { @@ -66,9 +61,12 @@ func (b *byteBuf) readByte() (byte, error) { return r, nil } -func (b *byteBuf) skipN(n int) error { +func (b *byteBuf) skipN(n int64) error { bb := *b - if len(bb) < n { + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { return io.ErrUnexpectedEOF } *b = bb[n:] @@ -113,6 +111,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { func (r *readerWrapper) readByte() (byte, error) { n2, err := r.r.Read(r.tmp[:1]) if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } return 0, err } if n2 != 1 { @@ -121,9 +122,9 @@ func (r *readerWrapper) readByte() (byte, error) { return r.tmp[0], nil } -func (r *readerWrapper) skipN(n int) error { - n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) - if n2 != int64(n) { +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { err = io.ErrUnexpectedEOF } return err diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go index 2c4fca17fa1d..0e59a242d8dc 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -13,12 +13,6 @@ type byteReader struct { off int } -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - // advance the stream b n bytes. func (b *byteReader) advance(n uint) { b.off += int(n) diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index 69736e8d4bb8..f6a240970d46 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -4,7 +4,7 @@ package zstd import ( - "bytes" + "encoding/binary" "errors" "io" ) @@ -15,18 +15,50 @@ const HeaderMaxSize = 14 + 3 // Header contains information about the first frame and block within that. type Header struct { - // Window Size the window of data to keep while decoding. - // Will only be set if HasFCS is false. - WindowSize uint64 + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool - // Frame content size. - // Expected size of the entire frame. 
- FrameContentSize uint64 + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 // Dictionary ID. // If 0, no dictionary. DictionaryID uint32 + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. + // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + // First block information. FirstBlock struct { // OK will be set if first block could be decoded. @@ -51,17 +83,9 @@ type Header struct { CompressedSize int } - // Skippable will be true if the frame is meant to be skipped. - // No other information will be populated. - Skippable bool - // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. HasCheckSum bool - - // If this is true FrameContentSize will have a valid value - HasFCS bool - - SingleSegment bool } // Decode the header from the beginning of the stream. @@ -71,39 +95,46 @@ type Header struct { // If there isn't enough input, io.ErrUnexpectedEOF is returned. // The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
func (h *Header) Decode(in []byte) error { + *h = Header{} if len(in) < 4 { return io.ErrUnexpectedEOF } + h.HeaderSize += 4 b, in := in[:4], in[4:] - if !bytes.Equal(b, frameMagic) { - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { return ErrMagicMismatch } - *h = Header{Skippable: true} + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) return nil } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor if len(in) < 1 { return io.ErrUnexpectedEOF } - - // Clear output - *h = Header{} fhd, in := in[0], in[1:] + h.HeaderSize++ h.SingleSegment = fhd&(1<<5) != 0 h.HasCheckSum = fhd&(1<<2) != 0 - if fhd&(1<<3) != 0 { return errors.New("reserved bit set on frame header") } - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor if !h.SingleSegment { if len(in) < 1 { return io.ErrUnexpectedEOF } var wd byte wd, in = in[0], in[1:] + h.HeaderSize++ windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) @@ -120,10 +151,8 @@ func (h *Header) Decode(in []byte) error { return io.ErrUnexpectedEOF } b, in = in[:size], in[size:] - if b == nil { - return io.ErrUnexpectedEOF - } - switch size { + h.HeaderSize += int(size) + switch len(b) { case 1: h.DictionaryID = uint32(b[0]) case 2: @@ -152,10 +181,8 @@ func (h *Header) Decode(in []byte) error { return io.ErrUnexpectedEOF } b, in = in[:fcsSize], in[fcsSize:] - if b == nil { - return io.ErrUnexpectedEOF - } - switch fcsSize { + h.HeaderSize += int(fcsSize) + switch len(b) { case 1: h.FrameContentSize = uint64(b[0]) case 2: diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f430f58b5726..7113e69ee3a1 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -5,9 +5,12 @@ package zstd import ( - "errors" + "context" + "encoding/binary" "io" "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" ) // Decoder provides decoding of zstandard streams. @@ -22,15 +25,22 @@ type Decoder struct { // Unreferenced decoders, ready for use. decoders chan *blockDec - // Streams ready to be decoded. - stream chan decodeStream - // Current read position used for Reader functionality. current decoderState + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict + dicts map[uint32]*dict // streamWg is the waitgroup for all streams streamWg sync.WaitGroup @@ -46,7 +56,10 @@ type decoderState struct { output chan decodeOutput // cancel remaining output. - cancel chan struct{} + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest flushed bool } @@ -81,7 +94,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { return nil, err } } - d.current.output = make(chan decodeOutput, d.o.concurrent) + d.current.crc = xxhash.New() d.current.flushed = true if r == nil { @@ -89,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Transfer option dicts. 
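Editor's note, not part of the vendored patch: the reworked zstd.Header type and its Decode method above can be exercised with a minimal sketch like the following. The sample payload and the printed fields are illustrative only.

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Produce a real frame so there is a header to inspect.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	frame := enc.EncodeAll([]byte("hello zstd header"), nil)
	enc.Close()

	// Decode only the frame header; no block payload is decompressed.
	var h zstd.Header
	if err := h.Decode(frame); err != nil {
		panic(err)
	}
	fmt.Println("single segment:", h.SingleSegment)
	fmt.Println("has FCS:", h.HasFCS, "content size:", h.FrameContentSize)
	fmt.Println("header bytes:", h.HeaderSize, "skippable:", h.Skippable)
}
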
- d.dicts = make(map[uint32]dict, len(d.o.dicts)) + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) for _, dc := range d.o.dicts { d.dicts[dc.id] = dc } @@ -130,7 +143,7 @@ func (d *Decoder) Read(p []byte) (int, error) { break } if !d.nextBlock(n == 0) { - return n, nil + return n, d.current.err } } } @@ -162,6 +175,7 @@ func (d *Decoder) Reset(r io.Reader) error { d.drainOutput() + d.syncStream.br.r = nil if r == nil { d.current.err = ErrDecoderNilInput if len(d.current.b) > 0 { @@ -172,21 +186,23 @@ func (d *Decoder) Reset(r io.Reader) error { } // If bytes buffer and < 5MB, do sync decoding anyway. - if bb, ok := r.(byter); ok && bb.Len() < 5<<20 { + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { bb2 := bb if debugDecoder { println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) } b := bb2.Bytes() var dst []byte - if cap(d.current.b) > 0 { - dst = d.current.b + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] } - dst, err := d.DecodeAll(b, dst[:0]) + dst, err := d.DecodeAll(b, dst) if err == nil { err = io.EOF } + // Save output buffer + d.syncStream.dstBuf = dst d.current.b = dst d.current.err = err d.current.flushed = true @@ -195,33 +211,40 @@ func (d *Decoder) Reset(r io.Reader) error { } return nil } - - if d.stream == nil { - d.stream = make(chan decodeStream, 1) - d.streamWg.Add(1) - go d.startStreamDecoder(d.stream) - } - // Remove current block. + d.stashDecoder() d.current.decodeOutput = decodeOutput{} d.current.err = nil - d.current.cancel = make(chan struct{}) d.current.flushed = false d.current.d = nil + d.syncStream.dstBuf = nil - d.stream <- decodeStream{ - r: r, - output: d.current.output, - cancel: d.current.cancel, + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + return nil } // drainOutput will drain the output until errEndOfStream is sent. func (d *Decoder) drainOutput() { if d.current.cancel != nil { - println("cancelling current") - close(d.current.cancel) + if debugDecoder { + println("cancelling current") + } + d.current.cancel() d.current.cancel = nil } if d.current.d != nil { @@ -243,12 +266,9 @@ func (d *Decoder) drainOutput() { } d.decoders <- v.d } - if v.err == errEndOfStream { - println("current flushed") - d.current.flushed = true - return - } } + d.current.output = nil + d.current.flushed = true } // WriteTo writes data to w until there's no more data to write or when an error occurs. @@ -287,19 +307,23 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) { // DecodeAll can be used concurrently. // The Decoder concurrency limits will be respected. func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.current.err == ErrDecoderClosed { + if d.decoders == nil { return dst, ErrDecoderClosed } // Grab a block decoder and frame decoder. 
block := <-d.decoders frame := block.localFrame + initialSize := len(dst) defer func() { if debugDecoder { printf("re-adding decoder: %p", block) } frame.rawInput = nil frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } d.decoders <- block }() frame.bBuf = input @@ -307,34 +331,45 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { for { frame.history.reset() err := frame.reset(&frame.bBuf) - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") - } - return dst, nil - } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - frame.history.setDict(&dict) - } if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } return dst, err } - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { - return dst, ErrDecoderSizeExceeded + if err = d.setDict(frame); err != nil { + return nil, err + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded } - if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { - // Never preallocate moe than 1 GB up front. + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) copy(dst2, dst) dst = dst2 } } - if cap(dst) == 0 { + + if cap(dst) == 0 && !d.o.limitToCap { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. size := len(input) * 2 @@ -352,6 +387,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { if err != nil { return dst, err } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } if len(frame.bBuf) == 0 { if debugDecoder { println("frame dbuf empty") @@ -368,33 +406,172 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { // If non-blocking mode is used the returned boolean will be false // if no data was available without blocking. func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } if d.current.err != nil { // Keep error state. 
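Editor's note, not part of the vendored patch: the DecodeAll hunk above adds size limits (maxDecodedSize, window checks) and preallocation based on the frame content size. A rough caller-side sketch, with made-up sizes, looks like this.

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	compressed := enc.EncodeAll(make([]byte, 1<<20), nil)
	enc.Close()

	// Cap total decoded output; frames that would expand past the limit
	// fail with ErrDecoderSizeExceeded instead of allocating.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderMaxMemory(16<<20))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	// DecodeAll appends to dst, so reusing a buffer avoids reallocation.
	dst := make([]byte, 0, 1<<20)
	out, err := dec.DecodeAll(compressed, dst)
	fmt.Println("decoded", len(out), "bytes, err:", err)
}
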
- return blocking + return false } + d.current.b = d.current.b[:0] + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() if blocking { - d.current.decodeOutput = <-d.current.output + d.current.decodeOutput, ok = <-d.current.output } else { select { - case d.current.decodeOutput = <-d.current.output: + case d.current.decodeOutput, ok = <-d.current.output: default: return false } } + if !ok { + // This should not happen, so signal error state... + d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } if debugDecoder { - println("got", len(d.current.b), "bytes, error:", d.current.err) + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } + if d.current.err != nil { + return false + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + 
d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last } return true } +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + // Close will release all resources. // It is NOT possible to reuse the decoder after this. func (d *Decoder) Close() { @@ -402,10 +579,10 @@ func (d *Decoder) Close() { return } d.drainOutput() - if d.stream != nil { - close(d.stream) + if d.current.cancel != nil { + d.current.cancel() d.streamWg.Wait() - d.stream = nil + d.current.cancel = nil } if d.decoders != nil { close(d.decoders) @@ -456,100 +633,321 @@ type decodeOutput struct { err error } -type decodeStream struct { - r io.Reader - - // Blocks ready to be written to output. - output chan decodeOutput - - // cancel reading from the input - cancel chan struct{} +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil } -// errEndOfStream indicates that everything from the stream was read. -var errEndOfStream = errors.New("end-of-stream") - // Create Decoder: -// Spawn n block decoders. These accept tasks to decode a block. -// Create goroutine that handles stream processing, this will send history to decoders as they are available. -// Decoders update the history as they decode. -// When a block is returned: -// a) history is sent to the next decoder, -// b) content written to CRC. -// c) return data to WRITER. -// d) wait for next block to return data. -// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. -func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { defer d.streamWg.Done() - frame := newFrameDec(d.o) - for stream := range inStream { - if debugDecoder { - println("got new stream") + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... 
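Editor's note, not part of the vendored patch: the comment above describes the new split between a synchronous path and a pipelined (goroutine-based) stream decoder. A hedged sketch of stream usage, with an in-memory buffer standing in for a real source, is below; which path is taken for a given concurrency value follows the diff, not this sketch.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Build a small compressed stream in memory.
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	io.WriteString(enc, "streamed payload")
	enc.Close()

	// Concurrency 1 selects the synchronous path; higher values run the
	// pipelined decode goroutines.
	dec, err := zstd.NewReader(&buf, zstd.WithDecoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	var out bytes.Buffer
	if _, err := dec.WriteTo(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}
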
+ go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block } - br := readerWrapper{r: stream.r} - decodeStream: - for { - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... + frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && 
hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true } else { - frame.history.setDict(&dict) + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } } } - if err != nil { - stream.output <- decodeOutput{ - err: err, + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block } - break + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil { + err = d.setDict(frame) + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { if debugDecoder { - println("starting frame decoder") - } - - // This goroutine will forward history between frames. - frame.frameDone.Add(1) - frame.initAsync() - - go frame.startDecoder(stream.output) - decodeFrame: - // Go through all blocks of the frame. - for { - dec := <-d.decoders - select { - case <-stream.cancel: - if !frame.sendErr(dec, io.EOF) { - // To not let the decoder dangle, send it back. - stream.output <- decodeOutput{d: dec} - } - break decodeStream - default: + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. 
+ } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) } - err := frame.next(dec) - switch err { - case io.EOF: - // End of current frame, no error - println("EOF on next block") - break decodeFrame - case nil: - continue - default: - println("block decoder returned", err) - break decodeStream + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break } - // All blocks have started decoding, check if there are more frames. - println("waiting for done") - frame.frameDone.Wait() - println("done waiting...") } - frame.frameDone.Wait() - println("Sending EOS") - stream.output <- decodeOutput{err: errEndOfStream} } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 95cc9b8b81f2..07a90dd7af30 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -6,6 +6,8 @@ package zstd import ( "errors" + "fmt" + "math/bits" "runtime" ) @@ -14,21 +16,28 @@ type DOption func(*decoderOptions) error // options retains accumulated state of multiple options. type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - maxWindowSize uint64 - dicts []dict + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []*dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int } func (o *decoderOptions) setDefault() { *o = decoderOptions{ // use less ram: true for now, but may change. 
- lowMem: true, - concurrent: runtime.GOMAXPROCS(0), - maxWindowSize: MaxWindowSize, + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, } - o.maxDecodedSize = 1 << 63 + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 } // WithDecoderLowmem will set whether to use a lower amount of memory, @@ -37,16 +46,25 @@ func WithDecoderLowmem(b bool) DOption { return func(o *decoderOptions) error { o.lowMem = b; return nil } } -// WithDecoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. +// WithDecoderConcurrency sets the number of created decoders. +// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. func WithDecoderConcurrency(n int) DOption { return func(o *decoderOptions) error { - if n <= 0 { + if n < 0 { return errors.New("concurrency must be at least 1") } - o.concurrent = n + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } return nil } } @@ -54,7 +72,7 @@ func WithDecoderConcurrency(n int) DOption { // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory // non-streaming operations or maximum window size for streaming operations. // This can be used to control memory usage of potentially hostile content. -// Maximum and default is 1 << 63 bytes. +// Maximum is 1 << 63 bytes. Default is 64GiB. func WithDecoderMaxMemory(n uint64) DOption { return func(o *decoderOptions) error { if n == 0 { @@ -69,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption { } // WithDecoderDicts allows to register one or more dictionaries for the decoder. -// If several dictionaries with the same ID is provided the last one will be used. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithDecoderDicts(dicts ...[]byte) DOption { return func(o *decoderOptions) error { for _, b := range dicts { @@ -77,8 +101,20 @@ func WithDecoderDicts(dicts ...[]byte) DOption { if err != nil { return err } - o.dicts = append(o.dicts, *d) + o.dicts = append(o.dicts, d) + } + return nil + } +} + +// WithEncoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) return nil } } @@ -100,3 +136,34 @@ func WithDecoderMaxWindow(size uint64) DOption { return nil } } + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. 
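Editor's note, not part of the vendored patch: the decoder options documented above (concurrency, maximum memory, trained dictionaries) compose as in this sketch; the dictionary file name is purely hypothetical.

package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	opts := []zstd.DOption{
		zstd.WithDecoderConcurrency(0),       // 0 means use GOMAXPROCS
		zstd.WithDecoderMaxMemory(256 << 20), // refuse frames above 256 MiB decoded
	}

	// A trained dictionary ("zstd --train" format) could be registered as well.
	if blob, err := os.ReadFile("app.dict"); err == nil {
		opts = append(opts, zstd.WithDecoderDicts(blob))
	}

	dec, err := zstd.NewReader(nil, opts...)
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	fmt.Println("decoder configured")
}
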
+// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. +func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index a36ae83ef579..66a95c18ef39 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -1,7 +1,6 @@ package zstd import ( - "bytes" "encoding/binary" "errors" "fmt" @@ -20,7 +19,10 @@ type dict struct { content []byte } -var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 // ID returns the dictionary id or 0 if d is nil. func (d *dict) ID() uint32 { @@ -50,7 +52,7 @@ func loadDict(b []byte) (*dict, error) { ofDec: sequenceDec{fse: &fseDecoder{}}, mlDec: sequenceDec{fse: &fseDecoder{}}, } - if !bytes.Equal(b[:4], dictMagic[:]) { + if string(b[:4]) != dictMagic { return nil, ErrMagicMismatch } d.id = binary.LittleEndian.Uint32(b[4:8]) diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 15ae8ee80774..bfb2e146c334 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -16,6 +16,7 @@ type fastBase struct { cur int32 // maximum offset. Should be at least 2x block size. maxMatchOff int32 + bufferReset int32 hist []byte crc *xxhash.Digest tmp [8]byte @@ -56,8 +57,8 @@ func (e *fastBase) Block() *blockEnc { } func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) } // check if we have space already if len(e.hist)+len(src) > cap(e.hist) { @@ -126,24 +127,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) } } - a := src[s:] - b := src[t:] - b = b[:len(a)] - end := int32((len(a) >> 3) << 3) - for i := int32(0); i < end; i += 8 { - if diff := load6432(a, i) ^ load6432(b, i); diff != 0 { - return i + int32(bits.TrailingZeros64(diff)>>3) - } - } - - a = a[end:] - b = b[end:] - for i := range a { - if a[i] != b[i] { - return int32(i) + end - } - } - return int32(len(a)) + end + return int32(matchLen(src[s:], src[t:])) } // Reset the encoding table. @@ -171,7 +155,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { // We offset current position so everything will be out of reach. 
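Editor's note, not part of the vendored patch: WithDecodeAllCapLimit, introduced above, bounds DecodeAll by the free capacity of dst. A sketch of the expected behaviour, with arbitrary sizes, follows.

package main

import (
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	compressed := enc.EncodeAll(make([]byte, 10_000), nil)
	enc.Close()

	dec, err := zstd.NewReader(nil, zstd.WithDecodeAllCapLimit(true))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	// With the cap limit enabled, output may not exceed cap(dst)-len(dst).
	small := make([]byte, 0, 1<<10)
	_, err = dec.DecodeAll(compressed, small)
	fmt.Println(errors.Is(err, zstd.ErrDecoderSizeExceeded)) // frame is larger than 1 KiB

	large := make([]byte, 0, 16<<10)
	out, err := dec.DecodeAll(compressed, large)
	fmt.Println(len(out), err)
}
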
// If above reset line, history will be purged. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += e.maxMatchOff + int32(len(e.hist)) } e.hist = e.hist[:0] diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 96028ecd8366..830f5ba74a22 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -32,6 +32,7 @@ type match struct { length int32 rep int32 est int32 + _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes } const highScore = 25000 @@ -84,14 +85,10 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = prevEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} e.cur = e.maxMatchOff break } @@ -192,8 +189,8 @@ encodeLoop: panic("offset0 was 0") } - bestOf := func(a, b match) match { - if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { + bestOf := func(a, b *match) *match { + if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 { return a } return b @@ -219,22 +216,26 @@ encodeLoop: return m } - best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) + m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) + m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) + m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1) + best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4)) if canRepeat && best.length < goodEnough { cv32 := uint32(cv >> 8) spp := s + 1 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + m1 := matchAt(spp-offset1, spp, cv32, 1) + m2 := matchAt(spp-offset2, spp, cv32, 2) + m3 := matchAt(spp-offset3, spp, cv32, 3) + best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) if best.length > 0 { cv32 = uint32(cv >> 24) spp += 2 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + m1 := matchAt(spp-offset1, spp, cv32, 1) + m2 := matchAt(spp-offset2, spp, cv32, 2) + m3 := matchAt(spp-offset3, spp, cv32, 3) + best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) } } // Load next and check... 
@@ -261,26 +262,33 @@ encodeLoop: candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] // Short at s+1 - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) // Long at s+1, s+2 - best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) - best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) + m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) + m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) + m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1) + m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1) + best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5)) if false { // Short at s+3. // Too often worse... - best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) + m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1) + best = bestOf(best, &m) } // See if we can find a better match by checking where the current best ends. // Use that offset to see if we can find a better full match. if sAt := best.s + best.length; sAt < sLimit { nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) candidateEnd := e.longTable[nextHashL] - if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { - bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) - if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { - bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + const skipBeginning = 2 + if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 { + m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + bestEnd := bestOf(best, &m) + if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 { + m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + bestEnd = bestOf(bestEnd, &m) } best = bestEnd } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 602c05ee0c4c..8582f31a7cc4 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -62,14 +62,10 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. 
- for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} e.cur = e.maxMatchOff break } @@ -156,8 +152,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -416,15 +412,23 @@ encodeLoop: // Try to find a better match by searching for a long match at the end of the current best match if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) + s2 := s + skipBeginning + cv := load3232(src, s2) candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 if matchedNext > matched { t = coffsetL + s = s2 matched = matchedNext if debugMatches { println("long match at end-of-match") @@ -434,12 +438,13 @@ encodeLoop: // Check prev long... if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 if matchedNext > matched { t = coffsetL + s = s2 matched = matchedNext if debugMatches { println("prev long match at end-of-match") @@ -518,8 +523,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -578,7 +583,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} @@ -674,8 +679,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -1047,8 +1052,8 @@ encodeLoop: } // Store this, since we have it. 
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index d6b3104240b0..7d425109adb8 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -44,14 +44,10 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} e.cur = e.maxMatchOff break } @@ -127,8 +123,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -388,7 +384,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - if e.cur >= bufferReset { + if e.cur >= e.bufferReset { for i := range e.table[:] { e.table[i] = tableEntry{} } @@ -439,8 +435,8 @@ encodeLoop: var t int32 for { - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -685,7 +681,7 @@ encodeLoop: } // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += int32(len(src)) } } @@ -700,7 +696,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} @@ -785,8 +781,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -969,7 +965,7 @@ encodeLoop: te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) e.longTable[longHash1] = te0 e.longTable[longHash2] = te1 e.markLongShardDirty(longHash1) @@ -1002,8 +998,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) // We have at least 4 byte match. // No need to check backwards. 
We come straight from a match @@ -1103,7 +1099,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - copy(e.longTable[:], e.dictLongTable) + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) for i := range e.longTableShardDirty { e.longTableShardDirty[i] = false } @@ -1114,7 +1111,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { continue } - copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + e.longTableShardDirty[i] = false } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 5f08a2830233..315b1a8f2f69 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -43,7 +43,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} @@ -85,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 7 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s @@ -304,13 +304,13 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { minNonLiteralBlockSize = 1 + 1 + inputMargin ) if debugEncoder { - if len(src) > maxBlockSize { + if len(src) > maxCompressedBlockSize { panic("src too big") } } // Protect against e.cur wraparound. - if e.cur >= bufferReset { + if e.cur >= e.bufferReset { for i := range e.table[:] { e.table[i] = tableEntry{} } @@ -334,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 8 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s @@ -538,7 +538,7 @@ encodeLoop: println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += int32(len(src)) } } @@ -555,11 +555,9 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { return } // Protect against e.cur wraparound. 
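Editor's note, not part of the vendored patch: the Reset hunks above replace copy() with a slice-to-array-pointer conversion. A standalone sketch of that Go 1.17+ technique is below; tableEntry and tableSize only stand in for the encoder's real table types, and the values are made up.

package main

import "fmt"

type tableEntry struct {
	offset int32
	val    uint32
}

const tableSize = 4

func main() {
	dictTable := []tableEntry{{1, 10}, {2, 20}, {3, 30}, {4, 40}, {5, 50}}

	// Previously: var table [tableSize]tableEntry; copy(table[:], dictTable)
	// Converting the slice to an array pointer and dereferencing copies the
	// first tableSize elements in one assignment.
	// This panics if len(dictTable) < tableSize.
	table := *(*[tableSize]tableEntry)(dictTable)

	fmt.Println(table)
}
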
- for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } + e.table = [tableSize]tableEntry{} e.cur = e.maxMatchOff break } @@ -871,7 +869,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { const shardCnt = tableShardCnt const shardSize = tableShardSize if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) for i := range e.tableShardDirty { e.tableShardDirty[i] = false } @@ -883,7 +882,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { continue } - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) e.tableShardDirty[i] = false } e.allDirty = false diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index e6e315969b00..65c6c36dc13a 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -8,6 +8,7 @@ import ( "crypto/rand" "fmt" "io" + "math" rdebug "runtime/debug" "sync" @@ -98,23 +99,25 @@ func (e *Encoder) Reset(w io.Writer) { if cap(s.filling) == 0 { s.filling = make([]byte, 0, e.o.blockSize) } - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() } if s.encoder == nil { s.encoder = e.o.encoder() } - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() s.filling = s.filling[:0] - s.current = s.current[:0] - s.previous = s.previous[:0] s.encoder.Reset(e.o.dict, false) s.headerWritten = false s.eofWritten = false @@ -258,6 +261,46 @@ func (e *Encoder) nextBlock(final bool) error { return s.err } + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.err = err + return err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + // Move blocks forward. 
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) @@ -486,8 +529,8 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // If a non-single block is needed the encoder will reset again. e.encoders <- enc }() - // Use single segments when above minimum window and below 1MB. - single := len(src) < 1<<20 && len(src) > MinWindowSize + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { single = *e.o.single } @@ -509,7 +552,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } // If we can do everything in one block, prefer that. - if len(src) <= maxCompressedBlockSize { + if len(src) <= e.o.blockSize { enc.Reset(e.o.dict, true) // Slightly faster with no history and everything in one block. if e.o.crc { @@ -597,3 +640,37 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. + // There cannot be 0 blocks. + blocks := (size + e.o.blockSize) / e.o.blockSize + + // Combine, add padding. + maxSz := frameHeader + 3*blocks + size + if e.o.pad > 1 { + maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) + } + return maxSz +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 7d29e1d689ee..8e15be2f7f8a 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -3,6 +3,8 @@ package zstd import ( "errors" "fmt" + "math" + "math/bits" "runtime" "strings" ) @@ -24,6 +26,7 @@ type encoderOptions struct { allLitEntropy bool customWindow bool customALEntropy bool + customBlockSize bool lowMem bool dict *dict } @@ -33,7 +36,7 @@ func (o *encoderOptions) setDefault() { concurrent: runtime.GOMAXPROCS(0), crc: true, single: nil, - blockSize: 1 << 16, + blockSize: maxCompressedBlockSize, windowSize: 8 << 20, level: SpeedDefault, allLitEntropy: true, @@ -46,22 +49,22 @@ func (o encoderOptions) encoder() encoder { switch o.level { case SpeedFastest: if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} case SpeedDefault: if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: 
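Editor's note, not part of the vendored patch: the new MaxEncodedSize helper above lets callers size the destination once before EncodeAll, as in this sketch with an arbitrary 1 MiB payload.

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedFastest))
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	payload := make([]byte, 1<<20)

	// Size dst once so EncodeAll never needs to grow it.
	dst := make([]byte, 0, enc.MaxEncodedSize(len(payload)))
	dst = enc.EncodeAll(payload, dst)
	fmt.Println("compressed", len(payload), "->", len(dst), "bytes into cap", cap(dst))
}
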
int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} case SpeedBetterCompression: if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} } panic("unknown compression level") } @@ -75,6 +78,7 @@ func WithEncoderCRC(b bool) EOption { // WithEncoderConcurrency will set the concurrency, // meaning the maximum number of encoders to run concurrently. // The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. // By default this will be set to GOMAXPROCS. func WithEncoderConcurrency(n int) EOption { return func(o *encoderOptions) error { @@ -106,6 +110,7 @@ func WithWindowSize(n int) EOption { o.customWindow = true if o.blockSize > o.windowSize { o.blockSize = o.windowSize + o.customBlockSize = true } return nil } @@ -188,10 +193,9 @@ func EncoderLevelFromZstd(level int) EncoderLevel { return SpeedDefault case level >= 6 && level < 10: return SpeedBetterCompression - case level >= 10: + default: return SpeedBestCompression } - return SpeedDefault } // String provides a string representation of the compression level. @@ -222,6 +226,9 @@ func WithEncoderLevel(l EncoderLevel) EOption { switch o.level { case SpeedFastest: o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } case SpeedDefault: o.windowSize = 8 << 20 case SpeedBetterCompression: @@ -278,7 +285,7 @@ func WithNoEntropyCompression(b bool) EOption { // a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. // For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. // This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size. +// If this is not specified, block encodes will automatically choose this based on the input size and the window size. // This setting has no effect on streamed encodes. func WithSingleSegment(b bool) EOption { return func(o *encoderOptions) error { @@ -299,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption { } // WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. 
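Editor's note, not part of the vendored patch: EncoderLevelFromZstd and WithEncoderLevel, documented above, map reference-zstd numeric levels onto the package's speed tiers. A short sketch, with arbitrary levels and window size:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// The four speed tiers cover the reference zstd numeric levels.
	for _, lvl := range []int{1, 3, 7, 19} {
		fmt.Println("zstd level", lvl, "->", zstd.EncoderLevelFromZstd(lvl))
	}

	enc, err := zstd.NewWriter(nil,
		zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(19)),
		zstd.WithWindowSize(1<<20), // must be a power of two in the allowed range
	)
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	fmt.Println(len(enc.EncodeAll([]byte("level demo"), nil)), "bytes")
}
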
+// // The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithEncoderDict(dict []byte) EOption { return func(o *encoderOptions) error { d, err := loadDict(dict) @@ -310,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption { return nil } } + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. +func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 989c79f8c315..d8e8a05bd73a 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -5,26 +5,20 @@ package zstd import ( - "bytes" + "encoding/binary" "encoding/hex" "errors" - "hash" "io" - "sync" "github.com/klauspost/compress/zstd/internal/xxhash" ) type frameDec struct { - o decoderOptions - crc hash.Hash64 - offset int64 + o decoderOptions + crc *xxhash.Digest WindowSize uint64 - // In order queue of blocks being decoded. - decoding chan *blockDec - // Frame history passed between blocks history history @@ -34,15 +28,10 @@ type frameDec struct { bBuf byteBuf FrameContentSize uint64 - frameDone sync.WaitGroup - DictionaryID *uint32 + DictionaryID uint32 HasCheckSum bool SingleSegment bool - - // asyncRunning indicates whether the async routine processes input on 'decoding'. - asyncRunningMu sync.Mutex - asyncRunning bool } const ( @@ -54,9 +43,9 @@ const ( MaxWindowSize = 1 << 29 ) -var ( - frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} - skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} +const ( + frameMagic = "\x28\xb5\x2f\xfd" + skippableFrameMagic = "\x2a\x4d\x18" ) func newFrameDec(o decoderOptions) *frameDec { @@ -100,9 +89,9 @@ func (d *frameDec) reset(br byteBuffer) error { copy(signature[1:], b) } - if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { if debugDecoder { - println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) } // Break if not skippable frame. 
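// ---- editor's sketch (not part of the upstream patch) ----
// WithEncoderDict vs. the new WithEncoderDictRaw from the hunk above, assuming
// the bumped package. The parameters `trained` and `raw` are placeholders:
// `trained` must be a real dictionary produced by `zstd --train`, while `raw`
// may be arbitrary bytes that are used only as initial history.
// import "github.com/klauspost/compress/zstd"
func newDictEncoders(trained, raw []byte) (*zstd.Encoder, *zstd.Encoder, error) {
	withTables, err := zstd.NewWriter(nil, zstd.WithEncoderDict(trained))
	if err != nil {
		return nil, nil, err
	}
	historyOnly, err := zstd.NewWriter(nil, zstd.WithEncoderDictRaw(1, raw))
	if err != nil {
		withTables.Close()
		return nil, nil, err
	}
	return withTables, historyOnly, nil
}
// ---- end editor's sketch ----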
break @@ -117,7 +106,7 @@ func (d *frameDec) reset(br byteBuffer) error { } n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) println("Skipping frame with", n, "bytes.") - err = br.skipN(int(n)) + err = br.skipN(int64(n)) if err != nil { if debugDecoder { println("Reading discarded frame", err) @@ -125,9 +114,9 @@ func (d *frameDec) reset(br byteBuffer) error { return err } } - if !bytes.Equal(signature[:], frameMagic) { + if string(signature[:]) != frameMagic { if debugDecoder { - println("Got magic numbers: ", signature, "want:", frameMagic) + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) } return ErrMagicMismatch } @@ -166,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error { // Read Dictionary_ID // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil + d.DictionaryID = 0 if size := fhd & 3; size != 0 { if size == 3 { size = 4 @@ -178,7 +167,7 @@ func (d *frameDec) reset(br byteBuffer) error { return err } var id uint32 - switch size { + switch len(b) { case 1: id = uint32(b[0]) case 2: @@ -189,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error { if debugDecoder { println("Dict size", size, "ID:", id) } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } + d.DictionaryID = id } // Read Frame_Content_Size @@ -208,14 +193,14 @@ func (d *frameDec) reset(br byteBuffer) error { default: fcsSize = 1 << v } - d.FrameContentSize = 0 + d.FrameContentSize = fcsUnknown if fcsSize > 0 { b, err := br.readSmall(fcsSize) if err != nil { println("Reading Frame content", err) return err } - switch fcsSize { + switch len(b) { case 1: d.FrameContentSize = uint64(b[0]) case 2: @@ -229,9 +214,10 @@ func (d *frameDec) reset(br byteBuffer) error { d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) } if debugDecoder { - println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + println("Read FCS:", d.FrameContentSize) } } + // Move this to shared. d.HasCheckSum = fhd&(1<<2) != 0 if d.HasCheckSum { @@ -241,20 +227,27 @@ func (d *frameDec) reset(br byteBuffer) error { d.crc.Reset() } + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + if d.WindowSize == 0 && d.SingleSegment { // We may not need window in this case. d.WindowSize = d.FrameContentSize if d.WindowSize < MinWindowSize { d.WindowSize = MinWindowSize } - } - - if d.WindowSize > uint64(d.o.maxWindowSize) { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded } - return ErrWindowSizeExceeded } + // The minimum Window_Size is 1 KB. if d.WindowSize < MinWindowSize { if debugDecoder { @@ -263,11 +256,23 @@ func (d *frameDec) reset(br byteBuffer) error { return ErrWindowSizeTooSmall } d.history.windowSize = int(d.WindowSize) - if d.o.lowMem && d.history.windowSize < maxBlockSize { - d.history.maxSize = d.history.windowSize * 2 + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. 
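// ---- editor's sketch (not part of the upstream patch) ----
// The window-size validation above rejects frames whose declared window is too
// large (ErrWindowSizeExceeded). That limit is configurable from the outside;
// sketch assuming the bumped package exposes WithDecoderMaxWindow as upstream
// klauspost/compress does.
// import ("io"; "github.com/klauspost/compress/zstd")
func newBoundedReader(r io.Reader) (*zstd.Decoder, error) {
	// Reject frames that ask for more than an 8 MiB history window.
	return zstd.NewReader(r, zstd.WithDecoderMaxWindow(8<<20))
}
// ---- end editor's sketch ----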
+ d.history.allocFrameBuffer = d.history.windowSize * 2 } else { - d.history.maxSize = d.history.windowSize + maxBlockSize + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + // history contains input - maybe we do something d.rawInput = br return nil @@ -276,209 +281,97 @@ func (d *frameDec) reset(br byteBuffer) error { // next will start decoding the next block from stream. func (d *frameDec) next(block *blockDec) error { if debugDecoder { - printf("decoding new block %p:%p", block, block.data) + println("decoding new block") } err := block.reset(d.rawInput, d.WindowSize) if err != nil { println("block error:", err) // Signal the frame decoder we have a problem. - d.sendErr(block, err) + block.sendErr(err) return err } - block.input <- struct{}{} - if debugDecoder { - println("next block:", block) - } - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return nil - } - if block.Last { - // We indicate the frame is done by sending io.EOF - d.decoding <- block - return io.EOF - } - d.decoding <- block return nil } -// sendEOF will queue an error block on the frame. -// This will cause the frame decoder to return when it encounters the block. -// Returns true if the decoder was added. -func (d *frameDec) sendErr(block *blockDec, err error) bool { - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return false - } - - println("sending error", err.Error()) - block.sendErr(err) - d.decoding <- block - return true -} - // checkCRC will check the checksum if the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { if !d.HasCheckSum { return nil } - var tmp [4]byte - got := d.crc.Sum64() - // Flip to match file order. - tmp[0] = byte(got >> 0) - tmp[1] = byte(got >> 8) - tmp[2] = byte(got >> 16) - tmp[3] = byte(got >> 24) // We can overwrite upper tmp now - want, err := d.rawInput.readSmall(4) + buf, err := d.rawInput.readSmall(4) if err != nil { println("CRC missing?", err) return err } - if !bytes.Equal(tmp[:], want) { + if d.o.ignoreChecksum { + return nil + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { if debugDecoder { - println("CRC Check Failed:", tmp[:], "!=", want) + printf("CRC check failed: got %08x, want %08x\n", got, want) } return ErrCRCMismatch } if debugDecoder { - println("CRC ok", tmp[:]) + printf("CRC ok %08x\n", got) } return nil } -func (d *frameDec) initAsync() { - if !d.o.lowMem && !d.SingleSegment { - // set max extra size history to 2MB. - d.history.maxSize = d.history.windowSize + maxBlockSize - } - // re-alloc if more than one extra block size. - if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.history.b) < d.history.maxSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.decoding) < d.o.concurrent { - d.decoding = make(chan *blockDec, d.o.concurrent) - } - if debugDecoder { - h := d.history - printf("history init. 
len: %d, cap: %d", len(h.b), cap(h.b)) - } - d.asyncRunningMu.Lock() - d.asyncRunning = true - d.asyncRunningMu.Unlock() -} - -// startDecoder will start decoding blocks and write them to the writer. -// The decoder will stop as soon as an error occurs or at end of frame. -// When the frame has finished decoding the *bufio.Reader -// containing the remaining input will be sent on frameDec.frameDone. -func (d *frameDec) startDecoder(output chan decodeOutput) { - written := int64(0) - - defer func() { - d.asyncRunningMu.Lock() - d.asyncRunning = false - d.asyncRunningMu.Unlock() - - // Drain the currently decoding. - d.history.error = true - flushdone: - for { - select { - case b := <-d.decoding: - b.history <- &d.history - output <- <-b.result - default: - break flushdone - } - } - println("frame decoder done, signalling done") - d.frameDone.Done() - }() - // Get decoder for first block. - block := <-d.decoding - block.history <- &d.history - for { - var next *blockDec - // Get result - r := <-block.result - if r.err != nil { - println("Result contained error", r.err) - output <- r - return - } - if debugDecoder { - println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) - d.offset += int64(len(r.b)) - } - if !block.Last { - // Send history to next block - select { - case next = <-d.decoding: - if debugDecoder { - println("Sending ", len(d.history.b), "bytes as history") - } - next.history <- &d.history - default: - // Wait until we have sent the block, so - // other decoders can potentially get the decoder. - next = nil - } - } - - // Add checksum, async to decoding. - if d.HasCheckSum { - n, err := d.crc.Write(r.b) - if err != nil { - r.err = err - if n != len(r.b) { - r.err = io.ErrShortWrite - } - output <- r - return - } - } - written += int64(len(r.b)) - if d.SingleSegment && uint64(written) > d.FrameContentSize { - println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) - r.err = ErrFrameSizeExceeded - output <- r - return - } - if block.Last { - r.err = d.checkCRC() - output <- r - return - } - output <- r - if next == nil { - // There was no decoder available, we wait for one now that we have sent to the writer. - if debugDecoder { - println("Sending ", len(d.history.b), " bytes as history") - } - next = <-d.decoding - next.history <- &d.history +// consumeCRC reads the checksum data if the frame has one. +func (d *frameDec) consumeCRC() error { + if d.HasCheckSum { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err } - block = next } + + return nil } -// runDecoder will create a sync decoder that will decode a block of data. +// runDecoder will run the decoder for the remainder of the frame. func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { saved := d.history.b // We use the history for output to avoid copying it. d.history.b = dst + d.history.ignoreBuffer = len(dst) // Store input length, so we only check new data. 
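// ---- editor's sketch (not part of the upstream patch) ----
// The rewritten checkCRC above compares the low 32 bits of XXH64 over the
// decoded frame content against a little-endian value stored after the last
// block. Equivalent computation using the public xxhash module (the copy in
// this patch is an internal vendored fork and cannot be imported directly):
// import ("encoding/binary"; "github.com/cespare/xxhash/v2")
func frameChecksum(decoded []byte) [4]byte {
	var sum [4]byte
	binary.LittleEndian.PutUint32(sum[:], uint32(xxhash.Sum64(decoded)))
	return sum
}
// ---- end editor's sketch ----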
crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } var err error for { err = dec.reset(d.rawInput, d.WindowSize) @@ -489,29 +382,47 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { println("next block:", dec) } err = dec.decodeBuf(&d.history) - if err != nil || dec.Last { + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded break } - if uint64(len(d.history.b)) > d.o.maxDecodedSize { + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) err = ErrDecoderSizeExceeded break } - if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { - println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) err = ErrFrameSizeExceeded break } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } } dst = d.history.b if err == nil { - if d.HasCheckSum { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go index bb3d4fd6c312..2f8860a722b8 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -5,8 +5,10 @@ package zstd import ( + "encoding/binary" "errors" "fmt" + "io" ) const ( @@ -178,10 +180,32 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<> 3) - // println(s.norm[:s.symbolLen], s.symbolLen) return s.buildDtable() } +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. 
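// ---- editor's sketch (not part of the upstream patch) ----
// The maxSyncLen / limitToCap bookkeeping above corresponds to decoder options
// in the bumped package: WithDecoderMaxMemory caps the total decoded size, and
// (assuming upstream's WithDecodeAllCapLimit is present in this version)
// DecodeAll can additionally be limited to the capacity of the destination.
// import "github.com/klauspost/compress/zstd"
func decodeBounded(compressed []byte) ([]byte, error) {
	dec, err := zstd.NewReader(nil, // nil reader is fine when only DecodeAll is used
		zstd.WithDecoderMaxMemory(64<<20),
		zstd.WithDecodeAllCapLimit(true),
	)
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	// With the cap limit set, output may not grow beyond this 1 MiB capacity.
	return dec.DecodeAll(compressed, make([]byte, 0, 1<<20))
}
// ---- end editor's sketch ----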
+ // symbolLen uint16 // Length of active part of the symbol table. + // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. + // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + // decSymbol contains information about a state entry, // Including the state offset base, the output symbol and // the number of bits to read for the low part of the destination state. @@ -204,18 +228,10 @@ func (d decSymbol) newState() uint16 { return uint16(d >> 16) } -func (d decSymbol) baseline() uint32 { - return uint32(d >> 32) -} - func (d decSymbol) baselineInt() int { return int(d >> 32) } -func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { - *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - func (d *decSymbol) setNBits(nBits uint8) { const mask = 0xffffffffffffff00 *d = (*d & mask) | decSymbol(nBits) @@ -231,11 +247,6 @@ func (d *decSymbol) setNewState(state uint16) { *d = (*d & mask) | decSymbol(state)<<16 } -func (d *decSymbol) setBaseline(baseline uint32) { - const mask = 0xffffffff - *d = (*d & mask) | decSymbol(baseline)<<32 -} - func (d *decSymbol) setExt(addBits uint8, baseline uint32) { const mask = 0xffff00ff *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) @@ -257,68 +268,6 @@ func (s *fseDecoder) setRLE(symbol decSymbol) { s.dt[0] = symbol } -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. 
- return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} - // transform will transform the decoder table into a table usable for // decoding without having to apply the transformation while decoding. // The state will contain the base value and the number of bits to read. @@ -352,34 +301,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { s.state = dt[br.getBits(tableLog)] } -// next returns the current symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) next(br *bitReader) { - lowBits := uint16(br.getBits(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (s *fseState) finished(br *bitReader) bool { - return br.finished() && s.state.nbBits() > 0 -} - -// final returns the current state symbol without decoding the next. -func (s *fseState) final() (int, uint8) { - return s.state.baselineInt(), s.state.addBits() -} - // final returns the current state symbol without decoding the next. func (s decSymbol) final() (int, uint8) { return s.baselineInt(), s.addBits() } - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { - lowBits := br.get16BitsFast(s.state.nbBits()) - s.state = s.dt[s.state.newState()+lowBits] - return s.state.baseline(), s.state.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 000000000000..d04a829b0a0e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. 
+func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 000000000000..bcde39869535 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git 
a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 000000000000..332e51fe44fa --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,72 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index 5442061b18df..ab26326a8ff8 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { s.clearCount = maxCount != 0 } -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *fseEncoder) prepare() (*fseEncoder, error) { - if s == nil { - s = &fseEncoder{} - } - s.useRLE = false - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - return s, nil -} - // allocCtable will allocate tables needed for compression. // If existing tables a re big enough, they are simply re-used. func (s *fseEncoder) allocCtable() { @@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { c.state = c.stateTable[lu] } -// encode the output symbol provided and write it to the bitstream. 
-func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - // flush will write the tablelog to the output and flush the remaining full bytes. func (c *cState) flush(tableLog uint8) { c.bw.flush32() diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go index cf33f29a1b48..5d73c21ebdd4 100644 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 { return (uint32(u) * prime4bytes) >> (32 - length) } } - -// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash3(u uint32, h uint8) uint32 { - return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) -} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go index f783e32d251b..09164856d222 100644 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -10,40 +10,48 @@ import ( // history contains the information transferred between blocks. type history struct { - b []byte - huffTree *huff0.Scratch - recentOffsets [3]int + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression decoders sequenceDecs - windowSize int - maxSize int - error bool - dict *dict + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict } // reset will reset the history to initial state of a frame. // The history must already have been initialized to the desired size. func (h *history) reset() { h.b = h.b[:0] + h.ignoreBuffer = 0 h.error = false h.recentOffsets = [3]int{1, 4, 8} - if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - if f := h.decoders.offsets.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - h.decoders = sequenceDecs{} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { if h.huffTree != nil { if h.dict == nil || h.dict.litEnc != h.huffTree { huffDecoderPool.Put(h.huffTree) + h.huffTree = nil } } - h.huffTree = nil - h.dict = nil - //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) } func (h *history) setDict(dict *dict) { @@ -54,6 +62,7 @@ func (h *history) setDict(dict *dict) { h.decoders.litLengths = dict.llDec h.decoders.offsets = dict.ofDec h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content h.recentOffsets = dict.offsets h.huffTree = dict.litEnc } @@ -83,6 +92,24 @@ func (h *history) append(b []byte) { copy(h.b[h.windowSize-len(b):], b) } +// ensureBlock will ensure there is space for at least one block... 
+func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + // append bytes to history without ever discarding anything. func (h *history) appendKeep(b []byte) { h.b = append(h.b, b...) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md index 69aa3bb587c8..777290d44ced 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -2,12 +2,7 @@ VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -28,31 +23,49 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. ## Benchmarks Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. 
-| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package - [InfluxDB](https://github.com/influxdata/influxdb) - [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go index 2c112a0ab1c1..fc40c820016c 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -18,19 +18,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -52,10 +44,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -71,21 +63,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. 
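// ---- editor's sketch (not part of the upstream patch) ----
// The README and Digest.Write changes above describe the streaming API; a
// minimal use of it via the public module github.com/cespare/xxhash/v2 (the
// in-tree copy is internal to the zstd package):
func hashParts(parts ...[]byte) uint64 {
	d := xxhash.New()
	for _, p := range parts {
		_, _ = d.Write(p) // Write is documented to never fail
	}
	return d.Sum64()
}
// ---- end editor's sketch ----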
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -135,21 +129,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s index be8db5bf7960..ddb63aa91b14 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -1,215 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm // +build !appengine // +build gc // +build !purego +// +build !noasm #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. 
- MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. 
- MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s index 662609589079..17901e080406 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -1,13 +1,17 @@ -// +build gc,!purego +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm #include "textflag.h" -// Register allocation. +// Registers: #define digest R1 -#define h R2 // Return value. -#define p R3 // Input pointer. -#define len R4 -#define nblocks R5 // len / 32. +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 #define prime1 R7 #define prime2 R8 #define prime3 R9 @@ -22,65 +26,55 @@ #define x3 R22 #define x4 R23 -#define round(acc, x) \ - MADD prime2, acc, x, acc \ - ROR $64-31, acc \ - MUL prime1, acc \ - -// x = round(0, x). -#define round0(x) \ - MUL prime2, x \ - ROR $64-31, x \ - MUL prime1, x \ - -#define mergeRound(x) \ - round0(x) \ - EOR x, h \ - MADD h, prime4, prime1, h \ - -// Update v[1-4] with 32-byte blocks. Assumes len >= 32. -#define blocksLoop() \ - LSR $5, len, nblocks \ - PCALIGN $16 \ -loop: \ - LDP.P 32(p), (x1, x2) \ - round(v1, x1) \ - LDP -16(p), (x3, x4) \ - round(v2, x2) \ - SUB $1, nblocks \ - round(v3, x3) \ - round(v4, x4) \ - CBNZ nblocks, loop \ - - -// The primes are repeated here to ensure that they're stored -// in a contiguous array, so we can load them with LDP. -DATA primes<> +0(SB)/8, $11400714785074694791 -DATA primes<> +8(SB)/8, $14029467366897019727 -DATA primes<>+16(SB)/8, $1609587929392839161 -DATA primes<>+24(SB)/8, $9650029242287828579 -DATA primes<>+32(SB)/8, $2870177450012600261 -GLOBL primes<>(SB), NOPTR+RODATA, $40 - +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 - LDP b_base+0(FP), (p, len) +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) - LDP primes<> +0(SB), (prime1, prime2) - LDP primes<>+16(SB), (prime3, prime4) - MOVD primes<>+32(SB), prime5 + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 - CMP $32, len - CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } - BLO afterLoop + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop ADD prime1, prime2, v1 MOVD prime2, v2 MOVD $0, v3 NEG prime1, v4 - blocksLoop() + blockLoop() ROR $64-1, v1, x1 ROR $64-7, v2, x2 @@ -90,100 +84,101 @@ TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 ADD x3, x4 ADD x2, x4, h - mergeRound(v1) - mergeRound(v2) - mergeRound(v3) - mergeRound(v4) + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) afterLoop: - ADD len, h + ADD n, h - TBZ $4, len, try8 + TBZ $4, n, try8 LDP.P 16(p), (x1, x2) round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. ROR $64-27, h EOR x1 @> 64-27, h, h MADD h, prime4, prime1, h round0(x2) ROR $64-27, h - EOR x2 @> 64-27, h + EOR x2 @> 64-27, h, h MADD h, prime4, prime1, h try8: - TBZ $3, len, try4 + TBZ $3, n, try4 MOVD.P 8(p), x1 round0(x1) ROR $64-27, h - EOR x1 @> 64-27, h + EOR x1 @> 64-27, h, h MADD h, prime4, prime1, h try4: - TBZ $2, len, try2 + TBZ $2, n, try2 MOVWU.P 4(p), x2 MUL prime1, x2 ROR $64-23, h - EOR x2 @> 64-23, h + EOR x2 @> 64-23, h, h MADD h, prime3, prime2, h try2: - TBZ $1, len, try1 + TBZ $1, n, try1 MOVHU.P 2(p), x3 AND $255, x3, x1 LSR $8, x3, x2 MUL prime5, x1 ROR $64-11, h - EOR x1 @> 64-11, h + EOR x1 @> 64-11, h, h MUL prime1, h MUL prime5, x2 ROR $64-11, h - EOR x2 @> 64-11, h + EOR x2 @> 64-11, h, h MUL prime1, h try1: - TBZ $0, len, end + TBZ $0, n, finalize MOVBU (p), x4 MUL prime5, x4 ROR $64-11, h - EOR x4 @> 64-11, h + EOR x4 @> 64-11, h, h MUL prime1, h -end: +finalize: EOR h >> 33, h - MUL prime2, h + MUL prime2, h EOR h >> 29, h - MUL prime3, h + MUL prime3, h EOR h >> 32, h MOVD h, ret+24(FP) RET - // func writeBlocks(d *Digest, b []byte) int -// -// Assumes len(b) >= 32. -TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40 - LDP primes<>(SB), (prime1, prime2) +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) // Load state. Assume v[1-4] are stored contiguously. MOVD d+0(FP), digest - LDP 0(digest), (v1, v2) + LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) - LDP b_base+8(FP), (p, len) + LDP b_base+8(FP), (p, n) - blocksLoop() + blockLoop() // Store updated state. 
- STP (v1, v2), 0(digest) + STP (v1, v2), 0(digest) STP (v3, v4), 16(digest) - BIC $31, len - MOVD len, ret+32(FP) + BIC $31, n + MOVD n, ret+32(FP) RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go index 9216e0a40c1a..d4221edf4fd6 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -1,8 +1,9 @@ -//go:build (amd64 || arm64) && !appengine && gc && !purego +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm // +build amd64 arm64 // +build !appengine // +build gc // +build !purego +// +build !noasm package xxhash @@ -12,4 +13,4 @@ package xxhash func Sum64(b []byte) uint64 //go:noescape -func writeBlocks(d *Digest, b []byte) int +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 2deb1ca75532..0be16cefc7f4 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -1,5 +1,5 @@ -//go:build (!amd64 && !arm64) || appengine || !gc || purego -// +build !amd64,!arm64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm package xxhash @@ -15,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -37,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index bc731e4cb69a..f833d1541f98 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -20,6 +20,10 @@ type seq struct { llCode, mlCode, ofCode uint8 } +type seqVals struct { + ll, ml, mo int +} + func (s seq) String() string { if s.offset <= 3 { if s.offset == 0 { @@ -61,16 +65,19 @@ type sequenceDecs struct { offsets sequenceDec matchLengths sequenceDec prevOffset [3]int - hist []byte dict []byte literals []byte out []byte + nSeqs int + br *bitReader + seqSize int windowSize int maxBits uint8 + maxSyncLen uint64 } // initialize all 3 decoders from the stream input. 
-func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error { +func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error { if err := s.litLengths.init(br); err != nil { return errors.New("litLengths:" + err.Error()) } @@ -80,8 +87,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] if err := s.matchLengths.init(br); err != nil { return errors.New("matchLengths:" + err.Error()) } - s.literals = literals - s.hist = hist.b + s.br = br s.prevOffset = hist.recentOffsets s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits s.windowSize = hist.windowSize @@ -93,12 +99,142 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] return nil } +func (s *sequenceDecs) freeDecoders() { + if f := s.litLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.litLengths.fse = nil + } + if f := s.offsets.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.offsets.fse = nil + } + if f := s.matchLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.matchLengths.fse = nil + } +} + +// execute will execute the decoded sequence with the provided history. +// The sequence must be evaluated before being sent. +func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + if len(s.dict) == 0 { + return s.executeSimple(seqs, hist) + } + + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. 
+ for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + // decode sequences from the stream with the provided history. -func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { +func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs startSize := len(s.out) // Grab full sizes tables, to avoid bounds checks. llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } for i := seqs - 1; i >= 0; i-- { if br.overread() { @@ -151,7 +287,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if temp == 0 { // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") + println("WARNING: temp was 0") temp = 1 } @@ -176,51 +312,52 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if ll > len(s.literals) { return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) } - size := ll + ml + len(s.out) + size := ll + ml + len(out) if size-startSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size", size) + if size-startSize == 424242 { + panic("here") + } + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } - if size > cap(s.out) { + if size > cap(out) { // Not enough size, which can happen under high volume block streaming conditions // but could be if destination slice is too small for sync operations. // over-allocating here can create a large amount of GC pressure so we try to keep // it as contained as possible - used := len(s.out) - startSize + used := len(out) - startSize addBytes := 256 + ll + ml + used>>2 // Clamp to max block size. if used+addBytes > maxBlockSize { addBytes = maxBlockSize - used } - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] } if ml > maxMatchLen { return fmt.Errorf("match len (%d) bigger than max allowed length", ml) } // Add literals - s.out = append(s.out, s.literals[:ll]...) + out = append(out, s.literals[:ll]...) s.literals = s.literals[ll:] - out := s.out if mo == 0 && ml > 0 { return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) } - if mo > len(s.out)+len(hist) || mo > s.windowSize { + if mo > len(out)+len(hist) || mo > s.windowSize { if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } // we may be in dictionary. 
- dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + dictO := len(s.dict) - (mo - (len(out) + len(hist))) if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } end := dictO + ml if end > len(s.dict) { out = append(out, s.dict[dictO:]...) - mo -= len(s.dict) - dictO ml -= len(s.dict) - dictO } else { out = append(out, s.dict[dictO:end]...) @@ -231,26 +368,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { // Copy from history. // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(s.out); v > 0 { + if v := mo - len(out); v > 0 { // v is the start position in history from end. - start := len(s.hist) - v + start := len(hist) - v if ml > v { // Some goes into current block. // Copy remainder of history - out = append(out, s.hist[start:]...) - mo -= v + out = append(out, hist[start:]...) ml -= v } else { - out = append(out, s.hist[start:start+ml]...) + out = append(out, hist[start:start+ml]...) ml = 0 } } // We must be in current buffer now if ml > 0 { - start := len(s.out) - mo - if ml <= len(s.out)-start { + start := len(out) - mo + if ml <= len(out)-start { // No overlap - out = append(out, s.out[start:start+ml]...) + out = append(out, out[start:start+ml]...) } else { // Overlapping copy // Extend destination slice and copy one byte at the time. @@ -264,7 +400,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } } - s.out = out if i == 0 { // This is the last sequence, so we shouldn't update state. break @@ -279,6 +414,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { ofState = ofTable[ofState.newState()&maxTableMask] } else { bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) llState = llTable[(llState.newState()+lowBits)&maxTableMask] @@ -291,19 +427,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } - // Add final literals - s.out = append(s.out, s.literals...) - return nil -} + // Check if space for literals + if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } -// update states, at least 27 bits must be available. -func (s *sequenceDecs) update(br *bitReader) { - // Max 8 bits - s.litLengths.state.next(br) - // Max 9 bits - s.matchLengths.state.next(br) - // Max 8 bits - s.offsets.state.next(br) + // Add final literals + s.out = append(out, s.literals...) + return br.close() } var bitMask [16]uint16 @@ -314,87 +445,6 @@ func init() { } } -// update states, at least 27 bits must be available. -func (s *sequenceDecs) updateAlt(br *bitReader) { - // Update all 3 states at once. Approx 20% faster. 
- a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - - nBits := a.nbBits() + b.nbBits() + c.nbBits() - if nBits == 0 { - s.litLengths.state.state = s.litLengths.state.dt[a.newState()] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] - s.offsets.state.state = s.offsets.state.dt[c.newState()] - return - } - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) - s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] - - lowBits = uint16(bits >> (c.nbBits() & 31)) - lowBits &= bitMask[b.nbBits()&15] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] - - lowBits = uint16(bits) & bitMask[c.nbBits()&15] - s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] -} - -// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. -func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - return - } - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - return - } - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - return -} - func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { // Final will not read from stream. ll, llB := llState.final() @@ -457,36 +507,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { s.prevOffset[0] = temp return temp } - -// mergeHistory will merge history. 
-func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { - for i := uint(0); i < 3; i++ { - var sNew, sHist *sequenceDec - switch i { - default: - // same as "case 0": - sNew = &s.litLengths - sHist = &hist.litLengths - case 1: - sNew = &s.offsets - sHist = &hist.offsets - case 2: - sNew = &s.matchLengths - sHist = &hist.matchLengths - } - if sNew.repeat { - if sHist.fse == nil { - return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) - } - continue - } - if sNew.fse == nil { - return nil, fmt.Errorf("sequence stream %d, no fse found", i) - } - if sHist.fse != nil && !sHist.fse.preDefined { - fseDecoderPool.Put(sHist.fse) - } - sHist.fse = sNew.fse - } - return hist, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 000000000000..191384adfd06 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,379 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. 
+ const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) 
+ if debugDecoder { + t += len(s.literals) + if t != len(s.out) { + panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) + } + } + + return true, nil +} + +// -------------------------------------------------------------------------------- + +type decodeAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + seqs []seqVals + litRemain int +} + +const noError = 0 + +// error reported when mo == 0 && ml > 0 +const errorMatchLenOfsMismatch = 1 + +// error reported when ml > maxMatchLen +const errorMatchLenTooBig = 2 + +// error reported when mo > available history or mo > s.windowSize +const errorMatchOffTooBig = 3 + +// error reported when the sum of literal lengths exeeceds the literal buffer size +const errorNotEnoughLiterals = 4 + +// error reported when capacity of `out` is too small +const errorNotEnoughSpace = 5 + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// decode sequences from the stream without the provided history. 
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 000000000000..b94993a07278 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4079 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + 
MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + 
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ 
sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ 
R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + 
+copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB 
(R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL 
$0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, 
-8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), 
R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 
144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) 
+ JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + 
// Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ 
ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + 
+copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ 
-8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + 
MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, 
ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 000000000000..ac2a80d29111 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index 967f29b3120e..29c15c8c4efe 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -18,36 +18,58 @@ const ZipMethodWinZip = 93 // See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT const ZipMethodPKWare = 20 -var zipReaderPool sync.Pool +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} -// newZipReader cannot be used since we would leak goroutines... -func newZipReader(r io.Reader) io.ReadCloser { - dec, ok := zipReaderPool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) + if err != nil { + panic(err) + } + dec = d } - dec = d + return &pooledZipReader{dec: dec, pool: pool} } - return &pooledZipReader{dec: dec} } type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - dec *Decoder + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder } func (r *pooledZipReader) Read(p []byte) (n int, err error) { r.mu.Lock() defer r.mu.Unlock() if r.dec == nil { - return 0, errors.New("Read after Close") + return 0, errors.New("read after close or EOF") } dec, err := r.dec.Read(p) - + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } return dec, err } @@ -57,7 +79,7 @@ func (r *pooledZipReader) Close() error { var err error if r.dec != nil { err = r.dec.Reset(nil) - zipReaderPool.Put(r.dec) + r.pool.Put(r.dec) r.dec = nil } return err @@ -111,12 +133,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { // ZipDecompressor returns a decompressor that can be registered with zip libraries. // See ZipCompressor for example. 
-func ZipDecompressor() func(r io.Reader) io.ReadCloser { - return func(r io.Reader) io.ReadCloser { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) - } - return d.IOReadCloser() - } +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) } diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index ef1d49a009cc..5ffa82f5ac1b 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -36,8 +36,8 @@ const forcePreDef = false // zstdMinMatch is the minimum zstd match length. const zstdMinMatch = 3 -// Reset the buffer offset when reaching this. -const bufferReset = math.MaxInt32 - MaxWindowSize +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 var ( // ErrReservedBlockType is returned when a reserved block type is found. @@ -52,6 +52,10 @@ var ( // Typically returned on invalid input. ErrBlockTooSmall = errors.New("block too small") + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. // Typically this indicates wrong or corrupted input. ErrMagicMismatch = errors.New("invalid input: magic number mismatch") @@ -68,13 +72,16 @@ var ( ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. ErrUnknownDictionary = errors.New("unknown dictionary") // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. // This is only returned if SingleSegment is specified on the frame. ErrFrameSizeExceeded = errors.New("frame size exceeded") + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + // ErrCRCMismatch is returned if CRC mismatches. ErrCRCMismatch = errors.New("CRC check failed") @@ -99,37 +106,25 @@ func printf(format string, a ...interface{}) { } } -// matchLenFast does matching, but will not match the last up to 7 bytes. -func matchLenFast(a, b []byte) int { - endI := len(a) & (math.MaxInt32 - 7) - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + bits.TrailingZeros64(diff)>>3 - } - } - return endI -} - -// matchLen returns the maximum length. +// matchLen returns the maximum common prefix length of a and b. // a must be the shortest of the two. -// The function also returns whether all bytes matched. 
-func matchLen(a, b []byte) int { - b = b[:len(a)] - for i := 0; i < len(a)-7; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + (bits.TrailingZeros64(diff) >> 3) +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 } + n += 8 } - checked := (len(a) >> 3) << 3 - a = a[checked:] - b = b[checked:] for i := range a { if a[i] != b[i] { - return i + checked + break } + n++ } - return len(a) + checked + return n + } func load3232(b []byte, i int32) uint32 { @@ -140,10 +135,6 @@ func load6432(b []byte, i int32) uint64 { return binary.LittleEndian.Uint64(b[i:]) } -func load64(b []byte, i int) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - type byter interface { Bytes() []byte Len() int diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index a42e9d65ad79..b5f5e2613293 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -401,6 +401,7 @@ func (r *Lexer) scanToken() { // consume resets the current token to allow scanning the next one. func (r *Lexer) consume() { r.token.kind = tokenUndef + r.token.byteValueCloned = false r.token.delimValue = 0 } @@ -528,6 +529,7 @@ func (r *Lexer) Skip() { func (r *Lexer) SkipRecursive() { r.scanToken() var start, end byte + startPos := r.start switch r.token.delimValue { case '{': @@ -553,6 +555,14 @@ func (r *Lexer) SkipRecursive() { level-- if level == 0 { r.pos += i + 1 + if !json.Valid(r.Data[startPos:r.pos]) { + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "skipped array/object json value is invalid", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } + } return } case c == '\\' && inQuotes: @@ -702,6 +712,10 @@ func (r *Lexer) Bytes() []byte { r.errInvalidToken("string") return nil } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return nil + } ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) if err != nil { diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml deleted file mode 100644 index 7942c565ce62..000000000000 --- a/vendor/github.com/mattn/go-colorable/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -sudo: false -go: - - 1.13.x - - tip - -before_install: - - go get -t -v ./... 
- -script: - - ./go.test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) - diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md index e055952b667c..ca0483711c93 100644 --- a/vendor/github.com/mattn/go-colorable/README.md +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -1,6 +1,6 @@ # go-colorable -[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) +[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) [![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) [![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) [![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go index 1f7806fe16bb..416d1bbbf836 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine package colorable diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go index 08cbd1e0fa25..766d94603ac1 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -1,5 +1,5 @@ -// +build !windows -// +build !appengine +//go:build !windows && !appengine +// +build !windows,!appengine package colorable diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index 41215d7fc4ff..1846ad5ab41c 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -1,5 +1,5 @@ -// +build windows -// +build !appengine +//go:build windows && !appengine +// +build windows,!appengine package colorable @@ -452,18 +452,22 @@ func (w *Writer) Write(data []byte) (n int, err error) { } else { er = bytes.NewReader(data) } - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } c2, err := er.ReadByte() if err != nil { break loop diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go index 2dcb09aab889..05d6f74bf6bb 100644 --- a/vendor/github.com/mattn/go-colorable/noncolorable.go +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -18,21 +18,22 @@ func NewNonColorable(w io.Writer) io.Writer { // Write writes data on console func (w *NonColorable) Write(data []byte) (n int, err error) { er := bytes.NewReader(data) - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - _, err = w.out.Write(bw[:]) - if err != nil { - break loop - } + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } 
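The ED25519 entry now maps to 0 because Ed25519 hashes internally; the unexported identityHash (added in hash.go further down) only buffers the sign data. A small sketch of the standard-library behaviour this relies on, where crypto.Hash(0) tells a crypto.Signer the message has not been pre-hashed; the message bytes are a stand-in for the real RRSIG sign data:

package main

import (
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
	"log"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	msg := []byte("RRSIG rdata || canonical RR wire format") // stand-in for the real sign data

	// crypto.Hash(0) signals "not pre-hashed"; ed25519 hashes internally,
	// which is why AlgorithmToHash maps ED25519 to 0 and identityHash just buffers.
	sig, err := priv.Sign(rand.Reader, msg, crypto.Hash(0))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}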
c2, err := er.ReadByte() if err != nil { break loop @@ -41,7 +42,6 @@ loop: continue } - var buf bytes.Buffer for { c, err := er.ReadByte() if err != nil { @@ -50,7 +50,6 @@ loop: if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { break } - buf.Write([]byte(string(c))) } } diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go index 31bf5759dd2c..9aa6585300fb 100644 --- a/vendor/github.com/miekg/dns/client.go +++ b/vendor/github.com/miekg/dns/client.go @@ -24,7 +24,7 @@ func isPacketConn(c net.Conn) bool { } if ua, ok := c.LocalAddr().(*net.UnixAddr); ok { - return ua.Net == "unixgram" + return ua.Net == "unixgram" || ua.Net == "unixpacket" } return true @@ -280,7 +280,7 @@ func (co *Conn) ReadMsg() (*Msg, error) { } if t := m.IsTsig(); t != nil { // Need to work on the original message p, as that was used to calculate the tsig. - err = tsigVerifyProvider(p, co.tsigProvider(), co.tsigRequestMAC, false) + err = TsigVerifyWithProvider(p, co.tsigProvider(), co.tsigRequestMAC, false) } return m, err } @@ -358,7 +358,7 @@ func (co *Conn) WriteMsg(m *Msg) (err error) { var out []byte if t := m.IsTsig(); t != nil { // Set tsigRequestMAC for the next read, although only used in zone transfers. - out, co.tsigRequestMAC, err = tsigGenerateProvider(m, co.tsigProvider(), co.tsigRequestMAC, false) + out, co.tsigRequestMAC, err = TsigGenerateWithProvider(m, co.tsigProvider(), co.tsigRequestMAC, false) } else { out, err = m.Pack() } diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go index d47b0b1f2be2..f2cdbf430297 100644 --- a/vendor/github.com/miekg/dns/defaults.go +++ b/vendor/github.com/miekg/dns/defaults.go @@ -218,6 +218,11 @@ func IsDomainName(s string) (labels int, ok bool) { wasDot = false case '.': + if i == 0 && len(s) > 1 { + // leading dots are not legal except for the root zone + return labels, false + } + if wasDot { // two dots back to back is not legal return labels, false diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go index 8539aae6c70f..ea01aa81fcac 100644 --- a/vendor/github.com/miekg/dns/dnssec.go +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -65,6 +65,9 @@ var AlgorithmToString = map[uint8]string{ } // AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's. +// For newer algorithm that do their own hashing (i.e. ED25519) the returned value +// is 0, implying no (external) hashing should occur. The non-exported identityHash is then +// used. var AlgorithmToHash = map[uint8]crypto.Hash{ RSAMD5: crypto.MD5, // Deprecated in RFC 6725 DSA: crypto.SHA1, @@ -74,7 +77,7 @@ var AlgorithmToHash = map[uint8]crypto.Hash{ ECDSAP256SHA256: crypto.SHA256, ECDSAP384SHA384: crypto.SHA384, RSASHA512: crypto.SHA512, - ED25519: crypto.Hash(0), + ED25519: 0, } // DNSSEC hashing algorithm codes. @@ -137,12 +140,12 @@ func (k *DNSKEY) KeyTag() uint16 { var keytag int switch k.Algorithm { case RSAMD5: - // Look at the bottom two bytes of the modules, which the last - // item in the pubkey. // This algorithm has been deprecated, but keep this key-tag calculation. + // Look at the bottom two bytes of the modules, which the last item in the pubkey. + // See https://www.rfc-editor.org/errata/eid193 . 
modulus, _ := fromBase64([]byte(k.PublicKey)) if len(modulus) > 1 { - x := binary.BigEndian.Uint16(modulus[len(modulus)-2:]) + x := binary.BigEndian.Uint16(modulus[len(modulus)-3:]) keytag = int(x) } default: @@ -296,35 +299,20 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { return err } - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return ErrAlg + h, cryptohash, err := hashFromAlgorithm(rr.Algorithm) + if err != nil { + return err } switch rr.Algorithm { - case ED25519: - // ed25519 signs the raw message and performs hashing internally. - // All other supported signature schemes operate over the pre-hashed - // message, and thus ed25519 must be handled separately here. - // - // The raw message is passed directly into sign and crypto.Hash(0) is - // used to signal to the crypto.Signer that the data has not been hashed. - signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm) - if err != nil { - return err - } - - rr.Signature = toBase64(signature) - return nil case RSAMD5, DSA, DSANSEC3SHA1: // See RFC 6944. return ErrAlg default: - h := hash.New() h.Write(signdata) h.Write(wire) - signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) + signature, err := sign(k, h.Sum(nil), cryptohash, rr.Algorithm) if err != nil { return err } @@ -341,7 +329,7 @@ func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, } switch alg { - case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, ED25519: return signature, nil case ECDSAP256SHA256, ECDSAP384SHA384: ecdsaSignature := &struct { @@ -362,8 +350,6 @@ func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, signature := intToBytes(ecdsaSignature.R, intlen) signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) return signature, nil - case ED25519: - return signature, nil default: return nil, ErrAlg } @@ -437,9 +423,9 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { // remove the domain name and assume its ours? } - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return ErrAlg + h, cryptohash, err := hashFromAlgorithm(rr.Algorithm) + if err != nil { + return err } switch rr.Algorithm { @@ -450,10 +436,9 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { return ErrKey } - h := hash.New() h.Write(signeddata) h.Write(wire) - return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf) + return rsa.VerifyPKCS1v15(pubkey, cryptohash, h.Sum(nil), sigbuf) case ECDSAP256SHA256, ECDSAP384SHA384: pubkey := k.publicKeyECDSA() @@ -465,7 +450,6 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2]) s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:]) - h := hash.New() h.Write(signeddata) h.Write(wire) if ecdsa.Verify(pubkey, h.Sum(nil), r, s) { diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index 862e53439932..14568c2e969d 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -584,14 +584,17 @@ func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} } type EDNS0_EXPIRE struct { Code uint16 // Always EDNS0EXPIRE Expire uint32 + Empty bool // Empty is used to signal an empty Expire option in a backwards compatible way, it's not used on the wire. } // Option implements the EDNS0 interface. 
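The new Empty flag lets a query carry the zero-length EXPIRE option from RFC 7314 Section 2 without it being confused with an explicit Expire of 0. A short sketch of attaching such an option to a query; the queried zone name is illustrative:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeSOA)

	o := new(dns.OPT)
	o.Hdr.Name = "."
	o.Hdr.Rrtype = dns.TypeOPT

	e := new(dns.EDNS0_EXPIRE)
	e.Code = dns.EDNS0EXPIRE
	e.Empty = true // packs as a zero-length EXPIRE option in the query
	o.Option = append(o.Option, e)
	m.Extra = append(m.Extra, o)

	fmt.Println(m.String())
	// In a response, Empty == false means Expire carries the zone's expiry timer.
}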
func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } -func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } -func (e *EDNS0_EXPIRE) copy() EDNS0 { return &EDNS0_EXPIRE{e.Code, e.Expire} } +func (e *EDNS0_EXPIRE) copy() EDNS0 { return &EDNS0_EXPIRE{e.Code, e.Expire, e.Empty} } func (e *EDNS0_EXPIRE) pack() ([]byte, error) { + if e.Empty { + return []byte{}, nil + } b := make([]byte, 4) binary.BigEndian.PutUint32(b, e.Expire) return b, nil @@ -600,15 +603,24 @@ func (e *EDNS0_EXPIRE) pack() ([]byte, error) { func (e *EDNS0_EXPIRE) unpack(b []byte) error { if len(b) == 0 { // zero-length EXPIRE query, see RFC 7314 Section 2 + e.Empty = true return nil } if len(b) < 4 { return ErrBuf } e.Expire = binary.BigEndian.Uint32(b) + e.Empty = false return nil } +func (e *EDNS0_EXPIRE) String() (s string) { + if e.Empty { + return "" + } + return strconv.FormatUint(uint64(e.Expire), 10) +} + // The EDNS0_LOCAL option is used for local/experimental purposes. The option // code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] // (RFC6891), although any unassigned code can actually be used. The content of diff --git a/vendor/github.com/miekg/dns/hash.go b/vendor/github.com/miekg/dns/hash.go new file mode 100644 index 000000000000..7d4183e02757 --- /dev/null +++ b/vendor/github.com/miekg/dns/hash.go @@ -0,0 +1,31 @@ +package dns + +import ( + "bytes" + "crypto" + "hash" +) + +// identityHash will not hash, it only buffers the data written into it and returns it as-is. +type identityHash struct { + b *bytes.Buffer +} + +// Implement the hash.Hash interface. + +func (i identityHash) Write(b []byte) (int, error) { return i.b.Write(b) } +func (i identityHash) Size() int { return i.b.Len() } +func (i identityHash) BlockSize() int { return 1024 } +func (i identityHash) Reset() { i.b.Reset() } +func (i identityHash) Sum(b []byte) []byte { return append(b, i.b.Bytes()...) } + +func hashFromAlgorithm(alg uint8) (hash.Hash, crypto.Hash, error) { + hashnumber, ok := AlgorithmToHash[alg] + if !ok { + return nil, 0, ErrAlg + } + if hashnumber == 0 { + return identityHash{b: &bytes.Buffer{}}, hashnumber, nil + } + return hashnumber.New(), hashnumber, nil +} diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index 60a2e8c3d649..89ebb64abc36 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -265,6 +265,11 @@ loop: wasDot = false case '.': + if i == 0 && len(s) > 1 { + // leading dots are not legal except for the root zone + return len(msg), ErrRdata + } + if wasDot { // two dots back to back is not legal return len(msg), ErrRdata diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go index 10754c8b85a8..ea2035cd2467 100644 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -476,7 +476,7 @@ func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) { length, window, lastwindow := 0, 0, -1 for off < len(msg) { if off+2 > len(msg) { - return nsec, len(msg), &Error{err: "overflow unpacking nsecx"} + return nsec, len(msg), &Error{err: "overflow unpacking NSEC(3)"} } window = int(msg[off]) length = int(msg[off+1]) @@ -484,17 +484,17 @@ func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) { if window <= lastwindow { // RFC 4034: Blocks are present in the NSEC RR RDATA in // increasing numerical order. 
- return nsec, len(msg), &Error{err: "out of order NSEC block"} + return nsec, len(msg), &Error{err: "out of order NSEC(3) block in type bitmap"} } if length == 0 { // RFC 4034: Blocks with no types present MUST NOT be included. - return nsec, len(msg), &Error{err: "empty NSEC block"} + return nsec, len(msg), &Error{err: "empty NSEC(3) block in type bitmap"} } if length > 32 { - return nsec, len(msg), &Error{err: "NSEC block too long"} + return nsec, len(msg), &Error{err: "NSEC(3) block too long in type bitmap"} } if off+length > len(msg) { - return nsec, len(msg), &Error{err: "overflowing NSEC block"} + return nsec, len(msg), &Error{err: "overflowing NSEC(3) block in type bitmap"} } // Walk the bytes in the window and extract the type bits @@ -558,6 +558,16 @@ func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { if len(bitmap) == 0 { return off, nil } + if off > len(msg) { + return off, &Error{err: "overflow packing nsec"} + } + toZero := msg[off:] + if maxLen := typeBitMapLen(bitmap); maxLen < len(toZero) { + toZero = toZero[:maxLen] + } + for i := range toZero { + toZero[i] = 0 + } var lastwindow, lastlength uint16 for _, t := range bitmap { window := t / 256 diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index b962e6f35c21..4e5a9aa8a16c 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -646,7 +646,7 @@ func (srv *Server) serveDNS(m []byte, w *response) { w.tsigStatus = nil if w.tsigProvider != nil { if t := req.IsTsig(); t != nil { - w.tsigStatus = tsigVerifyProvider(m, w.tsigProvider, "", false) + w.tsigStatus = TsigVerifyWithProvider(m, w.tsigProvider, "", false) w.tsigTimersOnly = false w.tsigRequestMAC = t.MAC } @@ -728,7 +728,7 @@ func (w *response) WriteMsg(m *Msg) (err error) { var data []byte if w.tsigProvider != nil { // if no provider, dont check for the tsig (which is a longer check) if t := m.IsTsig(); t != nil { - data, w.tsigRequestMAC, err = tsigGenerateProvider(m, w.tsigProvider, w.tsigRequestMAC, w.tsigTimersOnly) + data, w.tsigRequestMAC, err = TsigGenerateWithProvider(m, w.tsigProvider, w.tsigRequestMAC, w.tsigTimersOnly) if err != nil { return err } diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go index e781c9bb6c28..2c4b103521c1 100644 --- a/vendor/github.com/miekg/dns/sig0.go +++ b/vendor/github.com/miekg/dns/sig0.go @@ -3,6 +3,7 @@ package dns import ( "crypto" "crypto/ecdsa" + "crypto/ed25519" "crypto/rsa" "encoding/binary" "math/big" @@ -38,18 +39,17 @@ func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { } buf = buf[:off:cap(buf)] - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return nil, ErrAlg + h, cryptohash, err := hashFromAlgorithm(rr.Algorithm) + if err != nil { + return nil, err } - hasher := hash.New() // Write SIG rdata - hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) + h.Write(buf[len(mbuf)+1+2+2+4+2:]) // Write message - hasher.Write(buf[:len(mbuf)]) + h.Write(buf[:len(mbuf)]) - signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm) + signature, err := sign(k, h.Sum(nil), cryptohash, rr.Algorithm) if err != nil { return nil, err } @@ -82,20 +82,10 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error { return ErrKey } - var hash crypto.Hash - switch rr.Algorithm { - case RSASHA1: - hash = crypto.SHA1 - case RSASHA256, ECDSAP256SHA256: - hash = crypto.SHA256 - case ECDSAP384SHA384: - hash = crypto.SHA384 - case RSASHA512: - hash = crypto.SHA512 - default: - return ErrAlg - } - hasher := 
hash.New() + h, cryptohash, err := hashFromAlgorithm(rr.Algorithm) + if err != nil { + return err + } buflen := len(buf) qdc := binary.BigEndian.Uint16(buf[4:]) @@ -103,7 +93,6 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error { auc := binary.BigEndian.Uint16(buf[8:]) adc := binary.BigEndian.Uint16(buf[10:]) offset := headerSize - var err error for i := uint16(0); i < qdc && offset < buflen; i++ { _, offset, err = UnpackDomainName(buf, offset) if err != nil { @@ -166,21 +155,21 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error { return &Error{err: "signer name doesn't match key name"} } sigend := offset - hasher.Write(buf[sigstart:sigend]) - hasher.Write(buf[:10]) - hasher.Write([]byte{ + h.Write(buf[sigstart:sigend]) + h.Write(buf[:10]) + h.Write([]byte{ byte((adc - 1) << 8), byte(adc - 1), }) - hasher.Write(buf[12:bodyend]) + h.Write(buf[12:bodyend]) - hashed := hasher.Sum(nil) + hashed := h.Sum(nil) sig := buf[sigend:] switch k.Algorithm { case RSASHA1, RSASHA256, RSASHA512: pk := k.publicKeyRSA() if pk != nil { - return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) + return rsa.VerifyPKCS1v15(pk, cryptohash, hashed, sig) } case ECDSAP256SHA256, ECDSAP384SHA384: pk := k.publicKeyECDSA() @@ -192,6 +181,14 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error { } return ErrSig } + case ED25519: + pk := k.publicKeyED25519() + if pk != nil { + if ed25519.Verify(pk, hashed, sig) { + return nil + } + return ErrSig + } } return ErrKeyAlg } diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go index 3344253c2b3e..ea58710da963 100644 --- a/vendor/github.com/miekg/dns/svcb.go +++ b/vendor/github.com/miekg/dns/svcb.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" "errors" + "fmt" "net" "sort" "strconv" @@ -13,16 +14,18 @@ import ( // SVCBKey is the type of the keys used in the SVCB RR. type SVCBKey uint16 -// Keys defined in draft-ietf-dnsop-svcb-https-01 Section 12.3.2. +// Keys defined in draft-ietf-dnsop-svcb-https-08 Section 14.3.2. const ( - SVCB_MANDATORY SVCBKey = 0 - SVCB_ALPN SVCBKey = 1 - SVCB_NO_DEFAULT_ALPN SVCBKey = 2 - SVCB_PORT SVCBKey = 3 - SVCB_IPV4HINT SVCBKey = 4 - SVCB_ECHCONFIG SVCBKey = 5 - SVCB_IPV6HINT SVCBKey = 6 - svcb_RESERVED SVCBKey = 65535 + SVCB_MANDATORY SVCBKey = iota + SVCB_ALPN + SVCB_NO_DEFAULT_ALPN + SVCB_PORT + SVCB_IPV4HINT + SVCB_ECHCONFIG + SVCB_IPV6HINT + SVCB_DOHPATH // draft-ietf-add-svcb-dns-02 Section 9 + + svcb_RESERVED SVCBKey = 65535 ) var svcbKeyToStringMap = map[SVCBKey]string{ @@ -31,8 +34,9 @@ var svcbKeyToStringMap = map[SVCBKey]string{ SVCB_NO_DEFAULT_ALPN: "no-default-alpn", SVCB_PORT: "port", SVCB_IPV4HINT: "ipv4hint", - SVCB_ECHCONFIG: "echconfig", + SVCB_ECHCONFIG: "ech", SVCB_IPV6HINT: "ipv6hint", + SVCB_DOHPATH: "dohpath", } var svcbStringToKeyMap = reverseSVCBKeyMap(svcbKeyToStringMap) @@ -167,10 +171,14 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError { } l, _ = c.Next() } + + // "In AliasMode, records SHOULD NOT include any SvcParams, and recipients MUST + // ignore any SvcParams that are present." + // However, we don't check rr.Priority == 0 && len(xs) > 0 here + // It is the responsibility of the user of the library to check this. + // This is to encourage the fixing of the source of this error. 
+ rr.Value = xs - if rr.Priority == 0 && len(xs) > 0 { - return &ParseError{l.token, "SVCB aliasform can't have values", l} - } return nil } @@ -191,6 +199,8 @@ func makeSVCBKeyValue(key SVCBKey) SVCBKeyValue { return new(SVCBECHConfig) case SVCB_IPV6HINT: return new(SVCBIPv6Hint) + case SVCB_DOHPATH: + return new(SVCBDoHPath) case svcb_RESERVED: return nil default: @@ -200,16 +210,24 @@ func makeSVCBKeyValue(key SVCBKey) SVCBKeyValue { } } -// SVCB RR. See RFC xxxx (https://tools.ietf.org/html/draft-ietf-dnsop-svcb-https-01). +// SVCB RR. See RFC xxxx (https://tools.ietf.org/html/draft-ietf-dnsop-svcb-https-08). +// +// NOTE: The HTTPS/SVCB RFCs are in the draft stage. +// The API, including constants and types related to SVCBKeyValues, may +// change in future versions in accordance with the latest drafts. type SVCB struct { Hdr RR_Header - Priority uint16 + Priority uint16 // If zero, Value must be empty or discarded by the user of this library Target string `dns:"domain-name"` - Value []SVCBKeyValue `dns:"pairs"` // Value must be empty if Priority is zero. + Value []SVCBKeyValue `dns:"pairs"` } // HTTPS RR. Everything valid for SVCB applies to HTTPS as well. // Except that the HTTPS record is intended for use with the HTTP and HTTPS protocols. +// +// NOTE: The HTTPS/SVCB RFCs are in the draft stage. +// The API, including constants and types related to SVCBKeyValues, may +// change in future versions in accordance with the latest drafts. type HTTPS struct { SVCB } @@ -235,15 +253,29 @@ type SVCBKeyValue interface { } // SVCBMandatory pair adds to required keys that must be interpreted for the RR -// to be functional. +// to be functional. If ignored, the whole RRSet must be ignored. +// "port" and "no-default-alpn" are mandatory by default if present, +// so they shouldn't be included here. +// +// It is incumbent upon the user of this library to reject the RRSet if +// or avoid constructing such an RRSet that: +// - "mandatory" is included as one of the keys of mandatory +// - no key is listed multiple times in mandatory +// - all keys listed in mandatory are present +// - escape sequences are not used in mandatory +// - mandatory, when present, lists at least one key +// // Basic use pattern for creating a mandatory option: // // s := &dns.SVCB{Hdr: dns.RR_Header{Name: ".", Rrtype: dns.TypeSVCB, Class: dns.ClassINET}} // e := new(dns.SVCBMandatory) -// e.Code = []uint16{65403} +// e.Code = []uint16{dns.SVCB_ALPN} // s.Value = append(s.Value, e) +// t := new(dns.SVCBAlpn) +// t.Alpn = []string{"xmpp-client"} +// s.Value = append(s.Value, t) type SVCBMandatory struct { - Code []SVCBKey // Must not include mandatory + Code []SVCBKey } func (*SVCBMandatory) Key() SVCBKey { return SVCB_MANDATORY } @@ -302,7 +334,8 @@ func (s *SVCBMandatory) copy() SVCBKeyValue { } // SVCBAlpn pair is used to list supported connection protocols. -// Protocol ids can be found at: +// The user of this library must ensure that at least one protocol is listed when alpn is present. 
+// Protocol IDs can be found at: // https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids // Basic use pattern for creating an alpn option: // @@ -310,13 +343,57 @@ func (s *SVCBMandatory) copy() SVCBKeyValue { // h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET} // e := new(dns.SVCBAlpn) // e.Alpn = []string{"h2", "http/1.1"} -// h.Value = append(o.Value, e) +// h.Value = append(h.Value, e) type SVCBAlpn struct { Alpn []string } -func (*SVCBAlpn) Key() SVCBKey { return SVCB_ALPN } -func (s *SVCBAlpn) String() string { return strings.Join(s.Alpn, ",") } +func (*SVCBAlpn) Key() SVCBKey { return SVCB_ALPN } + +func (s *SVCBAlpn) String() string { + // An ALPN value is a comma-separated list of values, each of which can be + // an arbitrary binary value. In order to allow parsing, the comma and + // backslash characters are themselves excaped. + // + // However, this escaping is done in addition to the normal escaping which + // happens in zone files, meaning that these values must be + // double-escaped. This looks terrible, so if you see a never-ending + // sequence of backslash in a zone file this may be why. + // + // https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-08#appendix-A.1 + var str strings.Builder + for i, alpn := range s.Alpn { + // 4*len(alpn) is the worst case where we escape every character in the alpn as \123, plus 1 byte for the ',' separating the alpn from others + str.Grow(4*len(alpn) + 1) + if i > 0 { + str.WriteByte(',') + } + for j := 0; j < len(alpn); j++ { + e := alpn[j] + if ' ' > e || e > '~' { + str.WriteString(escapeByte(e)) + continue + } + switch e { + // We escape a few characters which may confuse humans or parsers. + case '"', ';', ' ': + str.WriteByte('\\') + str.WriteByte(e) + // The comma and backslash characters themselves must be + // doubly-escaped. We use `\\` for the first backslash and + // the escaped numeric value for the other value. We especially + // don't want a comma in the output. + case ',': + str.WriteString(`\\\044`) + case '\\': + str.WriteString(`\\\092`) + default: + str.WriteByte(e) + } + } + } + return str.String() +} func (s *SVCBAlpn) pack() ([]byte, error) { // Liberally estimate the size of an alpn as 10 octets @@ -351,7 +428,47 @@ func (s *SVCBAlpn) unpack(b []byte) error { } func (s *SVCBAlpn) parse(b string) error { - s.Alpn = strings.Split(b, ",") + if len(b) == 0 { + s.Alpn = []string{} + return nil + } + + alpn := []string{} + a := []byte{} + for p := 0; p < len(b); { + c, q := nextByte(b, p) + if q == 0 { + return errors.New("dns: svcbalpn: unterminated escape") + } + p += q + // If we find a comma, we have finished reading an alpn. + if c == ',' { + if len(a) == 0 { + return errors.New("dns: svcbalpn: empty protocol identifier") + } + alpn = append(alpn, string(a)) + a = []byte{} + continue + } + // If it's a backslash, we need to handle a comma-separated list. + if c == '\\' { + dc, dq := nextByte(b, p) + if dq == 0 { + return errors.New("dns: svcbalpn: unterminated escape decoding comma-separated list") + } + if dc != '\\' && dc != ',' { + return errors.New("dns: svcbalpn: bad escaped character decoding comma-separated list") + } + p += dq + c = dc + } + a = append(a, c) + } + // Add the final alpn. 
+ if len(a) == 0 { + return errors.New("dns: svcbalpn: last protocol identifier empty") + } + s.Alpn = append(alpn, string(a)) return nil } @@ -370,9 +487,13 @@ func (s *SVCBAlpn) copy() SVCBKeyValue { } // SVCBNoDefaultAlpn pair signifies no support for default connection protocols. +// Should be used in conjunction with alpn. // Basic use pattern for creating a no-default-alpn option: // // s := &dns.SVCB{Hdr: dns.RR_Header{Name: ".", Rrtype: dns.TypeSVCB, Class: dns.ClassINET}} +// t := new(dns.SVCBAlpn) +// t.Alpn = []string{"xmpp-client"} +// s.Value = append(s.Value, t) // e := new(dns.SVCBNoDefaultAlpn) // s.Value = append(s.Value, e) type SVCBNoDefaultAlpn struct{} @@ -385,14 +506,14 @@ func (*SVCBNoDefaultAlpn) len() int { return 0 } func (*SVCBNoDefaultAlpn) unpack(b []byte) error { if len(b) != 0 { - return errors.New("dns: svcbnodefaultalpn: no_default_alpn must have no value") + return errors.New("dns: svcbnodefaultalpn: no-default-alpn must have no value") } return nil } func (*SVCBNoDefaultAlpn) parse(b string) error { if b != "" { - return errors.New("dns: svcbnodefaultalpn: no_default_alpn must have no value") + return errors.New("dns: svcbnodefaultalpn: no-default-alpn must have no value") } return nil } @@ -523,7 +644,7 @@ func (s *SVCBIPv4Hint) copy() SVCBKeyValue { } // SVCBECHConfig pair contains the ECHConfig structure defined in draft-ietf-tls-esni [RFC xxxx]. -// Basic use pattern for creating an echconfig option: +// Basic use pattern for creating an ech option: // // h := new(dns.HTTPS) // h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET} @@ -531,7 +652,7 @@ func (s *SVCBIPv4Hint) copy() SVCBKeyValue { // e.ECH = []byte{0xfe, 0x08, ...} // h.Value = append(h.Value, e) type SVCBECHConfig struct { - ECH []byte + ECH []byte // Specifically ECHConfigList including the redundant length prefix } func (*SVCBECHConfig) Key() SVCBKey { return SVCB_ECHCONFIG } @@ -555,7 +676,7 @@ func (s *SVCBECHConfig) unpack(b []byte) error { func (s *SVCBECHConfig) parse(b string) error { x, err := fromBase64([]byte(b)) if err != nil { - return errors.New("dns: svcbechconfig: bad base64 echconfig") + return errors.New("dns: svcbech: bad base64 ech") } s.ECH = x return nil @@ -618,9 +739,6 @@ func (s *SVCBIPv6Hint) String() string { } func (s *SVCBIPv6Hint) parse(b string) error { - if strings.Contains(b, ".") { - return errors.New("dns: svcbipv6hint: expected ipv6, got ipv4") - } str := strings.Split(b, ",") dst := make([]net.IP, len(str)) for i, e := range str { @@ -628,6 +746,9 @@ func (s *SVCBIPv6Hint) parse(b string) error { if ip == nil { return errors.New("dns: svcbipv6hint: bad ip") } + if ip.To4() != nil { + return errors.New("dns: svcbipv6hint: expected ipv6, got ipv4-mapped-ipv6") + } dst[i] = ip } s.Hint = dst @@ -645,6 +766,54 @@ func (s *SVCBIPv6Hint) copy() SVCBKeyValue { } } +// SVCBDoHPath pair is used to indicate the URI template that the +// clients may use to construct a DNS over HTTPS URI. +// +// See RFC xxxx (https://datatracker.ietf.org/doc/html/draft-ietf-add-svcb-dns-02) +// and RFC yyyy (https://datatracker.ietf.org/doc/html/draft-ietf-add-ddr-06). 
+// +// A basic example of using the dohpath option together with the alpn +// option to indicate support for DNS over HTTPS on a certain path: +// +// s := new(dns.SVCB) +// s.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeSVCB, Class: dns.ClassINET} +// e := new(dns.SVCBAlpn) +// e.Alpn = []string{"h2", "h3"} +// p := new(dns.SVCBDoHPath) +// p.Template = "/dns-query{?dns}" +// s.Value = append(s.Value, e, p) +// +// The parsing currently doesn't validate that Template is a valid +// RFC 6570 URI template. +type SVCBDoHPath struct { + Template string +} + +func (*SVCBDoHPath) Key() SVCBKey { return SVCB_DOHPATH } +func (s *SVCBDoHPath) String() string { return svcbParamToStr([]byte(s.Template)) } +func (s *SVCBDoHPath) len() int { return len(s.Template) } +func (s *SVCBDoHPath) pack() ([]byte, error) { return []byte(s.Template), nil } + +func (s *SVCBDoHPath) unpack(b []byte) error { + s.Template = string(b) + return nil +} + +func (s *SVCBDoHPath) parse(b string) error { + template, err := svcbParseParam(b) + if err != nil { + return fmt.Errorf("dns: svcbdohpath: %w", err) + } + s.Template = string(template) + return nil +} + +func (s *SVCBDoHPath) copy() SVCBKeyValue { + return &SVCBDoHPath{ + Template: s.Template, + } +} + // SVCBLocal pair is intended for experimental/private use. The key is recommended // to be in the range [SVCB_PRIVATE_LOWER, SVCB_PRIVATE_UPPER]. // Basic use pattern for creating a keyNNNNN option: @@ -661,6 +830,7 @@ type SVCBLocal struct { } func (s *SVCBLocal) Key() SVCBKey { return s.KeyCode } +func (s *SVCBLocal) String() string { return svcbParamToStr(s.Data) } func (s *SVCBLocal) pack() ([]byte, error) { return append([]byte(nil), s.Data...), nil } func (s *SVCBLocal) len() int { return len(s.Data) } @@ -669,50 +839,10 @@ func (s *SVCBLocal) unpack(b []byte) error { return nil } -func (s *SVCBLocal) String() string { - var str strings.Builder - str.Grow(4 * len(s.Data)) - for _, e := range s.Data { - if ' ' <= e && e <= '~' { - switch e { - case '"', ';', ' ', '\\': - str.WriteByte('\\') - str.WriteByte(e) - default: - str.WriteByte(e) - } - } else { - str.WriteString(escapeByte(e)) - } - } - return str.String() -} - func (s *SVCBLocal) parse(b string) error { - data := make([]byte, 0, len(b)) - for i := 0; i < len(b); { - if b[i] != '\\' { - data = append(data, b[i]) - i++ - continue - } - if i+1 == len(b) { - return errors.New("dns: svcblocal: svcb private/experimental key escape unterminated") - } - if isDigit(b[i+1]) { - if i+3 < len(b) && isDigit(b[i+2]) && isDigit(b[i+3]) { - a, err := strconv.ParseUint(b[i+1:i+4], 10, 8) - if err == nil { - i += 4 - data = append(data, byte(a)) - continue - } - } - return errors.New("dns: svcblocal: svcb private/experimental key bad escaped octet") - } else { - data = append(data, b[i+1]) - i += 2 - } + data, err := svcbParseParam(b) + if err != nil { + return fmt.Errorf("dns: svcblocal: svcb private/experimental key %w", err) } s.Data = data return nil @@ -753,3 +883,53 @@ func areSVCBPairArraysEqual(a []SVCBKeyValue, b []SVCBKeyValue) bool { } return true } + +// svcbParamStr converts the value of an SVCB parameter into a DNS presentation-format string. 
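Beyond constructing records as in the SVCBDoHPath doc comment above, the new dohpath key is usually consumed from answers. A sketch of reading alpn and dohpath out of an HTTPS RRset; the queried name and resolver address are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	m := new(dns.Msg)
	m.SetQuestion("dns.example.net.", dns.TypeHTTPS) // hypothetical DoH-capable name

	in, _, err := c.Exchange(m, "192.0.2.53:53") // hypothetical resolver address
	if err != nil {
		log.Fatal(err)
	}
	for _, rr := range in.Answer {
		https, ok := rr.(*dns.HTTPS)
		if !ok {
			continue
		}
		for _, kv := range https.Value {
			switch v := kv.(type) {
			case *dns.SVCBAlpn:
				fmt.Println("alpn:", v.Alpn)
			case *dns.SVCBDoHPath:
				fmt.Println("dohpath:", v.Template) // e.g. "/dns-query{?dns}"
			}
		}
	}
}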
+func svcbParamToStr(s []byte) string { + var str strings.Builder + str.Grow(4 * len(s)) + for _, e := range s { + if ' ' <= e && e <= '~' { + switch e { + case '"', ';', ' ', '\\': + str.WriteByte('\\') + str.WriteByte(e) + default: + str.WriteByte(e) + } + } else { + str.WriteString(escapeByte(e)) + } + } + return str.String() +} + +// svcbParseParam parses a DNS presentation-format string into an SVCB parameter value. +func svcbParseParam(b string) ([]byte, error) { + data := make([]byte, 0, len(b)) + for i := 0; i < len(b); { + if b[i] != '\\' { + data = append(data, b[i]) + i++ + continue + } + if i+1 == len(b) { + return nil, errors.New("escape unterminated") + } + if isDigit(b[i+1]) { + if i+3 < len(b) && isDigit(b[i+2]) && isDigit(b[i+3]) { + a, err := strconv.ParseUint(b[i+1:i+4], 10, 8) + if err == nil { + i += 4 + data = append(data, byte(a)) + continue + } + } + return nil, errors.New("bad escaped octet") + } else { + data = append(data, b[i+1]) + i += 2 + } + } + return data, nil +} diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go index 8b37cc8410b4..debfe2dd99cf 100644 --- a/vendor/github.com/miekg/dns/tsig.go +++ b/vendor/github.com/miekg/dns/tsig.go @@ -158,18 +158,17 @@ type timerWireFmt struct { } // TsigGenerate fills out the TSIG record attached to the message. -// The message should contain -// a "stub" TSIG RR with the algorithm, key name (owner name of the RR), -// time fudge (defaults to 300 seconds) and the current time -// The TSIG MAC is saved in that Tsig RR. -// When TsigGenerate is called for the first time requestMAC is set to the empty string and -// timersOnly is false. -// If something goes wrong an error is returned, otherwise it is nil. +// The message should contain a "stub" TSIG RR with the algorithm, key name +// (owner name of the RR), time fudge (defaults to 300 seconds) and the current +// time The TSIG MAC is saved in that Tsig RR. When TsigGenerate is called for +// the first time requestMAC should be set to the empty string and timersOnly to +// false. func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { - return tsigGenerateProvider(m, tsigHMACProvider(secret), requestMAC, timersOnly) + return TsigGenerateWithProvider(m, tsigHMACProvider(secret), requestMAC, timersOnly) } -func tsigGenerateProvider(m *Msg, provider TsigProvider, requestMAC string, timersOnly bool) ([]byte, string, error) { +// TsigGenerateWithProvider is similar to TsigGenerate, but allows for a custom TsigProvider. +func TsigGenerateWithProvider(m *Msg, provider TsigProvider, requestMAC string, timersOnly bool) ([]byte, string, error) { if m.IsTsig() == nil { panic("dns: TSIG not last RR in additional") } @@ -216,14 +215,15 @@ func tsigGenerateProvider(m *Msg, provider TsigProvider, requestMAC string, time return mbuf, t.MAC, nil } -// TsigVerify verifies the TSIG on a message. -// If the signature does not validate err contains the -// error, otherwise it is nil. +// TsigVerify verifies the TSIG on a message. If the signature does not +// validate the returned error contains the cause. If the signature is OK, the +// error is nil. 
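With tsigGenerateProvider and tsigVerifyProvider now exported as TsigGenerateWithProvider and TsigVerifyWithProvider, callers can supply their own MAC implementation. A rough sketch of a custom provider, assuming the TsigProvider interface's Generate and Verify methods receive the already-assembled TSIG buffer; the provider type and its secret handling are illustrative:

package tsigexample

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"

	"github.com/miekg/dns"
)

// hmacSHA256Provider is a hypothetical TsigProvider backed by a raw shared secret.
type hmacSHA256Provider []byte

func (s hmacSHA256Provider) Generate(msg []byte, t *dns.TSIG) ([]byte, error) {
	if dns.CanonicalName(t.Algorithm) != dns.HmacSHA256 {
		return nil, dns.ErrKeyAlg
	}
	h := hmac.New(sha256.New, s)
	h.Write(msg)
	return h.Sum(nil), nil
}

func (s hmacSHA256Provider) Verify(msg []byte, t *dns.TSIG) error {
	mac, err := hex.DecodeString(t.MAC)
	if err != nil {
		return err
	}
	expect, err := s.Generate(msg, t)
	if err != nil {
		return err
	}
	if !hmac.Equal(mac, expect) {
		return dns.ErrSig
	}
	return nil
}

// Usage then mirrors the exported helpers:
//   wire, mac, err := dns.TsigGenerateWithProvider(m, hmacSHA256Provider(secret), "", false)
//   err = dns.TsigVerifyWithProvider(reply, hmacSHA256Provider(secret), mac, false)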
func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { return tsigVerify(msg, tsigHMACProvider(secret), requestMAC, timersOnly, uint64(time.Now().Unix())) } -func tsigVerifyProvider(msg []byte, provider TsigProvider, requestMAC string, timersOnly bool) error { +// TsigVerifyWithProvider is similar to TsigVerify, but allows for a custom TsigProvider. +func TsigVerifyWithProvider(msg []byte, provider TsigProvider, requestMAC string, timersOnly bool) error { return tsigVerify(msg, provider, requestMAC, timersOnly, uint64(time.Now().Unix())) } diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index 8c80750189be..b1a872bd5956 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 46} +var Version = v{1, 1, 50} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go index f0dcf61d4213..1917e91c80ce 100644 --- a/vendor/github.com/miekg/dns/xfr.go +++ b/vendor/github.com/miekg/dns/xfr.go @@ -237,7 +237,7 @@ func (t *Transfer) ReadMsg() (*Msg, error) { } if ts, tp := m.IsTsig(), t.tsigProvider(); ts != nil && tp != nil { // Need to work on the original message p, as that was used to calculate the tsig. - err = tsigVerifyProvider(p, tp, t.tsigRequestMAC, t.tsigTimersOnly) + err = TsigVerifyWithProvider(p, tp, t.tsigRequestMAC, t.tsigTimersOnly) t.tsigRequestMAC = ts.MAC } return m, err @@ -247,7 +247,7 @@ func (t *Transfer) ReadMsg() (*Msg, error) { func (t *Transfer) WriteMsg(m *Msg) (err error) { var out []byte if ts, tp := m.IsTsig(), t.tsigProvider(); ts != nil && tp != nil { - out, t.tsigRequestMAC, err = tsigGenerateProvider(m, tp, t.tsigRequestMAC, t.tsigTimersOnly) + out, t.tsigRequestMAC, err = TsigGenerateWithProvider(m, tp, t.tsigRequestMAC, t.tsigTimersOnly) } else { out, err = m.Pack() } diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml deleted file mode 100644 index d7b9589ab11a..000000000000 --- a/vendor/github.com/mitchellh/copystructure/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.7 - - tip - -script: - - go test - -matrix: - allow_failures: - - go: tip diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md index bcb8c8d2cb97..f0fbd2e5c98c 100644 --- a/vendor/github.com/mitchellh/copystructure/README.md +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -1,21 +1,21 @@ -# copystructure - -copystructure is a Go library for deep copying values in Go. - -This allows you to copy Go values that may contain reference values -such as maps, slices, or pointers, and copy their data as well instead -of just their references. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/copystructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). - -The `Copy` function has examples associated with it there. +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. 
+ +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go index 140435255e10..8089e6670a34 100644 --- a/vendor/github.com/mitchellh/copystructure/copystructure.go +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -8,7 +8,30 @@ import ( "github.com/mitchellh/reflectwalk" ) +const tagKey = "copy" + // Copy returns a deep copy of v. +// +// Copy is unable to copy unexported fields in a struct (lowercase field names). +// Unexported fields can't be reflected by the Go runtime and therefore +// copystructure can't perform any data copies. +// +// For structs, copy behavior can be controlled with struct tags. For example: +// +// struct { +// Name string +// Data *bytes.Buffer `copy:"shallow"` +// } +// +// The available tag values are: +// +// * "ignore" - The field will be ignored, effectively resulting in it being +// assigned the zero value in the copy. +// +// * "shallow" - The field will be be shallow copied. This means that references +// values such as pointers, maps, slices, etc. will be directly assigned +// versus deep copied. +// func Copy(v interface{}) (interface{}, error) { return Config{}.Copy(v) } @@ -28,6 +51,19 @@ type CopierFunc func(interface{}) (interface{}, error) // this map as well as to Copy in a mutex. var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) +// ShallowCopiers is a map of pointer types that behave specially +// when they are copied. If a type is found in this map while deep +// copying, the pointer value will be shallow copied and not walked +// into. +// +// The key should be the type, obtained using: reflect.TypeOf(value +// with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{}) + // Must is a helper that wraps a call to a function returning // (interface{}, error) and panics if the error is non-nil. It is intended // for use in variable initializations and should only be used when a copy @@ -50,6 +86,11 @@ type Config struct { // Copiers is a map of types associated with a CopierFunc. Use the global // Copiers map if this is nil. Copiers map[reflect.Type]CopierFunc + + // ShallowCopiers is a map of pointer types that when they are + // shallow copied no matter where they are encountered. Use the + // global ShallowCopiers if this is nil. 
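The new `copy` struct tag and the ShallowCopiers hook give per-field and per-type control over deep copying. A small sketch of the tag behaviour documented above; the struct and field names are illustrative:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/mitchellh/copystructure"
)

type record struct {
	Name string
	Buf  *bytes.Buffer `copy:"shallow"` // pointer is assigned as-is, not walked into
	Tmp  []byte        `copy:"ignore"`  // left as the zero value in the copy
}

func main() {
	orig := &record{Name: "a", Buf: bytes.NewBufferString("x"), Tmp: []byte{1, 2, 3}}

	dupI, err := copystructure.Copy(orig)
	if err != nil {
		log.Fatal(err)
	}
	dup := dupI.(*record)

	fmt.Println(dup.Buf == orig.Buf) // true: shallow copied, same pointer
	fmt.Println(dup.Tmp == nil)      // true: ignored, zero value
}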
+ ShallowCopiers map[reflect.Type]struct{} } func (c Config) Copy(v interface{}) (interface{}, error) { @@ -65,6 +106,12 @@ func (c Config) Copy(v interface{}) (interface{}, error) { if c.Copiers == nil { c.Copiers = Copiers } + w.copiers = c.Copiers + + if c.ShallowCopiers == nil { + c.ShallowCopiers = ShallowCopiers + } + w.shallowCopiers = c.ShallowCopiers err := reflectwalk.Walk(v, w) if err != nil { @@ -93,10 +140,12 @@ func ifaceKey(pointers, depth int) uint64 { type walker struct { Result interface{} - depth int - ignoreDepth int - vals []reflect.Value - cs []reflect.Value + copiers map[reflect.Type]CopierFunc + shallowCopiers map[reflect.Type]struct{} + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value // This stores the number of pointers we've walked over, indexed by depth. ps []int @@ -263,6 +312,20 @@ func (w *walker) PointerExit(v bool) error { return nil } +func (w *walker) Pointer(v reflect.Value) error { + if _, ok := w.shallowCopiers[v.Type()]; ok { + // Shallow copy this value. Use the same logic as primitive, then + // return skip. + if err := w.Primitive(v); err != nil { + return err + } + + return reflectwalk.SkipEntry + } + + return nil +} + func (w *walker) Interface(v reflect.Value) error { if !v.IsValid() { return nil @@ -356,7 +419,7 @@ func (w *walker) Struct(s reflect.Value) error { w.lock(s) var v reflect.Value - if c, ok := Copiers[s.Type()]; ok { + if c, ok := w.copiers[s.Type()]; ok { // We have a Copier for this struct, so we use that copier to // get the copy, and we ignore anything deeper than this. w.ignoreDepth = w.depth @@ -396,9 +459,29 @@ func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { return reflectwalk.SkipEntry } + switch f.Tag.Get(tagKey) { + case "shallow": + // If we're shallow copying then assign the value directly to the + // struct and skip the entry. + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + if sf.CanSet() { + sf.Set(v) + } + } + + return reflectwalk.SkipEntry + + case "ignore": + // Do nothing + return reflectwalk.SkipEntry + } + // Push the field onto the stack, we'll handle it when we exit // the struct field in Exit... w.valPush(reflect.ValueOf(f)) + return nil } diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 38a099162caa..c758234904ec 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,3 +1,16 @@ +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. 
[GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + ## 1.4.3 * Fix cases where `json.Number` didn't decode properly [GH-261] diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index 4d4bbc733ba8..3a754ca72484 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -77,6 +77,28 @@ func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { } } +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, f := range ff { + out, err = DecodeHookExec(f, a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + // StringToSliceHookFunc returns a DecodeHookFunc that converts // string to []string by splitting on the given sep. func StringToSliceHookFunc(sep string) DecodeHookFunc { diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 6b81b00679ef..1efb22ac3610 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -122,7 +122,7 @@ // field value is zero and a numeric type, the field is empty, and it won't // be encoded into the destination type. // -// type Source { +// type Source struct { // Age int `mapstructure:",omitempty"` // } // @@ -215,6 +215,12 @@ type DecoderConfig struct { // (extra keys). ErrorUnused bool + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + // ZeroFields, if set to true, will zero fields before writing them. // For example, a map will be emptied before decoded values are put in // it. If this is false, a map will be merged. @@ -259,6 +265,10 @@ type DecoderConfig struct { // defaults to "mapstructure" TagName string + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + // MatchName is the function used to match the map key to the struct // field name or tag. Defaults to `strings.EqualFold`. This can be used // to implement case-sensitive tag values, support snake casing, etc. 
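The 1.5.0 options shown here (ErrorUnset, IgnoreUntaggedFields for struct-to-map decoding, and the Metadata.Unset slice added in the next hunk) are all configured through DecoderConfig. A brief sketch under illustrative struct and input values:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type target struct {
	Name string `mapstructure:"name"`
	Port int    `mapstructure:"port"`
}

func main() {
	var out target
	var md mapstructure.Metadata

	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:   &out,
		Metadata: &md,
		// ErrorUnset: true would instead turn any field left unset
		// (here "port") into a decode error.
	})
	if err != nil {
		panic(err)
	}

	if err := dec.Decode(map[string]interface{}{"name": "db"}); err != nil {
		panic(err)
	}
	fmt.Println(out.Name) // "db"
	fmt.Println(md.Unset) // lists the "port" field, which never received a value
}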
@@ -284,6 +294,11 @@ type Metadata struct { // Unused is a slice of keys that were found in the raw value but // weren't decoded since there was no matching field in the result interface Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string } // Decode takes an input structure and uses reflection to translate it to @@ -375,6 +390,10 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { if config.Metadata.Unused == nil { config.Metadata.Unused = make([]string, 0) } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } } if config.TagName == "" { @@ -906,9 +925,15 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re tagValue := f.Tag.Get(d.config.TagName) keyName := f.Name + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + // If Squash is set in the config, we squash the field down. squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) + // Determine the name of the key in the map if index := strings.Index(tagValue, ","); index != -1 { if tagValue[:index] == "-" { @@ -920,7 +945,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re } // If "squash" is specified in the tag, we squash the field down. - squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1 + squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 if squash { // When squashing, the embedded type can be a pointer to a struct. if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { @@ -932,7 +957,9 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) } } - keyName = tagValue[:index] + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } } else if len(tagValue) > 0 { if tagValue == "-" { continue @@ -1088,7 +1115,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) } // If the input value is nil, then don't allocate since empty != nil - if dataVal.IsNil() { + if dataValKind != reflect.Array && dataVal.IsNil() { return nil } @@ -1250,6 +1277,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e dataValKeysUnused[dataValKey.Interface()] = struct{}{} } + targetValKeysUnused := make(map[interface{}]struct{}) errors := make([]string, 0) // This slice will keep track of all the structs we'll be decoding. @@ -1354,7 +1382,8 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e if !rawMapVal.IsValid() { // There was no matching key in the map for the value in - // the struct. Just ignore. + // the struct. Remember it for potential errors and metadata. 
+ targetValKeysUnused[fieldName] = struct{}{} continue } } @@ -1414,6 +1443,17 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e errors = appendErrors(errors, err) } + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + if len(errors) > 0 { return &Error{errors} } @@ -1428,6 +1468,14 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } } return nil @@ -1465,3 +1513,28 @@ func getKind(val reflect.Value) reflect.Kind { return kind } } + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go index 3a93a0b114d4..7fee7b050ba4 100644 --- a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -69,6 +69,13 @@ type PointerWalker interface { PointerExit(bool) error } +// PointerValueWalker implementations are notified with the value of +// a particular pointer when a pointer is walked. Pointer is called +// right before PointerEnter. +type PointerValueWalker interface { + Pointer(reflect.Value) error +} + // SkipEntry can be returned from walk functions to skip walking // the value of this field. This is only valid in the following functions: // @@ -130,6 +137,17 @@ func walk(v reflect.Value, w interface{}) (err error) { } if pointerV.Kind() == reflect.Ptr { + if pw, ok := w.(PointerValueWalker); ok { + if err = pw.Pointer(pointerV); err != nil { + if err == SkipEntry { + // Skip the rest of this entry but clear the error + return nil + } + + return + } + } + pointer = true v = reflect.Indirect(pointerV) } diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go index 35d8108958ff..581cf7cdfadf 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -53,4 +53,10 @@ const ( // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. AnnotationDescription = "org.opencontainers.image.description" + + // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image. 
+ AnnotationBaseImageDigest = "org.opencontainers.image.base.digest" + + // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. + AnnotationBaseImageName = "org.opencontainers.image.base.name" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go index fe799bd698c7..ffff4b6d1863 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -89,9 +89,20 @@ type Image struct { // Architecture is the CPU architecture which the binaries in this image are built to run on. Architecture string `json:"architecture"` + // Variant is the variant of the specified CPU architecture which image binaries are intended to run on. + Variant string `json:"variant,omitempty"` + // OS is the name of the operating system which the image is built to run on. OS string `json:"os"` + // OSVersion is an optional field specifying the operating system + // version, for example on Windows `10.0.14393.1066`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + // Config defines the execution parameters which should be used as a base when running a container using the image. Config ImageConfig `json:"config,omitempty"` diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index bad7bb97f473..4f35ac134fef 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -34,6 +34,10 @@ const ( // referenced by the manifest. MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + // MediaTypeImageLayerZstd is the media type used for zstd compressed + // layers referenced by the manifest. + MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" + // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" @@ -43,6 +47,11 @@ const ( // restrictions. MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + // MediaTypeImageLayerNonDistributableZstd is the media type for zstd + // compressed layers referenced by the manifest but with distribution + // restrictions. + MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" + // MediaTypeImageConfig specifies the media type for the image configuration. MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 0d9543f16000..31f99cf645c4 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 2 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" + VersionDev = "-dev" ) // Version is the specification version that the package types support. 
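For orientation only (not part of the vendored patch): a minimal sketch of how the image-spec additions above could be used from Go — the new base-image annotation keys, the extra platform fields on the image config, and the zstd layer media type. The image reference and digest values below are placeholders, not values from this change.

```
package main

import (
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Record which base image a derived image was built from, using the
	// annotation keys added in annotations.go above (placeholder values).
	annotations := map[string]string{
		v1.AnnotationBaseImageName:   "registry.example.com/library/base:latest",
		v1.AnnotationBaseImageDigest: "sha256:<digest-of-base-image>",
	}

	// The additional platform fields added to the image config in config.go above.
	img := v1.Image{
		Architecture: "amd64",
		OS:           "windows",
		OSVersion:    "10.0.14393.1066",
		OSFeatures:   []string{"win32k"},
	}

	// The new zstd-compressed layer media type from mediatype.go above.
	fmt.Println(v1.MediaTypeImageLayerZstd, annotations, img.OSVersion)
}
```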
diff --git a/vendor/github.com/opentracing/opentracing-go/.golangci.yml b/vendor/github.com/opentracing/opentracing-go/.golangci.yml new file mode 100644 index 000000000000..011a79406f43 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/.golangci.yml @@ -0,0 +1,3 @@ +linters-settings: + staticcheck: + checks: [ "all", "-SA1019" ] diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml deleted file mode 100644 index b950e42965f0..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: go - -matrix: - include: - - go: "1.13.x" - - go: "1.14.x" - - go: "tip" - env: - - LINT=true - - COVERAGE=true - -install: - - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi - - go get -u github.com/stretchr/testify/... - -script: - - make test - - go build ./... - - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi - - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore deleted file mode 100644 index 5e987350471d..000000000000 --- a/vendor/github.com/pierrec/lz4/.gitignore +++ /dev/null @@ -1,34 +0,0 @@ -# Created by https://www.gitignore.io/api/macos - -### macOS ### -*.DS_Store -.AppleDouble -.LSOverride - -# Icon must end with two \r -Icon - - -# Thumbnails -._* - -# Files that might appear in the root of a volume -.DocumentRevisions-V100 -.fseventsd -.Spotlight-V100 -.TemporaryItems -.Trashes -.VolumeIcon.icns -.com.apple.timemachine.donotpresent - -# Directories potentially created on remote AFP share -.AppleDB -.AppleDesktop -Network Trash Folder -Temporary Items -.apdisk - -# End of https://www.gitignore.io/api/macos - -cmd/*/*exe -.idea \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml deleted file mode 100644 index fd6c6db713d3..000000000000 --- a/vendor/github.com/pierrec/lz4/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: go - -env: - - GO111MODULE=off - -go: - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - master - -matrix: - fast_finish: true - allow_failures: - - go: master - -sudo: false - -script: - - go test -v -cpu=2 - - go test -v -cpu=2 -race - - go test -v -cpu=2 -tags noasm - - go test -v -cpu=2 -race -tags noasm diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE deleted file mode 100644 index bd899d8353dd..000000000000 --- a/vendor/github.com/pierrec/lz4/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2015, Pierre Curto -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of xxHash nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md deleted file mode 100644 index 4ee388e81bfb..000000000000 --- a/vendor/github.com/pierrec/lz4/README.md +++ /dev/null @@ -1,90 +0,0 @@ -# lz4 : LZ4 compression in pure Go - -[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4) -[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4) -[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4) -[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags) - -## Overview - -This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks. -The implementation is based on the reference C [one](https://github.com/lz4/lz4). - -## Install - -Assuming you have the go toolchain installed: - -``` -go get github.com/pierrec/lz4 -``` - -There is a command line interface tool to compress and decompress LZ4 files. - -``` -go install github.com/pierrec/lz4/cmd/lz4c -``` - -Usage - -``` -Usage of lz4c: - -version - print the program version - -Subcommands: -Compress the given files or from stdin to stdout. -compress [arguments] [ ...] - -bc - enable block checksum - -l int - compression level (0=fastest) - -sc - disable stream checksum - -size string - block max size [64K,256K,1M,4M] (default "4M") - -Uncompress the given files or from stdin to stdout. -uncompress [arguments] [ ...] - -``` - - -## Example - -``` -// Compress and uncompress an input string. -s := "hello world" -r := strings.NewReader(s) - -// The pipe will uncompress the data from the writer. -pr, pw := io.Pipe() -zw := lz4.NewWriter(pw) -zr := lz4.NewReader(pr) - -go func() { - // Compress the input string. - _, _ = io.Copy(zw, r) - _ = zw.Close() // Make sure the writer is closed - _ = pw.Close() // Terminate the pipe -}() - -_, _ = io.Copy(os.Stdout, zr) - -// Output: -// hello world -``` - -## Contributing - -Contributions are very welcome for bug fixing, performance improvements...! - -- Open an issue with a proper description -- Send a pull request with appropriate test case(s) - -## Contributors - -Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far! - -Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. - -Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. 
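The hunks above and below remove the legacy github.com/pierrec/lz4 (v1) package from vendor; its v4 successor remains in the tree (see the lz4/v4 hunks later in this patch). As a hedged sketch, the round-trip example from the deleted README looks essentially the same against the v4 frame API, assuming the standard NewWriter/NewReader entry points:

```
package main

import (
	"io"
	"os"
	"strings"

	"github.com/pierrec/lz4/v4"
)

func main() {
	// Compress a string into an LZ4 frame on one end of a pipe and
	// decompress it on the other, mirroring the removed v1 README example.
	pr, pw := io.Pipe()
	zw := lz4.NewWriter(pw)
	zr := lz4.NewReader(pr)

	go func() {
		_, _ = io.Copy(zw, strings.NewReader("hello world"))
		_ = zw.Close() // flush and close the frame
		_ = pw.Close() // terminate the pipe
	}()

	_, _ = io.Copy(os.Stdout, zr)
	// Output: hello world
}
```

At the block level, v4 keeps its hash table inside a reusable Compressor rather than taking a caller-supplied table, as the lz4block hunks later in this patch show.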
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go deleted file mode 100644 index 664d9be580d4..000000000000 --- a/vendor/github.com/pierrec/lz4/block.go +++ /dev/null @@ -1,413 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "math/bits" - "sync" -) - -// blockHash hashes the lower 6 bytes into a value < htSize. -func blockHash(x uint64) uint32 { - const prime6bytes = 227718039650203 - return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) -} - -// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. -func CompressBlockBound(n int) int { - return n + n/255 + 16 -} - -// UncompressBlock uncompresses the source buffer into the destination one, -// and returns the uncompressed size. -// -// The destination buffer must be sized appropriately. -// -// An error is returned if the source data is invalid or the destination buffer is too small. -func UncompressBlock(src, dst []byte) (int, error) { - if len(src) == 0 { - return 0, nil - } - if di := decodeBlock(dst, src); di >= 0 { - return di, nil - } - return 0, ErrInvalidSourceShortBuffer -} - -// CompressBlock compresses the source buffer into the destination one. -// This is the fast version of LZ4 compression and also the default one. -// -// The argument hashTable is scratch space for a hash table used by the -// compressor. If provided, it should have length at least 1<<16. If it is -// shorter (or nil), CompressBlock allocates its own hash table. -// -// The size of the compressed data is returned. -// -// If the destination buffer size is lower than CompressBlockBound and -// the compressed size is 0 and no error, then the data is incompressible. -// -// An error is returned if the destination buffer is too small. -func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { - defer recoverBlock(&err) - - // Return 0, nil only if the destination buffer size is < CompressBlockBound. - isNotCompressible := len(dst) < CompressBlockBound(len(src)) - - // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. - // This significantly speeds up incompressible data and usually has very small impact on compression. - // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) - const adaptSkipLog = 7 - if len(hashTable) < htSize { - htIface := htPool.Get() - defer htPool.Put(htIface) - hashTable = (*(htIface).(*[htSize]int))[:] - } - // Prove to the compiler the table has at least htSize elements. - // The compiler can see that "uint32() >> hashShift" cannot be out of bounds. - hashTable = hashTable[:htSize] - - // si: Current position of the search. - // anchor: Position of the current literals. - var si, di, anchor int - sn := len(src) - mfLimit - if sn <= 0 { - goto lastLiterals - } - - // Fast scan strategy: the hash table only stores the last 4 bytes sequences. - for si < sn { - // Hash the next 6 bytes (sequence)... - match := binary.LittleEndian.Uint64(src[si:]) - h := blockHash(match) - h2 := blockHash(match >> 8) - - // We check a match at s, s+1 and s+2 and pick the first one we get. - // Checking 3 only requires us to load the source one. - ref := hashTable[h] - ref2 := hashTable[h2] - hashTable[h] = si - hashTable[h2] = si + 1 - offset := si - ref - - // If offset <= 0 we got an old entry in the hash table. - if offset <= 0 || offset >= winSize || // Out of window. - uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches. - // No match. 
Start calculating another hash. - // The processor can usually do this out-of-order. - h = blockHash(match >> 16) - ref = hashTable[h] - - // Check the second match at si+1 - si += 1 - offset = si - ref2 - - if offset <= 0 || offset >= winSize || - uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { - // No match. Check the third match at si+2 - si += 1 - offset = si - ref - hashTable[h] = si - - if offset <= 0 || offset >= winSize || - uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) { - // Skip one extra byte (at si+3) before we check 3 matches again. - si += 2 + (si-anchor)>>adaptSkipLog - continue - } - } - } - - // Match found. - lLen := si - anchor // Literal length. - // We already matched 4 bytes. - mLen := 4 - - // Extend backwards if we can, reducing literals. - tOff := si - offset - 1 - for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] { - si-- - tOff-- - lLen-- - mLen++ - } - - // Add the match length, so we continue search at the end. - // Use mLen to store the offset base. - si, mLen = si+mLen, si+minMatch - - // Find the longest match by looking by batches of 8 bytes. - for si+8 < sn { - x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) - if x == 0 { - si += 8 - } else { - // Stop is first non-zero byte. - si += bits.TrailingZeros64(x) >> 3 - break - } - } - - mLen = si - mLen - if mLen < 0xF { - dst[di] = byte(mLen) - } else { - dst[di] = 0xF - } - - // Encode literals length. - if lLen < 0xF { - dst[di] |= byte(lLen << 4) - } else { - dst[di] |= 0xF0 - di++ - l := lLen - 0xF - for ; l >= 0xFF; l -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(l) - } - di++ - - // Literals. - copy(dst[di:di+lLen], src[anchor:anchor+lLen]) - di += lLen + 2 - anchor = si - - // Encode offset. - _ = dst[di] // Bound check elimination. - dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) - - // Encode match length part 2. - if mLen >= 0xF { - for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(mLen) - di++ - } - // Check if we can load next values. - if si >= sn { - break - } - // Hash match end-2 - h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) - hashTable[h] = si - 2 - } - -lastLiterals: - if isNotCompressible && anchor == 0 { - // Incompressible. - return 0, nil - } - - // Last literals. - lLen := len(src) - anchor - if lLen < 0xF { - dst[di] = byte(lLen << 4) - } else { - dst[di] = 0xF0 - di++ - for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(lLen) - } - di++ - - // Write the last literals. - if isNotCompressible && di >= anchor { - // Incompressible. - return 0, nil - } - di += copy(dst[di:di+len(src)-anchor], src[anchor:]) - return di, nil -} - -// Pool of hash tables for CompressBlock. -var htPool = sync.Pool{ - New: func() interface{} { - return new([htSize]int) - }, -} - -// blockHash hashes 4 bytes into a value < winSize. -func blockHashHC(x uint32) uint32 { - const hasher uint32 = 2654435761 // Knuth multiplicative hash. - return x * hasher >> (32 - winSizeLog) -} - -// CompressBlockHC compresses the source buffer src into the destination dst -// with max search depth (use 0 or negative value for no max). -// -// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. -// -// The size of the compressed data is returned. -// -// If the destination buffer size is lower than CompressBlockBound and -// the compressed size is 0 and no error, then the data is incompressible. 
-// -// An error is returned if the destination buffer is too small. -func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { - defer recoverBlock(&err) - - // Return 0, nil only if the destination buffer size is < CompressBlockBound. - isNotCompressible := len(dst) < CompressBlockBound(len(src)) - - // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. - // This significantly speeds up incompressible data and usually has very small impact on compression. - // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) - const adaptSkipLog = 7 - - var si, di, anchor int - - // hashTable: stores the last position found for a given hash - // chainTable: stores previous positions for a given hash - var hashTable, chainTable [winSize]int - - if depth <= 0 { - depth = winSize - } - - sn := len(src) - mfLimit - if sn <= 0 { - goto lastLiterals - } - - for si < sn { - // Hash the next 4 bytes (sequence). - match := binary.LittleEndian.Uint32(src[si:]) - h := blockHashHC(match) - - // Follow the chain until out of window and give the longest match. - mLen := 0 - offset := 0 - for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] { - // The first (mLen==0) or next byte (mLen>=minMatch) at current match length - // must match to improve on the match length. - if src[next+mLen] != src[si+mLen] { - continue - } - ml := 0 - // Compare the current position with a previous with the same hash. - for ml < sn-si { - x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:]) - if x == 0 { - ml += 8 - } else { - // Stop is first non-zero byte. - ml += bits.TrailingZeros64(x) >> 3 - break - } - } - if ml < minMatch || ml <= mLen { - // Match too small (>adaptSkipLog - continue - } - - // Match found. - // Update hash/chain tables with overlapping bytes: - // si already hashed, add everything from si+1 up to the match length. - winStart := si + 1 - if ws := si + mLen - winSize; ws > winStart { - winStart = ws - } - for si, ml := winStart, si+mLen; si < ml; { - match >>= 8 - match |= uint32(src[si+3]) << 24 - h := blockHashHC(match) - chainTable[si&winMask] = hashTable[h] - hashTable[h] = si - si++ - } - - lLen := si - anchor - si += mLen - mLen -= minMatch // Match length does not include minMatch. - - if mLen < 0xF { - dst[di] = byte(mLen) - } else { - dst[di] = 0xF - } - - // Encode literals length. - if lLen < 0xF { - dst[di] |= byte(lLen << 4) - } else { - dst[di] |= 0xF0 - di++ - l := lLen - 0xF - for ; l >= 0xFF; l -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(l) - } - di++ - - // Literals. - copy(dst[di:di+lLen], src[anchor:anchor+lLen]) - di += lLen - anchor = si - - // Encode offset. - di += 2 - dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) - - // Encode match length part 2. - if mLen >= 0xF { - for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(mLen) - di++ - } - } - - if isNotCompressible && anchor == 0 { - // Incompressible. - return 0, nil - } - - // Last literals. -lastLiterals: - lLen := len(src) - anchor - if lLen < 0xF { - dst[di] = byte(lLen << 4) - } else { - dst[di] = 0xF0 - di++ - lLen -= 0xF - for ; lLen >= 0xFF; lLen -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(lLen) - } - di++ - - // Write the last literals. - if isNotCompressible && di >= anchor { - // Incompressible. 
- return 0, nil - } - di += copy(dst[di:di+len(src)-anchor], src[anchor:]) - return di, nil -} diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go deleted file mode 100644 index bc5e78d40f0a..000000000000 --- a/vendor/github.com/pierrec/lz4/debug.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build lz4debug - -package lz4 - -import ( - "fmt" - "os" - "path/filepath" - "runtime" -) - -const debugFlag = true - -func debug(args ...interface{}) { - _, file, line, _ := runtime.Caller(1) - file = filepath.Base(file) - - f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0]) - if f[len(f)-1] != '\n' { - f += "\n" - } - fmt.Fprintf(os.Stderr, f, args[1:]...) -} diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go deleted file mode 100644 index 44211ad96453..000000000000 --- a/vendor/github.com/pierrec/lz4/debug_stub.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !lz4debug - -package lz4 - -const debugFlag = false - -func debug(args ...interface{}) {} diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go deleted file mode 100644 index 43cc14fbe2e3..000000000000 --- a/vendor/github.com/pierrec/lz4/decode_amd64.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -package lz4 - -//go:noescape -func decodeBlock(dst, src []byte) int diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s deleted file mode 100644 index 20fef39759cb..000000000000 --- a/vendor/github.com/pierrec/lz4/decode_amd64.s +++ /dev/null @@ -1,375 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// AX scratch -// BX scratch -// CX scratch -// DX token -// -// DI &dst -// SI &src -// R8 &dst + len(dst) -// R9 &src + len(src) -// R11 &dst -// R12 short output end -// R13 short input end -// func decodeBlock(dst, src []byte) int -// using 50 bytes of stack currently -TEXT ·decodeBlock(SB), NOSPLIT, $64-56 - MOVQ dst_base+0(FP), DI - MOVQ DI, R11 - MOVQ dst_len+8(FP), R8 - ADDQ DI, R8 - - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R9 - ADDQ SI, R9 - - // shortcut ends - // short output end - MOVQ R8, R12 - SUBQ $32, R12 - // short input end - MOVQ R9, R13 - SUBQ $16, R13 - -loop: - // for si < len(src) - CMPQ SI, R9 - JGE end - - // token := uint32(src[si]) - MOVBQZX (SI), DX - INCQ SI - - // lit_len = token >> 4 - // if lit_len > 0 - // CX = lit_len - MOVQ DX, CX - SHRQ $4, CX - - // if lit_len != 0xF - CMPQ CX, $0xF - JEQ lit_len_loop_pre - CMPQ DI, R12 - JGE lit_len_loop_pre - CMPQ SI, R13 - JGE lit_len_loop_pre - - // copy shortcut - - // A two-stage shortcut for the most common case: - // 1) If the literal length is 0..14, and there is enough space, - // enter the shortcut and copy 16 bytes on behalf of the literals - // (in the fast mode, only 8 bytes can be safely copied this way). - // 2) Further if the match length is 4..18, copy 18 bytes in a similar - // manner; but we ensure that there's enough space in the output for - // those 18 bytes earlier, upon entering the shortcut (in other words, - // there is a combined check for both stages). - - // copy literal - MOVOU (SI), X0 - MOVOU X0, (DI) - ADDQ CX, DI - ADDQ CX, SI - - MOVQ DX, CX - ANDQ $0xF, CX - - // The second stage: prepare for match copying, decode full info. - // If it doesn't work out, the info won't be wasted. 
- // offset := uint16(data[:2]) - MOVWQZX (SI), DX - ADDQ $2, SI - - MOVQ DI, AX - SUBQ DX, AX - CMPQ AX, DI - JGT err_short_buf - - // if we can't do the second stage then jump straight to read the - // match length, we already have the offset. - CMPQ CX, $0xF - JEQ match_len_loop_pre - CMPQ DX, $8 - JLT match_len_loop_pre - CMPQ AX, R11 - JLT err_short_buf - - // memcpy(op + 0, match + 0, 8); - MOVQ (AX), BX - MOVQ BX, (DI) - // memcpy(op + 8, match + 8, 8); - MOVQ 8(AX), BX - MOVQ BX, 8(DI) - // memcpy(op +16, match +16, 2); - MOVW 16(AX), BX - MOVW BX, 16(DI) - - ADDQ $4, DI // minmatch - ADDQ CX, DI - - // shortcut complete, load next token - JMP loop - -lit_len_loop_pre: - // if lit_len > 0 - CMPQ CX, $0 - JEQ offset - CMPQ CX, $0xF - JNE copy_literal - -lit_len_loop: - // for src[si] == 0xFF - CMPB (SI), $0xFF - JNE lit_len_finalise - - // bounds check src[si+1] - MOVQ SI, AX - ADDQ $1, AX - CMPQ AX, R9 - JGT err_short_buf - - // lit_len += 0xFF - ADDQ $0xFF, CX - INCQ SI - JMP lit_len_loop - -lit_len_finalise: - // lit_len += int(src[si]) - // si++ - MOVBQZX (SI), AX - ADDQ AX, CX - INCQ SI - -copy_literal: - // bounds check src and dst - MOVQ SI, AX - ADDQ CX, AX - CMPQ AX, R9 - JGT err_short_buf - - MOVQ DI, AX - ADDQ CX, AX - CMPQ AX, R8 - JGT err_short_buf - - // whats a good cut off to call memmove? - CMPQ CX, $16 - JGT memmove_lit - - // if len(dst[di:]) < 16 - MOVQ R8, AX - SUBQ DI, AX - CMPQ AX, $16 - JLT memmove_lit - - // if len(src[si:]) < 16 - MOVQ R9, AX - SUBQ SI, AX - CMPQ AX, $16 - JLT memmove_lit - - MOVOU (SI), X0 - MOVOU X0, (DI) - - JMP finish_lit_copy - -memmove_lit: - // memmove(to, from, len) - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - // spill - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) // need len to inc SI, DI after - MOVB DX, 48(SP) - CALL runtime·memmove(SB) - - // restore registers - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVB 48(SP), DX - - // recalc initial values - MOVQ dst_base+0(FP), R8 - MOVQ R8, R11 - ADDQ dst_len+8(FP), R8 - MOVQ src_base+24(FP), R9 - ADDQ src_len+32(FP), R9 - MOVQ R8, R12 - SUBQ $32, R12 - MOVQ R9, R13 - SUBQ $16, R13 - -finish_lit_copy: - ADDQ CX, SI - ADDQ CX, DI - - CMPQ SI, R9 - JGE end - -offset: - // CX := mLen - // free up DX to use for offset - MOVQ DX, CX - - MOVQ SI, AX - ADDQ $2, AX - CMPQ AX, R9 - JGT err_short_buf - - // offset - // DX := int(src[si]) | int(src[si+1])<<8 - MOVWQZX (SI), DX - ADDQ $2, SI - - // 0 offset is invalid - CMPQ DX, $0 - JEQ err_corrupt - - ANDB $0xF, CX - -match_len_loop_pre: - // if mlen != 0xF - CMPB CX, $0xF - JNE copy_match - -match_len_loop: - // for src[si] == 0xFF - // lit_len += 0xFF - CMPB (SI), $0xFF - JNE match_len_finalise - - // bounds check src[si+1] - MOVQ SI, AX - ADDQ $1, AX - CMPQ AX, R9 - JGT err_short_buf - - ADDQ $0xFF, CX - INCQ SI - JMP match_len_loop - -match_len_finalise: - // lit_len += int(src[si]) - // si++ - MOVBQZX (SI), AX - ADDQ AX, CX - INCQ SI - -copy_match: - // mLen += minMatch - ADDQ $4, CX - - // check we have match_len bytes left in dst - // di+match_len < len(dst) - MOVQ DI, AX - ADDQ CX, AX - CMPQ AX, R8 - JGT err_short_buf - - // DX = offset - // CX = match_len - // BX = &dst + (di - offset) - MOVQ DI, BX - SUBQ DX, BX - - // check BX is within dst - // if BX < &dst - CMPQ BX, R11 - JLT err_short_buf - - // if offset + match_len < di - MOVQ BX, AX - ADDQ CX, AX - CMPQ DI, AX - JGT copy_interior_match - - // AX := len(dst[:di]) - // MOVQ DI, AX - // SUBQ R11, AX - - // copy 16 bytes at a time - // if di-offset < 16 
copy 16-(di-offset) bytes to di - // then do the remaining - -copy_match_loop: - // for match_len >= 0 - // dst[di] = dst[i] - // di++ - // i++ - MOVB (BX), AX - MOVB AX, (DI) - INCQ DI - INCQ BX - DECQ CX - - CMPQ CX, $0 - JGT copy_match_loop - - JMP loop - -copy_interior_match: - CMPQ CX, $16 - JGT memmove_match - - // if len(dst[di:]) < 16 - MOVQ R8, AX - SUBQ DI, AX - CMPQ AX, $16 - JLT memmove_match - - MOVOU (BX), X0 - MOVOU X0, (DI) - - ADDQ CX, DI - JMP loop - -memmove_match: - // memmove(to, from, len) - MOVQ DI, 0(SP) - MOVQ BX, 8(SP) - MOVQ CX, 16(SP) - // spill - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) // need len to inc SI, DI after - CALL runtime·memmove(SB) - - // restore registers - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - - // recalc initial values - MOVQ dst_base+0(FP), R8 - MOVQ R8, R11 // TODO: make these sensible numbers - ADDQ dst_len+8(FP), R8 - MOVQ src_base+24(FP), R9 - ADDQ src_len+32(FP), R9 - MOVQ R8, R12 - SUBQ $32, R12 - MOVQ R9, R13 - SUBQ $16, R13 - - ADDQ CX, DI - JMP loop - -err_corrupt: - MOVQ $-1, ret+48(FP) - RET - -err_short_buf: - MOVQ $-2, ret+48(FP) - RET - -end: - SUBQ R11, DI - MOVQ DI, ret+48(FP) - RET diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go deleted file mode 100644 index 919888edf7dc..000000000000 --- a/vendor/github.com/pierrec/lz4/decode_other.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build !amd64 appengine !gc noasm - -package lz4 - -func decodeBlock(dst, src []byte) (ret int) { - const hasError = -2 - defer func() { - if recover() != nil { - ret = hasError - } - }() - - var si, di int - for { - // Literals and match lengths (token). - b := int(src[si]) - si++ - - // Literals. - if lLen := b >> 4; lLen > 0 { - switch { - case lLen < 0xF && si+16 < len(src): - // Shortcut 1 - // if we have enough room in src and dst, and the literals length - // is small enough (0..14) then copy all 16 bytes, even if not all - // are part of the literals. - copy(dst[di:], src[si:si+16]) - si += lLen - di += lLen - if mLen := b & 0xF; mLen < 0xF { - // Shortcut 2 - // if the match length (4..18) fits within the literals, then copy - // all 18 bytes, even if not all are part of the literals. - mLen += 4 - if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset { - i := di - offset - end := i + 18 - if end > len(dst) { - // The remaining buffer may not hold 18 bytes. - // See https://github.com/pierrec/lz4/issues/51. - end = len(dst) - } - copy(dst[di:], dst[i:end]) - si += 2 - di += mLen - continue - } - } - case lLen == 0xF: - for src[si] == 0xFF { - lLen += 0xFF - si++ - } - lLen += int(src[si]) - si++ - fallthrough - default: - copy(dst[di:di+lLen], src[si:si+lLen]) - si += lLen - di += lLen - } - } - if si >= len(src) { - return di - } - - offset := int(src[si]) | int(src[si+1])<<8 - if offset == 0 { - return hasError - } - si += 2 - - // Match. - mLen := b & 0xF - if mLen == 0xF { - for src[si] == 0xFF { - mLen += 0xFF - si++ - } - mLen += int(src[si]) - si++ - } - mLen += minMatch - - // Copy the match. - expanded := dst[di-offset:] - if mLen > offset { - // Efficiently copy the match dst[di-offset:di] into the dst slice. 
- bytesToCopy := offset * (mLen / offset) - for n := offset; n <= bytesToCopy+offset; n *= 2 { - copy(expanded[n:], expanded[:n]) - } - di += bytesToCopy - mLen -= bytesToCopy - } - di += copy(dst[di:di+mLen], expanded[:mLen]) - } -} diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go deleted file mode 100644 index 1c45d1813cef..000000000000 --- a/vendor/github.com/pierrec/lz4/errors.go +++ /dev/null @@ -1,30 +0,0 @@ -package lz4 - -import ( - "errors" - "fmt" - "os" - rdebug "runtime/debug" -) - -var ( - // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed - // block is corrupted or the destination buffer is not large enough for the uncompressed data. - ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short") - // ErrInvalid is returned when reading an invalid LZ4 archive. - ErrInvalid = errors.New("lz4: bad magic number") - // ErrBlockDependency is returned when attempting to decompress an archive created with block dependency. - ErrBlockDependency = errors.New("lz4: block dependency not supported") - // ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position. - ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent") -) - -func recoverBlock(e *error) { - if r := recover(); r != nil && *e == nil { - if debugFlag { - fmt.Fprintln(os.Stderr, r) - rdebug.PrintStack() - } - *e = ErrInvalidSourceShortBuffer - } -} diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go deleted file mode 100644 index 7a76a6bce2b5..000000000000 --- a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go +++ /dev/null @@ -1,223 +0,0 @@ -// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). -// (https://github.com/Cyan4973/XXH/) -package xxh32 - -import ( - "encoding/binary" -) - -const ( - prime1 uint32 = 2654435761 - prime2 uint32 = 2246822519 - prime3 uint32 = 3266489917 - prime4 uint32 = 668265263 - prime5 uint32 = 374761393 - - primeMask = 0xFFFFFFFF - prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984 - prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535 -) - -// XXHZero represents an xxhash32 object with seed 0. -type XXHZero struct { - v1 uint32 - v2 uint32 - v3 uint32 - v4 uint32 - totalLen uint64 - buf [16]byte - bufused int -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. -func (xxh XXHZero) Sum(b []byte) []byte { - h32 := xxh.Sum32() - return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) -} - -// Reset resets the Hash to its initial state. -func (xxh *XXHZero) Reset() { - xxh.v1 = prime1plus2 - xxh.v2 = prime2 - xxh.v3 = 0 - xxh.v4 = prime1minus - xxh.totalLen = 0 - xxh.bufused = 0 -} - -// Size returns the number of bytes returned by Sum(). -func (xxh *XXHZero) Size() int { - return 4 -} - -// BlockSize gives the minimum number of bytes accepted by Write(). -func (xxh *XXHZero) BlockSize() int { - return 1 -} - -// Write adds input bytes to the Hash. -// It never returns an error. 
-func (xxh *XXHZero) Write(input []byte) (int, error) { - if xxh.totalLen == 0 { - xxh.Reset() - } - n := len(input) - m := xxh.bufused - - xxh.totalLen += uint64(n) - - r := len(xxh.buf) - m - if n < r { - copy(xxh.buf[m:], input) - xxh.bufused += len(input) - return n, nil - } - - p := 0 - // Causes compiler to work directly from registers instead of stack: - v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 - if m > 0 { - // some data left from previous update - copy(xxh.buf[xxh.bufused:], input[:r]) - xxh.bufused += len(input) - r - - // fast rotl(13) - buf := xxh.buf[:16] // BCE hint. - v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 - v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 - v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 - v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 - p = r - xxh.bufused = 0 - } - - for n := n - 16; p <= n; p += 16 { - sub := input[p:][:16] //BCE hint for compiler - v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 - v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 - v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 - v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 - } - xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 - - copy(xxh.buf[xxh.bufused:], input[p:]) - xxh.bufused += len(input) - p - - return n, nil -} - -// Sum32 returns the 32 bits Hash value. -func (xxh *XXHZero) Sum32() uint32 { - h32 := uint32(xxh.totalLen) - if h32 >= 16 { - h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) - } else { - h32 += prime5 - } - - p := 0 - n := xxh.bufused - buf := xxh.buf - for n := n - 4; p <= n; p += 4 { - h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3 - h32 = rol17(h32) * prime4 - } - for ; p < n; p++ { - h32 += uint32(buf[p]) * prime5 - h32 = rol11(h32) * prime1 - } - - h32 ^= h32 >> 15 - h32 *= prime2 - h32 ^= h32 >> 13 - h32 *= prime3 - h32 ^= h32 >> 16 - - return h32 -} - -// ChecksumZero returns the 32bits Hash value. -func ChecksumZero(input []byte) uint32 { - n := len(input) - h32 := uint32(n) - - if n < 16 { - h32 += prime5 - } else { - v1 := prime1plus2 - v2 := prime2 - v3 := uint32(0) - v4 := prime1minus - p := 0 - for n := n - 16; p <= n; p += 16 { - sub := input[p:][:16] //BCE hint for compiler - v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 - v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 - v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 - v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 - } - input = input[p:] - n -= p - h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - } - - p := 0 - for n := n - 4; p <= n; p += 4 { - h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3 - h32 = rol17(h32) * prime4 - } - for p < n { - h32 += uint32(input[p]) * prime5 - h32 = rol11(h32) * prime1 - p++ - } - - h32 ^= h32 >> 15 - h32 *= prime2 - h32 ^= h32 >> 13 - h32 *= prime3 - h32 ^= h32 >> 16 - - return h32 -} - -// Uint32Zero hashes x with seed 0. 
-func Uint32Zero(x uint32) uint32 { - h := prime5 + 4 + x*prime3 - h = rol17(h) * prime4 - h ^= h >> 15 - h *= prime2 - h ^= h >> 13 - h *= prime3 - h ^= h >> 16 - return h -} - -func rol1(u uint32) uint32 { - return u<<1 | u>>31 -} - -func rol7(u uint32) uint32 { - return u<<7 | u>>25 -} - -func rol11(u uint32) uint32 { - return u<<11 | u>>21 -} - -func rol12(u uint32) uint32 { - return u<<12 | u>>20 -} - -func rol13(u uint32) uint32 { - return u<<13 | u>>19 -} - -func rol17(u uint32) uint32 { - return u<<17 | u>>15 -} - -func rol18(u uint32) uint32 { - return u<<18 | u>>14 -} diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go deleted file mode 100644 index a3284bdf708b..000000000000 --- a/vendor/github.com/pierrec/lz4/lz4.go +++ /dev/null @@ -1,116 +0,0 @@ -// Package lz4 implements reading and writing lz4 compressed data (a frame), -// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html. -// -// Although the block level compression and decompression functions are exposed and are fully compatible -// with the lz4 block format definition, they are low level and should not be used directly. -// For a complete description of an lz4 compressed block, see: -// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html -// -// See https://github.com/Cyan4973/lz4 for the reference C implementation. -// -package lz4 - -import ( - "math/bits" - "sync" -) - -const ( - // Extension is the LZ4 frame file name extension - Extension = ".lz4" - // Version is the LZ4 frame format version - Version = 1 - - frameMagic uint32 = 0x184D2204 - frameSkipMagic uint32 = 0x184D2A50 - frameMagicLegacy uint32 = 0x184C2102 - - // The following constants are used to setup the compression algorithm. - minMatch = 4 // the minimum size of the match sequence size (4 bytes) - winSizeLog = 16 // LZ4 64Kb window size limit - winSize = 1 << winSizeLog - winMask = winSize - 1 // 64Kb window of previous data for dependent blocks - compressedBlockFlag = 1 << 31 - compressedBlockMask = compressedBlockFlag - 1 - - // hashLog determines the size of the hash table used to quickly find a previous match position. - // Its value influences the compression speed and memory usage, the lower the faster, - // but at the expense of the compression ratio. - // 16 seems to be the best compromise for fast compression. - hashLog = 16 - htSize = 1 << hashLog - - mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes. -) - -// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. -const ( - blockSize64K = 1 << (16 + 2*iota) - blockSize256K - blockSize1M - blockSize4M -) - -var ( - // Keep a pool of buffers for each valid block sizes. - bsMapValue = [...]*sync.Pool{ - newBufferPool(2 * blockSize64K), - newBufferPool(2 * blockSize256K), - newBufferPool(2 * blockSize1M), - newBufferPool(2 * blockSize4M), - } -) - -// newBufferPool returns a pool for buffers of the given size. -func newBufferPool(size int) *sync.Pool { - return &sync.Pool{ - New: func() interface{} { - return make([]byte, size) - }, - } -} - -// getBuffer returns a buffer to its pool. -func getBuffer(size int) []byte { - idx := blockSizeValueToIndex(size) - 4 - return bsMapValue[idx].Get().([]byte) -} - -// putBuffer returns a buffer to its pool. 
-func putBuffer(size int, buf []byte) { - if cap(buf) > 0 { - idx := blockSizeValueToIndex(size) - 4 - bsMapValue[idx].Put(buf[:cap(buf)]) - } -} -func blockSizeIndexToValue(i byte) int { - return 1 << (16 + 2*uint(i)) -} -func isValidBlockSize(size int) bool { - const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M - - return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1 -} -func blockSizeValueToIndex(size int) byte { - return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2) -} - -// Header describes the various flags that can be set on a Writer or obtained from a Reader. -// The default values match those of the LZ4 frame format definition -// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). -// -// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. -// It is the caller's responsibility to check them if necessary. -type Header struct { - BlockChecksum bool // Compressed blocks checksum flag. - NoChecksum bool // Frame checksum flag. - BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. - Size uint64 // Frame total size. It is _not_ computed by the Writer. - CompressionLevel int // Compression level (higher is better, use 0 for fastest compression). - done bool // Header processed flag (Read or Write and checked). -} - -// Reset reset internal status -func (h *Header) Reset() { - h.done = false -} diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go deleted file mode 100644 index 9a0fb00709d5..000000000000 --- a/vendor/github.com/pierrec/lz4/lz4_go1.10.go +++ /dev/null @@ -1,29 +0,0 @@ -//+build go1.10 - -package lz4 - -import ( - "fmt" - "strings" -) - -func (h Header) String() string { - var s strings.Builder - - s.WriteString(fmt.Sprintf("%T{", h)) - if h.BlockChecksum { - s.WriteString("BlockChecksum: true ") - } - if h.NoChecksum { - s.WriteString("NoChecksum: true ") - } - if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { - s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) - } - if l := h.CompressionLevel; l != 0 { - s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) - } - s.WriteByte('}') - - return s.String() -} diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go deleted file mode 100644 index 12c761a2e7f9..000000000000 --- a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go +++ /dev/null @@ -1,29 +0,0 @@ -//+build !go1.10 - -package lz4 - -import ( - "bytes" - "fmt" -) - -func (h Header) String() string { - var s bytes.Buffer - - s.WriteString(fmt.Sprintf("%T{", h)) - if h.BlockChecksum { - s.WriteString("BlockChecksum: true ") - } - if h.NoChecksum { - s.WriteString("NoChecksum: true ") - } - if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { - s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) - } - if l := h.CompressionLevel; l != 0 { - s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) - } - s.WriteByte('}') - - return s.String() -} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go deleted file mode 100644 index 87dd72bd0db3..000000000000 --- a/vendor/github.com/pierrec/lz4/reader.go +++ /dev/null @@ -1,335 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "fmt" - "io" - "io/ioutil" - - "github.com/pierrec/lz4/internal/xxh32" -) - -// Reader implements the LZ4 frame decoder. -// The Header is set after the first call to Read(). 
-// The Header may change between Read() calls in case of concatenated frames. -type Reader struct { - Header - // Handler called when a block has been successfully read. - // It provides the number of bytes read. - OnBlockDone func(size int) - - buf [8]byte // Scrap buffer. - pos int64 // Current position in src. - src io.Reader // Source. - zdata []byte // Compressed data. - data []byte // Uncompressed data. - idx int // Index of unread bytes into data. - checksum xxh32.XXHZero // Frame hash. - skip int64 // Bytes to skip before next read. - dpos int64 // Position in dest -} - -// NewReader returns a new LZ4 frame decoder. -// No access to the underlying io.Reader is performed. -func NewReader(src io.Reader) *Reader { - r := &Reader{src: src} - return r -} - -// readHeader checks the frame magic number and parses the frame descriptoz. -// Skippable frames are supported even as a first frame although the LZ4 -// specifications recommends skippable frames not to be used as first frames. -func (z *Reader) readHeader(first bool) error { - defer z.checksum.Reset() - - buf := z.buf[:] - for { - magic, err := z.readUint32() - if err != nil { - z.pos += 4 - if !first && err == io.ErrUnexpectedEOF { - return io.EOF - } - return err - } - if magic == frameMagic { - break - } - if magic>>8 != frameSkipMagic>>8 { - return ErrInvalid - } - skipSize, err := z.readUint32() - if err != nil { - return err - } - z.pos += 4 - m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) - if err != nil { - return err - } - z.pos += m - } - - // Header. - if _, err := io.ReadFull(z.src, buf[:2]); err != nil { - return err - } - z.pos += 8 - - b := buf[0] - if v := b >> 6; v != Version { - return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version) - } - if b>>5&1 == 0 { - return ErrBlockDependency - } - z.BlockChecksum = b>>4&1 > 0 - frameSize := b>>3&1 > 0 - z.NoChecksum = b>>2&1 == 0 - - bmsID := buf[1] >> 4 & 0x7 - if bmsID < 4 || bmsID > 7 { - return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID) - } - bSize := blockSizeIndexToValue(bmsID - 4) - z.BlockMaxSize = bSize - - // Allocate the compressed/uncompressed buffers. - // The compressed buffer cannot exceed the uncompressed one. - if n := 2 * bSize; cap(z.zdata) < n { - z.zdata = make([]byte, n, n) - } - if debugFlag { - debug("header block max size id=%d size=%d", bmsID, bSize) - } - z.zdata = z.zdata[:bSize] - z.data = z.zdata[:cap(z.zdata)][bSize:] - z.idx = len(z.data) - - _, _ = z.checksum.Write(buf[0:2]) - - if frameSize { - buf := buf[:8] - if _, err := io.ReadFull(z.src, buf); err != nil { - return err - } - z.Size = binary.LittleEndian.Uint64(buf) - z.pos += 8 - _, _ = z.checksum.Write(buf) - } - - // Header checksum. - if _, err := io.ReadFull(z.src, buf[:1]); err != nil { - return err - } - z.pos++ - if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { - return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h) - } - - z.Header.done = true - if debugFlag { - debug("header read: %v", z.Header) - } - - return nil -} - -// Read decompresses data from the underlying source into the supplied buffer. -// -// Since there can be multiple streams concatenated, Header values may -// change between calls to Read(). If that is the case, no data is actually read from -// the underlying io.Reader, to allow for potential input buffer resizing. 
-func (z *Reader) Read(buf []byte) (int, error) { - if debugFlag { - debug("Read buf len=%d", len(buf)) - } - if !z.Header.done { - if err := z.readHeader(true); err != nil { - return 0, err - } - if debugFlag { - debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", - len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) - } - } - - if len(buf) == 0 { - return 0, nil - } - - if z.idx == len(z.data) { - // No data ready for reading, process the next block. - if debugFlag { - debug("reading block from writer") - } - // Reset uncompressed buffer - z.data = z.zdata[:cap(z.zdata)][len(z.zdata):] - - // Block length: 0 = end of frame, highest bit set: uncompressed. - bLen, err := z.readUint32() - if err != nil { - return 0, err - } - z.pos += 4 - - if bLen == 0 { - // End of frame reached. - if !z.NoChecksum { - // Validate the frame checksum. - checksum, err := z.readUint32() - if err != nil { - return 0, err - } - if debugFlag { - debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum) - } - z.pos += 4 - if h := z.checksum.Sum32(); checksum != h { - return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum) - } - } - - // Get ready for the next concatenated frame and keep the position. - pos := z.pos - z.Reset(z.src) - z.pos = pos - - // Since multiple frames can be concatenated, check for more. - return 0, z.readHeader(false) - } - - if debugFlag { - debug("raw block size %d", bLen) - } - if bLen&compressedBlockFlag > 0 { - // Uncompressed block. - bLen &= compressedBlockMask - if debugFlag { - debug("uncompressed block size %d", bLen) - } - if int(bLen) > cap(z.data) { - return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) - } - z.data = z.data[:bLen] - if _, err := io.ReadFull(z.src, z.data); err != nil { - return 0, err - } - z.pos += int64(bLen) - if z.OnBlockDone != nil { - z.OnBlockDone(int(bLen)) - } - - if z.BlockChecksum { - checksum, err := z.readUint32() - if err != nil { - return 0, err - } - z.pos += 4 - - if h := xxh32.ChecksumZero(z.data); h != checksum { - return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) - } - } - - } else { - // Compressed block. - if debugFlag { - debug("compressed block size %d", bLen) - } - if int(bLen) > cap(z.data) { - return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) - } - zdata := z.zdata[:bLen] - if _, err := io.ReadFull(z.src, zdata); err != nil { - return 0, err - } - z.pos += int64(bLen) - - if z.BlockChecksum { - checksum, err := z.readUint32() - if err != nil { - return 0, err - } - z.pos += 4 - - if h := xxh32.ChecksumZero(zdata); h != checksum { - return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) - } - } - - n, err := UncompressBlock(zdata, z.data) - if err != nil { - return 0, err - } - z.data = z.data[:n] - if z.OnBlockDone != nil { - z.OnBlockDone(n) - } - } - - if !z.NoChecksum { - _, _ = z.checksum.Write(z.data) - if debugFlag { - debug("current frame checksum %x", z.checksum.Sum32()) - } - } - z.idx = 0 - } - - if z.skip > int64(len(z.data[z.idx:])) { - z.skip -= int64(len(z.data[z.idx:])) - z.dpos += int64(len(z.data[z.idx:])) - z.idx = len(z.data) - return 0, nil - } - - z.idx += int(z.skip) - z.dpos += z.skip - z.skip = 0 - - n := copy(buf, z.data[z.idx:]) - z.idx += n - z.dpos += int64(n) - if debugFlag { - debug("copied %d bytes to input", n) - } - - return n, nil -} - -// Seek implements io.Seeker, but supports seeking forward from the current -// position only. 
Any other seek will return an error. Allows skipping output -// bytes which aren't needed, which in some scenarios is faster than reading -// and discarding them. -// Note this may cause future calls to Read() to read 0 bytes if all of the -// data they would have returned is skipped. -func (z *Reader) Seek(offset int64, whence int) (int64, error) { - if offset < 0 || whence != io.SeekCurrent { - return z.dpos + z.skip, ErrUnsupportedSeek - } - z.skip += offset - return z.dpos + z.skip, nil -} - -// Reset discards the Reader's state and makes it equivalent to the -// result of its original state from NewReader, but reading from r instead. -// This permits reusing a Reader rather than allocating a new one. -func (z *Reader) Reset(r io.Reader) { - z.Header = Header{} - z.pos = 0 - z.src = r - z.zdata = z.zdata[:0] - z.data = z.data[:0] - z.idx = 0 - z.checksum.Reset() -} - -// readUint32 reads an uint32 into the supplied buffer. -// The idea is to make use of the already allocated buffers avoiding additional allocations. -func (z *Reader) readUint32() (uint32, error) { - buf := z.buf[:4] - _, err := io.ReadFull(z.src, buf) - x := binary.LittleEndian.Uint32(buf) - return x, err -} diff --git a/vendor/github.com/pierrec/lz4/reader_legacy.go b/vendor/github.com/pierrec/lz4/reader_legacy.go deleted file mode 100644 index 1670a77d02a4..000000000000 --- a/vendor/github.com/pierrec/lz4/reader_legacy.go +++ /dev/null @@ -1,207 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "fmt" - "io" -) - -// ReaderLegacy implements the LZ4Demo frame decoder. -// The Header is set after the first call to Read(). -type ReaderLegacy struct { - Header - // Handler called when a block has been successfully read. - // It provides the number of bytes read. - OnBlockDone func(size int) - - lastBlock bool - buf [8]byte // Scrap buffer. - pos int64 // Current position in src. - src io.Reader // Source. - zdata []byte // Compressed data. - data []byte // Uncompressed data. - idx int // Index of unread bytes into data. - skip int64 // Bytes to skip before next read. - dpos int64 // Position in dest -} - -// NewReaderLegacy returns a new LZ4Demo frame decoder. -// No access to the underlying io.Reader is performed. -func NewReaderLegacy(src io.Reader) *ReaderLegacy { - r := &ReaderLegacy{src: src} - return r -} - -// readHeader checks the frame magic number and parses the frame descriptoz. -// Skippable frames are supported even as a first frame although the LZ4 -// specifications recommends skippable frames not to be used as first frames. -func (z *ReaderLegacy) readLegacyHeader() error { - z.lastBlock = false - magic, err := z.readUint32() - if err != nil { - z.pos += 4 - if err == io.ErrUnexpectedEOF { - return io.EOF - } - return err - } - if magic != frameMagicLegacy { - return ErrInvalid - } - z.pos += 4 - - // Legacy has fixed 8MB blocksizes - // https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame - bSize := blockSize4M * 2 - - // Allocate the compressed/uncompressed buffers. - // The compressed buffer cannot exceed the uncompressed one. - if n := 2 * bSize; cap(z.zdata) < n { - z.zdata = make([]byte, n, n) - } - if debugFlag { - debug("header block max size size=%d", bSize) - } - z.zdata = z.zdata[:bSize] - z.data = z.zdata[:cap(z.zdata)][bSize:] - z.idx = len(z.data) - - z.Header.done = true - if debugFlag { - debug("header read: %v", z.Header) - } - - return nil -} - -// Read decompresses data from the underlying source into the supplied buffer. 
-// -// Since there can be multiple streams concatenated, Header values may -// change between calls to Read(). If that is the case, no data is actually read from -// the underlying io.Reader, to allow for potential input buffer resizing. -func (z *ReaderLegacy) Read(buf []byte) (int, error) { - if debugFlag { - debug("Read buf len=%d", len(buf)) - } - if !z.Header.done { - if err := z.readLegacyHeader(); err != nil { - return 0, err - } - if debugFlag { - debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", - len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) - } - } - - if len(buf) == 0 { - return 0, nil - } - - if z.idx == len(z.data) { - // No data ready for reading, process the next block. - if debugFlag { - debug(" reading block from writer %d %d", z.idx, blockSize4M*2) - } - - // Reset uncompressed buffer - z.data = z.zdata[:cap(z.zdata)][len(z.zdata):] - - bLen, err := z.readUint32() - if err != nil { - return 0, err - } - if debugFlag { - debug(" bLen %d (0x%x) offset = %d (0x%x)", bLen, bLen, z.pos, z.pos) - } - z.pos += 4 - - // Legacy blocks are always compressed, even when detrimental - if debugFlag { - debug(" compressed block size %d", bLen) - } - - if int(bLen) > cap(z.data) { - return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) - } - zdata := z.zdata[:bLen] - if _, err := io.ReadFull(z.src, zdata); err != nil { - return 0, err - } - z.pos += int64(bLen) - - n, err := UncompressBlock(zdata, z.data) - if err != nil { - return 0, err - } - - z.data = z.data[:n] - if z.OnBlockDone != nil { - z.OnBlockDone(n) - } - - z.idx = 0 - - // Legacy blocks are fixed to 8MB, if we read a decompressed block smaller than this - // it means we've reached the end... - if n < blockSize4M*2 { - z.lastBlock = true - } - } - - if z.skip > int64(len(z.data[z.idx:])) { - z.skip -= int64(len(z.data[z.idx:])) - z.dpos += int64(len(z.data[z.idx:])) - z.idx = len(z.data) - return 0, nil - } - - z.idx += int(z.skip) - z.dpos += z.skip - z.skip = 0 - - n := copy(buf, z.data[z.idx:]) - z.idx += n - z.dpos += int64(n) - if debugFlag { - debug("%v] copied %d bytes to input (%d:%d)", z.lastBlock, n, z.idx, len(z.data)) - } - if z.lastBlock && len(z.data) == z.idx { - return n, io.EOF - } - return n, nil -} - -// Seek implements io.Seeker, but supports seeking forward from the current -// position only. Any other seek will return an error. Allows skipping output -// bytes which aren't needed, which in some scenarios is faster than reading -// and discarding them. -// Note this may cause future calls to Read() to read 0 bytes if all of the -// data they would have returned is skipped. -func (z *ReaderLegacy) Seek(offset int64, whence int) (int64, error) { - if offset < 0 || whence != io.SeekCurrent { - return z.dpos + z.skip, ErrUnsupportedSeek - } - z.skip += offset - return z.dpos + z.skip, nil -} - -// Reset discards the Reader's state and makes it equivalent to the -// result of its original state from NewReader, but reading from r instead. -// This permits reusing a Reader rather than allocating a new one. -func (z *ReaderLegacy) Reset(r io.Reader) { - z.Header = Header{} - z.pos = 0 - z.src = r - z.zdata = z.zdata[:0] - z.data = z.data[:0] - z.idx = 0 -} - -// readUint32 reads an uint32 into the supplied buffer. -// The idea is to make use of the already allocated buffers avoiding additional allocations. 
-func (z *ReaderLegacy) readUint32() (uint32, error) { - buf := z.buf[:4] - _, err := io.ReadFull(z.src, buf) - x := binary.LittleEndian.Uint32(buf) - return x, err -} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go index 8a5c0d36e23d..fec8adb03a12 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go @@ -31,11 +31,10 @@ func recoverBlock(e *error) { } } -// blockHash hashes the lower five bytes of x into a value < htSize. +// blockHash hashes the lower 6 bytes into a value < htSize. func blockHash(x uint64) uint32 { const prime6bytes = 227718039650203 - x &= 1<<40 - 1 - return uint32((x * prime6bytes) >> (64 - hashLog)) + return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) } func CompressBlockBound(n int) int { @@ -63,7 +62,10 @@ type Compressor struct { // inspecting the input stream. table [htSize]uint16 - needsReset bool + // Bitmap indicating which positions in the table are in use. + // This allows us to quickly reset the table for reuse, + // without having to zero everything. + inUse [htSize / 32]uint32 } // Get returns the position of a presumptive match for the hash h. @@ -71,7 +73,10 @@ type Compressor struct { // If si < winSize, the return value may be negative. func (c *Compressor) get(h uint32, si int) int { h &= htSize - 1 - i := int(c.table[h]) + i := 0 + if c.inUse[h/32]&(1<<(h%32)) != 0 { + i = int(c.table[h]) + } i += si &^ winMask if i >= si { // Try previous 64kiB block (negative when in first block). @@ -83,8 +88,11 @@ func (c *Compressor) get(h uint32, si int) int { func (c *Compressor) put(h uint32, si int) { h &= htSize - 1 c.table[h] = uint16(si) + c.inUse[h/32] |= 1 << (h % 32) } +func (c *Compressor) reset() { c.inUse = [htSize / 32]uint32{} } + var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }} func CompressBlock(src, dst []byte) (int, error) { @@ -95,11 +103,8 @@ func CompressBlock(src, dst []byte) (int, error) { } func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { - if c.needsReset { - // Zero out reused table to avoid non-deterministic output (issue #65). - c.table = [htSize]uint16{} - } - c.needsReset = true // Only false on first call. + // Zero out reused table to avoid non-deterministic output (issue #65). + c.reset() // Return 0, nil only if the destination buffer size is < CompressBlockBound. isNotCompressible := len(dst) < CompressBlockBound(len(src)) @@ -117,9 +122,9 @@ func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { goto lastLiterals } - // Fast scan strategy: the hash table only stores the last five-byte sequences. + // Fast scan strategy: the hash table only stores the last 4 bytes sequences. for si < sn { - // Hash the next five bytes (sequence)... + // Hash the next 6 bytes (sequence)... match := binary.LittleEndian.Uint64(src[si:]) h := blockHash(match) h2 := blockHash(match >> 8) @@ -127,7 +132,7 @@ func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { // We check a match at s, s+1 and s+2 and pick the first one we get. // Checking 3 only requires us to load the source one. ref := c.get(h, si) - ref2 := c.get(h2, si) + ref2 := c.get(h2, si+1) c.put(h, si) c.put(h2, si+1) @@ -176,7 +181,7 @@ func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { si, mLen = si+mLen, si+minMatch // Find the longest match by looking by batches of 8 bytes. 
- for si+8 < sn { + for si+8 <= sn { x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) if x == 0 { si += 8 diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go index 84b89833ed52..a1bfa99e4b45 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go @@ -8,9 +8,12 @@ const ( Block256Kb Block1Mb Block4Mb - Block8Mb = 2 * Block4Mb ) +// In legacy mode all blocks are compressed regardless +// of the compressed size: use the bound size. +var Block8Mb = uint32(CompressBlockBound(8 << 20)) + var ( BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }} BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s index d86e981fc949..1d00133fac48 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s @@ -7,8 +7,8 @@ // AX scratch // BX scratch -// CX scratch -// DX token +// CX literal and match lengths +// DX token, match offset // // DI &dst // SI &src @@ -44,28 +44,26 @@ TEXT ·decodeBlock(SB), NOSPLIT, $48-80 MOVQ R9, R13 SUBQ $16, R13 -loop: - // for si < len(src) - CMPQ SI, R9 - JAE end + XORL CX, CX +loop: // token := uint32(src[si]) - MOVBQZX (SI), DX + MOVBLZX (SI), DX INCQ SI // lit_len = token >> 4 // if lit_len > 0 // CX = lit_len - MOVQ DX, CX - SHRQ $4, CX + MOVL DX, CX + SHRL $4, CX // if lit_len != 0xF - CMPQ CX, $0xF - JEQ lit_len_loop_pre + CMPL CX, $0xF + JEQ lit_len_loop CMPQ DI, R12 - JAE lit_len_loop_pre + JAE copy_literal CMPQ SI, R13 - JAE lit_len_loop_pre + JAE copy_literal // copy shortcut @@ -84,13 +82,15 @@ loop: ADDQ CX, DI ADDQ CX, SI - MOVQ DX, CX - ANDQ $0xF, CX + MOVL DX, CX + ANDL $0xF, CX // The second stage: prepare for match copying, decode full info. // If it doesn't work out, the info won't be wasted. // offset := uint16(data[:2]) - MOVWQZX (SI), DX + MOVWLZX (SI), DX + TESTL DX, DX + JE err_corrupt ADDQ $2, SI JC err_short_buf @@ -102,9 +102,9 @@ loop: // if we can't do the second stage then jump straight to read the // match length, we already have the offset. - CMPQ CX, $0xF + CMPL CX, $0xF JEQ match_len_loop_pre - CMPQ DX, $8 + CMPL DX, $8 JLT match_len_loop_pre CMPQ AX, R11 JB match_len_loop_pre @@ -122,15 +122,9 @@ loop: LEAQ const_minMatch(DI)(CX*1), DI // shortcut complete, load next token - JMP loop - -lit_len_loop_pre: - // if lit_len > 0 - CMPQ CX, $0 - JEQ offset - CMPQ CX, $0xF - JNE copy_literal + JMP loopcheck + // Read the rest of the literal length: // do { BX = src[si++]; lit_len += BX } while (BX == 0xFF). lit_len_loop: CMPQ SI, R9 @@ -157,24 +151,28 @@ copy_literal: CMPQ BX, R8 JA err_short_buf - // whats a good cut off to call memmove? - CMPQ CX, $16 + // Copy literals of <=48 bytes through the XMM registers. 
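Stepping back from the internals: the block.go changes above (the 6-byte blockHash, the inUse bitmap replacing needsReset, and Block8Mb being sized from CompressBlockBound) only change how the compressor's hash table is maintained; the block-level API callers use is unchanged. A hedged sketch of sizing the destination with CompressBlockBound and round-tripping a block, assuming the v4 package-level CompressBlock/UncompressBlock helpers keep their usual signatures:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/pierrec/lz4/v4"
    )

    func main() {
        src := bytes.Repeat([]byte("hello lz4 block "), 1024)

        // Worst-case output size for an incompressible block.
        dst := make([]byte, lz4.CompressBlockBound(len(src)))

        // A nil hash table lets CompressBlock manage its own scratch space.
        n, err := lz4.CompressBlock(src, dst, nil)
        if err != nil {
            panic(err)
        }
        // n == 0 would mean the data did not fit dst; with a bound-sized dst
        // this should not trigger.
        if n == 0 {
            fmt.Println("incompressible input, store it raw")
            return
        }

        out := make([]byte, len(src))
        m, err := lz4.UncompressBlock(dst[:n], out)
        if err != nil {
            panic(err)
        }
        fmt.Println("round trip ok:", m == len(src) && bytes.Equal(out[:m], src))
    }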
+ CMPQ CX, $48 JGT memmove_lit - // if len(dst[di:]) < 16 + // if len(dst[di:]) < 48 MOVQ R8, AX SUBQ DI, AX - CMPQ AX, $16 + CMPQ AX, $48 JLT memmove_lit - // if len(src[si:]) < 16 - MOVQ R9, AX - SUBQ SI, AX - CMPQ AX, $16 + // if len(src[si:]) < 48 + MOVQ R9, BX + SUBQ SI, BX + CMPQ BX, $48 JLT memmove_lit MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU 32(SI), X2 MOVOU X0, (DI) + MOVOU X1, 16(DI) + MOVOU X2, 32(DI) ADDQ CX, SI ADDQ CX, DI @@ -215,13 +213,13 @@ memmove_lit: SUBQ $16, R13 finish_lit_copy: - CMPQ SI, R9 - JAE end - -offset: // CX := mLen // free up DX to use for offset - MOVQ DX, CX + MOVL DX, CX + ANDL $0xF, CX + + CMPQ SI, R9 + JAE end // offset // si += 2 @@ -233,10 +231,8 @@ offset: MOVWQZX -2(SI), DX // 0 offset is invalid - CMPQ DX, $0 - JEQ err_corrupt - - ANDB $0xF, CX + TESTL DX, DX + JEQ err_corrupt match_len_loop_pre: // if mlen != 0xF @@ -256,8 +252,7 @@ match_len_loop: JE match_len_loop copy_match: - // mLen += minMatch - ADDQ $4, CX + ADDQ $const_minMatch, CX // check we have match_len bytes left in dst // di+match_len < len(dst) @@ -304,7 +299,7 @@ copy_match_loop: DECQ CX JNZ copy_match_loop - JMP loop + JMP loopcheck copy_interior_match: CMPQ CX, $16 @@ -320,7 +315,8 @@ copy_interior_match: MOVOU X0, (DI) ADDQ CX, DI - JMP loop + XORL CX, CX + JMP loopcheck copy_match_from_dict: // CX = match_len @@ -423,8 +419,21 @@ memmove_match: SUBQ $16, R13 MOVQ dict_base+48(FP), R14 MOVQ dict_len+56(FP), R15 + XORL CX, CX + +loopcheck: + // for si < len(src) + CMPQ SI, R9 + JB loop - JMP loop +end: + // Remaining length must be zero. + TESTQ CX, CX + JNE err_corrupt + + SUBQ R11, DI + MOVQ DI, ret+72(FP) + RET err_corrupt: MOVQ $-1, ret+72(FP) @@ -437,8 +446,3 @@ err_short_buf: err_short_dict: MOVQ $-3, ret+72(FP) RET - -end: - SUBQ R11, DI - MOVQ DI, ret+72(FP) - RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s index 0c5696ef1ad7..20b21fcf15ec 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s @@ -110,13 +110,13 @@ copyLiteralFinish: MOVB.NE tmp2, -1(dst) copyLiteralDone: - CMP src, srcend - BEQ end - // Initial part of match length. // This frees up the token register for reuse as offset. AND $15, token, len + CMP src, srcend + BEQ end + // Read offset. ADD.S $2, src BCS shortSrc @@ -213,6 +213,8 @@ copyMatchDone: BNE loop end: + CMP $0, len + BNE corrupt SUB dstorig, dst, tmp1 MOVW tmp1, ret+36(FP) RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s index cb44977a9af2..c43e8a8d28e4 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s @@ -8,23 +8,25 @@ #include "textflag.h" // Register allocation. -#define dst R0 -#define dstorig R1 -#define src R2 -#define dstend R3 -#define srcend R4 -#define match R5 // Match address. -#define dict R6 -#define dictlen R7 -#define dictend R8 -#define token R9 -#define len R10 // Literal and match lengths. -#define lenRem R11 -#define offset R12 // Match offset. -#define tmp1 R13 -#define tmp2 R14 -#define tmp3 R15 -#define tmp4 R16 +#define dst R0 +#define dstorig R1 +#define src R2 +#define dstend R3 +#define dstend16 R4 // dstend - 16 +#define srcend R5 +#define srcend16 R6 // srcend - 16 +#define match R7 // Match address. 
+#define dict R8 +#define dictlen R9 +#define dictend R10 +#define token R11 +#define len R12 // Literal and match lengths. +#define lenRem R13 +#define offset R14 // Match offset. +#define tmp1 R15 +#define tmp2 R16 +#define tmp3 R17 +#define tmp4 R19 // func decodeBlock(dst, src, dict []byte) int TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80 @@ -36,6 +38,12 @@ TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80 CBZ srcend, shortSrc ADD src, srcend + // dstend16 = max(dstend-16, 0) and similarly for srcend16. + SUBS $16, dstend, dstend16 + CSEL LO, ZR, dstend16, dstend16 + SUBS $16, srcend, srcend16 + CSEL LO, ZR, srcend16, srcend16 + LDP dict_base+48(FP), (dict, dictlen) ADD dict, dictlen, dictend @@ -71,27 +79,31 @@ readLitlenDone: // Copy literal. SUBS $16, len BLO copyLiteralShort - AND $15, len, lenRem copyLiteralLoop: - SUBS $16, len LDP.P 16(src), (tmp1, tmp2) STP.P (tmp1, tmp2), 16(dst) + SUBS $16, len BPL copyLiteralLoop - // lenRem = len%16 is the remaining number of bytes we need to copy. - // Since len was >= 16, we can do this in one load and one store, - // overlapping with the last load and store, without worrying about - // writing out of bounds. - ADD lenRem, src - ADD lenRem, dst - LDP -16(src), (tmp1, tmp2) - STP (tmp1, tmp2), -16(dst) + // Copy (final part of) literal of length 0-15. + // If we have >=16 bytes left in src and dst, just copy 16 bytes. +copyLiteralShort: + CMP dstend16, dst + CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO). + BHS copyLiteralShortEnd + + AND $15, len + + LDP (src), (tmp1, tmp2) + ADD len, src + STP (tmp1, tmp2), (dst) + ADD len, dst B copyLiteralDone - // Copy literal of length 0-15. -copyLiteralShort: + // Safe but slow copy near the end of src, dst. +copyLiteralShortEnd: TBZ $3, len, 3(PC) MOVD.P 8(src), tmp1 MOVD.P tmp1, 8(dst) @@ -106,6 +118,9 @@ copyLiteralShort: MOVB.P tmp4, 1(dst) copyLiteralDone: + // Initial part of match length. + AND $15, token, len + CMP src, srcend BEQ end @@ -117,8 +132,7 @@ copyLiteralDone: MOVHU -2(src), offset CBZ offset, corrupt - // Read match length. - AND $15, token, len + // Read rest of match length. CMP $15, len BNE readMatchlenDone @@ -159,14 +173,12 @@ copyDict: CCMP NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag. BNE copyDict - CBZ len, copyMatchDone + CBZ len, copyMatchDone // If the match extends beyond the dictionary, the rest is at dstorig. + // Recompute the offset for the next check. MOVD dstorig, match - - // The code up to copyMatchLoop1 assumes len >= minMatch. - CMP $const_minMatch, len - BLO copyMatchLoop1 + SUB dstorig, dst, offset copyMatchTry8: // Copy doublewords if both len and offset are at least eight. @@ -178,42 +190,30 @@ copyMatchTry8: AND $7, len, lenRem SUB $8, len copyMatchLoop8: - SUBS $8, len MOVD.P 8(match), tmp1 MOVD.P tmp1, 8(dst) + SUBS $8, len BPL copyMatchLoop8 - ADD lenRem, match + MOVD (match)(len), tmp2 // match+len == match+lenRem-8. ADD lenRem, dst - MOVD -8(match), tmp2 + MOVD $0, len MOVD tmp2, -8(dst) B copyMatchDone - // 4× unrolled byte copy loop for the overlapping case. -copyMatchLoop4: - SUB $4, len - MOVBU.P 4(match), tmp1 - MOVB.P tmp1, 4(dst) - MOVBU -3(match), tmp2 - MOVB tmp2, -3(dst) - MOVBU -2(match), tmp3 - MOVB tmp3, -2(dst) - MOVBU -1(match), tmp4 - MOVB tmp4, -1(dst) - CBNZ len, copyMatchLoop4 - copyMatchLoop1: - // Finish with a byte-at-a-time copy. - SUB $1, len + // Byte-at-a-time copy for small offsets. 
MOVBU.P 1(match), tmp2 MOVB.P tmp2, 1(dst) - CBNZ len, copyMatchLoop1 + SUBS $1, len + BNE copyMatchLoop1 copyMatchDone: CMP src, srcend BNE loop end: + CBNZ len, corrupt SUB dstorig, dst, tmp1 MOVD tmp1, ret+72(FP) RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go index 56a7c9e7052d..8d9023d1007e 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go @@ -1,3 +1,4 @@ +//go:build (amd64 || arm || arm64) && !appengine && gc && !noasm // +build amd64 arm arm64 // +build !appengine // +build gc diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go index 2ecce682292f..9f568fbb1afb 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go @@ -1,3 +1,4 @@ +//go:build (!amd64 && !arm && !arm64) || appengine || !gc || noasm // +build !amd64,!arm,!arm64 appengine !gc noasm package lz4block @@ -10,9 +11,13 @@ func decodeBlock(dst, src, dict []byte) (ret int) { // Restrict capacities so we don't read or write out of bounds. dst = dst[:len(dst):len(dst)] src = src[:len(src):len(src)] - dictLen := uint(len(dict)) const hasError = -2 + + if len(src) == 0 { + return hasError + } + defer func() { if recover() != nil { ret = hasError @@ -20,7 +25,7 @@ func decodeBlock(dst, src, dict []byte) (ret int) { }() var si, di uint - for { + for si < uint(len(src)) { // Literals and match lengths (token). b := uint(src[si]) si++ @@ -43,16 +48,14 @@ func decodeBlock(dst, src, dict []byte) (ret int) { mLen += 4 if offset := u16(src[si:]); mLen <= offset && offset < di { i := di - offset - end := i + 18 - if end > uint(len(dst)) { - // The remaining buffer may not hold 18 bytes. - // See https://github.com/pierrec/lz4/issues/51. - end = uint(len(dst)) + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. + if end := i + 18; end <= uint(len(dst)) { + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue } - copy(dst[di:], dst[i:end]) - si += 2 - di += mLen - continue } } case lLen == 0xF: @@ -73,9 +76,11 @@ func decodeBlock(dst, src, dict []byte) (ret int) { di += lLen } } - if si == uint(len(src)) { - return int(di) - } else if si > uint(len(src)) { + + mLen := b & 0xF + if si == uint(len(src)) && mLen == 0 { + break + } else if si >= uint(len(src)) { return hasError } @@ -86,7 +91,7 @@ func decodeBlock(dst, src, dict []byte) (ret int) { si += 2 // Match. - mLen := minMatch + b&0xF + mLen += minMatch if mLen == minMatch+0xF { for { x := uint(src[si]) @@ -102,35 +107,17 @@ func decodeBlock(dst, src, dict []byte) (ret int) { // Copy the match. if di < offset { - // The match is beyond our block, meaning in the dictionary - if offset-di > mLen { - // The match is entirely contained in the dictionary. Just copy! - copy(dst[di:di+mLen], dict[dictLen+di-offset:dictLen+di-offset+mLen]) - di = di + mLen - } else { - // The match stretches over the dictionary and our block - copySize := offset - di - restSize := mLen - copySize - - copy(dst[di:di+copySize], dict[dictLen-copySize:]) - di = di + copySize - - if di < restSize { - // Overlap - we want to copy more than what we have available, - // so copy byte per byte. 
- copyFrom := 0 - endOfMatch := di + restSize - for di < endOfMatch { - dst[di] = dst[copyFrom] - di = di + 1 - copyFrom = copyFrom + 1 - } - } else { - copy(dst[di:di+restSize], dst[0:restSize]) - di = di + restSize - } + // The match is beyond our block, meaning the first part + // is in the dictionary. + fromDict := dict[uint(len(dict))+di-offset:] + n := uint(copy(dst[di:di+mLen], fromDict)) + di += n + if mLen -= n; mLen == 0 { + continue } - continue + // We copied n = offset-di bytes from the dictionary, + // then set di = di+n = offset, so the following code + // copies from dst[di-offset:] = dst[0:]. } expanded := dst[di-offset:] @@ -145,6 +132,8 @@ func decodeBlock(dst, src, dict []byte) (ret int) { } di += uint(copy(dst[di:di+mLen], expanded[:mLen])) } + + return int(di) } func u16(p []byte) uint { return uint(binary.LittleEndian.Uint16(p)) } diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go index e96465460c5d..459086f09b29 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go @@ -224,7 +224,9 @@ func (b *FrameDataBlock) Close(f *Frame) { func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock { data := b.data if f.isLegacy() { - data = data[:cap(data)] + // In legacy mode, the buffer is sized according to CompressBlockBound, + // but only 8Mb is buffered for compression. + src = src[:8<<20] } else { data = data[:len(src)] // trigger the incompressible flag in CompressBlock } diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go index 8d3206a87c55..651d10c10474 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go @@ -1,5 +1,5 @@ // Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). -// (https://github.com/Cyan4973/XXH/) +// (ported from the reference implementation https://github.com/Cyan4973/xxHash/) package xxh32 import ( diff --git a/vendor/github.com/pierrec/lz4/v4/reader.go b/vendor/github.com/pierrec/lz4/v4/reader.go index d084e264dd2f..275daad7cb98 100644 --- a/vendor/github.com/pierrec/lz4/v4/reader.go +++ b/vendor/github.com/pierrec/lz4/v4/reader.go @@ -196,10 +196,8 @@ func (r *Reader) read(buf []byte) (int, error) { } // Reset clears the state of the Reader r such that it is equivalent to its -// initial state from NewReader, but instead writing to writer. +// initial state from NewReader, but instead reading from reader. // No access to reader is performed. -// -// w.Close must be called before Reset. func (r *Reader) Reset(reader io.Reader) { if r.data != nil { lz4block.Put(r.data) diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go index 03261c593ce5..77699f2b54aa 100644 --- a/vendor/github.com/pierrec/lz4/v4/writer.go +++ b/vendor/github.com/pierrec/lz4/v4/writer.go @@ -65,10 +65,8 @@ func (w *Writer) isNotConcurrent() bool { // init sets up the Writer when in newState. It does not change the Writer state. 
func (w *Writer) init() error { w.frame.InitW(w.src, w.num, w.legacy) - if true || !w.isNotConcurrent() { - size := w.frame.Descriptor.Flags.BlockSizeIndex() - w.data = size.Get() - } + size := w.frame.Descriptor.Flags.BlockSizeIndex() + w.data = size.Get() w.idx = 0 return w.frame.Descriptor.Write(w.frame, w.src) } @@ -146,9 +144,8 @@ func (w *Writer) write(data []byte, safe bool) error { return nil } -// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, -// but does not close the underlying io.Writer. -func (w *Writer) Close() (err error) { +// Flush any buffered data to the underlying writer immediately. +func (w *Writer) Flush() (err error) { switch w.state.state { case writeState: case errorState: @@ -156,7 +153,7 @@ func (w *Writer) Close() (err error) { default: return nil } - defer w.state.nextd(&err) + if w.idx > 0 { // Flush pending data, disable w.data freeing as it is done later on. if err = w.write(w.data[:w.idx], false); err != nil { @@ -164,13 +161,22 @@ func (w *Writer) Close() (err error) { } w.idx = 0 } - err = w.frame.CloseW(w.src, w.num) + return nil +} + +// Close closes the Writer, flushing any unwritten data to the underlying writer +// without closing it. +func (w *Writer) Close() error { + if err := w.Flush(); err != nil { + return err + } + err := w.frame.CloseW(w.src, w.num) // It is now safe to free the buffer. if w.data != nil { lz4block.Put(w.data) w.data = nil } - return + return err } // Reset clears the state of the Writer w such that it is equivalent to its @@ -228,6 +234,5 @@ func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { data = size.Get() } } - err = w.Close() return } diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go deleted file mode 100644 index f066d56305e6..000000000000 --- a/vendor/github.com/pierrec/lz4/writer.go +++ /dev/null @@ -1,422 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "fmt" - "io" - "runtime" - - "github.com/pierrec/lz4/internal/xxh32" -) - -// zResult contains the results of compressing a block. -type zResult struct { - size uint32 // Block header - data []byte // Compressed data - checksum uint32 // Data checksum -} - -// Writer implements the LZ4 frame encoder. -type Writer struct { - Header - // Handler called when a block has been successfully written out. - // It provides the number of bytes written. - OnBlockDone func(size int) - - buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes - dst io.Writer // Destination. - checksum xxh32.XXHZero // Frame checksum. - data []byte // Data to be compressed + buffer for compressed data. - idx int // Index into data. - hashtable [winSize]int // Hash table used in CompressBlock(). - - // For concurrency. - c chan chan zResult // Channel for block compression goroutines and writer goroutine. - err error // Any error encountered while writing to the underlying destination. -} - -// NewWriter returns a new LZ4 frame encoder. -// No access to the underlying io.Writer is performed. -// The supplied Header is checked at the first Write. -// It is ok to change it before the first Write but then not until a Reset() is performed. -func NewWriter(dst io.Writer) *Writer { - z := new(Writer) - z.Reset(dst) - return z -} - -// WithConcurrency sets the number of concurrent go routines used for compression. -// A negative value sets the concurrency to GOMAXPROCS. 
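The v4 writer.go hunk above splits the old Close behaviour: Flush now writes any buffered block without ending the frame, and Close ends the frame but is no longer called implicitly at the end of ReadFrom. Callers therefore have to close the writer explicitly. A minimal sketch of the updated usage, assuming the vendored v4 package; the file plumbing is illustrative:

    package main

    import (
        "io"
        "os"

        "github.com/pierrec/lz4/v4"
    )

    func compressStream(dst io.Writer, src io.Reader) error {
        zw := lz4.NewWriter(dst)
        // ReadFrom buffers and compresses whole blocks but, after this change,
        // no longer closes the frame for us.
        if _, err := zw.ReadFrom(src); err != nil {
            return err
        }
        // Close flushes any pending block and writes the frame end mark and checksum.
        return zw.Close()
    }

    func main() {
        if err := compressStream(os.Stdout, os.Stdin); err != nil {
            panic(err)
        }
    }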
-func (z *Writer) WithConcurrency(n int) *Writer { - switch { - case n == 0 || n == 1: - z.c = nil - return z - case n < 0: - n = runtime.GOMAXPROCS(0) - } - z.c = make(chan chan zResult, n) - // Writer goroutine managing concurrent block compression goroutines. - go func() { - // Process next block compression item. - for c := range z.c { - // Read the next compressed block result. - // Waiting here ensures that the blocks are output in the order they were sent. - // The incoming channel is always closed as it indicates to the caller that - // the block has been processed. - res := <-c - n := len(res.data) - if n == 0 { - // Notify the block compression routine that we are done with its result. - // This is used when a sentinel block is sent to terminate the compression. - close(c) - return - } - // Write the block. - if err := z.writeUint32(res.size); err != nil && z.err == nil { - z.err = err - } - if _, err := z.dst.Write(res.data); err != nil && z.err == nil { - z.err = err - } - if z.BlockChecksum { - if err := z.writeUint32(res.checksum); err != nil && z.err == nil { - z.err = err - } - } - // It is now safe to release the buffer as no longer in use by any goroutine. - putBuffer(cap(res.data), res.data) - if h := z.OnBlockDone; h != nil { - h(n) - } - close(c) - } - }() - return z -} - -// newBuffers instantiates new buffers which size matches the one in Header. -// The returned buffers are for decompression and compression respectively. -func (z *Writer) newBuffers() { - bSize := z.Header.BlockMaxSize - buf := getBuffer(bSize) - z.data = buf[:bSize] // Uncompressed buffer is the first half. -} - -// freeBuffers puts the writer's buffers back to the pool. -func (z *Writer) freeBuffers() { - // Put the buffer back into the pool, if any. - putBuffer(z.Header.BlockMaxSize, z.data) - z.data = nil -} - -// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. -func (z *Writer) writeHeader() error { - // Default to 4Mb if BlockMaxSize is not set. - if z.Header.BlockMaxSize == 0 { - z.Header.BlockMaxSize = blockSize4M - } - // The only option that needs to be validated. - bSize := z.Header.BlockMaxSize - if !isValidBlockSize(z.Header.BlockMaxSize) { - return fmt.Errorf("lz4: invalid block max size: %d", bSize) - } - // Allocate the compressed/uncompressed buffers. - // The compressed buffer cannot exceed the uncompressed one. - z.newBuffers() - z.idx = 0 - - // Size is optional. - buf := z.buf[:] - - // Set the fixed size data: magic number, block max size and flags. - binary.LittleEndian.PutUint32(buf[0:], frameMagic) - flg := byte(Version << 6) - flg |= 1 << 5 // No block dependency. - if z.Header.BlockChecksum { - flg |= 1 << 4 - } - if z.Header.Size > 0 { - flg |= 1 << 3 - } - if !z.Header.NoChecksum { - flg |= 1 << 2 - } - buf[4] = flg - buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4 - - // Current buffer size: magic(4) + flags(1) + block max size (1). - n := 6 - // Optional items. - if z.Header.Size > 0 { - binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) - n += 8 - } - - // The header checksum includes the flags, block max size and optional Size. - buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF) - z.checksum.Reset() - - // Header ready, write it out. - if _, err := z.dst.Write(buf[0 : n+1]); err != nil { - return err - } - z.Header.done = true - if debugFlag { - debug("wrote header %v", z.Header) - } - - return nil -} - -// Write compresses data from the supplied buffer into the underlying io.Writer. 
-// Write does not return until the data has been written. -func (z *Writer) Write(buf []byte) (int, error) { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return 0, err - } - } - if debugFlag { - debug("input buffer len=%d index=%d", len(buf), z.idx) - } - - zn := len(z.data) - var n int - for len(buf) > 0 { - if z.idx == 0 && len(buf) >= zn { - // Avoid a copy as there is enough data for a block. - if err := z.compressBlock(buf[:zn]); err != nil { - return n, err - } - n += zn - buf = buf[zn:] - continue - } - // Accumulate the data to be compressed. - m := copy(z.data[z.idx:], buf) - n += m - z.idx += m - buf = buf[m:] - if debugFlag { - debug("%d bytes copied to buf, current index %d", n, z.idx) - } - - if z.idx < len(z.data) { - // Buffer not filled. - if debugFlag { - debug("need more data for compression") - } - return n, nil - } - - // Buffer full. - if err := z.compressBlock(z.data); err != nil { - return n, err - } - z.idx = 0 - } - - return n, nil -} - -// compressBlock compresses a block. -func (z *Writer) compressBlock(data []byte) error { - if !z.NoChecksum { - _, _ = z.checksum.Write(data) - } - - if z.c != nil { - c := make(chan zResult) - z.c <- c // Send now to guarantee order - - // get a buffer from the pool and copy the data over - block := getBuffer(z.Header.BlockMaxSize)[:len(data)] - copy(block, data) - - go writerCompressBlock(c, z.Header, block) - return nil - } - - zdata := z.data[z.Header.BlockMaxSize:cap(z.data)] - // The compressed block size cannot exceed the input's. - var zn int - - if level := z.Header.CompressionLevel; level != 0 { - zn, _ = CompressBlockHC(data, zdata, level) - } else { - zn, _ = CompressBlock(data, zdata, z.hashtable[:]) - } - - var bLen uint32 - if debugFlag { - debug("block compression %d => %d", len(data), zn) - } - if zn > 0 && zn < len(data) { - // Compressible and compressed size smaller than uncompressed: ok! - bLen = uint32(zn) - zdata = zdata[:zn] - } else { - // Uncompressed block. - bLen = uint32(len(data)) | compressedBlockFlag - zdata = data - } - if debugFlag { - debug("block compression to be written len=%d data len=%d", bLen, len(zdata)) - } - - // Write the block. - if err := z.writeUint32(bLen); err != nil { - return err - } - written, err := z.dst.Write(zdata) - if err != nil { - return err - } - if h := z.OnBlockDone; h != nil { - h(written) - } - - if !z.BlockChecksum { - if debugFlag { - debug("current frame checksum %x", z.checksum.Sum32()) - } - return nil - } - checksum := xxh32.ChecksumZero(zdata) - if debugFlag { - debug("block checksum %x", checksum) - defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }() - } - return z.writeUint32(checksum) -} - -// Flush flushes any pending compressed data to the underlying writer. -// Flush does not return until the data has been written. -// If the underlying writer returns an error, Flush returns that error. -func (z *Writer) Flush() error { - if debugFlag { - debug("flush with index %d", z.idx) - } - if z.idx == 0 { - return nil - } - - data := getBuffer(z.Header.BlockMaxSize)[:len(z.data[:z.idx])] - copy(data, z.data[:z.idx]) - - z.idx = 0 - if z.c == nil { - return z.compressBlock(data) - } - if !z.NoChecksum { - _, _ = z.checksum.Write(data) - } - c := make(chan zResult) - z.c <- c - writerCompressBlock(c, z.Header, data) - return nil -} - -func (z *Writer) close() error { - if z.c == nil { - return nil - } - // Send a sentinel block (no data to compress) to terminate the writer main goroutine. 
- c := make(chan zResult) - z.c <- c - c <- zResult{} - // Wait for the main goroutine to complete. - <-c - // At this point the main goroutine has shut down or is about to return. - z.c = nil - return z.err -} - -// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. -func (z *Writer) Close() error { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return err - } - } - if err := z.Flush(); err != nil { - return err - } - if err := z.close(); err != nil { - return err - } - z.freeBuffers() - - if debugFlag { - debug("writing last empty block") - } - if err := z.writeUint32(0); err != nil { - return err - } - if z.NoChecksum { - return nil - } - checksum := z.checksum.Sum32() - if debugFlag { - debug("stream checksum %x", checksum) - } - return z.writeUint32(checksum) -} - -// Reset clears the state of the Writer z such that it is equivalent to its -// initial state from NewWriter, but instead writing to w. -// No access to the underlying io.Writer is performed. -func (z *Writer) Reset(w io.Writer) { - n := cap(z.c) - _ = z.close() - z.freeBuffers() - z.Header.Reset() - z.dst = w - z.checksum.Reset() - z.idx = 0 - z.err = nil - // reset hashtable to ensure deterministic output. - for i := range z.hashtable { - z.hashtable[i] = 0 - } - z.WithConcurrency(n) -} - -// writeUint32 writes a uint32 to the underlying writer. -func (z *Writer) writeUint32(x uint32) error { - buf := z.buf[:4] - binary.LittleEndian.PutUint32(buf, x) - _, err := z.dst.Write(buf) - return err -} - -// writerCompressBlock compresses data into a pooled buffer and writes its result -// out to the input channel. -func writerCompressBlock(c chan zResult, header Header, data []byte) { - zdata := getBuffer(header.BlockMaxSize) - // The compressed block size cannot exceed the input's. - var zn int - if level := header.CompressionLevel; level != 0 { - zn, _ = CompressBlockHC(data, zdata, level) - } else { - var hashTable [winSize]int - zn, _ = CompressBlock(data, zdata, hashTable[:]) - } - var res zResult - if zn > 0 && zn < len(data) { - res.size = uint32(zn) - res.data = zdata[:zn] - // release the uncompressed block since it is not used anymore - putBuffer(header.BlockMaxSize, data) - } else { - res.size = uint32(len(data)) | compressedBlockFlag - res.data = data - // release the compressed block since it was not used - putBuffer(header.BlockMaxSize, zdata) - } - if header.BlockChecksum { - res.checksum = xxh32.ChecksumZero(res.data) - } - c <- res -} diff --git a/vendor/github.com/pierrec/lz4/writer_legacy.go b/vendor/github.com/pierrec/lz4/writer_legacy.go deleted file mode 100644 index ca8dc8c7f0c6..000000000000 --- a/vendor/github.com/pierrec/lz4/writer_legacy.go +++ /dev/null @@ -1,182 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "io" -) - -// WriterLegacy implements the LZ4Demo frame decoder. -type WriterLegacy struct { - Header - // Handler called when a block has been successfully read. - // It provides the number of bytes read. - OnBlockDone func(size int) - - dst io.Writer // Destination. - data []byte // Data to be compressed + buffer for compressed data. - idx int // Index into data. - hashtable [winSize]int // Hash table used in CompressBlock(). -} - -// NewWriterLegacy returns a new LZ4 encoder for the legacy frame format. -// No access to the underlying io.Writer is performed. -// The supplied Header is checked at the first Write. 
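The deleted v1 writer.go above ran its own goroutine fan-out through WithConcurrency and a channel of zResult blocks, with the writer goroutine re-ordering compressed blocks for output. In v4 the equivalent knobs are options applied to the Writer. The option names below (ConcurrencyOption, CompressionLevelOption, the Level constants) are how I recall the v4 API and should be treated as assumptions; this patch itself does not show them:

    package main

    import (
        "io"
        "os"
        "runtime"

        "github.com/pierrec/lz4/v4"
    )

    func main() {
        zw := lz4.NewWriter(os.Stdout)
        // Assumed v4 option names: compress blocks on all CPUs at a higher level.
        if err := zw.Apply(
            lz4.ConcurrencyOption(runtime.GOMAXPROCS(0)),
            lz4.CompressionLevelOption(lz4.Level5),
        ); err != nil {
            panic(err)
        }
        if _, err := io.Copy(zw, os.Stdin); err != nil {
            panic(err)
        }
        if err := zw.Close(); err != nil {
            panic(err)
        }
    }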
-// It is ok to change it before the first Write but then not until a Reset() is performed. -func NewWriterLegacy(dst io.Writer) *WriterLegacy { - z := new(WriterLegacy) - z.Reset(dst) - return z -} - -// Write compresses data from the supplied buffer into the underlying io.Writer. -// Write does not return until the data has been written. -func (z *WriterLegacy) Write(buf []byte) (int, error) { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return 0, err - } - } - if debugFlag { - debug("input buffer len=%d index=%d", len(buf), z.idx) - } - - zn := len(z.data) - var n int - for len(buf) > 0 { - if z.idx == 0 && len(buf) >= zn { - // Avoid a copy as there is enough data for a block. - if err := z.compressBlock(buf[:zn]); err != nil { - return n, err - } - n += zn - buf = buf[zn:] - continue - } - // Accumulate the data to be compressed. - m := copy(z.data[z.idx:], buf) - n += m - z.idx += m - buf = buf[m:] - if debugFlag { - debug("%d bytes copied to buf, current index %d", n, z.idx) - } - - if z.idx < len(z.data) { - // Buffer not filled. - if debugFlag { - debug("need more data for compression") - } - return n, nil - } - - // Buffer full. - if err := z.compressBlock(z.data); err != nil { - return n, err - } - z.idx = 0 - } - - return n, nil -} - -// writeHeader builds and writes the header to the underlying io.Writer. -func (z *WriterLegacy) writeHeader() error { - // Legacy has fixed 8MB blocksizes - // https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame - bSize := 2 * blockSize4M - - buf := make([]byte, 2*bSize, 2*bSize) - z.data = buf[:bSize] // Uncompressed buffer is the first half. - - z.idx = 0 - - // Header consists of one mageic number, write it out. - if err := binary.Write(z.dst, binary.LittleEndian, frameMagicLegacy); err != nil { - return err - } - z.Header.done = true - if debugFlag { - debug("wrote header %v", z.Header) - } - - return nil -} - -// compressBlock compresses a block. -func (z *WriterLegacy) compressBlock(data []byte) error { - bSize := 2 * blockSize4M - zdata := z.data[bSize:cap(z.data)] - // The compressed block size cannot exceed the input's. - var zn int - - if level := z.Header.CompressionLevel; level != 0 { - zn, _ = CompressBlockHC(data, zdata, level) - } else { - zn, _ = CompressBlock(data, zdata, z.hashtable[:]) - } - - if debugFlag { - debug("block compression %d => %d", len(data), zn) - } - zdata = zdata[:zn] - - // Write the block. - if err := binary.Write(z.dst, binary.LittleEndian, uint32(zn)); err != nil { - return err - } - written, err := z.dst.Write(zdata) - if err != nil { - return err - } - if h := z.OnBlockDone; h != nil { - h(written) - } - return nil -} - -// Flush flushes any pending compressed data to the underlying writer. -// Flush does not return until the data has been written. -// If the underlying writer returns an error, Flush returns that error. -func (z *WriterLegacy) Flush() error { - if debugFlag { - debug("flush with index %d", z.idx) - } - if z.idx == 0 { - return nil - } - - data := z.data[:z.idx] - z.idx = 0 - return z.compressBlock(data) -} - -// Close closes the WriterLegacy, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. 
-func (z *WriterLegacy) Close() error { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return err - } - } - if err := z.Flush(); err != nil { - return err - } - - if debugFlag { - debug("writing last empty block") - } - - return nil -} - -// Reset clears the state of the WriterLegacy z such that it is equivalent to its -// initial state from NewWriterLegacy, but instead writing to w. -// No access to the underlying io.Writer is performed. -func (z *WriterLegacy) Reset(w io.Writer) { - z.Header.Reset() - z.dst = w - z.idx = 0 - // reset hashtable to ensure deterministic output. - for i := range z.hashtable { - z.hashtable[i] = 0 - } -} diff --git a/vendor/github.com/prometheus/client_golang/api/client.go b/vendor/github.com/prometheus/client_golang/api/client.go index 1413f65fe45e..72a01309c3a3 100644 --- a/vendor/github.com/prometheus/client_golang/api/client.go +++ b/vendor/github.com/prometheus/client_golang/api/client.go @@ -17,6 +17,7 @@ package api import ( "bytes" "context" + "errors" "net" "net/http" "net/url" @@ -40,6 +41,10 @@ type Config struct { // The address of the Prometheus to connect to. Address string + // Client is used by the Client to drive HTTP requests. If not provided, + // a new one based on the provided RoundTripper (or DefaultRoundTripper) will be used. + Client *http.Client + // RoundTripper is used by the Client to drive HTTP requests. If not // provided, DefaultRoundTripper will be used. RoundTripper http.RoundTripper @@ -52,6 +57,22 @@ func (cfg *Config) roundTripper() http.RoundTripper { return cfg.RoundTripper } +func (cfg *Config) client() http.Client { + if cfg.Client == nil { + return http.Client{ + Transport: cfg.roundTripper(), + } + } + return *cfg.Client +} + +func (cfg *Config) validate() error { + if cfg.Client != nil && cfg.RoundTripper != nil { + return errors.New("api.Config.RoundTripper and api.Config.Client are mutually exclusive") + } + return nil +} + // Client is the interface for an API client. type Client interface { URL(ep string, args map[string]string) *url.URL @@ -68,9 +89,13 @@ func NewClient(cfg Config) (Client, error) { } u.Path = strings.TrimRight(u.Path, "/") + if err := cfg.validate(); err != nil { + return nil, err + } + return &httpClient{ endpoint: u, - client: http.Client{Transport: cfg.roundTripper()}, + client: cfg.client(), }, nil } @@ -84,7 +109,7 @@ func (c *httpClient) URL(ep string, args map[string]string) *url.URL { for arg, val := range args { arg = ":" + arg - p = strings.Replace(p, arg, val, -1) + p = strings.ReplaceAll(p, arg, val) } u := *c.endpoint diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go index 857512fb8fc6..f74139c71f6e 100644 --- a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go +++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go @@ -109,7 +109,6 @@ func marshalPointJSON(ptr unsafe.Pointer, stream *json.Stream) { stream.WriteRaw(`"`) stream.WriteArrayEnd() - } func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { @@ -230,25 +229,25 @@ type API interface { // Config returns the current Prometheus configuration. Config(ctx context.Context) (ConfigResult, error) // DeleteSeries deletes data for a selection of series in a time range. 
- DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error + DeleteSeries(ctx context.Context, matches []string, startTime, endTime time.Time) error // Flags returns the flag values that Prometheus was launched with. Flags(ctx context.Context) (FlagsResult, error) // LabelNames returns the unique label names present in the block in sorted order by given time range and matchers. - LabelNames(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]string, Warnings, error) + LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error) // LabelValues performs a query for the values of the given label, time range and matchers. - LabelValues(ctx context.Context, label string, matches []string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error) + LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error) // Query performs a query for the given time. - Query(ctx context.Context, query string, ts time.Time) (model.Value, Warnings, error) + Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) // QueryRange performs a query for the given range. - QueryRange(ctx context.Context, query string, r Range) (model.Value, Warnings, error) + QueryRange(ctx context.Context, query string, r Range, opts ...Option) (model.Value, Warnings, error) // QueryExemplars performs a query for exemplars by the given query and time range. - QueryExemplars(ctx context.Context, query string, startTime time.Time, endTime time.Time) ([]ExemplarQueryResult, error) + QueryExemplars(ctx context.Context, query string, startTime, endTime time.Time) ([]ExemplarQueryResult, error) // Buildinfo returns various build information properties about the Prometheus server Buildinfo(ctx context.Context) (BuildinfoResult, error) // Runtimeinfo returns the various runtime information properties about the Prometheus server. Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) // Series finds series by label matchers. - Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, Warnings, error) + Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error) // Snapshot creates a snapshot of all current data into snapshots/- // under the TSDB's data directory and returns the directory as response. Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) @@ -257,9 +256,9 @@ type API interface { // Targets returns an overview of the current state of the Prometheus target discovery. Targets(ctx context.Context) (TargetsResult, error) // TargetsMetadata returns metadata about metrics currently scraped by the target. - TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]MetricMetadata, error) + TargetsMetadata(ctx context.Context, matchTarget, metric, limit string) ([]MetricMetadata, error) // Metadata returns metadata about metrics currently scraped by the metric name. - Metadata(ctx context.Context, metric string, limit string) (map[string][]Metadata, error) + Metadata(ctx context.Context, metric, limit string) (map[string][]Metadata, error) // TSDB returns the cardinality statistics. TSDB(ctx context.Context) (TSDBResult, error) // WalReplay returns the current replay status of the wal. 
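The client_golang changes above add an optional Config.Client, mutually exclusive with RoundTripper, and grow the v1 API's Query and QueryRange signatures with variadic Options such as the WithTimeout option defined further down in this hunk. A hedged sketch putting the pieces together; the address and query string are placeholders:

    package main

    import (
        "context"
        "fmt"
        "net/http"
        "time"

        "github.com/prometheus/client_golang/api"
        v1 "github.com/prometheus/client_golang/api/prometheus/v1"
    )

    func main() {
        client, err := api.NewClient(api.Config{
            Address: "http://localhost:9090",                 // placeholder address
            Client:  &http.Client{Timeout: 30 * time.Second}, // new field; leave RoundTripper unset
        })
        if err != nil {
            panic(err)
        }

        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        // WithTimeout sets the server-side query evaluation timeout.
        result, warnings, err := v1.NewAPI(client).Query(ctx, "up", time.Now(), v1.WithTimeout(5*time.Second))
        if err != nil {
            panic(err)
        }
        if len(warnings) > 0 {
            fmt.Println("warnings:", warnings)
        }
        fmt.Println(result)
    }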
@@ -336,14 +335,15 @@ type RuleGroup struct { // that rules are returned in by the API. // // Rule types can be determined using a type switch: -// switch v := rule.(type) { -// case RecordingRule: -// fmt.Print("got a recording rule") -// case AlertingRule: -// fmt.Print("got a alerting rule") -// default: -// fmt.Printf("unknown rule type %s", v) -// } +// +// switch v := rule.(type) { +// case RecordingRule: +// fmt.Print("got a recording rule") +// case AlertingRule: +// fmt.Print("got a alerting rule") +// default: +// fmt.Printf("unknown rule type %s", v) +// } type Rules []interface{} // AlertingRule models a alerting rule. @@ -699,7 +699,7 @@ func (h *httpAPI) Config(ctx context.Context) (ConfigResult, error) { return res, json.Unmarshal(body, &res) } -func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error { +func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime, endTime time.Time) error { u := h.client.URL(epDeleteSeries, nil) q := u.Query() @@ -772,7 +772,7 @@ func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) { return res, json.Unmarshal(body, &res) } -func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]string, Warnings, error) { +func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error) { u := h.client.URL(epLabels, nil) q := u.Query() q.Set("start", formatTime(startTime)) @@ -795,7 +795,7 @@ func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime ti return labelNames, w, json.Unmarshal(body, &labelNames) } -func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error) { +func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error) { u := h.client.URL(epLabelValues, map[string]string{"name": label}) q := u.Query() q.Set("start", formatTime(startTime)) @@ -818,10 +818,34 @@ func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []strin return labelValues, w, json.Unmarshal(body, &labelValues) } -func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, Warnings, error) { +type apiOptions struct { + timeout time.Duration +} + +type Option func(c *apiOptions) + +// WithTimeout can be used to provide an optional query evaluation timeout for Query and QueryRange. +// https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries +func WithTimeout(timeout time.Duration) Option { + return func(o *apiOptions) { + o.timeout = timeout + } +} + +func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) { u := h.client.URL(epQuery, nil) q := u.Query() + opt := &apiOptions{} + for _, o := range opts { + o(opt) + } + + d := opt.timeout + if d > 0 { + q.Set("timeout", d.String()) + } + q.Set("query", query) if !ts.IsZero() { q.Set("time", formatTime(ts)) @@ -833,10 +857,10 @@ func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model. 
} var qres queryResult - return model.Value(qres.v), warnings, json.Unmarshal(body, &qres) + return qres.v, warnings, json.Unmarshal(body, &qres) } -func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, Warnings, error) { +func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range, opts ...Option) (model.Value, Warnings, error) { u := h.client.URL(epQueryRange, nil) q := u.Query() @@ -845,6 +869,16 @@ func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model. q.Set("end", formatTime(r.End)) q.Set("step", strconv.FormatFloat(r.Step.Seconds(), 'f', -1, 64)) + opt := &apiOptions{} + for _, o := range opts { + o(opt) + } + + d := opt.timeout + if d > 0 { + q.Set("timeout", d.String()) + } + _, body, warnings, err := h.client.DoGetFallback(ctx, u, q) if err != nil { return nil, warnings, err @@ -852,10 +886,10 @@ func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model. var qres queryResult - return model.Value(qres.v), warnings, json.Unmarshal(body, &qres) + return qres.v, warnings, json.Unmarshal(body, &qres) } -func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, Warnings, error) { +func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error) { u := h.client.URL(epSeries, nil) q := u.Query() @@ -938,7 +972,7 @@ func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) { return res, json.Unmarshal(body, &res) } -func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]MetricMetadata, error) { +func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget, metric, limit string) ([]MetricMetadata, error) { u := h.client.URL(epTargetsMetadata, nil) q := u.Query() @@ -962,7 +996,7 @@ func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget string, metri return res, json.Unmarshal(body, &res) } -func (h *httpAPI) Metadata(ctx context.Context, metric string, limit string) (map[string][]Metadata, error) { +func (h *httpAPI) Metadata(ctx context.Context, metric, limit string) (map[string][]Metadata, error) { u := h.client.URL(epMetadata, nil) q := u.Query() @@ -1019,7 +1053,7 @@ func (h *httpAPI) WalReplay(ctx context.Context) (WalReplayStatus, error) { return res, json.Unmarshal(body, &res) } -func (h *httpAPI) QueryExemplars(ctx context.Context, query string, startTime time.Time, endTime time.Time) ([]ExemplarQueryResult, error) { +func (h *httpAPI) QueryExemplars(ctx context.Context, query string, startTime, endTime time.Time) ([]ExemplarQueryResult, error) { u := h.client.URL(epQueryExemplars, nil) q := u.Query() @@ -1127,33 +1161,36 @@ func (h *apiClientImpl) Do(ctx context.Context, req *http.Request) (*http.Respon } return resp, []byte(result.Data), result.Warnings, err - } // DoGetFallback will attempt to do the request as-is, and on a 405 or 501 it // will fallback to a GET request. 
func (h *apiClientImpl) DoGetFallback(ctx context.Context, u *url.URL, args url.Values) (*http.Response, []byte, Warnings, error) { - req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(args.Encode())) + encodedArgs := args.Encode() + req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(encodedArgs)) if err != nil { return nil, nil, nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + // Following comment originates from https://pkg.go.dev/net/http#Transport + // Transport only retries a request upon encountering a network error if the request is + // idempotent and either has no body or has its Request.GetBody defined. HTTP requests + // are considered idempotent if they have HTTP methods GET, HEAD, OPTIONS, or TRACE; or + // if their Header map contains an "Idempotency-Key" or "X-Idempotency-Key" entry. If the + // idempotency key value is a zero-length slice, the request is treated as idempotent but + // the header is not sent on the wire. + req.Header["Idempotency-Key"] = nil resp, body, warnings, err := h.Do(ctx, req) if resp != nil && (resp.StatusCode == http.StatusMethodNotAllowed || resp.StatusCode == http.StatusNotImplemented) { - u.RawQuery = args.Encode() + u.RawQuery = encodedArgs req, err = http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return nil, nil, warnings, err } - - } else { - if err != nil { - return resp, body, warnings, err - } - return resp, body, warnings, nil + return h.Do(ctx, req) } - return h.Do(ctx, req) + return resp, body, warnings, err } func formatTime(t time.Time) string { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index ac1ca3cf5ffb..cf05079fb822 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -69,9 +69,9 @@ type Collector interface { // If a Collector collects the same metrics throughout its lifetime, its // Describe method can simply be implemented as: // -// func (c customCollector) Describe(ch chan<- *Desc) { -// DescribeByCollect(c, ch) -// } +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } // // However, this will not work if the metrics collected change dynamically over // the lifetime of the Collector in a way that their combined set of descriptors diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go index c4d0f5c35b7c..f4c92913a556 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go @@ -14,3 +14,27 @@ // Package collectors provides implementations of prometheus.Collector to // conveniently collect process and Go-related metrics. package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). 
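The DoGetFallback change above leans on a documented net/http behaviour: Transport only retries a request after a network error if it is idempotent, and a present-but-nil Idempotency-Key header marks a POST as idempotent without the header ever being sent. The same trick in isolation, with a placeholder helper name:

    package example

    import (
        "net/http"
        "net/url"
        "strings"
    )

    // postForm sends a form-encoded POST that net/http's Transport may retry
    // on network errors.
    func postForm(c *http.Client, endpoint string, form url.Values) (*http.Response, error) {
        req, err := http.NewRequest(http.MethodPost, endpoint, strings.NewReader(form.Encode()))
        if err != nil {
            return nil, err
        }
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
        // Present but nil: the request is treated as idempotent by the
        // retry logic, and the header does not go on the wire.
        req.Header["Idempotency-Key"] = nil
        return c.Do(req)
    }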
This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. +func NewBuildInfoCollector() prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewBuildInfoCollector() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go index e09f149d7611..d5a7279fb9cf 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go @@ -101,7 +101,7 @@ func (c *dbStatsCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.waitDuration ch <- c.maxIdleClosed ch <- c.maxLifetimeClosed - c.describeNewInGo115(ch) + ch <- c.maxIdleTimeClosed } // Collect implements Collector. @@ -115,5 +115,5 @@ func (c *dbStatsCollector) Collect(ch chan<- prometheus.Metric) { ch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, stats.WaitDuration.Seconds()) ch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed)) ch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed)) - c.collectNewInGo115(ch, stats) + ch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed)) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go similarity index 64% rename from vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go rename to vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go index edaa4e50b7ec..effc57840aeb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go @@ -11,6 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.17 +// +build !go1.17 + package collectors import "github.com/prometheus/client_golang/prometheus" @@ -42,28 +45,5 @@ import "github.com/prometheus/client_golang/prometheus" // NOTE: The problem is solved in Go 1.15, see // https://github.com/golang/go/issues/19812 for the related Go issue. func NewGoCollector() prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. return prometheus.NewGoCollector() } - -// NewBuildInfoCollector returns a collector collecting a single metric -// "go_build_info" with the constant value 1 and three labels "path", "version", -// and "checksum". Their label values contain the main module path, version, and -// checksum, respectively. 
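With NewBuildInfoCollector now exposed from the collectors package (above) and the dbstats collector reporting the MaxIdleTimeClosed stat unconditionally, registration typically goes through that package. A brief sketch, assuming the NewDBStatsCollector constructor keeps its usual signature; the database handle and name are placeholders:

    package metrics

    import (
        "database/sql"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/collectors"
    )

    func register(db *sql.DB) *prometheus.Registry {
        reg := prometheus.NewRegistry()
        reg.MustRegister(
            collectors.NewBuildInfoCollector(),           // go_build_info
            collectors.NewDBStatsCollector(db, "app_db"), // database/sql pool stats, placeholder name
        )
        return reg
    }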
The labels will only have meaningful values if the -// binary is built with Go module support and from source code retrieved from -// the source repository (rather than the local file system). This is usually -// accomplished by building from outside of GOPATH, specifying the full address -// of the main package, e.g. "GO111MODULE=on go run -// github.com/prometheus/client_golang/examples/random". If built without Go -// module support, all label values will be "unknown". If built with Go module -// support but using the source code from the local file system, the "path" will -// be set appropriately, but "checksum" will be empty and "version" will be -// "(devel)". -// -// This collector uses only the build information for the main module. See -// https://github.com/povilasv/prommod for an example of a collector for the -// module dependencies. -func NewBuildInfoCollector() prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. - return prometheus.NewBuildInfoCollector() -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go new file mode 100644 index 000000000000..246c5ea943c8 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -0,0 +1,160 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.17 +// +build go1.17 + +package collectors + +import ( + "regexp" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/internal" +) + +var ( + // MetricsAll allows all the metrics to be collected from Go runtime. + MetricsAll = GoRuntimeMetricsRule{regexp.MustCompile("/.*")} + // MetricsGC allows only GC metrics to be collected from Go runtime. + // e.g. go_gc_cycles_automatic_gc_cycles_total + MetricsGC = GoRuntimeMetricsRule{regexp.MustCompile(`^/gc/.*`)} + // MetricsMemory allows only memory metrics to be collected from Go runtime. + // e.g. go_memory_classes_heap_free_bytes + MetricsMemory = GoRuntimeMetricsRule{regexp.MustCompile(`^/memory/.*`)} + // MetricsScheduler allows only scheduler metrics to be collected from Go runtime. + // e.g. 
go_sched_goroutines_goroutines + MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)} +) + +// WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as: +// +// go_memstats_alloc_bytes +// go_memstats_alloc_bytes_total +// go_memstats_sys_bytes +// go_memstats_lookups_total +// go_memstats_mallocs_total +// go_memstats_frees_total +// go_memstats_heap_alloc_bytes +// go_memstats_heap_sys_bytes +// go_memstats_heap_idle_bytes +// go_memstats_heap_inuse_bytes +// go_memstats_heap_released_bytes +// go_memstats_heap_objects +// go_memstats_stack_inuse_bytes +// go_memstats_stack_sys_bytes +// go_memstats_mspan_inuse_bytes +// go_memstats_mspan_sys_bytes +// go_memstats_mcache_inuse_bytes +// go_memstats_mcache_sys_bytes +// go_memstats_buck_hash_sys_bytes +// go_memstats_gc_sys_bytes +// go_memstats_other_sys_bytes +// go_memstats_next_gc_bytes +// +// so the metrics known from pre client_golang v1.12.0, +// +// NOTE(bwplotka): The above represents runtime.MemStats statistics, but they are +// actually implemented using new runtime/metrics package. (except skipped go_memstats_gc_cpu_fraction +// -- see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 for explanation). +// +// Some users might want to disable this on collector level (although you can use scrape relabelling on Prometheus), +// because similar metrics can be now obtained using WithGoCollectorRuntimeMetrics. Note that the semantics of new +// metrics might be different, plus the names can be change over time with different Go version. +// +// NOTE(bwplotka): Changing metric names can be tedious at times as the alerts, recording rules and dashboards have to be adjusted. +// The old metrics are also very useful, with many guides and books written about how to interpret them. +// +// As a result our recommendation would be to stick with MemStats like metrics and enable other runtime/metrics if you are interested +// in advanced insights Go provides. See ExampleGoCollector_WithAdvancedGoMetrics. +func WithGoCollectorMemStatsMetricsDisabled() func(options *internal.GoCollectorOptions) { + return func(o *internal.GoCollectorOptions) { + o.DisableMemStatsLikeMetrics = true + } +} + +// GoRuntimeMetricsRule allow enabling and configuring particular group of runtime/metrics. +// TODO(bwplotka): Consider adding ability to adjust buckets. +type GoRuntimeMetricsRule struct { + // Matcher represents RE2 expression will match the runtime/metrics from https://golang.bg/src/runtime/metrics/description.go + // Use `regexp.MustCompile` or `regexp.Compile` to create this field. + Matcher *regexp.Regexp +} + +// WithGoCollectorRuntimeMetrics allows enabling and configuring particular group of runtime/metrics. +// See the list of metrics https://golang.bg/src/runtime/metrics/description.go (pick the Go version you use there!). +// You can use this option in repeated manner, which will add new rules. The order of rules is important, the last rule +// that matches particular metrics is applied. +func WithGoCollectorRuntimeMetrics(rules ...GoRuntimeMetricsRule) func(options *internal.GoCollectorOptions) { + rs := make([]internal.GoCollectorRule, len(rules)) + for i, r := range rules { + rs[i] = internal.GoCollectorRule{ + Matcher: r.Matcher, + } + } + + return func(o *internal.GoCollectorOptions) { + o.RuntimeMetricRules = append(o.RuntimeMetricRules, rs...) 
+ } +} + +// WithoutGoCollectorRuntimeMetrics allows disabling group of runtime/metrics that you might have added in WithGoCollectorRuntimeMetrics. +// It behaves similarly to WithGoCollectorRuntimeMetrics just with deny-list semantics. +func WithoutGoCollectorRuntimeMetrics(matchers ...*regexp.Regexp) func(options *internal.GoCollectorOptions) { + rs := make([]internal.GoCollectorRule, len(matchers)) + for i, m := range matchers { + rs[i] = internal.GoCollectorRule{ + Matcher: m, + Deny: true, + } + } + + return func(o *internal.GoCollectorOptions) { + o.RuntimeMetricRules = append(o.RuntimeMetricRules, rs...) + } +} + +// GoCollectionOption represents Go collection option flag. +// Deprecated. +type GoCollectionOption uint32 + +const ( + // GoRuntimeMemStatsCollection represents the metrics represented by runtime.MemStats structure. + // Deprecated. Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector. + GoRuntimeMemStatsCollection GoCollectionOption = 1 << iota + // GoRuntimeMetricsCollection is the new set of metrics represented by runtime/metrics package. + // Deprecated. Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")}) + // function to enable those metrics in the collector. + GoRuntimeMetricsCollection +) + +// WithGoCollections allows enabling different collections for Go collector on top of base metrics. +// Deprecated. Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics. +func WithGoCollections(flags GoCollectionOption) func(options *internal.GoCollectorOptions) { + return func(options *internal.GoCollectorOptions) { + if flags&GoRuntimeMemStatsCollection == 0 { + WithGoCollectorMemStatsMetricsDisabled()(options) + } + + if flags&GoRuntimeMetricsCollection != 0 { + WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")})(options) + } + } +} + +// NewGoCollector returns a collector that exports metrics about the current Go +// process using debug.GCStats (base metrics) and runtime/metrics (both in MemStats style and new ones). +func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewGoCollector(opts...) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 00d70f09b689..a912b75a05b7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -51,7 +51,7 @@ type Counter interface { // will lead to a valid (label-less) exemplar. But if Labels is nil, the current // exemplar is left in place. AddWithExemplar panics if the value is < 0, if any // of the provided labels are invalid, or if the provided labels contain more -// than 64 runes in total. +// than 128 runes in total. type ExemplarAdder interface { AddWithExemplar(value float64, exemplar Labels) } @@ -140,12 +140,13 @@ func (c *counter) get() float64 { } func (c *counter) Write(out *dto.Metric) error { - val := c.get() - + // Read the Exemplar first and the value second. This is to avoid a race condition + // where users see an exemplar for a not-yet-existing observation. 
var exemplar *dto.Exemplar if e := c.exemplar.Load(); e != nil { exemplar = e.(*dto.Exemplar) } + val := c.get() return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) } @@ -245,7 +246,8 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *CounterVec) WithLabelValues(lvs ...string) Counter { c, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -256,7 +258,8 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *CounterVec) With(labels Labels) Counter { c, err := v.GetMetricWith(labels) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 4bb816ab75ac..8bc5e44e2fc4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -20,6 +20,9 @@ import ( "strings" "github.com/cespare/xxhash/v2" + + "github.com/prometheus/client_golang/prometheus/internal" + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/model" @@ -154,7 +157,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * Value: proto.String(v), }) } - sort.Sort(labelPairSorter(d.constLabelPairs)) + sort.Sort(internal.LabelPairSorter(d.constLabelPairs)) return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 98450125d6a3..811072cbd54f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -21,55 +21,66 @@ // All exported functions and methods are safe to be used concurrently unless // specified otherwise. // -// A Basic Example +// # A Basic Example // // As a starting point, a very basic usage example: // -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ) -// ) -// -// func init() { -// // Metrics have to be registered to be exposed: -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } -// -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // The Handler function provides a default handler to expose metrics -// // via an HTTP server. "/metrics" is the usual endpoint for that. 
-// http.Handle("/metrics", promhttp.Handler()) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// type metrics struct { +// cpuTemp prometheus.Gauge +// hdFailures *prometheus.CounterVec +// } +// +// func NewMetrics(reg prometheus.Registerer) *metrics { +// m := &metrics{ +// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }), +// hdFailures: prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ), +// } +// reg.MustRegister(m.cpuTemp) +// reg.MustRegister(m.hdFailures) +// return m +// } +// +// func main() { +// // Create a non-global registry. +// reg := prometheus.NewRegistry() +// +// // Create new metrics and register them using the custom registry. +// m := NewMetrics(reg) +// // Set values for the new created metrics. +// m.cpuTemp.Set(65.3) +// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // Expose metrics and custom registry via an HTTP server +// // using the HandleFor function. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } // // This is a complete program that exports two metrics, a Gauge and a Counter, // the latter with a label attached to turn it into a (one-dimensional) vector. +// It register the metrics using a custom registry and exposes them via an HTTP server +// on the /metrics endpoint. // -// Metrics +// # Metrics // // The number of exported identifiers in this package might appear a bit // overwhelming. However, in addition to the basic plumbing shown in the example @@ -100,7 +111,7 @@ // To create instances of Metrics and their vector versions, you need a suitable // …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. // -// Custom Collectors and constant Metrics +// # Custom Collectors and constant Metrics // // While you could create your own implementations of Metric, most likely you // will only ever implement the Collector interface on your own. At a first @@ -141,7 +152,7 @@ // a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting // shortcuts. // -// Advanced Uses of the Registry +// # Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, // sometimes you might want to handle the errors the registration might cause. @@ -176,23 +187,23 @@ // NewProcessCollector). With a custom registry, you are in control and decide // yourself about the Collectors to register. // -// HTTP Exposition +// # HTTP Exposition // // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example // above. The tools to expose metrics via HTTP are in the promhttp sub-package. // -// Pushing to the Pushgateway +// # Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. 
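The paragraph above points at the push sub-package for Pushgateway support. A brief sketch of how that is typically wired up, assuming the push package's Pusher API (push.New, Collector, Push); the gateway address, job name, and metric are placeholders, not taken from this patch:

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/push"
)

// pushCompletionTime pushes a single gauge to a Pushgateway. All names and the
// gateway URL below are illustrative.
func pushCompletionTime() {
	completion := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "db_backup_last_completion_timestamp_seconds",
		Help: "The timestamp of the last successful completion of a DB backup.",
	})
	completion.SetToCurrentTime()
	if err := push.New("http://pushgateway:9091", "db_backup").
		Collector(completion).
		Push(); err != nil {
		log.Println("Could not push completion time to Pushgateway:", err)
	}
}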
// -// Graphite Bridge +// # Graphite Bridge // // Functions and examples to push metrics from a Gatherer to Graphite can be // found in the graphite sub-package. // -// Other Means of Exposition +// # Other Means of Exposition // // More ways of exposing metrics can easily be added by following the approaches // of the existing implementations. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index bd0733d6a7d6..21271a5bb462 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -210,7 +210,8 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { g, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -221,7 +222,8 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *GaugeVec) With(labels Labels) Gauge { g, err := v.GetMetricWith(labels) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go similarity index 61% rename from vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go rename to vendor/github.com/prometheus/client_golang/prometheus/get_pid.go index 65235069d622..614fd61be95a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go @@ -1,4 +1,4 @@ -// Copyright 2021 The Prometheus Authors +// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,17 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
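The With and WithLabelValues shortcuts documented in the counter.go and gauge.go hunks above panic instead of returning an error; when label values come from untrusted input, the GetMetricWith... variants are the safer choice. A small sketch with illustrative metric and label names:

package main

import "github.com/prometheus/client_golang/prometheus"

// gaugeVecShortcuts contrasts the panicking shortcuts with the error-returning
// getters. All names are illustrative.
func gaugeVecShortcuts() {
	temps := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "room_temperature_celsius",
		Help: "Current room temperature.",
	}, []string{"room"})

	temps.WithLabelValues("kitchen").Set(21.5)                // panics on a bad label count or value
	temps.With(prometheus.Labels{"room": "office"}).Set(23.0) // same shortcut, by label name

	// Error-returning equivalent, useful when label values come from user input.
	if g, err := temps.GetMetricWithLabelValues("cellar"); err == nil {
		g.Set(12.0)
	}
}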
-//go:build !go1.15 -// +build !go1.15 +//go:build !js || wasm +// +build !js wasm -package collectors +package prometheus -import ( - "database/sql" +import "os" - "github.com/prometheus/client_golang/prometheus" -) - -func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) {} - -func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) {} +func getPIDFn() func() (int, error) { + pid := os.Getpid() + return func() (int, error) { + return pid, nil + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go new file mode 100644 index 000000000000..eaf8059ee15d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js && !wasm +// +build js,!wasm + +package prometheus + +func getPIDFn() func() (int, error) { + return func() (int, error) { + return 1, nil + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index 08195b410218..ad9a71a5e0d4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -19,6 +19,10 @@ import ( "time" ) +// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. +// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so +// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is +// populated using runtime/metrics. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { @@ -197,14 +201,6 @@ func goRuntimeMemStats() memStatsMetrics { ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, }, } } @@ -232,7 +228,7 @@ func newBaseGoCollector() baseGoCollector { "A summary of the pause duration of garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( - memstatNamespace("last_gc_time_seconds"), + "go_memstats_last_gc_time_seconds", "Number of seconds since 1970 of last garbage collection.", nil, nil), goInfoDesc: NewDesc( @@ -254,8 +250,9 @@ func (c *baseGoCollector) Describe(ch chan<- *Desc) { // Collect returns the current state of all metrics of the collector. 
func (c *baseGoCollector) Collect(ch chan<- Metric) { ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - n, _ := runtime.ThreadCreateProfile(nil) - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + n := getRuntimeNumThreads() + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n) var stats debug.GCStats stats.PauseQuantiles = make([]time.Duration, 5) @@ -268,7 +265,6 @@ func (c *baseGoCollector) Collect(ch chan<- Metric) { quantiles[0.0] = stats.PauseQuantiles[0].Seconds() ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9) - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go index 24526131e732..897a6e906b3a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go @@ -40,13 +40,28 @@ type goCollector struct { // // Deprecated: Use collectors.NewGoCollector instead. func NewGoCollector() Collector { + msMetrics := goRuntimeMemStats() + msMetrics = append(msMetrics, struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType + }{ + // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }) return &goCollector{ base: newBaseGoCollector(), msLast: &runtime.MemStats{}, msRead: runtime.ReadMemStats, msMaxWait: time.Second, msMaxAge: 5 * time.Minute, - msMetrics: goRuntimeMemStats(), + msMetrics: msMetrics, } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go similarity index 53% rename from vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go rename to vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index d43bdcddabc0..3a2d55e84b1e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -25,10 +25,72 @@ import ( //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" - "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" +) + +const ( + // constants for strings referenced more than once. 
+ goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects" + goGCHeapAllocsObjects = "/gc/heap/allocs:objects" + goGCHeapFreesObjects = "/gc/heap/frees:objects" + goGCHeapFreesBytes = "/gc/heap/frees:bytes" + goGCHeapAllocsBytes = "/gc/heap/allocs:bytes" + goGCHeapObjects = "/gc/heap/objects:objects" + goGCHeapGoalBytes = "/gc/heap/goal:bytes" + goMemoryClassesTotalBytes = "/memory/classes/total:bytes" + goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes" + goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes" + goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes" + goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes" + goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes" + goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes" + goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes" + goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes" + goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes" + goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes" + goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes" + goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes" + goMemoryClassesOtherBytes = "/memory/classes/other:bytes" ) +// rmNamesForMemStatsMetrics represents runtime/metrics names required to populate goRuntimeMemStats from like logic. +var rmNamesForMemStatsMetrics = []string{ + goGCHeapTinyAllocsObjects, + goGCHeapAllocsObjects, + goGCHeapFreesObjects, + goGCHeapAllocsBytes, + goGCHeapObjects, + goGCHeapGoalBytes, + goMemoryClassesTotalBytes, + goMemoryClassesHeapObjectsBytes, + goMemoryClassesHeapUnusedBytes, + goMemoryClassesHeapReleasedBytes, + goMemoryClassesHeapFreeBytes, + goMemoryClassesHeapStacksBytes, + goMemoryClassesOSStacksBytes, + goMemoryClassesMetadataMSpanInuseBytes, + goMemoryClassesMetadataMSPanFreeBytes, + goMemoryClassesMetadataMCacheInuseBytes, + goMemoryClassesMetadataMCacheFreeBytes, + goMemoryClassesProfilingBucketsBytes, + goMemoryClassesMetadataOtherBytes, + goMemoryClassesOtherBytes, +} + +func bestEffortLookupRM(lookup []string) []metrics.Description { + ret := make([]metrics.Description, 0, len(lookup)) + for _, rm := range metrics.All() { + for _, m := range lookup { + if m == rm.Name { + ret = append(ret, rm) + } + } + } + return ret +} + type goCollector struct { base baseGoCollector @@ -36,70 +98,124 @@ type goCollector struct { // snapshot is always produced by Collect. mu sync.Mutex - // rm... fields all pertain to the runtime/metrics package. - rmSampleBuf []metrics.Sample - rmSampleMap map[string]*metrics.Sample - rmMetrics []collectorMetric + // Contains all samples that has to retrieved from runtime/metrics (not all of them will be exposed). + sampleBuf []metrics.Sample + // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums. + sampleMap map[string]*metrics.Sample + + // rmExposedMetrics represents all runtime/metrics package metrics + // that were configured to be exposed. + rmExposedMetrics []collectorMetric + rmExactSumMapForHist map[string]string // With Go 1.17, the runtime/metrics package was introduced. // From that point on, metric names produced by the runtime/metrics // package could be generated from runtime/metrics names. However, // these differ from the old names for the same values. 
// - // This field exist to export the same values under the old names + // This field exists to export the same values under the old names // as well. - msMetrics memStatsMetrics + msMetrics memStatsMetrics + msMetricsEnabled bool +} + +type rmMetricDesc struct { + metrics.Description +} + +func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc { + var descs []rmMetricDesc + for _, d := range metrics.All() { + var ( + deny = true + desc rmMetricDesc + ) + + for _, r := range rules { + if !r.Matcher.MatchString(d.Name) { + continue + } + deny = r.Deny + } + if deny { + continue + } + + desc.Description = d + descs = append(descs, desc) + } + return descs +} + +func defaultGoCollectorOptions() internal.GoCollectorOptions { + return internal.GoCollectorOptions{ + RuntimeMetricSumForHist: map[string]string{ + "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes, + "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, + }, + RuntimeMetricRules: []internal.GoCollectorRule{ + //{Matcher: regexp.MustCompile("")}, + }, + } } // NewGoCollector is the obsolete version of collectors.NewGoCollector. // See there for documentation. // // Deprecated: Use collectors.NewGoCollector instead. -func NewGoCollector() Collector { - descriptions := metrics.All() +func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { + opt := defaultGoCollectorOptions() + for _, o := range opts { + o(&opt) + } + + exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules) // Collect all histogram samples so that we can get their buckets. // The API guarantees that the buckets are always fixed for the lifetime // of the process. var histograms []metrics.Sample - for _, d := range descriptions { + for _, d := range exposedDescriptions { if d.Kind == metrics.KindFloat64Histogram { histograms = append(histograms, metrics.Sample{Name: d.Name}) } } - metrics.Read(histograms) + + if len(histograms) > 0 { + metrics.Read(histograms) + } + bucketsMap := make(map[string][]float64) for i := range histograms { bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets } - // Generate a Desc and ValueType for each runtime/metrics metric. - metricSet := make([]collectorMetric, 0, len(descriptions)) - sampleBuf := make([]metrics.Sample, 0, len(descriptions)) - sampleMap := make(map[string]*metrics.Sample, len(descriptions)) - for i := range descriptions { - d := &descriptions[i] - namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d) + // Generate a collector for each exposed runtime/metrics metric. + metricSet := make([]collectorMetric, 0, len(exposedDescriptions)) + // SampleBuf is used for reading from runtime/metrics. + // We are assuming the largest case to have stable pointers for sampleMap purposes. + sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics)) + sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions)) + for _, d := range exposedDescriptions { + namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description) if !ok { // Just ignore this metric; we can't do anything with it here. // If a user decides to use the latest version of Go, we don't want - // to fail here. This condition is tested elsewhere. + // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } - // Set up sample buffer for reading, and a map - // for quick lookup of sample values. 
sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] var m collectorMetric if d.Kind == metrics.KindFloat64Histogram { - _, hasSum := rmExactSumMap[d.Name] + _, hasSum := opt.RuntimeMetricSumForHist[d.Name] unit := d.Name[strings.IndexRune(d.Name, ':')+1:] m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description, + d.Description.Description, nil, nil, ), @@ -111,24 +227,61 @@ func NewGoCollector() Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description, - }) + Help: d.Description.Description, + }, + ) } else { m = NewGauge(GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description, + Help: d.Description.Description, }) } metricSet = append(metricSet, m) } + + // Add exact sum metrics to sampleBuf if not added before. + for _, h := range histograms { + sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name] + if !ok { + continue + } + + if _, ok := sampleMap[sumMetric]; ok { + continue + } + sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric}) + sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1] + } + + var ( + msMetrics memStatsMetrics + msDescriptions []metrics.Description + ) + + if !opt.DisableMemStatsLikeMetrics { + msMetrics = goRuntimeMemStats() + msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics) + + // Check if metric was not exposed before and if not, add to sampleBuf. + for _, mdDesc := range msDescriptions { + if _, ok := sampleMap[mdDesc.Name]; ok { + continue + } + sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name}) + sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1] + } + } + return &goCollector{ - base: newBaseGoCollector(), - rmSampleBuf: sampleBuf, - rmSampleMap: sampleMap, - rmMetrics: metricSet, - msMetrics: goRuntimeMemStats(), + base: newBaseGoCollector(), + sampleBuf: sampleBuf, + sampleMap: sampleMap, + rmExposedMetrics: metricSet, + rmExactSumMapForHist: opt.RuntimeMetricSumForHist, + msMetrics: msMetrics, + msMetricsEnabled: !opt.DisableMemStatsLikeMetrics, } } @@ -138,7 +291,7 @@ func (c *goCollector) Describe(ch chan<- *Desc) { for _, i := range c.msMetrics { ch <- i.desc } - for _, m := range c.rmMetrics { + for _, m := range c.rmExposedMetrics { ch <- m.Desc() } } @@ -148,8 +301,12 @@ func (c *goCollector) Collect(ch chan<- Metric) { // Collect base non-memory metrics. c.base.Collect(ch) + if len(c.sampleBuf) == 0 { + return + } + // Collect must be thread-safe, so prevent concurrent use of - // rmSampleBuf. Just read into rmSampleBuf but write all the data + // sampleBuf elements. Just read into sampleBuf but write all the data // we get into our Metrics or MemStats. // // This lock also ensures that the Metrics we send out are all from @@ -164,14 +321,17 @@ func (c *goCollector) Collect(ch chan<- Metric) { defer c.mu.Unlock() // Populate runtime/metrics sample buffer. - metrics.Read(c.rmSampleBuf) + metrics.Read(c.sampleBuf) + + // Collect all our runtime/metrics user chose to expose from sampleBuf (if any). + for i, metric := range c.rmExposedMetrics { + // We created samples for exposed metrics first in order, so indexes match. + sample := c.sampleBuf[i] - // Update all our metrics from rmSampleBuf. - for i, sample := range c.rmSampleBuf { // N.B. switch on concrete type because it's significantly more efficient // than checking for the Counter and Gauge interface implementations. In // this case, we control all the types here. 
- switch m := c.rmMetrics[i].(type) { + switch m := metric.(type) { case *counter: // Guard against decreases. This should never happen, but a failure // to do so will result in a panic, which is a harsh consequence for @@ -191,12 +351,15 @@ func (c *goCollector) Collect(ch chan<- Metric) { panic("unexpected metric type") } } - // ms is a dummy MemStats that we populate ourselves so that we can - // populate the old metrics from it. - var ms runtime.MemStats - memStatsFromRM(&ms, c.rmSampleMap) - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) + + if c.msMetricsEnabled { + // ms is a dummy MemStats that we populate ourselves so that we can + // populate the old metrics from it if goMemStatsCollection is enabled. + var ms runtime.MemStats + memStatsFromRM(&ms, c.sampleMap) + for _, i := range c.msMetrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) + } } } @@ -224,11 +387,6 @@ func unwrapScalarRMValue(v metrics.Value) float64 { } } -var rmExactSumMap = map[string]string{ - "/gc/heap/allocs-by-size:bytes": "/gc/heap/allocs:bytes", - "/gc/heap/frees-by-size:bytes": "/gc/heap/frees:bytes", -} - // exactSumFor takes a runtime/metrics metric name (that is assumed to // be of kind KindFloat64Histogram) and returns its exact sum and whether // its exact sum exists. @@ -236,11 +394,11 @@ var rmExactSumMap = map[string]string{ // The runtime/metrics API for histograms doesn't currently expose exact // sums, but some of the other metrics are in fact exact sums of histograms. func (c *goCollector) exactSumFor(rmName string) float64 { - sumName, ok := rmExactSumMap[rmName] + sumName, ok := c.rmExactSumMapForHist[rmName] if !ok { return 0 } - s, ok := c.rmSampleMap[sumName] + s, ok := c.sampleMap[sumName] if !ok { return 0 } @@ -261,35 +419,30 @@ func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) { // while having Mallocs - Frees still represent a live object count. // Unfortunately, MemStats doesn't actually export a large allocation count, // so it's impossible to pull this number out directly. - tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects") - ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs - ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs + tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects) + ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs + ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs - ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes") - ms.Sys = lookupOrZero("/memory/classes/total:bytes") + ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes) + ms.Sys = lookupOrZero(goMemoryClassesTotalBytes) ms.Lookups = 0 // Already always zero. 
- ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes") + ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes) ms.Alloc = ms.HeapAlloc - ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes") - ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes") - ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes") + ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes) + ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes) + ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes) ms.HeapSys = ms.HeapInuse + ms.HeapIdle - ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects") - ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes") - ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes") - ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes") - ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes") - ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes") - ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes") - ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes") - ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes") - ms.OtherSys = lookupOrZero("/memory/classes/other:bytes") - ms.NextGC = lookupOrZero("/gc/heap/goal:bytes") - - // N.B. LastGC is omitted because runtime.GCStats already has this. - // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - // for more details. - ms.LastGC = 0 + ms.HeapObjects = lookupOrZero(goGCHeapObjects) + ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes) + ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes) + ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes) + ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes) + ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes) + ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes) + ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes) + ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes) + ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes) + ms.NextGC = lookupOrZero(goGCHeapGoalBytes) // N.B. GCCPUFraction is intentionally omitted. This metric is not useful, // and often misleading due to the fact that it's an average over the lifetime @@ -324,6 +477,11 @@ type batchHistogram struct { // buckets must always be from the runtime/metrics package, following // the same conventions. func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram { + // We need to remove -Inf values. runtime/metrics keeps them around. + // But -Inf bucket should not be allowed for prometheus histograms. + if buckets[0] == math.Inf(-1) { + buckets = buckets[1:] + } h := &batchHistogram{ desc: desc, buckets: buckets, @@ -382,8 +540,10 @@ func (h *batchHistogram) Write(out *dto.Metric) error { for i, count := range h.counts { totalCount += count if !h.hasSum { - // N.B. This computed sum is an underestimate. - sum += h.buckets[i] * float64(count) + if count != 0 { + // N.B. This computed sum is an underestimate. + sum += h.buckets[i] * float64(count) + } } // Skip the +Inf bucket, but only for the bucket list. 
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 893802fd6b4c..4c873a01c3d3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -28,19 +28,216 @@ import ( dto "github.com/prometheus/client_model/go" ) +// nativeHistogramBounds for the frac of observed values. Only relevant for +// schema > 0. The position in the slice is the schema. (0 is never used, just +// here for convenience of using the schema directly as the index.) +// +// TODO(beorn7): Currently, we do a binary search into these slices. There are +// ways to turn it into a small number of simple array lookups. It probably only +// matters for schema 5 and beyond, but should be investigated. See this comment +// as a starting point: +// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 +var nativeHistogramBounds = [][]float64{ + // Schema "0": + {0.5}, + // Schema 1: + {0.5, 0.7071067811865475}, + // Schema 2: + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, + // Schema 4: + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, + // Schema 5: + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, + // Schema 6: + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 
0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, + // Schema 7: + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, + // Schema 8: + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 
0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 
0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, + }, +} + +// The nativeHistogramBounds above can be generated with the code below. +// +// TODO(beorn7): It's tempting to actually use `go generate` to generate the +// code above. However, this could lead to slightly different numbers on +// different architectures. We still need to come to terms if we are fine with +// that, or if we might prefer to specify precise numbers in the standard. +// +// var nativeHistogramBounds [][]float64 = make([][]float64, 9) +// +// func init() { +// // Populate nativeHistogramBounds. +// numBuckets := 1 +// for i := range nativeHistogramBounds { +// bounds := []float64{0.5} +// factor := math.Exp2(math.Exp2(float64(-i))) +// for j := 0; j < numBuckets-1; j++ { +// var bound float64 +// if (j+1)%2 == 0 { +// // Use previously calculated value for increased precision. +// bound = nativeHistogramBounds[i-1][j/2+1] +// } else { +// bound = bounds[j] * factor +// } +// bounds = append(bounds, bound) +// } +// numBuckets *= 2 +// nativeHistogramBounds[i] = bounds +// } +// } + // A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. +// configurable static buckets (or in dynamic sparse buckets as part of the +// experimental Native Histograms, see below for more details). Similar to a +// Summary, it also provides a sum of observations and an observation count. // // On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. +// the histogram_quantile PromQL function. +// +// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL +// (see the documentation for detailed procedures). However, Histograms require +// the user to pre-define suitable buckets, and they are in general less +// accurate. (Both problems are addressed by the experimental Native +// Histograms. To use them, configure a NativeHistogramBucketFactor in the +// HistogramOpts. They also require a Prometheus server v2.40+ with the +// corresponding feature flag enabled.) // -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. 
+// The Observe method of a Histogram has a very low performance overhead in +// comparison with the Observe method of a Summary. // // To create Histogram instances, use NewHistogram. type Histogram interface { @@ -50,7 +247,8 @@ type Histogram interface { // Observe adds a single observation to the histogram. Observations are // usually positive or zero. Negative observations are accepted but // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. See + // counter resets in the sum of observations. (The experimental Native + // Histograms handle negative observations properly.) See // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations // for details. Observe(float64) @@ -64,18 +262,28 @@ const bucketLabel = "le" // tailored to broadly measure the response time (in seconds) of a network // service. Most likely, however, you will be required to define buckets // customized to your use case. -var ( - DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} +var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, - ) +// DefNativeHistogramZeroThreshold is the default value for +// NativeHistogramZeroThreshold in the HistogramOpts. +// +// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation), +// which is a bucket boundary at all possible resolutions. +const DefNativeHistogramZeroThreshold = 2.938735877055719e-39 + +// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold +// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero +// bucket that only receives observations of precisely zero. +const NativeHistogramZeroThresholdZero = -1 + +var errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, ) -// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest -// bucket has an upper bound of 'start'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. +// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the +// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not +// counted and not included in the returned slice. The returned slice is meant +// to be used for the Buckets field of HistogramOpts. // // The function panics if 'count' is zero or negative. func LinearBuckets(start, width float64, count int) []float64 { @@ -90,11 +298,11 @@ func LinearBuckets(start, width float64, count int) []float64 { return buckets } -// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an -// upper bound of 'start' and each following bucket's upper bound is 'factor' -// times the previous bucket's upper bound. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. +// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket +// has an upper bound of 'start' and each following bucket's upper bound is +// 'factor' times the previous bucket's upper bound. The final +Inf bucket is +// not counted and not included in the returned slice. The returned slice is +// meant to be used for the Buckets field of HistogramOpts. 
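As a concrete illustration of the two bucket helpers documented above, the following values follow directly from their definitions; the commented results are the expected output, not taken from the patch:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// LinearBuckets(start, width, count): start, start+width, start+2*width, ...
	fmt.Println(prometheus.LinearBuckets(5, 5, 4)) // [5 10 15 20]

	// ExponentialBuckets(start, factor, count): start, start*factor, start*factor^2, ...
	fmt.Println(prometheus.ExponentialBuckets(1, 2, 5)) // [1 2 4 8 16]
}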
// // The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, // or if 'factor' is less than or equal to 1. @@ -180,8 +388,85 @@ type HistogramOpts struct { // element in the slice is the upper inclusive bound of a bucket. The // values must be sorted in strictly increasing order. There is no need // to add a highest bucket with +Inf bound, it will be added - // implicitly. The default value is DefBuckets. + // implicitly. If Buckets is left as nil or set to a slice of length + // zero, it is replaced by default buckets. The default buckets are + // DefBuckets if no buckets for a native histogram (see below) are used, + // otherwise the default is no buckets. (In other words, if you want to + // use both regular buckets and buckets for a native histogram, you have + // to define the regular buckets here explicitly.) Buckets []float64 + + // If NativeHistogramBucketFactor is greater than one, so-called sparse + // buckets are used (in addition to the regular buckets, if defined + // above). A Histogram with sparse buckets will be ingested as a Native + // Histogram by a Prometheus server with that feature enabled (requires + // Prometheus v2.40+). Sparse buckets are exponential buckets covering + // the whole float64 range (with the exception of the “zero” bucket, see + // NativeHistogramZeroThreshold below). From any one bucket to the next, + // the width of the bucket grows by a constant + // factor. NativeHistogramBucketFactor provides an upper bound for this + // factor (exception: see below). The smaller + // NativeHistogramBucketFactor, the more buckets will be used and thus + // the more costly the histogram will become. A generally good trade-off + // between cost and accuracy is a value of 1.1 (each bucket is at most + // 10% wider than the previous one), which will result in each power of + // two divided into 8 buckets (e.g. there will be 8 buckets between 1 + // and 2, same as between 2 and 4, and 4 and 8, etc.). + // + // Details about the actually used factor: The factor is calculated as + // 2^(2^n), where n is an integer number between (and including) -8 and + // 4. n is chosen so that the resulting factor is the largest that is + // still smaller than or equal to NativeHistogramBucketFactor. Note that the + // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) + // ). If NativeHistogramBucketFactor is greater than 1 but smaller than + // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though + // it is larger than the provided NativeHistogramBucketFactor. + // + // NOTE: Native Histograms are still an experimental feature. Their + // behavior might still change without a major version + // bump. Subsequently, all NativeHistogram... options here might still + // change their behavior or name (or might completely disappear) without + // a major version bump. + NativeHistogramBucketFactor float64 + // All observations with an absolute value less than or equal to + // NativeHistogramZeroThreshold are accumulated into a “zero” + // bucket. For best results, this should be close to a bucket + // boundary. This is usually the case if picking a power of two. If + // NativeHistogramZeroThreshold is left at zero, + // DefNativeHistogramZeroThreshold is used as the threshold. To configure + // a zero bucket with an actual threshold of zero (i.e. only + // observations of precisely zero will go into the zero bucket), set + // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero + // constant (or any negative float value). 
+ NativeHistogramZeroThreshold float64 + + // The remaining fields define a strategy to limit the number of + // populated sparse buckets. If NativeHistogramMaxBucketNumber is left + // at zero, the number of buckets is not limited. (Note that this might + // lead to unbounded memory consumption if the values observed by the + // Histogram are sufficiently wide-spread. In particular, this could be + // used as a DoS attack vector. Where the observed values depend on + // external inputs, it is highly recommended to set a + // NativeHistogramMaxBucketNumber.) Once the set + // NativeHistogramMaxBucketNumber is exceeded, the following strategy is + // enacted: First, if the last reset (or the creation) of the histogram + // is at least NativeHistogramMinResetDuration ago, then the whole + // histogram is reset to its initial state (including regular + // buckets). If less time has passed, or if + // NativeHistogramMinResetDuration is zero, no reset is + // performed. Instead, the zero threshold is increased sufficiently to + // reduce the number of buckets to or below + // NativeHistogramMaxBucketNumber, but not to more than + // NativeHistogramMaxZeroThreshold. Thus, if + // NativeHistogramMaxZeroThreshold is already at or below the current + // zero threshold, nothing happens at this step. After that, if the + // number of buckets still exceeds NativeHistogramMaxBucketNumber, the + // resolution of the histogram is reduced by doubling the width of the + // sparse buckets (up to a growth factor between one bucket to the next + // of 2^(2^4) = 65536, see above). + NativeHistogramMaxBucketNumber uint32 + NativeHistogramMinResetDuration time.Duration + NativeHistogramMaxZeroThreshold float64 } // NewHistogram creates a new Histogram based on the provided HistogramOpts. It @@ -218,16 +503,29 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{{}, {}}, - now: time.Now, + desc: desc, + upperBounds: opts.Buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, + nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, + nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, + lastResetTime: time.Now(), + now: time.Now, + } + if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { + h.upperBounds = DefBuckets + } + if opts.NativeHistogramBucketFactor <= 1 { + h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets. + } else { + switch { + case opts.NativeHistogramZeroThreshold > 0: + h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold + case opts.NativeHistogramZeroThreshold == 0: + h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold + } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. 
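Taken together, the HistogramOpts fields documented above can be wired up roughly as follows. This is a hedged sketch, not code from the patch: the metric name, help text, and numeric values are invented. With a bucket factor of 1.1 the library settles on schema 3, i.e. an effective growth factor of 2^(1/8) ≈ 1.0905, or 8 sparse buckets per power of two.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// newLatencyHistogram is a hypothetical helper showing the new native
// histogram options alongside classic buckets.
func newLatencyHistogram() prometheus.Histogram {
	return prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "example_request_duration_seconds",
		Help: "Illustrative request latency.",
		// Regular buckets must be listed explicitly if they are wanted in
		// addition to native histogram buckets; otherwise the default is none.
		Buckets: prometheus.DefBuckets,
		// Any value > 1 enables sparse (native histogram) buckets.
		NativeHistogramBucketFactor: 1.1,
		// Bound memory use when observed values are driven by external input.
		NativeHistogramMaxBucketNumber:  160,
		NativeHistogramMinResetDuration: time.Hour,
	})
}

func main() {
	h := newLatencyHistogram()
	prometheus.MustRegister(h)
	h.Observe(0.042)
}
```

The limiting strategy described in the comments above (reset, widen the zero bucket, double the bucket width) only kicks in once NativeHistogramMaxBucketNumber is exceeded.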
+ h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -246,8 +544,16 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } // Finally we know the final length of h.upperBounds and can make buckets // for both counts as well as exemplars: - h.counts[0].buckets = make([]uint64, len(h.upperBounds)) - h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + h.counts[0] = &histogramCounts{ + buckets: make([]uint64, len(h.upperBounds)), + nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), + nativeHistogramSchema: h.nativeHistogramSchema, + } + h.counts[1] = &histogramCounts{ + buckets: make([]uint64, len(h.upperBounds)), + nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), + nativeHistogramSchema: h.nativeHistogramSchema, + } h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. @@ -255,13 +561,98 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } type histogramCounts struct { + // Order in this struct matters for the alignment required by atomic + // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG + // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + // observations. sumBits uint64 count uint64 + + // nativeHistogramZeroBucket counts all (positive and negative) + // observations in the zero bucket (with an absolute value less or equal + // the current threshold, see next field. + nativeHistogramZeroBucket uint64 + // nativeHistogramZeroThresholdBits is the bit pattern of the current + // threshold for the zero bucket. It's initially equal to + // nativeHistogramZeroThreshold but may change according to the bucket + // count limitation strategy. + nativeHistogramZeroThresholdBits uint64 + // nativeHistogramSchema may change over time according to the bucket + // count limitation strategy and therefore has to be saved here. + nativeHistogramSchema int32 + // Number of (positive and negative) sparse buckets. + nativeHistogramBucketsNumber uint32 + + // Regular buckets. buckets []uint64 + + // The sparse buckets for native histograms are implemented with a + // sync.Map for now. A dedicated data structure will likely be more + // efficient. There are separate maps for negative and positive + // observations. The map's value is an *int64, counting observations in + // that bucket. (Note that we don't use uint64 as an int64 won't + // overflow in practice, and working with signed numbers from the + // beginning simplifies the handling of deltas.) The map's key is the + // index of the bucket according to the used + // nativeHistogramSchema. Index 0 is for an upper bound of 1. + nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map +} + +// observe manages the parts of observe that only affects +// histogramCounts. doSparse is true if sparse buckets should be done, +// too. 
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { + if bucket < len(hc.buckets) { + atomic.AddUint64(&hc.buckets[bucket], 1) + } + atomicAddFloat(&hc.sumBits, v) + if doSparse && !math.IsNaN(v) { + var ( + key int + schema = atomic.LoadInt32(&hc.nativeHistogramSchema) + zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits)) + bucketCreated, isInf bool + ) + if math.IsInf(v, 0) { + // Pretend v is MaxFloat64 but later increment key by one. + if math.IsInf(v, +1) { + v = math.MaxFloat64 + } else { + v = -math.MaxFloat64 + } + isInf = true + } + frac, exp := math.Frexp(math.Abs(v)) + if schema > 0 { + bounds := nativeHistogramBounds[schema] + key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) + } else { + key = exp + if frac == 0.5 { + key-- + } + div := 1 << -schema + key = (key + div - 1) / div + } + if isInf { + key++ + } + switch { + case v > zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1) + case v < -zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1) + default: + atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1) + } + if bucketCreated { + atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1) + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hc.count, 1) } type histogram struct { @@ -276,7 +667,7 @@ type histogram struct { // perspective of the histogram) swap the hot–cold under the writeMtx // lock. A cooldown is awaited (while locked) by comparing the number of // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must + // last observation on the now cool one has completed. All cold fields must // be merged into the new hot before releasing writeMtx. // // Fields with atomic access first! See alignment constraint: @@ -284,8 +675,10 @@ type histogram struct { countAndHotIdx uint64 selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. + desc *Desc + + // Only used in the Write method and for sparse bucket management. + mtx sync.Mutex // Two counts, one is "hot" for lock-free observations, the other is // "cold" for writing out a dto.Metric. It has to be an array of @@ -293,9 +686,15 @@ type histogram struct { // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + upperBounds []float64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used. + nativeHistogramZeroThreshold float64 // The initial zero threshold. + nativeHistogramMaxZeroThreshold float64 + nativeHistogramMaxBuckets uint32 + nativeHistogramMinResetDuration time.Duration + lastResetTime time.Time // Protected by mtx. now func() time.Time // To mock out time.Now() for testing. } @@ -319,8 +718,8 @@ func (h *histogram) Write(out *dto.Metric) error { // the hot path, i.e. Observe is called much more often than Write. The // complication of making Write lock-free isn't worth it, if possible at // all. 
- h.writeMtx.Lock() - defer h.writeMtx.Unlock() + h.mtx.Lock() + defer h.mtx.Unlock() // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) // without touching the count bits. See the struct comments for a full @@ -333,16 +732,16 @@ func (h *histogram) Write(out *dto.Metric) error { hotCounts := h.counts[n>>63] coldCounts := h.counts[(^n)>>63] - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } + waitForCooldown(count, coldCounts) his := &dto.Histogram{ Bucket: make([]*dto.Bucket, len(h.upperBounds)), SampleCount: proto.Uint64(count), SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), } + out.Histogram = his + out.Label = h.labelPairs + var cumCount uint64 for i, upperBound := range h.upperBounds { cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) @@ -363,25 +762,21 @@ func (h *histogram) Write(out *dto.Metric) error { } his.Bucket = append(his.Bucket, b) } - - out.Histogram = his - out.Label = h.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - for i := range h.upperBounds { - atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) - atomic.StoreUint64(&coldCounts.buckets[i], 0) + if h.nativeHistogramSchema > math.MinInt32 { + his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits))) + his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema)) + zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket) + + defer func() { + coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber)) + coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber)) + }() + + his.ZeroCount = proto.Uint64(zeroBucket) + his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) + his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) } + addAndResetCounts(hotCounts, coldCounts) return nil } @@ -402,25 +797,216 @@ func (h *histogram) findBucket(v float64) int { // observe is the implementation for Observe without the findBucket part. func (h *histogram) observe(v float64, bucket int) { + // Do not add to sparse buckets for NaN observations. + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) // We increment h.countAndHotIdx so that the counter in the lower // 63 bits gets incremented. At the same time, we get the new value // back, which we can use to find the currently-hot counts. n := atomic.AddUint64(&h.countAndHotIdx, 1) hotCounts := h.counts[n>>63] + hotCounts.observe(v, bucket, doSparse) + if doSparse { + h.limitBuckets(hotCounts, v, bucket) + } +} - if bucket < len(h.upperBounds) { - atomic.AddUint64(&hotCounts.buckets[bucket], 1) +// limitSparsebuckets applies a strategy to limit the number of populated sparse +// buckets. 
It's generally best effort, and there are situations where the +// number can go higher (if even the lowest resolution isn't enough to reduce +// the number sufficiently, or if the provided counts aren't fully updated yet +// by a concurrently happening Write call). +func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) { + if h.nativeHistogramMaxBuckets == 0 { + return // No limit configured. } - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break + if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) { + return // Bucket limit not exceeded yet. + } + + h.mtx.Lock() + defer h.mtx.Unlock() + + // The hot counts might have been swapped just before we acquired the + // lock. Re-fetch the hot counts first... + n := atomic.LoadUint64(&h.countAndHotIdx) + hotIdx := n >> 63 + coldIdx := (^n) >> 63 + hotCounts := h.counts[hotIdx] + coldCounts := h.counts[coldIdx] + // ...and then check again if we really have to reduce the bucket count. + if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) { + return // Bucket limit not exceeded after all. + } + // Try the various strategies in order. + if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { + return + } + if h.maybeWidenZeroBucket(hotCounts, coldCounts) { + return + } + h.doubleBucketWidth(hotCounts, coldCounts) +} + +// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration +// has been passed. It returns true if the histogram has been reset. The caller +// must have locked h.mtx. +func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool { + // We are using the possibly mocked h.now() rather than + // time.Since(h.lastResetTime) to enable testing. + if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { + return false + } + // Completely reset coldCounts. + h.resetCounts(cold) + // Repeat the latest observation to not lose it completely. + cold.observe(value, bucket, true) + // Make coldCounts the new hot counts while ressetting countAndHotIdx. + n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) + count := n & ((1 << 63) - 1) + waitForCooldown(count, hot) + // Finally, reset the formerly hot counts, too. + h.resetCounts(hot) + h.lastResetTime = h.now() + return true +} + +// maybeWidenZeroBucket widens the zero bucket until it includes the existing +// buckets closest to the zero bucket (which could be two, if an equidistant +// negative and a positive bucket exists, but usually it's only one bucket to be +// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold +// limits how far the zero bucket can be extended, and if that's not enough to +// include an existing bucket, the method returns false. The caller must have +// locked h.mtx. +func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool { + currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits)) + if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold { + return false + } + // Find the key of the bucket closest to zero. 
+ smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive) + smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative) + if smallestNegativeKey < smallestKey { + smallestKey = smallestNegativeKey + } + if smallestKey == math.MaxInt32 { + return false + } + newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema)) + if newZeroThreshold > h.nativeHistogramMaxZeroThreshold { + return false // New threshold would exceed the max threshold. + } + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // Remove applicable buckets. + if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + // Make cold counts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the new zero threshold in the cold counts, too... + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // ...and then merge the newly deleted buckets into the wider zero + // bucket. + mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + if key == smallestKey { + // Merge into hot zero bucket... + atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket))) + // ...and delete from cold counts. + coldBuckets.Delete(key) + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } else { + // Add to corresponding hot bucket... + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + // ...and reset cold bucket. + atomic.StoreInt64(bucket, 0) + } + return true } } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) + + cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative)) + return true +} + +// doubleBucketWidth doubles the bucket width (by decrementing the schema +// number). Note that very sparse buckets could lead to a low reduction of the +// bucket count (or even no reduction at all). The method does nothing if the +// schema is already -4. +func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { + coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema) + if coldSchema == -4 { + return // Already at lowest resolution. + } + coldSchema-- + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // Play it simple and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) + // Make coldCounts the new hot counts. 
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the schema in the cold counts, too... + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // ...and then merge the cold buckets into the wider hot buckets. + merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + // Adjust key to match the bucket to merge into. + if key > 0 { + key++ + } + key /= 2 + // Add to corresponding hot bucket. + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + return true + } + } + + cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative)) + // Play it simple again and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) +} + +func (h *histogram) resetCounts(counts *histogramCounts) { + atomic.StoreUint64(&counts.sumBits, 0) + atomic.StoreUint64(&counts.count, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema) + atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0) + for i := range h.upperBounds { + atomic.StoreUint64(&counts.buckets[i], 0) + } + deleteSyncMap(&counts.nativeHistogramBucketsNegative) + deleteSyncMap(&counts.nativeHistogramBucketsPositive) } // updateExemplar replaces the exemplar for the provided bucket. With empty @@ -516,7 +1102,8 @@ func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { h, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -527,7 +1114,8 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith but panics where GetMetricWithLabels would have // returned an error. 
Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *HistogramVec) With(labels Labels) Observer { h, err := v.GetMetricWith(labels) if err != nil { @@ -581,11 +1169,11 @@ func (h *constHistogram) Desc() *Desc { func (h *constHistogram) Write(out *dto.Metric) error { his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) his.SampleCount = proto.Uint64(h.count) his.SampleSum = proto.Float64(h.sum) - for upperBound, count := range h.buckets { buckets = append(buckets, &dto.Bucket{ CumulativeCount: proto.Uint64(count), @@ -613,7 +1201,7 @@ func (h *constHistogram) Write(out *dto.Metric) error { // to send it to Prometheus in the Collect method. // // buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. +// bucket. The +Inf bucket is implicit, and its value is equal to the provided count. // // NewConstHistogram returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. @@ -668,3 +1256,229 @@ func (s buckSort) Swap(i, j int) { func (s buckSort) Less(i, j int) bool { return s[i].GetUpperBound() < s[j].GetUpperBound() } + +// pickSchema returns the largest number n between -4 and 8 such that +// 2^(2^-n) is less or equal the provided bucketFactor. +// +// Special cases: +// - bucketFactor <= 1: panics. +// - bucketFactor < 2^(2^-8) (but > 1): still returns 8. +func pickSchema(bucketFactor float64) int32 { + if bucketFactor <= 1 { + panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor)) + } + floor := math.Floor(math.Log2(math.Log2(bucketFactor))) + switch { + case floor <= -8: + return 8 + case floor >= 4: + return -4 + default: + return -int32(floor) + } +} + +func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { + var ii []int + buckets.Range(func(k, v interface{}) bool { + ii = append(ii, k.(int)) + return true + }) + sort.Ints(ii) + + if len(ii) == 0 { + return nil, nil + } + + var ( + spans []*dto.BucketSpan + deltas []int64 + prevCount int64 + nextI int + ) + + appendDelta := func(count int64) { + *spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) + prevCount = count + } + + for n, i := range ii { + v, _ := buckets.Load(i) + count := atomic.LoadInt64(v.(*int64)) + // Multiple spans with only small gaps in between are probably + // encoded more efficiently as one larger span with a few empty + // buckets. Needs some research to find the sweet spot. For now, + // we assume that gaps of one ore two buckets should not create + // a new span. + iDelta := int32(i - nextI) + if n == 0 || iDelta > 2 { + // We have to create a new span, either because we are + // at the very beginning, or because we have found a gap + // of more than two buckets. + spans = append(spans, &dto.BucketSpan{ + Offset: proto.Int32(iDelta), + Length: proto.Uint32(0), + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < iDelta; j++ { + appendDelta(0) + } + } + appendDelta(count) + nextI = i + 1 + } + return spans, deltas +} + +// addToBucket increments the sparse bucket at key by the provided amount. It +// returns true if a new sparse bucket had to be created for that. +func addToBucket(buckets *sync.Map, key int, increment int64) bool { + if existingBucket, ok := buckets.Load(key); ok { + // Fast path without allocation. 
+ atomic.AddInt64(existingBucket.(*int64), increment) + return false + } + // Bucket doesn't exist yet. Slow path allocating new counter. + newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape. + if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded { + // The bucket was created concurrently in another goroutine. + // Have to increment after all. + atomic.AddInt64(actualBucket.(*int64), increment) + return false + } + return true +} + +// addAndReset returns a function to be used with sync.Map.Range of spare +// buckets in coldCounts. It increments the buckets in the provided hotBuckets +// according to the buckets ranged through. It then resets all buckets ranged +// through to 0 (but leaves them in place so that they don't need to get +// recreated on the next scrape). +func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + bucket := v.(*int64) + if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) { + atomic.AddUint32(bucketNumber, 1) + } + atomic.StoreInt64(bucket, 0) + return true + } +} + +func deleteSyncMap(m *sync.Map) { + m.Range(func(k, v interface{}) bool { + m.Delete(k) + return true + }) +} + +func findSmallestKey(m *sync.Map) int { + result := math.MaxInt32 + m.Range(func(k, v interface{}) bool { + key := k.(int) + if key < result { + result = key + } + return true + }) + return result +} + +func getLe(key int, schema int32) float64 { + // Here a bit of context about the behavior for the last bucket counting + // regular numbers (called simply "last bucket" below) and the bucket + // counting observations of ±Inf (called "inf bucket" below, with a key + // one higher than that of the "last bucket"): + // + // If we apply the usual formula to the last bucket, its upper bound + // would be calculated as +Inf. The reason is that the max possible + // regular float64 number (math.MaxFloat64) doesn't coincide with one of + // the calculated bucket boundaries. So the calculated boundary has to + // be larger than math.MaxFloat64, and the only float64 larger than + // math.MaxFloat64 is +Inf. However, we want to count actual + // observations of ±Inf in the inf bucket. Therefore, we have to treat + // the upper bound of the last bucket specially and set it to + // math.MaxFloat64. (The upper bound of the inf bucket, with its key + // being one higher than that of the last bucket, naturally comes out as + // +Inf by the usual formula. So that's fine.) + // + // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of + // 1024. If there were a float64 number following math.MaxFloat64, it + // would have a frac of 1.0 and an exp of 1024, or equivalently a frac + // of 0.5 and an exp of 1025. However, since frac must be smaller than + // 1, and exp must be smaller than 1025, either representation overflows + // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the + // largest possible float64. Q.E.D.) However, the formula for + // calculating the upper bound from the idx and schema of the last + // bucket results in precisely that. It is either frac=1.0 & exp=1024 + // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is, + // by the way, a power of two where the exponent itself is a power of + // two, 2¹⁰ in fact, which coinicides with a bucket boundary in all + // schemas.) So these are the special cases we have to catch below. 
+ if schema < 0 { + exp := key << -schema + if exp == 1024 { + // This is the last bucket before the overflow bucket + // (for ±Inf observations). Return math.MaxFloat64 as + // explained above. + return math.MaxFloat64 + } + return math.Ldexp(1, exp) + } + + fracIdx := key & ((1 << schema) - 1) + frac := nativeHistogramBounds[schema][fracIdx] + exp := (key >> schema) + 1 + if frac == 0.5 && exp == 1025 { + // This is the last bucket before the overflow bucket (for ±Inf + // observations). Return math.MaxFloat64 as explained above. + return math.MaxFloat64 + } + return math.Ldexp(frac, exp) +} + +// waitForCooldown returns after the count field in the provided histogramCounts +// has reached the provided count value. +func waitForCooldown(count uint64, counts *histogramCounts) { + for count != atomic.LoadUint64(&counts.count) { + runtime.Gosched() // Let observations get work done. + } +} + +// atomicAddFloat adds the provided float atomically to another float +// represented by the bit pattern the bits pointer is pointing to. +func atomicAddFloat(bits *uint64, v float64) { + for { + loadedBits := atomic.LoadUint64(bits) + newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) + if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { + break + } + } +} + +// atomicDecUint32 atomically decrements the uint32 p points to. See +// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done. +func atomicDecUint32(p *uint32) { + atomic.AddUint32(p, ^uint32(0)) +} + +// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero +// bucket) from the cold counts to the corresponding fields in the hot +// counts. Those fields are then reset to 0 in the cold counts. +func addAndResetCounts(hot, cold *histogramCounts) { + atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count)) + atomic.StoreUint64(&cold.count, 0) + coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits)) + atomicAddFloat(&hot.sumBits, coldSum) + atomic.StoreUint64(&cold.sumBits, 0) + for i := range hot.buckets { + atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i])) + atomic.StoreUint64(&cold.buckets[i], 0) + } + atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) + atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go new file mode 100644 index 000000000000..1ed5abe74c16 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go @@ -0,0 +1,60 @@ +// Copyright (c) 2015 Björn Rabenstein +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// The code in this package is copy/paste to avoid a dependency. Hence this file +// carries the copyright of the original repo. +// https://github.com/beorn7/floats +package internal + +import ( + "math" +) + +// minNormalFloat64 is the smallest positive normal value of type float64. +var minNormalFloat64 = math.Float64frombits(0x0010000000000000) + +// AlmostEqualFloat64 returns true if a and b are equal within a relative error +// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the +// details of the applied method. +func AlmostEqualFloat64(a, b, epsilon float64) bool { + if a == b { + return true + } + absA := math.Abs(a) + absB := math.Abs(b) + diff := math.Abs(a - b) + if a == 0 || b == 0 || absA+absB < minNormalFloat64 { + return diff < epsilon*minNormalFloat64 + } + return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon +} + +// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64. +func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if !AlmostEqualFloat64(a[i], b[i], epsilon) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go new file mode 100644 index 000000000000..fd0750f2cf50 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -0,0 +1,654 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// It provides tools to compare sequences of strings and generate textual diffs. +// +// Maintaining `GetUnifiedDiffString` here because original repository +// (https://github.com/pmezard/go-difflib) is no loger maintained. +package internal + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). 
The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool, +) *SequenceMatcher { + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// +// and for all (i',j',k') meeting those conditions, +// +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{ + c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n), + }) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s]++ + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches++ + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning-- // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
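Before the implementation that follows, a short usage sketch of how the helpers in this vendored difflib file fit together. The package is internal to client_golang, so the sketch is written as if it lived in the same package (for instance as a test); the input strings are invented, and GetUnifiedDiffString is defined a little further below:

```go
package internal

import "testing"

// TestUnifiedDiffSketch is an illustrative sketch, not part of the patch.
func TestUnifiedDiffSketch(t *testing.T) {
	// SplitLines keeps the trailing "\n" on each line, which is the form
	// UnifiedDiff expects.
	a := SplitLines("one\ntwo\nthree\n")
	b := SplitLines("one\ntwo\nfour\n")

	diff := UnifiedDiff{
		A:        a,
		B:        b,
		FromFile: "before.txt",
		ToFile:   "after.txt",
		Context:  3,
	}
	out, err := GetUnifiedDiffString(diff)
	if err != nil {
		t.Fatal(err)
	}
	// Logs a unified diff that replaces "three" with "four".
	t.Logf("%s", out)
}
```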
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return w.String(), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go similarity index 51% rename from vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go rename to vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go index 6d152fbf192c..723b45d64444 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -11,21 +11,22 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build go1.15 -// +build go1.15 +package internal -package collectors +import "regexp" -import ( - "database/sql" - - "github.com/prometheus/client_golang/prometheus" -) - -func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) { - ch <- c.maxIdleTimeClosed +type GoCollectorRule struct { + Matcher *regexp.Regexp + Deny bool } -func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) { - ch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed)) +// GoCollectorOptions should not be used be directly by anything, except `collectors` package. +// Use it via collectors package instead. See issue +// https://github.com/prometheus/client_golang/issues/1030. +// +// This is internal, so external users only can use it via `collector.WithGoCollector*` methods +type GoCollectorOptions struct { + DisableMemStatsLikeMetrics bool + RuntimeMetricSumForHist map[string]string + RuntimeMetricRules []GoCollectorRule } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index fe0a52180e72..97d17d6cb60b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -61,9 +61,9 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) // name has - replaced with _ and is concatenated with the unit and // other data. name = strings.ReplaceAll(name, "-", "_") - name = name + "_" + unit - if d.Cumulative { - name = name + "_total" + name += "_" + unit + if d.Cumulative && d.Kind != metrics.KindFloat64Histogram { + name += "_total" } valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name)) @@ -84,12 +84,12 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { switch unit { case "bytes": - // Rebucket as powers of 2. - return rebucketExp(buckets, 2) + // Re-bucket as powers of 2. + return reBucketExp(buckets, 2) case "seconds": - // Rebucket as powers of 10 and then merge all buckets greater + // Re-bucket as powers of 10 and then merge all buckets greater // than 1 second into the +Inf bucket. - b := rebucketExp(buckets, 10) + b := reBucketExp(buckets, 10) for i := range b { if b[i] <= 1 { continue @@ -103,11 +103,11 @@ func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { return buckets } -// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and +// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and // downsamples the buckets to those a multiple of base apart. The end result // is a roughly exponential (in many cases, perfectly exponential) bucketing // scheme. 
-func rebucketExp(buckets []float64, base float64) []float64 { +func reBucketExp(buckets []float64, base float64) []float64 { bucket := buckets[0] var newBuckets []float64 // We may see a -Inf here, in which case, add it and skip it diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go index 351c26e1aedb..6515c114804f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -19,18 +19,34 @@ import ( dto "github.com/prometheus/client_model/go" ) -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. +type LabelPairSorter []*dto.LabelPair -func (s metricSorter) Len() int { +func (s LabelPairSorter) Len() int { return len(s) } -func (s metricSorter) Swap(i, j int) { +func (s LabelPairSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s metricSorter) Less(i, j int) bool { +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +// MetricSorter is a sortable slice of *dto.Metric. +type MetricSorter []*dto.Metric + +func (s MetricSorter) Len() int { + return len(s) +} + +func (s MetricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s MetricSorter) Less(i, j int) bool { if len(s[i].Label) != len(s[j].Label) { // This should not happen. The metrics are // inconsistent. However, we have to deal with the fact, as @@ -68,7 +84,7 @@ func (s metricSorter) Less(i, j int) bool { // the slice, with the contained Metrics sorted within each MetricFamily. func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) + sort.Sort(MetricSorter(mf.Metric)) } names := make([]string, 0, len(metricFamiliesByName)) for name, mf := range metricFamiliesByName { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 2744443ac228..c1b8fad36aeb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -25,7 +25,8 @@ import ( // Labels represents a collection of label name -> value mappings. This type is // commonly used with the With(Labels) and GetMetricWith(Labels) methods of // metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) // // The other use-case is the specification of constant label pairs in Opts or to // create a Desc. 
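[Illustrative aside, not part of the vendored patch: the Labels doc comment above covers both lookup styles on a metric vector; a minimal sketch against a CounterVec with invented metric and label names, also showing the constant-label use the comment mentions.]

package main

import "github.com/prometheus/client_golang/prometheus"

var httpRequests = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "myapp_http_requests_total",
		Help: "HTTP requests by status code and method.",
		// Constant label pairs are the other use of Labels described above.
		ConstLabels: prometheus.Labels{"service": "myapp"},
	},
	[]string{"code", "method"},
)

func main() {
	prometheus.MustRegister(httpRequests)

	// Lookup by Labels map, as in the doc comment ...
	httpRequests.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
	// ... or by positional label values.
	httpRequests.WithLabelValues("200", "POST").Inc()
}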
@@ -39,7 +40,7 @@ var errInconsistentCardinality = errors.New("inconsistent label cardinality") func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { return fmt.Errorf( - "%s: %q has %d variable labels named %q but %d values %q were provided", + "%w: %q has %d variable labels named %q but %d values %q were provided", errInconsistentCardinality, fqName, len(labels), labels, len(labelValues), labelValues, @@ -49,7 +50,7 @@ func makeInconsistentCardinalityError(fqName string, labels, labelValues []strin func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { if len(labels) != expectedNumberOfValues { return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", + "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, len(labels), labels, ) @@ -67,7 +68,7 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", + "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, len(vals), vals, ) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index dc121910a520..b5119c50410e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -14,6 +14,9 @@ package prometheus import ( + "errors" + "math" + "sort" "strings" "time" @@ -115,22 +118,6 @@ func BuildFQName(namespace, subsystem, name string) string { return name } -// labelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. -type labelPairSorter []*dto.LabelPair - -func (s labelPairSorter) Len() int { - return len(s) -} - -func (s labelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s labelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - type invalidMetric struct { desc *Desc err error @@ -174,3 +161,96 @@ func (m timestampedMetric) Write(pb *dto.Metric) error { func NewMetricWithTimestamp(t time.Time, m Metric) Metric { return timestampedMetric{Metric: m, t: t} } + +type withExemplarsMetric struct { + Metric + + exemplars []*dto.Exemplar +} + +func (m *withExemplarsMetric) Write(pb *dto.Metric) error { + if err := m.Metric.Write(pb); err != nil { + return err + } + + switch { + case pb.Counter != nil: + pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] + case pb.Histogram != nil: + for _, e := range m.exemplars { + // pb.Histogram.Bucket are sorted by UpperBound. + i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { + return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() + }) + if i < len(pb.Histogram.Bucket) { + pb.Histogram.Bucket[i].Exemplar = e + } else { + // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. + b := &dto.Bucket{ + CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), + UpperBound: proto.Float64(math.Inf(1)), + Exemplar: e, + } + pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) + } + } + default: + // TODO(bwplotka): Implement Gauge? 
+ return errors.New("cannot inject exemplar into Gauge, Summary or Untyped") + } + + return nil +} + +// Exemplar is easier to use, user-facing representation of *dto.Exemplar. +type Exemplar struct { + Value float64 + Labels Labels + // Optional. + // Default value (time.Time{}) indicates its empty, which should be + // understood as time.Now() time at the moment of creation of metric. + Timestamp time.Time +} + +// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given +// exemplars. Exemplars are validated. +// +// Only last applicable exemplar is injected from the list. +// For example for Counter it means last exemplar is injected. +// For Histogram, it means last applicable exemplar for each bucket is injected. +// +// NewMetricWithExemplars works best with MustNewConstMetric and +// MustNewConstHistogram, see example. +func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { + if len(exemplars) == 0 { + return nil, errors.New("no exemplar was passed for NewMetricWithExemplars") + } + + var ( + now = time.Now() + exs = make([]*dto.Exemplar, len(exemplars)) + err error + ) + for i, e := range exemplars { + ts := e.Timestamp + if ts == (time.Time{}) { + ts = now + } + exs[i], err = newExemplar(e.Value, ts, e.Labels) + if err != nil { + return nil, err + } + } + + return &withExemplarsMetric{Metric: m, exemplars: exs}, nil +} + +// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where +// NewMetricWithExemplars would have returned an error. +func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric { + ret, err := NewMetricWithExemplars(m, exemplars...) + if err != nil { + panic(err) + } + return ret +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go new file mode 100644 index 000000000000..7c12b210870a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go @@ -0,0 +1,25 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !js || wasm +// +build !js wasm + +package prometheus + +import "runtime" + +// getRuntimeNumThreads returns the number of open OS threads. +func getRuntimeNumThreads() float64 { + n, _ := runtime.ThreadCreateProfile(nil) + return float64(n) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go new file mode 100644 index 000000000000..7348df01dfbc --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
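[Illustrative aside, not part of the vendored patch: NewMetricWithExemplars and MustNewMetricWithExemplars are meant to wrap const metrics, as the comment above notes. A rough sketch of a toy Collector doing so; the descriptor, value, and trace_id label are invented.]

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// jobsCollector is a toy Collector exposing one const counter with an exemplar attached.
type jobsCollector struct {
	desc *prometheus.Desc
}

func (c jobsCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c jobsCollector) Collect(ch chan<- prometheus.Metric) {
	m := prometheus.MustNewConstMetric(c.desc, prometheus.CounterValue, 42)
	// For a counter, only the last applicable exemplar in the list is injected.
	ch <- prometheus.MustNewMetricWithExemplars(m, prometheus.Exemplar{
		Value:  1,
		Labels: prometheus.Labels{"trace_id": "abc123"}, // placeholder trace id
	})
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(jobsCollector{desc: prometheus.NewDesc(
		"myapp_jobs_completed_total", "Completed jobs.", nil, nil,
	)})
	mfs, err := reg.Gather()
	fmt.Println(len(mfs), err)
}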
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js && !wasm +// +build js,!wasm + +package prometheus + +// getRuntimeNumThreads returns the number of open OS threads. +func getRuntimeNumThreads() float64 { + return 1 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go index 44128016fd1d..03773b21f759 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -58,7 +58,7 @@ type ObserverVec interface { // current time as timestamp, and the provided Labels. Empty Labels will lead to // a valid (label-less) exemplar. But if Labels is nil, the current exemplar is // left in place. ObserveWithExemplar panics if any of the provided labels are -// invalid or if the provided labels contain more than 64 runes in total. +// invalid or if the provided labels contain more than 128 runes in total. type ExemplarObserver interface { ObserveWithExemplar(value float64, exemplar Labels) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 5bfe0ff5bbc9..8548dd18ed5e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -16,7 +16,6 @@ package prometheus import ( "errors" "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -104,8 +103,7 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { } if opts.PidFn == nil { - pid := os.Getpid() - c.pidFn = func() (int, error) { return pid, nil } + c.pidFn = getPIDFn() } else { c.pidFn = opts.PidFn } @@ -152,13 +150,13 @@ func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) // It is meant to be used for the PidFn field in ProcessCollectorOpts. func NewPidFileFn(pidFilePath string) func() (int, error) { return func() (int, error) { - content, err := ioutil.ReadFile(pidFilePath) + content, err := os.ReadFile(pidFilePath) if err != nil { - return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err) + return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err) } pid, err := strconv.Atoi(strings.TrimSpace(string(content))) if err != nil { - return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err) + return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err) } return pid, nil diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go new file mode 100644 index 000000000000..b1e363d6cf69 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go @@ -0,0 +1,26 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js +// +build js + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + // noop on this platform + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 2dc3660da0a3..c0152cdb613a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !windows -// +build !windows +//go:build !windows && !js +// +build !windows,!js package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go index f8d50d1f9112..8031e8704244 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go @@ -14,114 +14,114 @@ // Package promauto provides alternative constructors for the fundamental // Prometheus metric types and their …Vec and …Func variants. The difference to // their counterparts in the prometheus package is that the promauto -// constructors return Collectors that are already registered with a -// registry. There are two sets of constructors. The constructors in the first -// set are top-level functions, while the constructors in the other set are -// methods of the Factory type. The top-level function return Collectors -// registered with the global registry (prometheus.DefaultRegisterer), while the -// methods return Collectors registered with the registry the Factory was -// constructed with. All constructors panic if the registration fails. +// constructors register the Collectors with a registry before returning them. +// There are two sets of constructors. The constructors in the first set are +// top-level functions, while the constructors in the other set are methods of +// the Factory type. The top-level function return Collectors registered with +// the global registry (prometheus.DefaultRegisterer), while the methods return +// Collectors registered with the registry the Factory was constructed with. All +// constructors panic if the registration fails. 
// // The following example is a complete program to create a histogram of normally // distributed random numbers from the math/rand package: // -// package main +// package main // -// import ( -// "math/rand" -// "net/http" +// import ( +// "math/rand" +// "net/http" // -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promauto" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promauto" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) // -// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) +// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// }) // -// func Random() { -// for { -// histogram.Observe(rand.NormFloat64()) -// } -// } +// func Random() { +// for { +// histogram.Observe(rand.NormFloat64()) +// } +// } // -// func main() { -// go Random() -// http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":1971", nil) -// } +// func main() { +// go Random() +// http.Handle("/metrics", promhttp.Handler()) +// http.ListenAndServe(":1971", nil) +// } // // Prometheus's version of a minimal hello-world program: // -// package main +// package main // -// import ( -// "fmt" -// "net/http" +// import ( +// "fmt" +// "net/http" // -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promauto" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promauto" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) // -// func main() { -// http.Handle("/", promhttp.InstrumentHandlerCounter( -// promauto.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hello_requests_total", -// Help: "Total number of hello-world requests by HTTP code.", -// }, -// []string{"code"}, -// ), -// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -// fmt.Fprint(w, "Hello, world!") -// }), -// )) -// http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":1971", nil) -// } +// func main() { +// http.Handle("/", promhttp.InstrumentHandlerCounter( +// promauto.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hello_requests_total", +// Help: "Total number of hello-world requests by HTTP code.", +// }, +// []string{"code"}, +// ), +// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +// fmt.Fprint(w, "Hello, world!") +// }), +// )) +// http.Handle("/metrics", promhttp.Handler()) +// http.ListenAndServe(":1971", nil) +// } // // A Factory is created with the With(prometheus.Registerer) function, which // enables two usage pattern. 
With(prometheus.Registerer) can be called once per // line: // -// var ( -// reg = prometheus.NewRegistry() -// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// requestCount = promauto.With(reg).NewCounterVec( -// prometheus.CounterOpts{ -// Name: "http_requests_total", -// Help: "Total number of HTTP requests by status code and method.", -// }, -// []string{"code", "method"}, -// ) -// ) +// var ( +// reg = prometheus.NewRegistry() +// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// }) +// requestCount = promauto.With(reg).NewCounterVec( +// prometheus.CounterOpts{ +// Name: "http_requests_total", +// Help: "Total number of HTTP requests by status code and method.", +// }, +// []string{"code", "method"}, +// ) +// ) // // Or it can be used to create a Factory once to be used multiple times: // -// var ( -// reg = prometheus.NewRegistry() -// factory = promauto.With(reg) -// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// requestCount = factory.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "http_requests_total", -// Help: "Total number of HTTP requests by status code and method.", -// }, -// []string{"code", "method"}, -// ) -// ) +// var ( +// reg = prometheus.NewRegistry() +// factory = promauto.With(reg) +// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{ +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// }) +// requestCount = factory.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "http_requests_total", +// Help: "Total number of HTTP requests by status code and method.", +// }, +// []string{"code", "method"}, +// ) +// ) // // This appears very handy. So why are these constructors locked away in a // separate package? diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index e7c0d05464fa..9819917b83b3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,16 +76,19 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } -type closeNotifierDelegator struct{ *responseWriterDelegator } -type flusherDelegator struct{ *responseWriterDelegator } -type hijackerDelegator struct{ *responseWriterDelegator } -type readerFromDelegator struct{ *responseWriterDelegator } -type pusherDelegator struct{ *responseWriterDelegator } +type ( + closeNotifierDelegator struct{ *responseWriterDelegator } + flusherDelegator struct{ *responseWriterDelegator } + hijackerDelegator struct{ *responseWriterDelegator } + readerFromDelegator struct{ *responseWriterDelegator } + pusherDelegator struct{ *responseWriterDelegator } +) func (d closeNotifierDelegator) CloseNotify() <-chan bool { //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. 
return d.ResponseWriter.(http.CloseNotifier).CloseNotify() } + func (d flusherDelegator) Flush() { // If applicable, call WriteHeader here so that observeWriteHeader is // handled appropriately. @@ -94,9 +97,11 @@ func (d flusherDelegator) Flush() { } d.ResponseWriter.(http.Flusher).Flush() } + func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { return d.ResponseWriter.(http.Hijacker).Hijack() } + func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { // If applicable, call WriteHeader here so that observeWriteHeader is // handled appropriately. @@ -107,6 +112,7 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { d.written += n return n, err } + func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { return d.ResponseWriter.(http.Pusher).Push(target, opts) } @@ -261,7 +267,7 @@ func init() { http.Flusher }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} } - pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23 return struct { *responseWriterDelegator http.Pusher diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index d86d0cf4b0e9..a4cc9810b072 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -33,6 +33,7 @@ package promhttp import ( "compress/gzip" + "errors" "fmt" "io" "net/http" @@ -84,6 +85,13 @@ func Handler() http.Handler { // instrumentation. Use the InstrumentMetricHandler function to apply the same // kind of instrumentation as it is used by the Handler function. func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts) +} + +// HandlerForTransactional is like HandlerFor, but it uses transactional gather, which +// can safely change in-place returned *dto.MetricFamily before call to `Gather` and after +// call to `done` of that `Gather`. 
+func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler { var ( inFlightSem chan struct{} errCnt = prometheus.NewCounterVec( @@ -103,7 +111,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { errCnt.WithLabelValues("gathering") errCnt.WithLabelValues("encoding") if err := opts.Registry.Register(errCnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { errCnt = are.ExistingCollector.(*prometheus.CounterVec) } else { panic(err) @@ -123,7 +132,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { return } } - mfs, err := reg.Gather() + mfs, done, err := reg.Gather() + defer done() if err != nil { if opts.ErrorLog != nil { opts.ErrorLog.Println("error gathering metrics:", err) @@ -242,7 +252,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht cnt.WithLabelValues("500") cnt.WithLabelValues("503") if err := reg.Register(cnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { cnt = are.ExistingCollector.(*prometheus.CounterVec) } else { panic(err) @@ -254,7 +265,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht Help: "Current number of scrapes being served.", }) if err := reg.Register(gge); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { gge = are.ExistingCollector.(prometheus.Gauge) } else { panic(err) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 861b4d21cac6..21086781621f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -38,11 +38,11 @@ func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { // // See the example for ExampleInstrumentRoundTripperDuration for example usage. func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { gauge.Inc() defer gauge.Dec() return next.RoundTrip(r) - }) + } } // InstrumentRoundTripperCounter is a middleware that wraps the provided @@ -59,22 +59,28 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp // If the wrapped RoundTripper panics or returns a non-nil error, the Counter // is not incremented. // +// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests. +// // See the example for ExampleInstrumentRoundTripperDuration for example usage. 
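[Illustrative aside, not part of the vendored patch: the round-tripper helpers compose by wrapping an existing http.RoundTripper, and each returns a RoundTripperFunc so the wrappers chain. A minimal client-side sketch with invented metric names.]

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "In-flight outbound requests.",
	})
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "client_api_requests_total",
		Help: "Outbound requests by code and method.",
	}, []string{"code", "method"})
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "client_request_duration_seconds",
		Help:    "Outbound request latencies.",
		Buckets: prometheus.DefBuckets,
	}, []string{"method"})
	prometheus.MustRegister(inFlight, counter, duration)

	// Wrap the default transport; the innermost wrapper runs closest to the network.
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(counter,
				promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport),
			),
		),
	}
	_, _ = client.Get("http://example.com") // response and error ignored in this sketch
}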
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := &option{} + rtOpts := defaultOptions() for _, o := range opts { - o(rtOpts) + o.apply(rtOpts) } code, method := checkLabels(counter) - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { - counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc() + addWithExemplar( + counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), + 1, + rtOpts.getExemplarFn(r.Context()), + ) } return resp, err - }) + } } // InstrumentRoundTripperDuration is a middleware that wraps the provided @@ -94,24 +100,30 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou // If the wrapped RoundTripper panics or returns a non-nil error, no values are // reported. // +// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms. +// // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := &option{} + rtOpts := defaultOptions() for _, o := range opts { - o(rtOpts) + o.apply(rtOpts) } code, method := checkLabels(obs) - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { start := time.Now() resp, err := next.RoundTrip(r) if err == nil { - obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds()) + observeWithExemplar( + obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), + time.Since(start).Seconds(), + rtOpts.getExemplarFn(r.Context()), + ) } return resp, err - }) + } } // InstrumentTrace is used to offer flexibility in instrumenting the available @@ -149,7 +161,7 @@ type InstrumentTrace struct { // // See the example for ExampleInstrumentRoundTripperDuration for example usage. func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { start := time.Now() trace := &httptrace.ClientTrace{ @@ -231,5 +243,5 @@ func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) Ro r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) return next.RoundTrip(r) - }) + } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index a23f0edc6f81..cca67a78a90d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -28,6 +28,26 @@ import ( // magicString is used for the hacky label test in checkLabels. Remove once fixed. const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" +// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver], +// which falls back to [prometheus.Observer.Observe] if no labels are provided. 
+func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) { + if labels == nil { + obs.Observe(val) + return + } + obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels) +} + +// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar], +// which falls back to [prometheus.Counter.Add] if no labels are provided. +func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) { + if labels == nil { + obs.Add(val) + return + } + obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels) +} + // InstrumentHandlerInFlight is a middleware that wraps the provided // http.Handler. It sets the provided prometheus.Gauge to the number of // requests currently handled by the wrapped http.Handler. @@ -48,7 +68,7 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl // names are "code" and "method". The function panics otherwise. For the "method" // label a predefined default label value set is used to filter given values. // Values besides predefined values will count as `unknown` method. -//`WithExtraMethods` can be used to add more methods to the set. The Observe +// `WithExtraMethods` can be used to add more methods to the set. The Observe // method of the Observer in the ObserverVec is called with the request duration // in seconds. Partitioning happens by HTTP status code and/or HTTP method if // the respective instance label names are present in the ObserverVec. For @@ -62,28 +82,37 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(obs) if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, nil) next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) - }) + observeWithExemplar( + obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + time.Since(now).Seconds(), + hOpts.getExemplarFn(r.Context()), + ) + } } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { now := time.Now() next.ServeHTTP(w, r) - obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) - }) + + observeWithExemplar( + obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), + time.Since(now).Seconds(), + hOpts.getExemplarFn(r.Context()), + ) + } } // InstrumentHandlerCounter is a middleware that wraps the provided http.Handler @@ -104,25 +133,34 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op // // See the example for InstrumentHandlerDuration for example usage. 
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(counter) if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc() - }) + + addWithExemplar( + counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + 1, + hOpts.getExemplarFn(r.Context()), + ) + } } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) - counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc() - }) + addWithExemplar( + counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), + 1, + hOpts.getExemplarFn(r.Context()), + ) + } } // InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided @@ -148,20 +186,24 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(obs) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { - obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) + observeWithExemplar( + obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)), + time.Since(now).Seconds(), + hOpts.getExemplarFn(r.Context()), + ) }) next.ServeHTTP(d, r) - }) + } } // InstrumentHandlerRequestSize is a middleware that wraps the provided @@ -184,27 +226,34 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha // // See the example for InstrumentHandlerDuration for example usage. 
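[Illustrative aside, not part of the vendored patch: the handler-side helpers chain the same way, and the new Option plumbing can derive exemplar labels from the request context via WithExemplarFromContext. A sketch assuming a hypothetical traceIDFromContext helper and invented metric names.]

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type traceIDKey struct{}

// traceIDFromContext is a placeholder for however the application carries a trace id.
func traceIDFromContext(ctx context.Context) string {
	id, _ := ctx.Value(traceIDKey{}).(string)
	return id
}

func main() {
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "api_request_duration_seconds",
		Help:    "Request latencies.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "api_requests_total",
		Help: "Requests by code and method.",
	}, []string{"code", "method"})
	prometheus.MustRegister(duration, counter)

	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// When the hook returns non-nil labels, the wrapped metrics record an exemplar;
	// when it returns nil, the metrics are updated without one.
	exemplarOpt := promhttp.WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
		if id := traceIDFromContext(ctx); id != "" {
			return prometheus.Labels{"trace_id": id}
		}
		return nil
	})

	chain := promhttp.InstrumentHandlerDuration(duration,
		promhttp.InstrumentHandlerCounter(counter, handler, exemplarOpt),
		exemplarOpt,
	)
	http.Handle("/api", chain)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}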
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(obs) - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size)) - }) + observeWithExemplar( + obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + float64(size), + hOpts.getExemplarFn(r.Context()), + ) + } } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size)) - }) + observeWithExemplar( + obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), + float64(size), + hOpts.getExemplarFn(r.Context()), + ) + } } // InstrumentHandlerResponseSize is a middleware that wraps the provided @@ -227,9 +276,9 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(obs) @@ -237,7 +286,11 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written())) + observeWithExemplar( + obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + float64(d.Written()), + hOpts.getExemplarFn(r.Context()), + ) }) } @@ -246,7 +299,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler // Collector does not have a Desc or has more than one Desc or its Desc is // invalid. It also panics if the Collector has any non-const, non-curried // labels that are not named "code" or "method". -func checkLabels(c prometheus.Collector) (code bool, method bool) { +func checkLabels(c prometheus.Collector) (code, method bool) { // TODO(beorn7): Remove this hacky way to check for instance labels // once Descriptors can have their dimensionality queried. var ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go index 35e41bd1e6bc..c590d912c947 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go @@ -13,19 +13,46 @@ package promhttp -// Option are used to configure a middleware or round tripper.. -type Option func(*option) +import ( + "context" -type option struct { - extraMethods []string + "github.com/prometheus/client_golang/prometheus" +) + +// Option are used to configure both handler (middleware) or round tripper. 
+type Option interface { + apply(*options) +} + +// options store options for both a handler or round tripper. +type options struct { + extraMethods []string + getExemplarFn func(requestCtx context.Context) prometheus.Labels +} + +func defaultOptions() *options { + return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }} } +type optionApplyFunc func(*options) + +func (o optionApplyFunc) apply(opt *options) { o(opt) } + // WithExtraMethods adds additional HTTP methods to the list of allowed methods. // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list. // // See the example for ExampleInstrumentHandlerWithExtraMethods for example usage. func WithExtraMethods(methods ...string) Option { - return func(o *option) { + return optionApplyFunc(func(o *options) { o.extraMethods = methods - } + }) +} + +// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics. +// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric +// will get instrumented without exemplar. +func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option { + return optionApplyFunc(func(o *options) { + o.getExemplarFn = getExemplarFn + }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go index c1a6cb99fcbb..29f6cd309c91 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go @@ -15,17 +15,17 @@ // builder approach. Create a Pusher with New and then add the various options // by using its methods, finally calling Add or Push, like this: // -// // Easy case: -// push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push() +// // Easy case: +// push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push() // -// // Complex case: -// push.New("http://example.org/metrics", "my_job"). -// Collector(myCollector1). -// Collector(myCollector2). -// Grouping("zone", "xy"). -// Client(&myHTTPClient). -// BasicAuth("top", "secret"). -// Add() +// // Complex case: +// push.New("http://example.org/metrics", "my_job"). +// Collector(myCollector1). +// Collector(myCollector2). +// Grouping("zone", "xy"). +// Client(&myHTTPClient). +// BasicAuth("top", "secret"). +// Add() // // See the examples section for more detailed examples. // @@ -36,10 +36,11 @@ package push import ( "bytes" + "context" "encoding/base64" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -97,9 +98,7 @@ func New(url, job string) *Pusher { if !strings.Contains(url, "://") { url = "http://" + url } - if strings.HasSuffix(url, "/") { - url = url[:len(url)-1] - } + url = strings.TrimSuffix(url, "/") return &Pusher{ error: err, @@ -123,14 +122,28 @@ func New(url, job string) *Pusher { // Push returns the first error encountered by any method call (including this // one) in the lifetime of the Pusher. func (p *Pusher) Push() error { - return p.push(http.MethodPut) + return p.push(context.Background(), http.MethodPut) +} + +// PushContext is like Push but includes a context. +// +// If the context expires before HTTP request is complete, an error is returned. 
+func (p *Pusher) PushContext(ctx context.Context) error { + return p.push(ctx, http.MethodPut) } // Add works like push, but only previously pushed metrics with the same name // (and the same job and other grouping labels) will be replaced. (It uses HTTP // method “POST” to push to the Pushgateway.) func (p *Pusher) Add() error { - return p.push(http.MethodPost) + return p.push(context.Background(), http.MethodPost) +} + +// AddContext is like Add but includes a context. +// +// If the context expires before HTTP request is complete, an error is returned. +func (p *Pusher) AddContext(ctx context.Context) error { + return p.push(ctx, http.MethodPost) } // Gatherer adds a Gatherer to the Pusher, from which metrics will be gathered @@ -155,6 +168,11 @@ func (p *Pusher) Collector(c prometheus.Collector) *Pusher { return p } +// Error returns the error that was encountered. +func (p *Pusher) Error() error { + return p.error +} + // Grouping adds a label pair to the grouping key of the Pusher, replacing any // previously added label pair with the same label name. Note that setting any // labels in the grouping key that are already contained in the metrics to push @@ -227,13 +245,13 @@ func (p *Pusher) Delete() error { } defer resp.Body.Close() if resp.StatusCode != http.StatusAccepted { - body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. + body, _ := io.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. return fmt.Errorf("unexpected status code %d while deleting %s: %s", resp.StatusCode, p.fullURL(), body) } return nil } -func (p *Pusher) push(method string) error { +func (p *Pusher) push(ctx context.Context, method string) error { if p.error != nil { return p.error } @@ -258,9 +276,13 @@ func (p *Pusher) push(method string) error { } } } - enc.Encode(mf) + if err := enc.Encode(mf); err != nil { + return fmt.Errorf( + "failed to encode metric familty %s, error is %w", + mf.GetName(), err) + } } - req, err := http.NewRequest(method, p.fullURL(), buf) + req, err := http.NewRequestWithContext(ctx, method, p.fullURL(), buf) if err != nil { return err } @@ -275,7 +297,7 @@ func (p *Pusher) push(method string) error { defer resp.Body.Close() // Depending on version and configuration of the PGW, StatusOK or StatusAccepted may be returned. if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted { - body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. + body, _ := io.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, p.fullURL(), body) } return nil diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 383a7f5941a5..09e34d307c97 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -15,8 +15,8 @@ package prometheus import ( "bytes" + "errors" "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -252,9 +252,12 @@ func (errs MultiError) MaybeUnwrap() error { } // Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements both Registerer and -// Gatherer. The zero value is not usable. Create instances with NewRegistry or -// NewPedanticRegistry. 
+// them into MetricFamilies for exposition. It implements Registerer, Gatherer, +// and Collector. The zero value is not usable. Create instances with +// NewRegistry or NewPedanticRegistry. +// +// Registry implements Collector to allow it to be used for creating groups of +// metrics. See the Grouping example for how this can be done. type Registry struct { mtx sync.RWMutex collectorsByID map[uint64]Collector // ID is a hash of the descIDs. @@ -289,7 +292,7 @@ func (r *Registry) Register(c Collector) error { // Is the descriptor valid at all? if desc.err != nil { - return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err) } // Is the descID unique? @@ -407,6 +410,14 @@ func (r *Registry) MustRegister(cs ...Collector) { // Gather implements Gatherer. func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + r.mtx.RLock() + + if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 { + // Fast path. + r.mtx.RUnlock() + return nil, nil + } + var ( checkedMetricChan = make(chan Metric, capMetricChan) uncheckedMetricChan = make(chan Metric, capMetricChan) @@ -416,7 +427,6 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { registeredDescIDs map[uint64]struct{} // Only used for pedantic checks ) - r.mtx.RLock() goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) checkedCollectors := make(chan Collector, len(r.collectorsByID)) @@ -549,6 +559,31 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } +// Describe implements Collector. +func (r *Registry) Describe(ch chan<- *Desc) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + // Only report the checked Collectors; unchecked collectors don't report any + // Desc. + for _, c := range r.collectorsByID { + c.Describe(ch) + } +} + +// Collect implements Collector. +func (r *Registry) Collect(ch chan<- Metric) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + for _, c := range r.collectorsByID { + c.Collect(ch) + } + for _, c := range r.uncheckedCollectors { + c.Collect(ch) + } +} + // WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the // Prometheus text format, and writes it to a temporary file. Upon success, the // temporary file is renamed to the provided filename. @@ -556,7 +591,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { // This is intended for use with the textfile collector of the node exporter. // Note that the node exporter expects the filename to be suffixed with ".prom". func WriteToTextfile(filename string, g Gatherer) error { - tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)) if err != nil { return err } @@ -575,7 +610,7 @@ func WriteToTextfile(filename string, g Gatherer) error { return err } - if err := os.Chmod(tmp.Name(), 0644); err != nil { + if err := os.Chmod(tmp.Name(), 0o644); err != nil { return err } return os.Rename(tmp.Name(), filename) @@ -596,7 +631,7 @@ func processMetric( } dtoMetric := &dto.Metric{} if err := metric.Write(dtoMetric); err != nil { - return fmt.Errorf("error collecting metric %v: %s", desc, err) + return fmt.Errorf("error collecting metric %v: %w", desc, err) } metricFamily, ok := metricFamiliesByName[desc.fqName] if ok { // Existing name. 
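[Illustrative aside, not part of the vendored patch: per the updated doc comment above, a Registry now implements Collector, so one registry can be registered with another to form a group of metrics. A minimal sketch with invented names.]

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A sub-registry holding one logical group of metrics.
	group := prometheus.NewRegistry()
	group.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "group_ops_total",
		Help: "Operations performed by this group.",
	}))

	// Because Registry implements Collector, the whole group can be
	// registered with, and gathered through, a parent registry.
	parent := prometheus.NewRegistry()
	parent.MustRegister(group)

	mfs, err := parent.Gather()
	fmt.Println(len(mfs), err)
}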
@@ -718,12 +753,13 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { for i, g := range gs { mfs, err := g.Gather() if err != nil { - if multiErr, ok := err.(MultiError); ok { + multiErr := MultiError{} + if errors.As(err, &multiErr) { for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) } } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) } } for _, mf := range mfs { @@ -884,11 +920,11 @@ func checkMetricConsistency( h.Write(separatorByteSlice) // Make sure label pairs are sorted. We depend on it for the consistency // check. - if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) { // We cannot sort dtoMetric.Label in place as it is immutable by contract. copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) copy(copiedLabels, dtoMetric.Label) - sort.Sort(labelPairSorter(copiedLabels)) + sort.Sort(internal.LabelPairSorter(copiedLabels)) dtoMetric.Label = copiedLabels } for _, lp := range dtoMetric.Label { @@ -935,7 +971,7 @@ func checkDescConsistency( metricFamily.GetName(), dtoMetric, desc, ) } - sort.Sort(labelPairSorter(lpsFromDesc)) + sort.Sort(internal.LabelPairSorter(lpsFromDesc)) for i, lpFromDesc := range lpsFromDesc { lpFromMetric := dtoMetric.Label[i] if lpFromDesc.GetName() != lpFromMetric.GetName() || @@ -948,3 +984,89 @@ func checkDescConsistency( } return nil } + +var _ TransactionalGatherer = &MultiTRegistry{} + +// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple +// transactional gatherers. +// +// It is caller responsibility to ensure two registries have mutually exclusive metric families, +// no deduplication will happen. +type MultiTRegistry struct { + tGatherers []TransactionalGatherer +} + +// NewMultiTRegistry creates MultiTRegistry. +func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry { + return &MultiTRegistry{ + tGatherers: tGatherers, + } +} + +// Gather implements TransactionalGatherer interface. +func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) { + errs := MultiError{} + + dFns := make([]func(), 0, len(r.tGatherers)) + // TODO(bwplotka): Implement concurrency for those? + for _, g := range r.tGatherers { + // TODO(bwplotka): Check for duplicates? + m, d, err := g.Gather() + errs.Append(err) + + mfs = append(mfs, m...) + dFns = append(dFns, d) + } + + // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already. + sort.Slice(mfs, func(i, j int) bool { + return *mfs[i].Name < *mfs[j].Name + }) + return mfs, func() { + for _, d := range dFns { + d() + } + }, errs.MaybeUnwrap() +} + +// TransactionalGatherer represents transactional gatherer that can be triggered to notify gatherer that memory +// used by metric family is no longer used by a caller. This allows implementations with cache. +type TransactionalGatherer interface { + // Gather returns metrics in a lexicographically sorted slice + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. 
+ // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. + // + // Important: done is expected to be triggered (even if the error occurs!) + // once caller does not need returned slice of dto.MetricFamily. + Gather() (_ []*dto.MetricFamily, done func(), err error) +} + +// ToTransactionalGatherer transforms Gatherer to transactional one with noop as done function. +func ToTransactionalGatherer(g Gatherer) TransactionalGatherer { + return &noTransactionGatherer{g: g} +} + +type noTransactionGatherer struct { + g Gatherer +} + +// Gather implements TransactionalGatherer interface. +func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) { + mfs, err := g.g.Gather() + return mfs, func() {}, err +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index c5fa8ed7c71a..7bc448a89394 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -603,7 +603,8 @@ func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { s, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -614,7 +615,8 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *SummaryVec) With(labels Labels) Observer { s, err := v.GetMetricWith(labels) if err != nil { @@ -701,7 +703,8 @@ func (s *constSummary) Write(out *dto.Metric) error { // // quantiles maps ranks to quantile values. For example, a median latency of // 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. 
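The TransactionalGatherer contract documented above is the notable new surface in this hunk: Gather now also hands back a done callback that the caller must invoke once the returned dto.MetricFamily slice is no longer needed, even when an error was returned, and ToTransactionalGatherer adapts any ordinary Gatherer with a no-op done. A minimal usage sketch of that call pattern, assuming a freshly created registry and a purely illustrative counter name:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_requests_total", // hypothetical metric, for illustration only
		Help: "Requests handled by the example.",
	}))

	// Adapt the plain Gatherer; the returned done is a no-op here, but the
	// call pattern is the same for caching TransactionalGatherer implementations.
	tg := prometheus.ToTransactionalGatherer(reg)
	mfs, done, err := tg.Gather()
	defer done() // must be called even if err is non-nil
	if err != nil {
		log.Printf("gathering failed: %v", err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}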
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go index 7681877a8942..8d2f05500b3e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go @@ -26,7 +26,7 @@ import ( func CollectAndLint(c prometheus.Collector, metricNames ...string) ([]promlint.Problem, error) { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { - return nil, fmt.Errorf("registering collector failed: %s", err) + return nil, fmt.Errorf("registering collector failed: %w", err) } return GatherAndLint(reg, metricNames...) } @@ -37,7 +37,7 @@ func CollectAndLint(c prometheus.Collector, metricNames ...string) ([]promlint.P func GatherAndLint(g prometheus.Gatherer, metricNames ...string) ([]promlint.Problem, error) { got, err := g.Gather() if err != nil { - return nil, fmt.Errorf("gathering metrics failed: %s", err) + return nil, fmt.Errorf("gathering metrics failed: %w", err) } if metricNames != nil { got = filterMetrics(got, metricNames) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go index ec8061706250..a20f159b78e9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go @@ -15,6 +15,7 @@ package promlint import ( + "errors" "fmt" "io" "regexp" @@ -83,7 +84,7 @@ func (l *Linter) Lint() ([]Problem, error) { mf := &dto.MetricFamily{} for { if err := d.Decode(mf); err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { break } @@ -283,7 +284,7 @@ func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem { // metricUnits attempts to detect known unit types used as part of a metric name, // e.g. "foo_bytes_total" or "bar_baz_milligrams". 
-func metricUnits(m string) (unit string, base string, ok bool) { +func metricUnits(m string) (unit, base string, ok bool) { ss := strings.Split(m, "_") for unit, base := range units { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 9af60ce1d21a..91b83b5285db 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -41,10 +41,12 @@ import ( "bytes" "fmt" "io" + "net/http" + "reflect" - "github.com/prometheus/common/expfmt" - + "github.com/davecgh/go-spew/spew" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/internal" @@ -99,7 +101,9 @@ func ToFloat64(c prometheus.Collector) float64 { } pb := &dto.Metric{} - m.Write(pb) + if err := m.Write(pb); err != nil { + panic(fmt.Errorf("error happened while collecting metrics: %w", err)) + } if pb.Gauge != nil { return pb.Gauge.GetValue() } @@ -122,7 +126,7 @@ func ToFloat64(c prometheus.Collector) float64 { func CollectAndCount(c prometheus.Collector, metricNames ...string) int { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { - panic(fmt.Errorf("registering collector failed: %s", err)) + panic(fmt.Errorf("registering collector failed: %w", err)) } result, err := GatherAndCount(reg, metricNames...) if err != nil { @@ -138,7 +142,7 @@ func CollectAndCount(c prometheus.Collector, metricNames ...string) int { func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) { got, err := g.Gather() if err != nil { - return 0, fmt.Errorf("gathering metrics failed: %s", err) + return 0, fmt.Errorf("gathering metrics failed: %w", err) } if metricNames != nil { got = filterMetrics(got, metricNames) @@ -151,13 +155,41 @@ func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) { return result, nil } +// ScrapeAndCompare calls a remote exporter's endpoint which is expected to return some metrics in +// plain text format. Then it compares it with the results that the `expected` would return. +// If the `metricNames` is not empty it would filter the comparison only to the given metric names. +func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) error { + resp, err := http.Get(url) + if err != nil { + return fmt.Errorf("scraping metrics failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("the scraping target returned a status code other than 200: %d", + resp.StatusCode) + } + + scraped, err := convertReaderToMetricFamily(resp.Body) + if err != nil { + return err + } + + wanted, err := convertReaderToMetricFamily(expected) + if err != nil { + return err + } + + return compareMetricFamilies(scraped, wanted, metricNames...) +} + // CollectAndCompare registers the provided Collector with a newly created // pedantic Registry. It then calls GatherAndCompare with that Registry and with // the provided metricNames. 
func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { - return fmt.Errorf("registering collector failed: %s", err) + return fmt.Errorf("registering collector failed: %w", err) } return GatherAndCompare(reg, expected, metricNames...) } @@ -167,21 +199,48 @@ func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames . // exposition format. If any metricNames are provided, only metrics with those // names are compared. func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error { - got, err := g.Gather() + return TransactionalGatherAndCompare(prometheus.ToTransactionalGatherer(g), expected, metricNames...) +} + +// TransactionalGatherAndCompare gathers all metrics from the provided Gatherer and compares +// it to an expected output read from the provided Reader in the Prometheus text +// exposition format. If any metricNames are provided, only metrics with those +// names are compared. +func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected io.Reader, metricNames ...string) error { + got, done, err := g.Gather() + defer done() if err != nil { - return fmt.Errorf("gathering metrics failed: %s", err) + return fmt.Errorf("gathering metrics failed: %w", err) } - if metricNames != nil { - got = filterMetrics(got, metricNames) + + wanted, err := convertReaderToMetricFamily(expected) + if err != nil { + return err } + + return compareMetricFamilies(got, wanted, metricNames...) +} + +// convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of +// dto.MetricFamily. +func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { var tp expfmt.TextParser - wantRaw, err := tp.TextToMetricFamilies(expected) + notNormalized, err := tp.TextToMetricFamilies(reader) if err != nil { - return fmt.Errorf("parsing expected metrics failed: %s", err) + return nil, fmt.Errorf("converting reader to metric families failed: %w", err) } - want := internal.NormalizeMetricFamilies(wantRaw) - return compare(got, want) + return internal.NormalizeMetricFamilies(notNormalized), nil +} + +// compareMetricFamilies would compare 2 slices of metric families, and optionally filters both of +// them to the `metricNames` provided. +func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...string) error { + if metricNames != nil { + got = filterMetrics(got, metricNames) + } + + return compare(got, expected) } // compare encodes both provided slices of metric families into the text format, @@ -193,27 +252,80 @@ func compare(got, want []*dto.MetricFamily) error { enc := expfmt.NewEncoder(&gotBuf, expfmt.FmtText) for _, mf := range got { if err := enc.Encode(mf); err != nil { - return fmt.Errorf("encoding gathered metrics failed: %s", err) + return fmt.Errorf("encoding gathered metrics failed: %w", err) } } enc = expfmt.NewEncoder(&wantBuf, expfmt.FmtText) for _, mf := range want { if err := enc.Encode(mf); err != nil { - return fmt.Errorf("encoding expected metrics failed: %s", err) + return fmt.Errorf("encoding expected metrics failed: %w", err) } } + if diffErr := diff(wantBuf, gotBuf); diffErr != "" { + return fmt.Errorf(diffErr) + } + return nil +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice, array or string. Otherwise it returns an empty string. 
+func diff(expected, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } - if wantBuf.String() != gotBuf.String() { - return fmt.Errorf(` -metric output does not match expectation; want: + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + if et != at { + return "" + } -%s -got: + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { + return "" + } + + var e, a string + c := spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + } + if et != reflect.TypeOf("") { + e = c.Sdump(expected) + a = c.Sdump(actual) + } else { + e = reflect.ValueOf(expected).String() + a = reflect.ValueOf(actual).String() + } -%s`, wantBuf.String(), gotBuf.String()) + diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ + A: internal.SplitLines(e), + B: internal.SplitLines(a), + FromFile: "metric output does not match expectation; want", + FromDate: "", + ToFile: "got:", + ToDate: "", + Context: 1, + }) + if diff == "" { + return "" } - return nil + + return "\n\nDiff:\n" + diff +} + +// typeAndKind returns the type and kind of the given interface{} +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k } func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go index 8d5f10523375..f28a76f3a62a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -25,11 +25,12 @@ type Timer struct { // NewTimer creates a new Timer. The provided Observer is used to observe a // duration in seconds. Timer is usually used to time a function call in the // following way: -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } +// +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } func NewTimer(o Observer) *Timer { return &Timer{ begin: time.Now(), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index b4e0ae11cb4b..2d3abc1cbd68 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -23,6 +23,8 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/protobuf/types/known/timestamppb" + "github.com/prometheus/client_golang/prometheus/internal" + dto "github.com/prometheus/client_model/go" ) @@ -38,6 +40,23 @@ const ( UntypedValue ) +var ( + CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }() + GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }() + UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }() +) + +func (v ValueType) ToDTO() *dto.MetricType { + switch v { + case CounterValue: + return CounterMetricTypePtr + case GaugeValue: + return GaugeMetricTypePtr + default: + return UntypedMetricTypePtr + } +} + // valueFunc is a generic metric for simple values retrieved on collect time // from a function. It implements Metric and Collector. 
Its effective type is // determined by ValueType. This is a low-level building block used by the @@ -91,11 +110,15 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { return nil, err } + + metric := &dto.Metric{} + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil { + return nil, err + } + return &constMetric{ - desc: desc, - valType: valueType, - val: value, - labelPairs: MakeLabelPairs(desc, labelValues), + desc: desc, + metric: metric, }, nil } @@ -110,10 +133,8 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal } type constMetric struct { - desc *Desc - valType ValueType - val float64 - labelPairs []*dto.LabelPair + desc *Desc + metric *dto.Metric } func (m *constMetric) Desc() *Desc { @@ -121,7 +142,11 @@ func (m *constMetric) Desc() *Desc { } func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, nil, out) + out.Label = m.metric.Label + out.Counter = m.metric.Counter + out.Gauge = m.metric.Gauge + out.Untyped = m.metric.Untyped + return nil } func populateMetric( @@ -170,12 +195,12 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { }) } labelPairs = append(labelPairs, desc.constLabelPairs...) - sort.Sort(labelPairSorter(labelPairs)) + sort.Sort(internal.LabelPairSorter(labelPairs)) return labelPairs } // ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. -const ExemplarMaxRunes = 64 +const ExemplarMaxRunes = 128 // newExemplar creates a new dto.Exemplar from the provided values. An error is // returned if any of the label names or values are invalid or if the total diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 4ababe6c9812..7ae322590c86 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -99,6 +99,16 @@ func (m *MetricVec) Delete(labels Labels) bool { return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) } +// DeletePartialMatch deletes all metrics where the variable labels contain all of those +// passed in as labels. The order of the labels does not matter. +// It returns the number of metrics deleted. +// +// Note that curried labels will never be matched if deleting from the curried vector. +// To match curried labels with DeletePartialMatch, it must be called on the base vector. +func (m *MetricVec) DeletePartialMatch(labels Labels) int { + return m.metricMap.deleteByLabels(labels, m.curry) +} + // Without explicit forwarding of Describe, Collect, Reset, those methods won't // show up in GoDoc. @@ -381,6 +391,82 @@ func (m *metricMap) deleteByHashWithLabels( return true } +// deleteByLabels deletes a metric if the given labels are present in the metric. +func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int { + m.mtx.Lock() + defer m.mtx.Unlock() + + var numDeleted int + + for h, metrics := range m.metrics { + i := findMetricWithPartialLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + // Didn't find matching labels in this metric slice. + continue + } + delete(m.metrics, h) + numDeleted++ + } + + return numDeleted +} + +// findMetricWithPartialLabel returns the index of the matching metric or +// len(metrics) if not found. 
+func findMetricWithPartialLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchPartialLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +// indexOf searches the given slice of strings for the target string and returns +// the index or len(items) as well as a boolean whether the search succeeded. +func indexOf(target string, items []string) (int, bool) { + for i, l := range items { + if l == target { + return i, true + } + } + return len(items), false +} + +// valueMatchesVariableOrCurriedValue determines if a value was previously curried, +// and returns whether it matches either the "base" value or the curried value accordingly. +// It also indicates whether the match is against a curried or uncurried value. +func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) { + for _, curriedValue := range curry { + if curriedValue.index == index { + // This label was curried. See if the curried value matches our target. + return curriedValue.value == targetValue, true + } + } + // This label was not curried. See if the current value matches our target label. + return values[index] == targetValue, false +} + +// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present. +func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + for l, v := range labels { + // Check if the target label exists in our metrics and get the index. + varLabelIndex, validLabel := indexOf(l, desc.variableLabels) + if validLabel { + // Check the value of that label against the target value. + // We don't consider curried values in partial matches. + matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry) + if matches && !curried { + continue + } + } + return false + } + return true +} + // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value // or creates it and returns the new one. 
// @@ -485,7 +571,7 @@ func findMetricWithLabels( return len(metrics) } -func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { +func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool { if len(values) != len(lvs)+len(curry) { return false } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 74ee93280fed..1498ee144cb0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -21,6 +21,8 @@ import ( "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" ) // WrapRegistererWith returns a Registerer wrapping the provided @@ -182,7 +184,7 @@ func (m *wrappingMetric) Write(out *dto.Metric) error { Value: proto.String(lv), }) } - sort.Sort(labelPairSorter(out.Label)) + sort.Sort(internal.LabelPairSorter(out.Label)) return nil } diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 2f4930d9dd34..35904ea19861 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: metrics.proto +// source: io/prometheus/client/metrics.proto package io_prometheus_client @@ -24,11 +24,18 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type MetricType int32 const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 + // COUNTER must use the Metric field "counter". + MetricType_COUNTER MetricType = 0 + // GAUGE must use the Metric field "gauge". + MetricType_GAUGE MetricType = 1 + // SUMMARY must use the Metric field "summary". + MetricType_SUMMARY MetricType = 2 + // UNTYPED must use the Metric field "untyped". + MetricType_UNTYPED MetricType = 3 + // HISTOGRAM must use the Metric field "histogram". MetricType_HISTOGRAM MetricType = 4 + // GAUGE_HISTOGRAM must use the Metric field "histogram". 
+ MetricType_GAUGE_HISTOGRAM MetricType = 5 ) var MetricType_name = map[int32]string{ @@ -37,14 +44,16 @@ var MetricType_name = map[int32]string{ 2: "SUMMARY", 3: "UNTYPED", 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", } var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, } func (x MetricType) Enum() *MetricType { @@ -67,7 +76,7 @@ func (x *MetricType) UnmarshalJSON(data []byte) error { } func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} + return fileDescriptor_d1e5ddb18987a258, []int{0} } type LabelPair struct { @@ -82,7 +91,7 @@ func (m *LabelPair) Reset() { *m = LabelPair{} } func (m *LabelPair) String() string { return proto.CompactTextString(m) } func (*LabelPair) ProtoMessage() {} func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} + return fileDescriptor_d1e5ddb18987a258, []int{0} } func (m *LabelPair) XXX_Unmarshal(b []byte) error { @@ -128,7 +137,7 @@ func (m *Gauge) Reset() { *m = Gauge{} } func (m *Gauge) String() string { return proto.CompactTextString(m) } func (*Gauge) ProtoMessage() {} func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{1} + return fileDescriptor_d1e5ddb18987a258, []int{1} } func (m *Gauge) XXX_Unmarshal(b []byte) error { @@ -168,7 +177,7 @@ func (m *Counter) Reset() { *m = Counter{} } func (m *Counter) String() string { return proto.CompactTextString(m) } func (*Counter) ProtoMessage() {} func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{2} + return fileDescriptor_d1e5ddb18987a258, []int{2} } func (m *Counter) XXX_Unmarshal(b []byte) error { @@ -215,7 +224,7 @@ func (m *Quantile) Reset() { *m = Quantile{} } func (m *Quantile) String() string { return proto.CompactTextString(m) } func (*Quantile) ProtoMessage() {} func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{3} + return fileDescriptor_d1e5ddb18987a258, []int{3} } func (m *Quantile) XXX_Unmarshal(b []byte) error { @@ -263,7 +272,7 @@ func (m *Summary) Reset() { *m = Summary{} } func (m *Summary) String() string { return proto.CompactTextString(m) } func (*Summary) ProtoMessage() {} func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{4} + return fileDescriptor_d1e5ddb18987a258, []int{4} } func (m *Summary) XXX_Unmarshal(b []byte) error { @@ -316,7 +325,7 @@ func (m *Untyped) Reset() { *m = Untyped{} } func (m *Untyped) String() string { return proto.CompactTextString(m) } func (*Untyped) ProtoMessage() {} func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{5} + return fileDescriptor_d1e5ddb18987a258, []int{5} } func (m *Untyped) XXX_Unmarshal(b []byte) error { @@ -345,9 +354,34 @@ func (m *Untyped) GetValue() float64 { } type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleCountFloat *float64 
`protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + // Buckets for the conventional histogram. + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). + // In the future, more bucket schemas may be added using numbers < -4 or > 8. + Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` + ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` + ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` + ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` + // Negative buckets for the native histogram. + NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` + NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` + // Positive buckets for the native histogram. + PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. 
+ PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` + PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -357,7 +391,7 @@ func (m *Histogram) Reset() { *m = Histogram{} } func (m *Histogram) String() string { return proto.CompactTextString(m) } func (*Histogram) ProtoMessage() {} func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{6} + return fileDescriptor_d1e5ddb18987a258, []int{6} } func (m *Histogram) XXX_Unmarshal(b []byte) error { @@ -385,6 +419,13 @@ func (m *Histogram) GetSampleCount() uint64 { return 0 } +func (m *Histogram) GetSampleCountFloat() float64 { + if m != nil && m.SampleCountFloat != nil { + return *m.SampleCountFloat + } + return 0 +} + func (m *Histogram) GetSampleSum() float64 { if m != nil && m.SampleSum != nil { return *m.SampleSum @@ -399,8 +440,81 @@ func (m *Histogram) GetBucket() []*Bucket { return nil } +func (m *Histogram) GetSchema() int32 { + if m != nil && m.Schema != nil { + return *m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil && m.ZeroThreshold != nil { + return *m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCount() uint64 { + if m != nil && m.ZeroCount != nil { + return *m.ZeroCount + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if m != nil && m.ZeroCountFloat != nil { + return *m.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpan() []*BucketSpan { + if m != nil { + return m.NegativeSpan + } + return nil +} + +func (m *Histogram) GetNegativeDelta() []int64 { + if m != nil { + return m.NegativeDelta + } + return nil +} + +func (m *Histogram) GetNegativeCount() []float64 { + if m != nil { + return m.NegativeCount + } + return nil +} + +func (m *Histogram) GetPositiveSpan() []*BucketSpan { + if m != nil { + return m.PositiveSpan + } + return nil +} + +func (m *Histogram) GetPositiveDelta() []int64 { + if m != nil { + return m.PositiveDelta + } + return nil +} + +func (m *Histogram) GetPositiveCount() []float64 { + if m != nil { + return m.PositiveCount + } + return nil +} + +// A Bucket of a conventional histogram, each of which is treated as +// an individual counter-like time series by Prometheus. 
type Bucket struct { CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -412,7 +526,7 @@ func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{7} + return fileDescriptor_d1e5ddb18987a258, []int{7} } func (m *Bucket) XXX_Unmarshal(b []byte) error { @@ -440,6 +554,13 @@ func (m *Bucket) GetCumulativeCount() uint64 { return 0 } +func (m *Bucket) GetCumulativeCountFloat() float64 { + if m != nil && m.CumulativeCountFloat != nil { + return *m.CumulativeCountFloat + } + return 0 +} + func (m *Bucket) GetUpperBound() float64 { if m != nil && m.UpperBound != nil { return *m.UpperBound @@ -454,6 +575,59 @@ func (m *Bucket) GetExemplar() *Exemplar { return nil } +// A BucketSpan defines a number of consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). +type BucketSpan struct { + Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` + Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{8} +} + +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketSpan.Unmarshal(m, b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return xxx_messageInfo_BucketSpan.Size(m) +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil && m.Length != nil { + return *m.Length + } + return 0 +} + type Exemplar struct { Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` @@ -467,7 +641,7 @@ func (m *Exemplar) Reset() { *m = Exemplar{} } func (m *Exemplar) String() string { return proto.CompactTextString(m) } func (*Exemplar) ProtoMessage() {} func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{8} + return 
fileDescriptor_d1e5ddb18987a258, []int{9} } func (m *Exemplar) XXX_Unmarshal(b []byte) error { @@ -526,7 +700,7 @@ func (m *Metric) Reset() { *m = Metric{} } func (m *Metric) String() string { return proto.CompactTextString(m) } func (*Metric) ProtoMessage() {} func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{9} + return fileDescriptor_d1e5ddb18987a258, []int{10} } func (m *Metric) XXX_Unmarshal(b []byte) error { @@ -610,7 +784,7 @@ func (m *MetricFamily) Reset() { *m = MetricFamily{} } func (m *MetricFamily) String() string { return proto.CompactTextString(m) } func (*MetricFamily) ProtoMessage() {} func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{10} + return fileDescriptor_d1e5ddb18987a258, []int{11} } func (m *MetricFamily) XXX_Unmarshal(b []byte) error { @@ -669,55 +843,72 @@ func init() { proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") } -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } - -var fileDescriptor_6039342a2ba47b72 = []byte{ - // 665 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, - 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, - 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, - 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, - 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, - 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, - 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, - 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, - 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, - 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, - 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, - 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, - 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, - 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, - 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, - 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, - 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, - 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, - 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, - 0x58, 
0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, - 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, - 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, - 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, - 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, - 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, - 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, - 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, - 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, - 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, - 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, - 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, - 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, - 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, - 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, - 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, - 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, - 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, - 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, - 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, - 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, +func init() { + proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) +} + +var fileDescriptor_d1e5ddb18987a258 = []byte{ + // 896 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48, + 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92, + 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0, + 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b, + 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3, + 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90, + 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25, + 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb, + 0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19, + 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba, + 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b, + 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 
0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c, + 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0, + 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5, + 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd, + 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d, + 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b, + 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b, + 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f, + 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b, + 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8, + 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e, + 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79, + 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29, + 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48, + 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca, + 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9, + 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5, + 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe, + 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55, + 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e, + 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83, + 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65, + 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a, + 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8, + 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf, + 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1, + 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97, + 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc, + 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7, + 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b, + 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58, + 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b, + 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8, + 0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f, + 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67, + 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28, + 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 
0xee, 0x27, + 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41, + 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f, + 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f, + 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac, + 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a, + 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab, + 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00, } diff --git a/vendor/github.com/prometheus/common/config/config.go b/vendor/github.com/prometheus/common/config/config.go index fffda4a7ef45..0b91f20d55af 100644 --- a/vendor/github.com/prometheus/common/config/config.go +++ b/vendor/github.com/prometheus/common/config/config.go @@ -18,6 +18,7 @@ package config import ( "encoding/json" + "net/http" "path/filepath" ) @@ -34,7 +35,7 @@ func (s Secret) MarshalYAML() (interface{}, error) { return nil, nil } -//UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets. +// UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets. func (s *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error { type plain Secret return unmarshal((*plain)(s)) @@ -48,6 +49,29 @@ func (s Secret) MarshalJSON() ([]byte, error) { return json.Marshal(secretToken) } +type Header map[string][]Secret + +func (h *Header) HTTPHeader() http.Header { + if h == nil || *h == nil { + return nil + } + + header := make(http.Header) + + for name, values := range *h { + var s []string + if values != nil { + s = make([]string, 0, len(values)) + for _, value := range values { + s = append(s, string(value)) + } + } + header[name] = s + } + + return header +} + // DirectorySetter is a config type that contains file paths that may // be relative to the file containing the config. type DirectorySetter interface { diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 4b8724171054..3965099972ac 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build go1.8 - package config import ( @@ -23,11 +21,11 @@ import ( "crypto/x509" "encoding/json" "fmt" - "io/ioutil" "net" "net/http" "net/url" "os" + "path/filepath" "strings" "sync" "time" @@ -39,24 +37,100 @@ import ( "gopkg.in/yaml.v2" ) -// DefaultHTTPClientConfig is the default HTTP client configuration. -var DefaultHTTPClientConfig = HTTPClientConfig{ - FollowRedirects: true, -} +var ( + // DefaultHTTPClientConfig is the default HTTP client configuration. + DefaultHTTPClientConfig = HTTPClientConfig{ + FollowRedirects: true, + EnableHTTP2: true, + } -// defaultHTTPClientOptions holds the default HTTP client options. -var defaultHTTPClientOptions = httpClientOptions{ - keepAlivesEnabled: true, - http2Enabled: true, - // 5 minutes is typically above the maximum sane scrape interval. So we can - // use keepalive for all configurations. - idleConnTimeout: 5 * time.Minute, -} + // defaultHTTPClientOptions holds the default HTTP client options. 
+ defaultHTTPClientOptions = httpClientOptions{ + keepAlivesEnabled: true, + http2Enabled: true, + // 5 minutes is typically above the maximum sane scrape interval. So we can + // use keepalive for all configurations. + idleConnTimeout: 5 * time.Minute, + } +) type closeIdler interface { CloseIdleConnections() } +type TLSVersion uint16 + +var TLSVersions = map[string]TLSVersion{ + "TLS13": (TLSVersion)(tls.VersionTLS13), + "TLS12": (TLSVersion)(tls.VersionTLS12), + "TLS11": (TLSVersion)(tls.VersionTLS11), + "TLS10": (TLSVersion)(tls.VersionTLS10), +} + +func (tv *TLSVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + err := unmarshal((*string)(&s)) + if err != nil { + return err + } + if v, ok := TLSVersions[s]; ok { + *tv = v + return nil + } + return fmt.Errorf("unknown TLS version: %s", s) +} + +func (tv *TLSVersion) MarshalYAML() (interface{}, error) { + if tv == nil || *tv == 0 { + return []byte("null"), nil + } + for s, v := range TLSVersions { + if *tv == v { + return s, nil + } + } + return nil, fmt.Errorf("unknown TLS version: %d", tv) +} + +// MarshalJSON implements the json.Unmarshaler interface for TLSVersion. +func (tv *TLSVersion) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if v, ok := TLSVersions[s]; ok { + *tv = v + return nil + } + return fmt.Errorf("unknown TLS version: %s", s) +} + +// MarshalJSON implements the json.Marshaler interface for TLSVersion. +func (tv *TLSVersion) MarshalJSON() ([]byte, error) { + if tv == nil || *tv == 0 { + return []byte("null"), nil + } + for s, v := range TLSVersions { + if *tv == v { + return []byte(s), nil + } + } + return nil, fmt.Errorf("unknown TLS version: %d", tv) +} + +// String implements the fmt.Stringer interface for TLSVersion. +func (tv *TLSVersion) String() string { + if tv == nil || *tv == 0 { + return "" + } + for s, v := range TLSVersions { + if *tv == v { + return s + } + } + return fmt.Sprintf("%d", tv) +} + // BasicAuth contains basic HTTP authentication credentials. type BasicAuth struct { Username string `yaml:"username" json:"username"` @@ -160,6 +234,8 @@ type OAuth2 struct { TokenURL string `yaml:"token_url" json:"token_url"` EndpointParams map[string]string `yaml:"endpoint_params,omitempty" json:"endpoint_params,omitempty"` + // HTTP proxy server to use to connect to the targets. + ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"` // TLSConfig is used to connect to the token URL. TLSConfig TLSConfig `yaml:"tls_config,omitempty"` } @@ -173,6 +249,30 @@ func (a *OAuth2) SetDirectory(dir string) { a.TLSConfig.SetDirectory(dir) } +// LoadHTTPConfig parses the YAML input s into a HTTPClientConfig. +func LoadHTTPConfig(s string) (*HTTPClientConfig, error) { + cfg := &HTTPClientConfig{} + err := yaml.UnmarshalStrict([]byte(s), cfg) + if err != nil { + return nil, err + } + return cfg, nil +} + +// LoadHTTPConfigFile parses the given YAML file into a HTTPClientConfig. +func LoadHTTPConfigFile(filename string) (*HTTPClientConfig, []byte, error) { + content, err := os.ReadFile(filename) + if err != nil { + return nil, nil, err + } + cfg, err := LoadHTTPConfig(string(content)) + if err != nil { + return nil, nil, err + } + cfg.SetDirectory(filepath.Dir(filepath.Dir(filename))) + return cfg, content, nil +} + // HTTPClientConfig configures an HTTP client. type HTTPClientConfig struct { // The HTTP basic authentication credentials for the targets. 
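The exported TLSVersions map and the LoadHTTPConfig/LoadHTTPConfigFile helpers added above allow an HTTP client configuration, including the new min_version/max_version and enable_http2 settings, to be parsed straight from YAML. A small sketch under the assumption that the YAML snippet itself is made up for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/common/config"
)

func main() {
	// Hypothetical configuration exercising the new TLS version and HTTP/2 knobs.
	yml := `
tls_config:
  min_version: TLS12
follow_redirects: true
enable_http2: true
`
	cfg, err := config.LoadHTTPConfig(yml)
	if err != nil {
		log.Fatalf("parsing HTTP client config: %v", err)
	}
	fmt.Printf("min TLS version: %s, HTTP/2 enabled: %v\n",
		cfg.TLSConfig.MinVersion.String(), cfg.EnableHTTP2)
}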
@@ -189,12 +289,21 @@ type HTTPClientConfig struct { BearerTokenFile string `yaml:"bearer_token_file,omitempty" json:"bearer_token_file,omitempty"` // HTTP proxy server to use to connect to the targets. ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"` + // ProxyConnectHeader optionally specifies headers to send to + // proxies during CONNECT requests. Assume that at least _some_ of + // these headers are going to contain secrets and use Secret as the + // value type instead of string. + ProxyConnectHeader Header `yaml:"proxy_connect_header,omitempty" json:"proxy_connect_header,omitempty"` // TLSConfig to use to connect to the targets. TLSConfig TLSConfig `yaml:"tls_config,omitempty" json:"tls_config,omitempty"` // FollowRedirects specifies whether the client should follow HTTP 3xx redirects. // The omitempty flag is not set, because it would be hidden from the // marshalled configuration when set to false. FollowRedirects bool `yaml:"follow_redirects" json:"follow_redirects"` + // EnableHTTP2 specifies whether the client should configure HTTP2. + // The omitempty flag is not set, because it would be hidden from the + // marshalled configuration when set to false. + EnableHTTP2 bool `yaml:"enable_http2" json:"enable_http2"` } // SetDirectory joins any relative file paths with dir. @@ -210,7 +319,8 @@ func (c *HTTPClientConfig) SetDirectory(dir string) { } // Validate validates the HTTPClientConfig to check only one of BearerToken, -// BasicAuth and BearerTokenFile is configured. +// BasicAuth and BearerTokenFile is configured. It also validates that ProxyURL +// is set if ProxyConnectHeader is set. func (c *HTTPClientConfig) Validate() error { // Backwards compatibility with the bearer_token field. if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 { @@ -268,6 +378,9 @@ func (c *HTTPClientConfig) Validate() error { return fmt.Errorf("at most one of oauth2 client_secret & client_secret_file must be configured") } } + if len(c.ProxyConnectHeader) > 0 && (c.ProxyURL.URL == nil || c.ProxyURL.String() == "") { + return fmt.Errorf("if proxy_connect_header is configured proxy_url must also be configured") + } return nil } @@ -306,6 +419,7 @@ type httpClientOptions struct { keepAlivesEnabled bool http2Enabled bool idleConnTimeout time.Duration + userAgent string } // HTTPClientOption defines an option that can be applied to the HTTP client. @@ -339,6 +453,13 @@ func WithIdleConnTimeout(timeout time.Duration) HTTPClientOption { } } +// WithUserAgent allows setting the user agent. +func WithUserAgent(ua string) HTTPClientOption { + return func(opts *httpClientOptions) { + opts.userAgent = ua + } +} + // NewClient returns a http.Client using the specified http.RoundTripper. func newClient(rt http.RoundTripper) *http.Client { return &http.Client{Transport: rt} @@ -388,6 +509,7 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT // It is applied on request. So we leave out any timings here. 
var rt http.RoundTripper = &http.Transport{ Proxy: http.ProxyURL(cfg.ProxyURL.URL), + ProxyConnectHeader: cfg.ProxyConnectHeader.HTTPHeader(), MaxIdleConns: 20000, MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801 DisableKeepAlives: !opts.keepAlivesEnabled, @@ -398,19 +520,13 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT ExpectContinueTimeout: 1 * time.Second, DialContext: dialContext, } - if opts.http2Enabled && os.Getenv("PROMETHEUS_COMMON_DISABLE_HTTP2") == "" { + if opts.http2Enabled && cfg.EnableHTTP2 { // HTTP/2 support is golang had many problematic cornercases where // dead connections would be kept and used in connection pools. // https://github.com/golang/go/issues/32388 // https://github.com/golang/go/issues/39337 // https://github.com/golang/go/issues/39750 - // Do not enable HTTP2 if the environment variable - // PROMETHEUS_COMMON_DISABLE_HTTP2 is set to a non-empty value. - // This allows users to easily disable HTTP2 in case they run into - // issues again, but will be removed once we are confident that - // things work as expected. - http2t, err := http2.ConfigureTransports(rt.(*http.Transport)) if err != nil { return nil, err @@ -438,8 +554,13 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT } if cfg.OAuth2 != nil { - rt = NewOAuth2RoundTripper(cfg.OAuth2, rt) + rt = NewOAuth2RoundTripper(cfg.OAuth2, rt, &opts) } + + if opts.userAgent != "" { + rt = NewUserAgentRoundTripper(opts.userAgent, rt) + } + // Return a new configured RoundTripper. return rt, nil } @@ -454,7 +575,7 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT return newRT(tlsConfig) } - return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, newRT) + return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, cfg.TLSConfig.CertFile, cfg.TLSConfig.KeyFile, newRT) } type authorizationCredentialsRoundTripper struct { @@ -498,7 +619,7 @@ func NewAuthorizationCredentialsFileRoundTripper(authType, authCredentialsFile s func (rt *authorizationCredentialsFileRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { if len(req.Header.Get("Authorization")) == 0 { - b, err := ioutil.ReadFile(rt.authCredentialsFile) + b, err := os.ReadFile(rt.authCredentialsFile) if err != nil { return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", rt.authCredentialsFile, err) } @@ -536,7 +657,7 @@ func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } req = cloneRequest(req) if rt.passwordFile != "" { - bs, err := ioutil.ReadFile(rt.passwordFile) + bs, err := os.ReadFile(rt.passwordFile) if err != nil { return nil, fmt.Errorf("unable to read basic auth password file %s: %s", rt.passwordFile, err) } @@ -559,12 +680,15 @@ type oauth2RoundTripper struct { next http.RoundTripper secret string mtx sync.RWMutex + opts *httpClientOptions + client *http.Client } -func NewOAuth2RoundTripper(config *OAuth2, next http.RoundTripper) http.RoundTripper { +func NewOAuth2RoundTripper(config *OAuth2, next http.RoundTripper, opts *httpClientOptions) http.RoundTripper { return &oauth2RoundTripper{ config: config, next: next, + opts: opts, } } @@ -575,7 +699,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro ) if rt.config.ClientSecretFile != "" { - data, err := ioutil.ReadFile(rt.config.ClientSecretFile) + data, err := os.ReadFile(rt.config.ClientSecretFile) if err != nil { return nil, fmt.Errorf("unable to read oauth2 client 
secret file %s: %s", rt.config.ClientSecretFile, err) } @@ -603,19 +727,35 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro return nil, err } + tlsTransport := func(tlsConfig *tls.Config) (http.RoundTripper, error) { + return &http.Transport{ + TLSClientConfig: tlsConfig, + Proxy: http.ProxyURL(rt.config.ProxyURL.URL), + DisableKeepAlives: !rt.opts.keepAlivesEnabled, + MaxIdleConns: 20, + MaxIdleConnsPerHost: 1, // see https://github.com/golang/go/issues/13801 + IdleConnTimeout: 10 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, nil + } + var t http.RoundTripper if len(rt.config.TLSConfig.CAFile) == 0 { - t = &http.Transport{TLSClientConfig: tlsConfig} + t, _ = tlsTransport(tlsConfig) } else { - t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.CAFile, func(tls *tls.Config) (http.RoundTripper, error) { - return &http.Transport{TLSClientConfig: tls}, nil - }) + t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.CAFile, rt.config.TLSConfig.CertFile, rt.config.TLSConfig.KeyFile, tlsTransport) if err != nil { return nil, err } } - ctx := context.WithValue(context.Background(), oauth2.HTTPClient, &http.Client{Transport: t}) + if ua := req.UserAgent(); ua != "" { + t = NewUserAgentRoundTripper(ua, t) + } + + client := &http.Client{Transport: t} + ctx := context.WithValue(context.Background(), oauth2.HTTPClient, client) tokenSource := config.TokenSource(ctx) rt.mtx.Lock() @@ -624,6 +764,10 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro Base: rt.next, Source: tokenSource, } + if rt.client != nil { + rt.client.CloseIdleConnections() + } + rt.client = client rt.mtx.Unlock() } @@ -634,7 +778,9 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro } func (rt *oauth2RoundTripper) CloseIdleConnections() { - // OAuth2 RT does not support CloseIdleConnections() but the next RT might. + if rt.client != nil { + rt.client.CloseIdleConnections() + } if ci, ok := rt.next.(closeIdler); ok { ci.CloseIdleConnections() } @@ -665,7 +811,17 @@ func cloneRequest(r *http.Request) *http.Request { // NewTLSConfig creates a new tls.Config from the given TLSConfig. func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { - tlsConfig := &tls.Config{InsecureSkipVerify: cfg.InsecureSkipVerify} + tlsConfig := &tls.Config{ + InsecureSkipVerify: cfg.InsecureSkipVerify, + MinVersion: uint16(cfg.MinVersion), + MaxVersion: uint16(cfg.MaxVersion), + } + + if cfg.MaxVersion != 0 && cfg.MinVersion != 0 { + if cfg.MaxVersion < cfg.MinVersion { + return nil, fmt.Errorf("tls_config.max_version must be greater than or equal to tls_config.min_version if both are specified") + } + } // If a CA cert is provided then let's read it in so we can validate the // scrape target's certificate properly. @@ -710,6 +866,10 @@ type TLSConfig struct { ServerName string `yaml:"server_name,omitempty" json:"server_name,omitempty"` // Disable target certificate validation. InsecureSkipVerify bool `yaml:"insecure_skip_verify" json:"insecure_skip_verify"` + // Minimum TLS version. + MinVersion TLSVersion `yaml:"min_version,omitempty" json:"min_version,omitempty"` + // Maximum TLS version. + MaxVersion TLSVersion `yaml:"max_version,omitempty" json:"max_version,omitempty"` } // SetDirectory joins any relative file paths with dir. 
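For illustration only (not part of the vendored diff): the hunks above add `min_version`/`max_version` to `TLSConfig` and gate HTTP/2 on `cfg.EnableHTTP2` rather than the removed environment variable. The sketch below shows how a caller might exercise those fields through github.com/prometheus/common/config; the field and function names are taken from the diff or the package, while the program structure and chosen versions are assumptions.

package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"github.com/prometheus/common/config"
)

func main() {
	cfg := config.HTTPClientConfig{
		// EnableHTTP2 now controls whether http2.ConfigureTransports is applied
		// in NewRoundTripperFromConfig.
		EnableHTTP2: true,
		TLSConfig: config.TLSConfig{
			// TLSVersion converts to uint16, so the crypto/tls constants can be reused.
			MinVersion: config.TLSVersion(tls.VersionTLS12),
			MaxVersion: config.TLSVersion(tls.VersionTLS13),
		},
	}
	// NewClientFromConfig builds on NewRoundTripperFromConfig; a max_version lower
	// than min_version would be rejected by the validation added in NewTLSConfig.
	client, err := config.NewClientFromConfig(cfg, "example")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("transport: %T\n", client.Transport)
}
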
@@ -728,18 +888,39 @@ func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return unmarshal((*plain)(c)) } +// readCertAndKey reads the cert and key files from the disk. +func readCertAndKey(certFile, keyFile string) ([]byte, []byte, error) { + certData, err := os.ReadFile(certFile) + if err != nil { + return nil, nil, err + } + + keyData, err := os.ReadFile(keyFile) + if err != nil { + return nil, nil, err + } + + return certData, keyData, nil +} + // getClientCertificate reads the pair of client cert and key from disk and returns a tls.Certificate. -func (c *TLSConfig) getClientCertificate(*tls.CertificateRequestInfo) (*tls.Certificate, error) { - cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile) +func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) { + certData, keyData, err := readCertAndKey(c.CertFile, c.KeyFile) + if err != nil { + return nil, fmt.Errorf("unable to read specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err) + } + + cert, err := tls.X509KeyPair(certData, keyData) if err != nil { return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err) } + return &cert, nil } // readCAFile reads the CA cert file from disk. func readCAFile(f string) ([]byte, error) { - data, err := ioutil.ReadFile(f) + data, err := os.ReadFile(f) if err != nil { return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err) } @@ -759,23 +940,30 @@ func updateRootCA(cfg *tls.Config, b []byte) bool { // tlsRoundTripper is a RoundTripper that updates automatically its TLS // configuration whenever the content of the CA file changes. type tlsRoundTripper struct { - caFile string + caFile string + certFile string + keyFile string + // newRT returns a new RoundTripper. newRT func(*tls.Config) (http.RoundTripper, error) - mtx sync.RWMutex - rt http.RoundTripper - hashCAFile []byte - tlsConfig *tls.Config + mtx sync.RWMutex + rt http.RoundTripper + hashCAFile []byte + hashCertFile []byte + hashKeyFile []byte + tlsConfig *tls.Config } func NewTLSRoundTripper( cfg *tls.Config, - caFile string, + caFile, certFile, keyFile string, newRT func(*tls.Config) (http.RoundTripper, error), ) (http.RoundTripper, error) { t := &tlsRoundTripper{ caFile: caFile, + certFile: certFile, + keyFile: keyFile, newRT: newRT, tlsConfig: cfg, } @@ -785,7 +973,7 @@ func NewTLSRoundTripper( return nil, err } t.rt = rt - _, t.hashCAFile, err = t.getCAWithHash() + _, t.hashCAFile, t.hashCertFile, t.hashKeyFile, err = t.getTLSFilesWithHash() if err != nil { return nil, err } @@ -793,25 +981,36 @@ func NewTLSRoundTripper( return t, nil } -func (t *tlsRoundTripper) getCAWithHash() ([]byte, []byte, error) { - b, err := readCAFile(t.caFile) +func (t *tlsRoundTripper) getTLSFilesWithHash() ([]byte, []byte, []byte, []byte, error) { + b1, err := readCAFile(t.caFile) if err != nil { - return nil, nil, err + return nil, nil, nil, nil, err + } + h1 := sha256.Sum256(b1) + + var h2, h3 [32]byte + if t.certFile != "" { + b2, b3, err := readCertAndKey(t.certFile, t.keyFile) + if err != nil { + return nil, nil, nil, nil, err + } + h2, h3 = sha256.Sum256(b2), sha256.Sum256(b3) } - h := sha256.Sum256(b) - return b, h[:], nil + return b1, h1[:], h2[:], h3[:], nil } // RoundTrip implements the http.RoundTrip interface. 
func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - b, h, err := t.getCAWithHash() + caData, caHash, certHash, keyHash, err := t.getTLSFilesWithHash() if err != nil { return nil, err } t.mtx.RLock() - equal := bytes.Equal(h[:], t.hashCAFile) + equal := bytes.Equal(caHash[:], t.hashCAFile) && + bytes.Equal(certHash[:], t.hashCertFile) && + bytes.Equal(keyHash[:], t.hashKeyFile) rt := t.rt t.mtx.RUnlock() if equal { @@ -820,8 +1019,10 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { } // Create a new RoundTripper. + // The cert and key files are read separately by the client + // using GetClientCertificate. tlsConfig := t.tlsConfig.Clone() - if !updateRootCA(tlsConfig, b) { + if !updateRootCA(tlsConfig, caData) { return nil, fmt.Errorf("unable to use specified CA cert %s", t.caFile) } rt, err = t.newRT(tlsConfig) @@ -832,7 +1033,9 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { t.mtx.Lock() t.rt = rt - t.hashCAFile = h[:] + t.hashCAFile = caHash[:] + t.hashCertFile = certHash[:] + t.hashKeyFile = keyHash[:] t.mtx.Unlock() return rt.RoundTrip(req) @@ -846,6 +1049,28 @@ func (t *tlsRoundTripper) CloseIdleConnections() { } } +type userAgentRoundTripper struct { + userAgent string + rt http.RoundTripper +} + +// NewUserAgentRoundTripper adds the user agent every request header. +func NewUserAgentRoundTripper(userAgent string, rt http.RoundTripper) http.RoundTripper { + return &userAgentRoundTripper{userAgent, rt} +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + req.Header.Set("User-Agent", rt.userAgent) + return rt.rt.RoundTrip(req) +} + +func (rt *userAgentRoundTripper) CloseIdleConnections() { + if ci, ok := rt.rt.(closeIdler); ok { + ci.CloseIdleConnections() + } +} + func (c HTTPClientConfig) String() string { b, err := yaml.Marshal(c) if err != nil { diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index dc2eedeefcac..dfac962a4e7e 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -12,6 +12,7 @@ // limitations under the License. // Build only when actually fuzzing +//go:build gofuzz // +build gofuzz package expfmt @@ -20,8 +21,8 @@ import "bytes" // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz // // Further input samples should go in the folder fuzz/corpus. func Fuzz(in []byte) int { diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 8a9313a3bee9..21cdddcf0541 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,7 +22,6 @@ import ( "strconv" "strings" - "github.com/golang/protobuf/ptypes" "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" @@ -47,20 +46,20 @@ import ( // missing features and peculiarities to avoid complications when switching from // Prometheus to OpenMetrics or vice versa: // -// - Counters are expected to have the `_total` suffix in their metric name. 
In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. // -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. // -// - The size of exemplar labels is not checked (i.e. it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). // -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { name := in.GetName() if name == "" { @@ -473,10 +472,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - ts, err := ptypes.Timestamp((*e).Timestamp) + err = (*e).Timestamp.CheckValid() if err != nil { return written, err } + ts := (*e).Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 5ba503b06547..2946b8f1a644 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -17,7 +17,6 @@ import ( "bufio" "fmt" "io" - "io/ioutil" "math" "strconv" "strings" @@ -44,7 +43,7 @@ const ( var ( bufPool = sync.Pool{ New: func() interface{} { - return bufio.NewWriter(ioutil.Discard) + return bufio.NewWriter(io.Discard) }, } numBufPool = sync.Pool{ diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 26e92288c7c0..a21b9d15dd89 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -11,18 +11,18 @@ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
+ Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - */ package goautoneg diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 7f67b16e4295..c909b8aa8c50 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -193,7 +193,7 @@ func ParseDuration(durationStr string) (Duration, error) { // Allow 0 without a unit. return 0, nil case "": - return 0, fmt.Errorf("empty duration string") + return 0, errors.New("empty duration string") } matches := durationRE.FindStringSubmatch(durationStr) if matches == nil { diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go index 3e2a7ee50e4b..b3da48a17d04 100644 --- a/vendor/github.com/prometheus/common/version/info.go +++ b/vendor/github.com/prometheus/common/version/info.go @@ -31,6 +31,8 @@ var ( BuildUser string BuildDate string GoVersion = runtime.Version() + GoOS = runtime.GOOS + GoArch = runtime.GOARCH ) // NewCollector returns a collector that exports metrics about current version @@ -41,14 +43,16 @@ func NewCollector(program string) prometheus.Collector { Namespace: program, Name: "build_info", Help: fmt.Sprintf( - "A metric with a constant '1' value labeled by version, revision, branch, and goversion from which %s was built.", + "A metric with a constant '1' value labeled by version, revision, branch, goversion from which %s was built, and the goos and goarch for the build.", program, ), ConstLabels: prometheus.Labels{ "version": Version, - "revision": Revision, + "revision": getRevision(), "branch": Branch, "goversion": GoVersion, + "goos": GoOS, + "goarch": GoArch, }, }, func() float64 { return 1 }, @@ -69,12 +73,12 @@ func Print(program string) string { m := map[string]string{ "program": program, "version": Version, - "revision": Revision, + "revision": getRevision(), "branch": Branch, "buildUser": BuildUser, "buildDate": BuildDate, "goVersion": GoVersion, - "platform": runtime.GOOS + "/" + runtime.GOARCH, + "platform": GoOS + "/" + GoArch, } t := template.Must(template.New("version").Parse(versionInfoTmpl)) @@ -87,10 +91,10 @@ func Print(program string) string { // Info returns version, branch and revision information. 
func Info() string { - return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, Revision) + return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, getRevision()) } -// BuildContext returns goVersion, buildUser and buildDate information. +// BuildContext returns goVersion, platform, buildUser and buildDate information. func BuildContext() string { - return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate) + return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate) } diff --git a/vendor/github.com/prometheus/common/version/info_default.go b/vendor/github.com/prometheus/common/version/info_default.go new file mode 100644 index 000000000000..2ab0be009c26 --- /dev/null +++ b/vendor/github.com/prometheus/common/version/info_default.go @@ -0,0 +1,21 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.18 +// +build !go1.18 + +package version + +func getRevision() string { + return Revision +} diff --git a/vendor/github.com/prometheus/common/version/info_go118.go b/vendor/github.com/prometheus/common/version/info_go118.go new file mode 100644 index 000000000000..bed0f4994297 --- /dev/null +++ b/vendor/github.com/prometheus/common/version/info_go118.go @@ -0,0 +1,58 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
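Again outside the vendored patch: the version-package hunks above add goos/goarch labels and, on Go 1.18+, derive the revision from the embedded build info when `Revision` is unset. A minimal, assumed usage sketch (the registry wiring and program name are illustrative; `version.NewCollector`, `Info` and `BuildContext` come from the diff):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/version"
)

func main() {
	// Exposes myapp_build_info{version,revision,branch,goversion,goos,goarch} = 1.
	prometheus.MustRegister(version.NewCollector("myapp"))

	// Human-readable variants; the revision falls back to vcs.revision from
	// runtime/debug.ReadBuildInfo when version.Revision is empty (Go 1.18+).
	fmt.Println(version.Info())
	fmt.Println(version.BuildContext())
}
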
+ +//go:build go1.18 +// +build go1.18 + +package version + +import "runtime/debug" + +var computedRevision string + +func getRevision() string { + if Revision != "" { + return Revision + } + return computedRevision +} + +func init() { + computedRevision = computeRevision() +} + +func computeRevision() string { + var ( + rev = "unknown" + modified bool + ) + + buildInfo, ok := debug.ReadBuildInfo() + if !ok { + return rev + } + for _, v := range buildInfo.Settings { + if v.Key == "vcs.revision" { + rev = v.Value + } + if v.Key == "vcs.modified" { + if v.Value == "true" { + modified = true + } + } + } + if modified { + return rev + "-modified" + } + return rev +} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore index 25e3659ab25a..7cc33ae4a704 100644 --- a/vendor/github.com/prometheus/procfs/.gitignore +++ b/vendor/github.com/prometheus/procfs/.gitignore @@ -1 +1,2 @@ -/fixtures/ +/testdata/fixtures/ +/fixtures diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 0aa09edacb38..a197699a1ee9 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,4 +1,12 @@ --- linters: enable: - - golint + - godot + - revive + +linter-settings: + godot: + capital: true + exclude: + # Ignore "See: URL" + - 'See:' diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md index 9a1aff412704..d325872bdfad 100644 --- a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md +++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md @@ -1,3 +1,3 @@ -## Prometheus Community Code of Conduct +# Prometheus Community Code of Conduct -Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md index 943de7615eec..853eb9d49b8b 100644 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -97,7 +97,7 @@ Many of the files are changing continuously and the data being read can in some reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the full file in a single operation using an internal utility function called `util.ReadFileNoStat`. -This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of +This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of the file. Note that parsing the file's contents can still be performed one line at a time. This is done by first reading @@ -113,7 +113,7 @@ the full file, and then using a scanner on the `[]byte` or `string` containing t ``` The `/sys` filesystem contains many very small files which contain only a single numeric or text value. 
These files -can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does +can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does not bother to check the size of the file before reading. ``` data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index fa2bd5b5288f..7edfe4d09325 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -14,18 +14,18 @@ include Makefile.common %/.unpacked: %.ttar - @echo ">> extracting fixtures" + @echo ">> extracting fixtures $*" ./ttar -C $(dir $*) -x -f $*.ttar touch $@ -fixtures: fixtures/.unpacked +fixtures: testdata/fixtures/.unpacked update_fixtures: - rm -vf fixtures/.unpacked - ./ttar -c -f fixtures.ttar fixtures/ + rm -vf testdata/fixtures/.unpacked + ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/ .PHONY: build build: .PHONY: test -test: fixtures/.unpacked common-test +test: testdata/fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index a1b1ca40f4b0..6c8e3e219797 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -36,29 +36,6 @@ GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') -GOVENDOR := -GO111MODULE := -ifeq (, $(PRE_GO_111)) - ifneq (,$(wildcard go.mod)) - # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). - GO111MODULE := on - - ifneq (,$(wildcard vendor)) - # Always use the local vendor/ directory to satisfy the dependencies. - GOOPTS := $(GOOPTS) -mod=vendor - endif - endif -else - ifneq (,$(wildcard go.mod)) - ifneq (,$(wildcard vendor)) -$(warning This repository requires Go >= 1.11 because of Go modules) -$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') - endif - else - # This repository isn't using Go modules (yet). - GOVENDOR := $(FIRST_GOPATH)/bin/govendor - endif -endif PROMU := $(FIRST_GOPATH)/bin/promu pkgs = ./... @@ -78,17 +55,23 @@ ifneq ($(shell which gotestsum),) endif endif -PROMU_VERSION ?= 0.12.0 +PROMU_VERSION ?= 0.13.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.39.0 +GOLANGCI_LINT_VERSION ?= v1.45.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + # If we're in CI and there is an Actions file, that means the linter + # is being run in Actions, so we don't need to run it here. 
+ ifeq (,$(CIRCLE_JOB)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif endif endif @@ -144,32 +127,25 @@ common-check_license: .PHONY: common-deps common-deps: @echo ">> getting dependencies" -ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(GO) mod download -else - $(GO) get $(GOOPTS) -t ./... -endif + $(GO) mod download .PHONY: update-go-deps update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get $$m; \ + $(GO) get -d $$m; \ done - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifneq (,$(wildcard vendor)) - GO111MODULE=$(GO111MODULE) $(GO) mod vendor -endif + $(GO) mod tidy .PHONY: common-test-short common-test-short: $(GOTEST_DIR) @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) + $(GOTEST) -short $(GOOPTS) $(pkgs) .PHONY: common-test common-test: $(GOTEST_DIR) @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) $(GOTEST_DIR): @mkdir -p $@ @@ -177,25 +153,21 @@ $(GOTEST_DIR): .PHONY: common-format common-format: @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) + $(GO) fmt $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" - GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + $(GO) vet $(GOOPTS) $(pkgs) .PHONY: common-lint common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" -ifdef GO111MODULE # 'go list' needs to be executed before staticcheck to prepopulate the modules cache. # Otherwise staticcheck might fail randomly for some reason not yet explained. - GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null - GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) -else - $(GOLANGCI_LINT) run $(pkgs) -endif + $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null + $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) endif .PHONY: common-yamllint @@ -212,28 +184,15 @@ endif common-staticcheck: lint .PHONY: common-unused -common-unused: $(GOVENDOR) -ifdef GOVENDOR - @echo ">> running check for unused packages" - @$(GOVENDOR) list +unused | grep . 
&& exit 1 || echo 'No unused packages' -else -ifdef GO111MODULE +common-unused: @echo ">> running check for unused/missing packages in go.mod" - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifeq (,$(wildcard vendor)) + $(GO) mod tidy @git diff --exit-code -- go.sum go.mod -else - @echo ">> running check for unused packages in vendor/" - GO111MODULE=$(GO111MODULE) $(GO) mod vendor - @git diff --exit-code -- go.sum go.mod vendor/ -endif -endif -endif .PHONY: common-build common-build: promu @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) .PHONY: common-tarball common-tarball: promu @@ -289,12 +248,6 @@ $(GOLANGCI_LINT): | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif -ifdef GOVENDOR -.PHONY: $(GOVENDOR) -$(GOVENDOR): - GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor -endif - .PHONY: precheck precheck:: diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md index 67741f015af1..fed02d85c79e 100644 --- a/vendor/github.com/prometheus/procfs/SECURITY.md +++ b/vendor/github.com/prometheus/procfs/SECURITY.md @@ -3,4 +3,4 @@ The Prometheus security policy, including how to report vulnerabilities, can be found here: -https://prometheus.io/docs/operating/security/ + diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 4e47e6172096..68f36e888f91 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -15,11 +15,28 @@ package procfs import ( "fmt" - "io/ioutil" "net" + "os" + "strconv" "strings" ) +// Learned from include/uapi/linux/if_arp.h. +const ( + // completed entry (ha valid). + ATFComplete = 0x02 + // permanent entry. + ATFPermanent = 0x04 + // Publish entry. + ATFPublish = 0x08 + // Has requested trailers. + ATFUseTrailers = 0x10 + // Obsoleted: Want to use a netmask (only for proxy entries). + ATFNetmask = 0x20 + // Don't answer this addresses. + ATFDontPublish = 0x40 +) + // ARPEntry contains a single row of the columnar data represented in // /proc/net/arp. type ARPEntry struct { @@ -29,12 +46,14 @@ type ARPEntry struct { HWAddr net.HardwareAddr // Name of the device Device string + // Flags + Flags byte } // GatherARPEntries retrieves all the ARP entries, parse the relevant columns, // and then return a slice of ARPEntry's. func (fs FS) GatherARPEntries() ([]ARPEntry, error) { - data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) + data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) } @@ -72,14 +91,26 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } func parseARPEntry(columns []string) (ARPEntry, error) { + entry := ARPEntry{Device: columns[5]} ip := net.ParseIP(columns[0]) - mac := net.HardwareAddr(columns[3]) + entry.IPAddr = ip + + if mac, err := net.ParseMAC(columns[3]); err == nil { + entry.HWAddr = mac + } else { + return ARPEntry{}, err + } - entry := ARPEntry{ - IPAddr: ip, - HWAddr: mac, - Device: columns[5], + if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil { + entry.Flags = byte(flags) + } else { + return ARPEntry{}, err } return entry, nil } + +// IsComplete returns true if ARP entry is marked with complete flag. 
+func (entry *ARPEntry) IsComplete() bool { + return entry.Flags&ATFComplete != 0 +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 5623b24a161f..ff6b927da159 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux // +build linux package procfs @@ -27,7 +28,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// CPUInfo contains general information about a system CPU found in /proc/cpuinfo +// CPUInfo contains general information about a system CPU found in /proc/cpuinfo. type CPUInfo struct { Processor uint VendorID string @@ -469,7 +470,7 @@ func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode } // firstNonEmptyLine advances the scanner to the first non-empty line -// and returns the contents of that line +// and returns the contents of that line. func firstNonEmptyLine(scanner *bufio.Scanner) string { for scanner.Scan() { line := scanner.Text() diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go index 44b590ed38fa..64cfd534c1f9 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (arm || arm64) // +build linux // +build arm arm64 diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go index 91e272573a51..c11207f3ab61 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (mips || mipsle || mips64 || mips64le) // +build linux // +build mips mipsle mips64 mips64le diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go index 95b5b4ec44a5..ea41bf2ca1e2 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux -// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x +//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x +// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go index 6068bd571c24..003bc2ad4a33 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build linux && (ppc64 || ppc64le) // +build linux // +build ppc64 ppc64le diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go index e83c2e207c18..1c9b7313b6cb 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (riscv || riscv64) // +build linux // +build riscv riscv64 diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go index 26814eebaaf3..fa3686bc0048 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux // +build linux package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go index d5bedf97f31c..a0ef55562ebb 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (386 || amd64) // +build linux // +build 386 amd64 diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar deleted file mode 100644 index 5e7eeef4a531..000000000000 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ /dev/null @@ -1,7673 +0,0 @@ -# Archive created by ttar -c -f fixtures.ttar fixtures/ -Directory: fixtures -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cmdline -Lines: 1 -vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/comm -Lines: 1 -vim -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cwd -SymlinkTo: /usr/bin -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/environ -Lines: 1 -PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/exe -SymlinkTo: /usr/bin/vim -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/10 
-SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fdinfo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/0 -Lines: 6 -pos: 0 -flags: 02004000 -mnt_id: 13 -inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000 -inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a -inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/1 -Lines: 4 -pos: 0 -flags: 02004002 -mnt_id: 13 -eventfd-count: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/10 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/2 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/3 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/io -Lines: 7 -rchar: 750339 -wchar: 818609 -syscr: 7405 -syscw: 5245 -read_bytes: 1024 -write_bytes: 2048 -cancelled_write_bytes: -1024 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 62898 62898 processes -Max open files 2048 4096 files -Max locked memory 18446744073708503040 18446744073708503040 bytes -Max address space 8589934592 unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 62898 62898 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/mountstats -Lines: 20 -device rootfs mounted on / with fstype rootfs -device sysfs mounted on /sys with fstype sysfs -device proc mounted on /proc with fstype proc -device /dev/sda1 mounted on / with fstype ext4 -device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none - age: 13968 - caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 - nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured - sec: 
flavor=1,pseudoflavor=1 - events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 - bytes: 1207640230 0 0 0 1210214218 0 295483 0 - RPC iostats version: 1.0 p/v: 100003/4 (nfs) - xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 - per-op statistics - NULL: 0 0 0 0 0 0 0 0 - READ: 1298 1298 0 207680 1210292152 6 79386 79407 - WRITE: 0 0 0 0 0 0 0 0 - ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/net/dev -Lines: 4 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed - lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/ns -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/mnt -SymlinkTo: mnt:[4026531840] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/net -SymlinkTo: net:[4026531993] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/root -SymlinkTo: / -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/schedstat -Lines: 1 -411605849 93680043 79 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/smaps -Lines: 252 -00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager -Size: 8900 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 2952 kB -Pss: 2952 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 2952 kB -Private_Dirty: 0 kB -Referenced: 2864 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex mr mw me dw sd -00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager -Size: 10236 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 6152 kB -Pss: 6152 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 6152 kB -Private_Dirty: 0 kB -Referenced: 5308 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd mr mw me dw sd -016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager -Size: 424 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 176 kB -Pss: 176 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 84 kB -Private_Dirty: 92 kB -Referenced: 176 kB -Anonymous: 92 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 12 kB -SwapPss: 12 kB -Locked: 0 kB -VmFlags: rd wr mr mw me dw ac sd -0171a000-0173f000 rw-p 00000000 00:00 0 -Size: 148 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 76 kB -Pss: 76 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 76 kB -Referenced: 76 kB -Anonymous: 76 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac 
sd -c000000000-c000400000 rw-p 00000000 00:00 0 -Size: 4096 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 2564 kB -Pss: 2564 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 20 kB -Private_Dirty: 2544 kB -Referenced: 2544 kB -Anonymous: 2564 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 1100 kB -SwapPss: 1100 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -c000400000-c001600000 rw-p 00000000 00:00 0 -Size: 18432 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 16024 kB -Pss: 16024 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 5864 kB -Private_Dirty: 10160 kB -Referenced: 11944 kB -Anonymous: 16024 kB -LazyFree: 5848 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 440 kB -SwapPss: 440 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd nh -c001600000-c004000000 rw-p 00000000 00:00 0 -Size: 43008 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0 -Size: 38596 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 1992 kB -Pss: 1992 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 476 kB -Private_Dirty: 1516 kB -Referenced: 1828 kB -Anonymous: 1992 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 384 kB -SwapPss: 384 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack] -Size: 132 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 8 kB -Pss: 8 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 8 kB -Referenced: 8 kB -Anonymous: 8 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 4 kB -SwapPss: 4 kB -Locked: 0 kB -VmFlags: rd wr mr mw me gd ac -7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar] -Size: 12 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd mr pf io de dd sd -7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso] -Size: 8 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 4 kB -Pss: 0 kB -Shared_Clean: 4 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 4 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex mr mw me de sd -ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] -Size: 4 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/proc/26231/smaps_rollup -Lines: 17 -00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup] -Rss: 29948 kB -Pss: 29944 kB -Shared_Clean: 4 kB -Shared_Dirty: 0 kB -Private_Clean: 15548 kB -Private_Dirty: 14396 kB -Referenced: 24752 kB -Anonymous: 20756 kB -LazyFree: 5848 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 1940 kB -SwapPss: 1940 kB -Locked: 0 kB -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/stat -Lines: 1 -26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/status -Lines: 53 - -Name: prometheus -Umask: 0022 -State: S (sleeping) -Tgid: 26231 -Ngid: 0 -Pid: 26231 -PPid: 1 -TracerPid: 0 -Uid: 1000 1000 1000 0 -Gid: 1001 1001 1001 0 -FDSize: 128 -Groups: -NStgid: 1 -NSpid: 1 -NSpgid: 1 -NSsid: 1 -VmPeak: 58472 kB -VmSize: 58440 kB -VmLck: 0 kB -VmPin: 0 kB -VmHWM: 8028 kB -VmRSS: 6716 kB -RssAnon: 2092 kB -RssFile: 4624 kB -RssShmem: 0 kB -VmData: 2580 kB -VmStk: 136 kB -VmExe: 948 kB -VmLib: 6816 kB -VmPTE: 128 kB -VmPMD: 12 kB -VmSwap: 660 kB -HugetlbPages: 0 kB -Threads: 1 -SigQ: 8/63965 -SigPnd: 0000000000000000 -ShdPnd: 0000000000000000 -SigBlk: 7be3c0fe28014a03 -SigIgn: 0000000000001000 -SigCgt: 00000001800004ec -CapInh: 0000000000000000 -CapPrm: 0000003fffffffff -CapEff: 0000003fffffffff -CapBnd: 0000003fffffffff -CapAmb: 0000000000000000 -Seccomp: 0 -Cpus_allowed: ff -Cpus_allowed_list: 0-7 -Mems_allowed: 00000000,00000001 -Mems_allowed_list: 0 -voluntary_ctxt_switches: 4742839 -nonvoluntary_ctxt_switches: 1727500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/wchan -Lines: 1 -poll_schedule_timeoutEOF -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cmdline -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/comm -Lines: 1 -ata_sff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cwd -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/4 -SymlinkTo: 
../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/maps -Lines: 9 -55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994 /bin/cat -55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994 /bin/cat -55680bed6000-55680bef7000 rw-p 00000000 00:00 0 [heap] -7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624 /usr/lib/locale/locale-archive -7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062 /lib/x86_64-linux-gnu/libc-2.29.so -7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0 [stack] -7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0 [vvar] -7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0 [vdso] -ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/root -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/stat -Lines: 1 -33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/wchan -Lines: 1 -0EOF -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26233 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/cmdline -Lines: 1 -com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/schedstat -Lines: 8 - ____________________________________ -< this is a malformed schedstat file > - ------------------------------------ - \ ^__^ - \ (oo)\_______ - (__)\ )\/\ - ||----w | - || || -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26234 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Path: fixtures/proc/26234/maps -Lines: 4 -08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh -08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh -0808c000-08146000 rwxp 00000000 00:00 0 -40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/584 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/584/stat -Lines: 2 -1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 -#!/bin/cat /proc/self/stat -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/cmdline -Lines: 1 -BOOT_IMAGE=/vmlinuz-5.11.0-22-generic root=UUID=456a0345-450d-4f7b-b7c9-43e3241d99ad ro quiet splash vt.handoff=7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/cpuinfo -Lines: 216 -processor : 0 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 799.998 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 0 -cpu cores : 4 -apicid : 0 -initial apicid : 0 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 1 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.037 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 1 -cpu cores : 4 -apicid : 2 -initial apicid : 2 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 
sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 2 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.010 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 2 -cpu cores : 4 -apicid : 4 -initial apicid : 4 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 3 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.028 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 3 -cpu cores : 4 -apicid : 6 -initial apicid : 6 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 4 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 799.989 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 0 -cpu cores : 4 -apicid : 1 
-initial apicid : 1 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 5 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.083 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 1 -cpu cores : 4 -apicid : 3 -initial apicid : 3 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 6 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.017 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 2 -cpu cores : 4 -apicid : 5 -initial apicid : 5 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 
spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 7 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.030 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 3 -cpu cores : 4 -apicid : 7 -initial apicid : 7 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/crypto -Lines: 972 -name : ccm(aes) -driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni)) -module : ccm -priority : 300 -refcnt : 4 -selftest : passed -internal : no -type : aead -async : no -blocksize : 1 -ivsize : 16 -maxauthsize : 16 -geniv : - -name : cbcmac(aes) -driver : cbcmac(aes-aesni) -module : ccm -priority : 300 -refcnt : 7 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 16 - -name : ecdh -driver : ecdh-generic -module : ecdh_generic -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : kpp -async : yes - -name : ecb(arc4) -driver : ecb(arc4)-generic -module : arc4 -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 1 -max keysize : 256 -ivsize : 0 -chunksize : 1 -walksize : 1 - -name : arc4 -driver : arc4-generic -module : arc4 -priority : 0 -refcnt : 3 -selftest : passed -internal : no -type : cipher -blocksize : 1 -min keysize : 1 -max keysize : 256 - -name : crct10dif -driver : crct10dif-pclmul -module : crct10dif_pclmul -priority : 200 -refcnt : 2 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 2 - -name : crc32 -driver : crc32-pclmul -module : crc32_pclmul -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : __ghash -driver : cryptd(__ghash-pclmulqdqni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : ahash -async : yes -blocksize : 16 -digestsize : 16 - -name : ghash -driver : ghash-clmulni -module : ghash_clmulni_intel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : ahash -async : yes -blocksize : 16 -digestsize : 16 - -name : __ghash -driver : __ghash-pclmulqdqni -module : ghash_clmulni_intel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : shash -blocksize : 16 -digestsize : 16 - -name : crc32c 
-driver : crc32c-intel -module : crc32c_intel -priority : 200 -refcnt : 5 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : cbc(aes) -driver : cbc(aes-aesni) -module : kernel -priority : 300 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : ctr(aes) -driver : ctr(aes-aesni) -module : kernel -priority : 300 -refcnt : 5 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : pkcs1pad(rsa,sha256) -driver : pkcs1pad(rsa-generic,sha256) -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : akcipher - -name : __xts(aes) -driver : cryptd(__xts-aes-aesni) -module : kernel -priority : 451 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : xts(aes) -driver : xts-aes-aesni -module : kernel -priority : 401 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ctr(aes) -driver : cryptd(__ctr-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 1 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : ctr(aes) -driver : ctr-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __cbc(aes) -driver : cryptd(__cbc-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : cbc(aes) -driver : cbc-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ecb(aes) -driver : cryptd(__ecb-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : ecb(aes) -driver : ecb-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : __generic-gcm-aes-aesni -driver : cryptd(__driver-generic-gcm-aes-aesni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : yes -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : gcm(aes) -driver : generic-gcm-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : aead -async : yes -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : __generic-gcm-aes-aesni -driver : __driver-generic-gcm-aes-aesni -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : no -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : __gcm-aes-aesni -driver : 
cryptd(__driver-gcm-aes-aesni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : yes -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : rfc4106(gcm(aes)) -driver : rfc4106-gcm-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : aead -async : yes -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : __gcm-aes-aesni -driver : __driver-gcm-aes-aesni -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : no -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : __xts(aes) -driver : __xts-aes-aesni -module : kernel -priority : 401 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ctr(aes) -driver : __ctr-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __cbc(aes) -driver : __cbc-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ecb(aes) -driver : __ecb-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : __aes -driver : __aes-aesni -module : kernel -priority : 300 -refcnt : 1 -selftest : passed -internal : yes -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : aes -driver : aes-aesni -module : kernel -priority : 300 -refcnt : 8 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : hmac(sha1) -driver : hmac(sha1-generic) -module : kernel -priority : 100 -refcnt : 9 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 20 - -name : ghash -driver : ghash-generic -module : kernel -priority : 100 -refcnt : 3 -selftest : passed -internal : no -type : shash -blocksize : 16 -digestsize : 16 - -name : jitterentropy_rng -driver : jitterentropy_rng -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha256 -module : kernel -priority : 221 -refcnt : 2 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha512 -module : kernel -priority : 220 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha384 -module : kernel -priority : 219 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha1 -module : kernel -priority : 218 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha256 -module : kernel -priority : 217 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha512 -module : kernel -priority : 216 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha384 -module : kernel -priority : 215 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 
0 - -name : stdrng -driver : drbg_nopr_sha1 -module : kernel -priority : 214 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes256 -module : kernel -priority : 213 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes192 -module : kernel -priority : 212 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes128 -module : kernel -priority : 211 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : hmac(sha256) -driver : hmac(sha256-generic) -module : kernel -priority : 100 -refcnt : 10 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 32 - -name : stdrng -driver : drbg_pr_hmac_sha256 -module : kernel -priority : 210 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha512 -module : kernel -priority : 209 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha384 -module : kernel -priority : 208 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha1 -module : kernel -priority : 207 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha256 -module : kernel -priority : 206 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha512 -module : kernel -priority : 205 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha384 -module : kernel -priority : 204 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha1 -module : kernel -priority : 203 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes256 -module : kernel -priority : 202 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes192 -module : kernel -priority : 201 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes128 -module : kernel -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : 842 -driver : 842-scomp -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : 842 -driver : 842-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo-rle -driver : lzo-rle-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo-rle -driver : lzo-rle-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo -driver : lzo-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo -driver : lzo-generic -module : kernel -priority : 0 -refcnt : 9 -selftest : passed -internal : no -type : compression - -name : crct10dif -driver : crct10dif-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 2 - -name : crc32c -driver : crc32c-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 
-digestsize : 4 - -name : zlib-deflate -driver : zlib-deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : aes -driver : aes-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : sha224 -driver : sha224-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 28 - -name : sha256 -driver : sha256-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 32 - -name : sha1 -driver : sha1-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 20 - -name : md5 -driver : md5-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 16 - -name : ecb(cipher_null) -driver : ecb-cipher_null -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 0 -max keysize : 0 -ivsize : 0 -chunksize : 1 -walksize : 1 - -name : digest_null -driver : digest_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 0 - -name : compress_null -driver : compress_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : cipher_null -driver : cipher_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 1 -min keysize : 0 -max keysize : 0 - -name : rsa -driver : rsa-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : akcipher - -name : dh -driver : dh-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : kpp - -name : aes -driver : aes-asm -module : kernel -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -Mode: 444 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/diskstats -Lines: 52 - 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 - 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 - 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 - 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 - 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 - 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 - 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 - 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 - 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 - 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 - 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 - 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 - 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 - 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 - 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 - 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 - 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 - 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 - 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 - 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 - 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 - 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 - 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 - 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 - 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 - 8 1 
sda1 250 0 2000 36 0 0 0 0 0 36 36 - 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 - 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 - 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 - 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 - 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 - 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 - 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 - 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 - 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 - 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 - 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 - 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 - 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 - 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 - 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 - 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 - 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 - 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 - 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 - 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 - 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 - 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 - 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 - 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182 - 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0 - 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/fscache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/fscache/stats -Lines: 24 -FS-Cache statistics -Cookies: idx=3 dat=67877 spc=0 -Objects: alc=67473 nal=0 avl=67473 ded=388 -ChkAux : non=12 ok=33 upd=44 obs=55 -Pages : mrk=547164 unc=364577 -Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26 -Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85 -Invals : n=14 run=13 -Updates: n=7 nul=3 run=8 -Relinqs: n=394 nul=1 wcr=2 rtr=3 -AttrChg: n=6 ok=5 nbf=4 oom=3 run=2 -Allocs : n=20 ok=19 wt=18 nbf=17 int=16 -Allocs : ops=15 owt=14 abt=13 -Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43 -Retrvls: ops=151959 owt=42747 abt=44 -Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14 -Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43 -VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66 -Ops : pend=42753 run=221129 enq=628798 can=11 rej=88 -Ops : ini=377538 dfr=27 rel=377538 gc=37 -CacheOp: alo=1 luo=2 luc=3 gro=4 -CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10 -CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17 -CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/xfs/stat -Lines: 23 -extent_alloc 92447 97589 92448 93751 -abt 0 0 0 0 -blk_map 1767055 188820 184891 92447 92448 2140766 0 -bmbt 0 0 0 0 -dir 
185039 92447 92444 136422 -trans 706 944304 0 -ig 185045 58807 0 126238 0 33637 22 -log 2883 113448 9 17360 739 -push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 -xstrat 92447 0 -rw 107739 94045 -attr 4 0 0 0 -icluster 8677 7849 135802 -vnodes 92601 0 0 0 92444 92444 92444 0 -buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 -abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 -abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 -bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 -fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -qm 0 0 0 0 0 0 0 0 -xpc 399724544 92823103 86219234 -debug 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/loadavg -Lines: 1 -0.02 0.04 0.05 1/497 11947 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/mdstat -Lines: 60 -Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] - -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) - 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - -md127 : active raid1 sdi2[0] sdj2[1] - 312319552 blocks [2/2] [UU] - -md0 : active raid1 sdi1[0] sdj1[1] - 248896 blocks [2/2] [UU] - -md4 : inactive raid1 sda3[0](F) sdb3[1](S) - 4883648 blocks [2/2] [UU] - -md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0] - 195310144 blocks [2/1] [U_] - [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) - 195310144 blocks [2/2] [UU] - [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md201 : active raid1 sda3[0] sdb3[1] - 1993728 blocks super 1.2 [2/2] [UU] - [=>...................] 
check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec - -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) - 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] - bitmap: 0/30 pages [0KB], 65536KB chunk - -md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S) - 523968 blocks super 1.2 [4/4] [UUUU] - resync=DELAYED - -md10 : active raid0 sda1[0] sdb1[1] - 314159265 blocks 64k chunks - -md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S) - 4190208 blocks super 1.2 [2/2] [UU] - resync=PENDING - -md12 : active raid0 sdc2[0] sdd2[1] - 3886394368 blocks super 1.2 512k chunks - -md126 : active raid0 sdb[1] sdc[0] - 1855870976 blocks super external:/md127/0 128k chunks - -md219 : inactive sdb[2](S) sdc[1](S) sda[0](S) - 7932 blocks super external:imsm - -md00 : active raid0 xvdb[0] - 4186624 blocks super 1.2 256k chunks - -md120 : active linear sda1[1] sdb1[0] - 2095104 blocks super 1.2 0k rounding - -md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] - 322560 blocks super 1.2 512k chunks - -unused devices: -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/meminfo -Lines: 42 -MemTotal: 15666184 kB -MemFree: 440324 kB -Buffers: 1020128 kB -Cached: 12007640 kB -SwapCached: 0 kB -Active: 6761276 kB -Inactive: 6532708 kB -Active(anon): 267256 kB -Inactive(anon): 268 kB -Active(file): 6494020 kB -Inactive(file): 6532440 kB -Unevictable: 0 kB -Mlocked: 0 kB -SwapTotal: 0 kB -SwapFree: 0 kB -Dirty: 768 kB -Writeback: 0 kB -AnonPages: 266216 kB -Mapped: 44204 kB -Shmem: 1308 kB -Slab: 1807264 kB -SReclaimable: 1738124 kB -SUnreclaim: 69140 kB -KernelStack: 1616 kB -PageTables: 5288 kB -NFS_Unstable: 0 kB -Bounce: 0 kB -WritebackTmp: 0 kB -CommitLimit: 7833092 kB -Committed_AS: 530844 kB -VmallocTotal: 34359738367 kB -VmallocUsed: 36596 kB -VmallocChunk: 34359637840 kB -HardwareCorrupted: 0 kB -AnonHugePages: 12288 kB -HugePages_Total: 0 -HugePages_Free: 0 -HugePages_Rsvd: 0 -HugePages_Surp: 0 -Hugepagesize: 2048 kB -DirectMap4k: 91136 kB -DirectMap2M: 16039936 kB -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/arp -Lines: 2 -IP address HW type Flags HW address Mask Device -192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/dev -Lines: 6 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed -vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 -docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs -Lines: 21 -IP Virtual Server version 1.2.1 (size=4096) -Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn -TCP C0A80016:0CEA wlc - -> C0A85216:0CEA Tunnel 100 248 2 - -> C0A85318:0CEA Tunnel 100 248 2 - -> C0A85315:0CEA Tunnel 100 248 1 -TCP C0A80039:0CEA wlc - -> C0A85416:0CEA Tunnel 0 0 0 - -> C0A85215:0CEA Tunnel 100 1499 0 - -> C0A83215:0CEA Tunnel 100 1498 0 -TCP 
C0A80037:0CEA wlc - -> C0A8321A:0CEA Tunnel 0 0 0 - -> C0A83120:0CEA Tunnel 100 0 0 -TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh - -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 -FWM 10001000 wlc - -> C0A8321A:0CEA Route 0 0 1 - -> C0A83215:0CEA Route 0 0 2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs_stats -Lines: 6 - Total Incoming Outgoing Incoming Outgoing - Conns Packets Packets Bytes Bytes - 16AA370 E33656E5 0 51D8C8883AB3 0 - - Conns/s Pkts/s Pkts/s Bytes/s Bytes/s - 4 1FB3C 0 1282A8F 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/protocols -Lines: 14 -protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em -PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n -PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n -RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n -UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n -UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n -TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y -UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n -UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n -PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n -RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n -UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n -TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y -NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/rpc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfs -Lines: 5 -net 18628 0 18628 6 -rpc 4329785 0 4338291 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 -proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfsd -Lines: 11 -rc 0 6 18622 -fh 0 0 0 0 0 -io 157286400 0 -th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 -ra 32 0 0 0 0 0 0 0 0 0 0 0 -net 18628 0 18628 6 -rpc 18628 0 0 0 0 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 -proc4 2 2 10853 -proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat -Lines: 6 -sockets: used 1602 -TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22 -UDP: inuse 12 mem 62 -UDPLITE: inuse 0 -RAW: inuse 0 -FRAG: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat6 -Lines: 5 
-TCP6: inuse 17 -UDP6: inuse 9 -UDPLITE6: inuse 0 -RAW6: inuse 1 -FRAG6: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat -Lines: 2 -00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 -01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat.broken -Lines: 1 -00015c73 00020e76 F0000769 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/stat -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/stat/arp_cache -Lines: 3 -entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls -00000014 00000001 00000002 00000003 00000004 00000005 00000006 00000007 00000008 00000009 0000000a 0000000b 0000000c -00000014 0000000d 0000000e 0000000f 00000010 00000011 00000012 00000013 00000014 00000015 00000016 00000017 00000018 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/stat/ndisc_cache -Lines: 3 -entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls -00000024 000000f0 000000f1 000000f2 000000f3 000000f4 000000f5 000000f6 000000f7 000000f8 000000f9 000000fa 000000fb -00000024 000000fc 000000fd 000000fe 000000ff 00000100 00000101 00000102 00000103 00000104 00000105 00000106 00000107 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/tcp -Lines: 4 - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/tcp6 -Lines: 3 - sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops - 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 - 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp -Lines: 4 - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp6 -Lines: 3 - sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops - 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 - 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp_broken -Lines: 2 - sl local_address rem_address st - 1: 00000000:0016 00000000:0000 0A -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix -Lines: 6 -Num RefCount Protocol Flags Type St Inode Path -0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 5091797 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix_without_inode -Lines: 6 -Num RefCount Protocol Flags Type St Path -0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/xfrm_stat -Lines: 28 -XfrmInError 1 -XfrmInBufferError 2 -XfrmInHdrError 4 -XfrmInNoStates 3 -XfrmInStateProtoError 40 -XfrmInStateModeError 100 -XfrmInStateSeqError 6000 -XfrmInStateExpired 4 -XfrmInStateMismatch 23451 -XfrmInStateInvalid 55555 -XfrmInTmplMismatch 51 -XfrmInNoPols 65432 -XfrmInPolBlock 100 -XfrmInPolError 10000 -XfrmOutError 1000000 -XfrmOutBundleGenError 43321 -XfrmOutBundleCheckError 555 -XfrmOutNoStates 869 -XfrmOutStateProtoError 4542 -XfrmOutStateModeError 4 -XfrmOutStateSeqError 543 -XfrmOutStateExpired 565 -XfrmOutPolBlock 43456 -XfrmOutPolDead 7656 -XfrmOutPolError 1454 -XfrmFwdHdrError 6654 -XfrmOutStateInvalid 28765 -XfrmAcquireError 24532 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/pressure -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/cpu -Lines: 1 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/io -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/memory -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/schedstat -Lines: 6 
-version 15 -timestamp 15819019232 -cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306 -domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0 -cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945 -domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/self -SymlinkTo: 26231 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/slabinfo -Lines: 302 -slabinfo - version: 2.1 -# name : tunables : slabdata -pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0 -pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0 -nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0 -kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 -kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0 -kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0 -pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0 -x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0 -iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 -ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0 -bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 -bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0 -bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0 -fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0 -fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0 -squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 -fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 -fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0 -xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_ifork 0 0 376 21 2 : 
tunables 0 0 0 : slabdata 0 0 0 -xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 -nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 -nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 -nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0 -nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0 -nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 -nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 -rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 -jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 -jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0 -reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_prelim_ref 0 0 424 38 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 -btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_trans_handle 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0 -ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0 -ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 -ext4_allocation_context 0 70 464 35 4 : tunables 0 0 0 : slabdata 2 2 0 -ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0 -ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0 -ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0 -ext4_io_end 0 80 400 20 2 : tunables 0 0 0 : slabdata 4 4 0 -ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0 -ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0 -jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0 -jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0 -jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 -jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0 -jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0 -jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 -ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0 -mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 
0 -dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0 -dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 -dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 -dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0 -kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0 -io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 -dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0 -dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 -aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 -qla2xxx_srbs 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 -sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0 -scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0 -virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 -L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 -ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 -fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 -ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0 -RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0 -UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0 -UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0 -tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0 -TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0 -uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 -sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0 -sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 -sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0 -sgpool-16 2 68 960 34 8 : tunables 0 0 0 : slabdata 2 2 0 -sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 -btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0 -bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0 -mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0 -isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0 -io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 -kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0 -aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 -fanotify_fid_event 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0 -dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 -bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0 -posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0 -iommu_devinfo 24 32 512 32 4 : 
tunables 0 0 0 : slabdata 1 1 0 -iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0 -iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0 -UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0 -ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0 -ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 -inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 -xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 -ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0 -ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0 -ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0 -PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 -RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0 -UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0 -tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0 -request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0 -TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0 -hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0 -dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 -bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 -eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0 -eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0 -inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0 -scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 -bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0 -request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0 -blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0 -bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0 -biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0 -biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0 -biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0 -biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 -bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0 -ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0 -uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-16 1 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0 -audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0 -sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0 -skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0 -skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0 -configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0 -file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0 -file_lock_ctx 106 120 392 20 2 : tunables 0 0 0 : slabdata 6 6 0 -fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0 -net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0 -task_delay_info 
784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0 -taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0 -proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0 -pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0 -proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0 -seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0 -sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 -bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 -shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0 -kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0 -kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0 -mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 -filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0 -inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0 -dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0 -names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0 -hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 -ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 -iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 -lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0 -lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0 -key_jar 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0 -uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 -nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 -vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0 -mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0 -fs_cache 102 756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0 -files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0 -signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0 -sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0 -task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0 -cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0 -anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0 -anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0 -pid 408 992 512 32 4 : tunables 0 0 0 : slabdata 31 31 0 -Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0 -Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0 -Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 -Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0 -Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0 -trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0 -ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0 -pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0 -radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0 -task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 -vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0 -dma-kmalloc-8k 0 0 24576 
1 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 -kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0 -kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0 -kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0 -kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0 -kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0 -kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0 -kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0 -kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0 -kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0 -kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 47 47 0 -kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : slabdata 70 70 0 -kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0 -kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0 -kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0 -kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0 -kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0 -kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/stat -Lines: 16 -cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 -cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 -cpu1 47869 23 16474 1110787 591 0 46 0 0 0 -cpu2 46504 36 15916 1112321 441 0 326 0 0 0 -cpu3 47054 102 15683 1113230 533 0 60 0 0 0 -cpu4 28413 25 10776 1140321 217 0 8 0 0 0 -cpu5 29271 101 11586 1136270 672 0 30 0 0 0 -cpu6 29152 36 10276 1139721 319 0 29 0 0 0 -cpu7 29098 268 10164 1139282 555 0 31 0 0 0 -intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ctxt 38014093 -btime 1418183276 -processes 26442 -procs_running 2 -procs_blocked 1 -softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/swaps -Lines: 2 -Filename Type Size Used Priority -/dev/dm-2 partition 131068 176 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/symlinktargets -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/README -Lines: 2 -This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
-They are otherwise ignored by the tests -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/abc -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/def -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/ghi -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/uvw -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/xyz -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys/kernel -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys/kernel/random -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/entropy_avail -Lines: 1 -3943 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/poolsize -Lines: 1 -4096 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs -Lines: 1 -60 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold -Lines: 1 -3072 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys/vm -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/admin_reserve_kbytes -Lines: 1 -8192 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/block_dump -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/compact_unevictable_allowed -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_background_bytes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_background_ratio -Lines: 1 -10 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_bytes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_expire_centisecs -Lines: 1 -3000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_ratio -Lines: 1 -20 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_writeback_centisecs -Lines: 1 -500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirtytime_expire_seconds -Lines: 1 -43200 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/proc/sys/vm/drop_caches -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/extfrag_threshold -Lines: 1 -500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/hugetlb_shm_group -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/laptop_mode -Lines: 1 -5 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/legacy_va_layout -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/lowmem_reserve_ratio -Lines: 1 -256 256 32 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/max_map_count -Lines: 1 -65530 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/memory_failure_early_kill -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/memory_failure_recovery -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/min_free_kbytes -Lines: 1 -67584 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/min_slab_ratio -Lines: 1 -5 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/min_unmapped_ratio -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/mmap_min_addr -Lines: 1 -65536 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/nr_hugepages -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/nr_hugepages_mempolicy -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/nr_overcommit_hugepages -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/numa_stat -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/numa_zonelist_order -Lines: 1 -Node -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/oom_dump_tasks -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/oom_kill_allocating_task -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/overcommit_kbytes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/overcommit_memory -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/overcommit_ratio -Lines: 1 -50 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/page-cluster -Lines: 1 -3 -Mode: 644 -# ttar - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/panic_on_oom -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/percpu_pagelist_fraction -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/stat_interval -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/swappiness -Lines: 1 -60 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/user_reserve_kbytes -Lines: 1 -131072 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/vfs_cache_pressure -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/watermark_boost_factor -Lines: 1 -15000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/watermark_scale_factor -Lines: 1 -10 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/zone_reclaim_mode -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/zoneinfo -Lines: 262 -Node 0, zone DMA - per-node stats - nr_inactive_anon 230981 - nr_active_anon 547580 - nr_inactive_file 316904 - nr_active_file 346282 - nr_unevictable 115467 - nr_slab_reclaimable 131220 - nr_slab_unreclaimable 47320 - nr_isolated_anon 0 - nr_isolated_file 0 - workingset_nodes 11627 - workingset_refault 466886 - workingset_activate 276925 - workingset_restore 84055 - workingset_nodereclaim 487 - nr_anon_pages 795576 - nr_mapped 215483 - nr_file_pages 761874 - nr_dirty 908 - nr_writeback 0 - nr_writeback_temp 0 - nr_shmem 224925 - nr_shmem_hugepages 0 - nr_shmem_pmdmapped 0 - nr_anon_transparent_hugepages 0 - nr_unstable 0 - nr_vmscan_write 12950 - nr_vmscan_immediate_reclaim 3033 - nr_dirtied 8007423 - nr_written 7752121 - nr_kernel_misc_reclaimable 0 - pages free 3952 - min 33 - low 41 - high 49 - spanned 4095 - present 3975 - managed 3956 - protection: (0, 2877, 7826, 7826, 7826) - nr_free_pages 3952 - nr_zone_inactive_anon 0 - nr_zone_active_anon 0 - nr_zone_inactive_file 0 - nr_zone_active_file 0 - nr_zone_unevictable 0 - nr_zone_write_pending 0 - nr_mlock 0 - nr_page_table_pages 0 - nr_kernel_stack 0 - nr_bounce 0 - nr_zspages 0 - nr_free_cma 0 - numa_hit 1 - numa_miss 0 - numa_foreign 0 - numa_interleave 0 - numa_local 1 - numa_other 0 - pagesets - cpu: 0 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 1 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 2 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 3 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 4 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 5 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 6 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 7 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - node_unreclaimable: 0 - start_pfn: 1 -Node 0, zone DMA32 - pages free 204252 - min 19510 - low 21059 - high 22608 - spanned 1044480 - present 759231 - managed 742806 - protection: (0, 0, 4949, 4949, 4949) - nr_free_pages 204252 - nr_zone_inactive_anon 118558 - nr_zone_active_anon 106598 - 
nr_zone_inactive_file 75475 - nr_zone_active_file 70293 - nr_zone_unevictable 66195 - nr_zone_write_pending 64 - nr_mlock 4 - nr_page_table_pages 1756 - nr_kernel_stack 2208 - nr_bounce 0 - nr_zspages 0 - nr_free_cma 0 - numa_hit 113952967 - numa_miss 0 - numa_foreign 0 - numa_interleave 0 - numa_local 113952967 - numa_other 0 - pagesets - cpu: 0 - count: 345 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 1 - count: 356 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 2 - count: 325 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 3 - count: 346 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 4 - count: 321 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 5 - count: 316 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 6 - count: 373 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 7 - count: 339 - high: 378 - batch: 63 - vm stats threshold: 48 - node_unreclaimable: 0 - start_pfn: 4096 -Node 0, zone Normal - pages free 18553 - min 11176 - low 13842 - high 16508 - spanned 1308160 - present 1308160 - managed 1268711 - protection: (0, 0, 0, 0, 0) - nr_free_pages 18553 - nr_zone_inactive_anon 112423 - nr_zone_active_anon 440982 - nr_zone_inactive_file 241429 - nr_zone_active_file 275989 - nr_zone_unevictable 49272 - nr_zone_write_pending 844 - nr_mlock 154 - nr_page_table_pages 9750 - nr_kernel_stack 15136 - nr_bounce 0 - nr_zspages 0 - nr_free_cma 0 - numa_hit 162718019 - numa_miss 0 - numa_foreign 0 - numa_interleave 26812 - numa_local 162718019 - numa_other 0 - pagesets - cpu: 0 - count: 316 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 1 - count: 366 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 2 - count: 60 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 3 - count: 256 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 4 - count: 253 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 5 - count: 159 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 6 - count: 311 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 7 - count: 264 - high: 378 - batch: 63 - vm stats threshold: 56 - node_unreclaimable: 0 - start_pfn: 1048576 -Node 0, zone Movable - pages free 0 - min 0 - low 0 - high 0 - spanned 0 - present 0 - managed 0 - protection: (0, 0, 0, 0, 0) -Node 0, zone Device - pages free 0 - min 0 - low 0 - high 0 - spanned 0 - present 0 - managed 0 - protection: (0, 0, 0, 0, 0) -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/dm-0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/dm-0/stat -Lines: 1 -6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda/queue -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/add_random -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/chunk_sectors -Lines: 1 -0 
-Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/dax -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_granularity -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_max_bytes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_zeroes_data -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/fua -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/hw_sector_size -Lines: 1 -512 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/io_poll -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/io_poll_delay -Lines: 1 --1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/io_timeout -Lines: 1 -30000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda/queue/iosched -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/back_seek_max -Lines: 1 -16384 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async -Lines: 1 -250 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync -Lines: 1 -125 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/low_latency -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/max_budget -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/slice_idle -Lines: 1 -8 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/slice_idle_us -Lines: 1 -8000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/strict_guarantees -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/timeout_sync -Lines: 1 -125 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iostats -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - -Path: fixtures/sys/block/sda/queue/logical_block_size -Lines: 1 -512 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_discard_segments -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_hw_sectors_kb -Lines: 1 -32767 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_integrity_segments -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_sectors_kb -Lines: 1 -1280 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_segment_size -Lines: 1 -65536 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_segments -Lines: 1 -168 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/minimum_io_size -Lines: 1 -512 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/nomerges -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/nr_requests -Lines: 1 -64 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/nr_zones -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/optimal_io_size -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/physical_block_size -Lines: 1 -512 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/read_ahead_kb -Lines: 1 -128 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/rotational -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/rq_affinity -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/scheduler -Lines: 1 -mq-deadline kyber [bfq] none -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/wbt_lat_usec -Lines: 1 -75000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/write_cache -Lines: 1 -write back -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/write_same_max_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/write_zeroes_max_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/zoned -Lines: 1 -none -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/stat -Lines: 1 -9652963 396792 759304206 
412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/drm -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/drm/card0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/drm/card0/device -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/aer_dev_correctable -Lines: 9 -RxErr 0 -BadTLP 0 -BadDLLP 0 -Rollover 0 -Timeout 0 -NonFatalErr 0 -CorrIntErr 0 -HeaderOF 0 -TOTAL_ERR_COR 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/aer_dev_fatal -Lines: 19 -Undefined 0 -DLP 0 -SDES 0 -TLP 0 -FCP 0 -CmpltTO 0 -CmpltAbrt 0 -UnxCmplt 0 -RxOF 0 -MalfTLP 0 -ECRC 0 -UnsupReq 0 -ACSViol 0 -UncorrIntErr 0 -BlockedTLP 0 -AtomicOpBlocked 0 -TLPBlockedErr 0 -PoisonTLPBlocked 0 -TOTAL_ERR_FATAL 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/aer_dev_nonfatal -Lines: 19 -Undefined 0 -DLP 0 -SDES 0 -TLP 0 -FCP 0 -CmpltTO 0 -CmpltAbrt 0 -UnxCmplt 0 -RxOF 0 -MalfTLP 0 -ECRC 0 -UnsupReq 0 -ACSViol 0 -UncorrIntErr 0 -BlockedTLP 0 -AtomicOpBlocked 0 -TLPBlockedErr 0 -PoisonTLPBlocked 0 -TOTAL_ERR_NONFATAL 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/ari_enabled -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/boot_vga -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/broken_parity_status -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/class -Lines: 1 -0x030000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/consistent_dma_mask_bits -Lines: 1 -44 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/current_link_speed -Lines: 1 -8.0 GT/s PCIe -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/current_link_width -Lines: 1 -16 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/d3cold_allowed -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/device -Lines: 1 -0x687f -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/dma_mask_bits -Lines: 1 -44 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/driver_override -Lines: 1 -(null) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- -Path: fixtures/sys/class/drm/card0/device/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/gpu_busy_percent -Lines: 1 -4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/irq -Lines: 1 -95 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/local_cpulist -Lines: 1 -0-15 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/local_cpus -Lines: 1 -0000ffff -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/max_link_speed -Lines: 1 -8.0 GT/s PCIe -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/max_link_width -Lines: 1 -16 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_gtt_total -Lines: 1 -8573157376 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_gtt_used -Lines: 1 -144560128 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_vis_vram_total -Lines: 1 -8573157376 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_vis_vram_used -Lines: 1 -1490378752 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_vram_total -Lines: 1 -8573157376 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_vram_used -Lines: 1 -1490378752 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/mem_info_vram_vendor -Lines: 1 -samsung -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/modalias -Lines: 1 -pci:v00001002d0000687Fsv00001043sd000004C4bc03sc00i00 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/msi_bus -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/numa_node -Lines: 1 --1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pcie_bw -Lines: 1 -6641 815 256 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pcie_replay_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/power_dpm_force_performance_level -Lines: 1 -manual -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/power_dpm_state -Lines: 1 -performance -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/power_state -Lines: 1 -D0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_cur_state -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_dpm_dcefclk -Lines: 5 -0: 600Mhz * -1: 720Mhz -2: 800Mhz -3: 847Mhz -4: 900Mhz -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_dpm_mclk -Lines: 4 -0: 167Mhz * -1: 500Mhz -2: 800Mhz -3: 945Mhz -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_dpm_pcie -Lines: 2 -0: 8.0GT/s, x16 -1: 8.0GT/s, x16 * -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_dpm_sclk -Lines: 8 -0: 852Mhz * -1: 991Mhz -2: 1084Mhz -3: 1138Mhz -4: 1200Mhz -5: 1401Mhz -6: 1536Mhz -7: 1630Mhz -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_dpm_socclk -Lines: 8 -0: 600Mhz -1: 720Mhz * -2: 800Mhz -3: 847Mhz -4: 900Mhz -5: 960Mhz -6: 1028Mhz -7: 1107Mhz -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_features -Lines: 32 -Current ppfeatures: 0x0000000019a1ff4f -FEATURES BITMASK ENABLEMENT -DPM_PREFETCHER 0x0000000000000001 Y -GFXCLK_DPM 0x0000000000000002 Y -UCLK_DPM 0x0000000000000004 Y -SOCCLK_DPM 0x0000000000000008 Y -UVD_DPM 0x0000000000000010 N -VCE_DPM 0x0000000000000020 N -ULV 0x0000000000000040 Y -MP0CLK_DPM 0x0000000000000080 N -LINK_DPM 0x0000000000000100 Y -DCEFCLK_DPM 0x0000000000000200 Y -AVFS 0x0000000000000400 Y -GFXCLK_DS 0x0000000000000800 Y -SOCCLK_DS 0x0000000000001000 Y -LCLK_DS 0x0000000000002000 Y -PPT 0x0000000000004000 Y -TDC 0x0000000000008000 Y -THERMAL 0x0000000000010000 Y -GFX_PER_CU_CG 0x0000000000020000 N -RM 0x0000000000040000 N -DCEFCLK_DS 0x0000000000080000 N -ACDC 0x0000000000100000 N -VR0HOT 0x0000000000200000 Y -VR1HOT 0x0000000000400000 N -FW_CTF 0x0000000000800000 Y -LED_DISPLAY 0x0000000001000000 Y -FAN_CONTROL 0x0000000002000000 N -FAST_PPT 0x0000000004000000 N -DIDT 0x0000000008000000 Y -ACG 0x0000000010000000 Y -PCC_LIMIT 0x0000000020000000 N -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_force_state -Lines: 1 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_mclk_od -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_num_states -Lines: 3 -states: 2 -0 boot -1 performance -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_od_clk_voltage -Lines: 18 -OD_SCLK: -0: 852Mhz 800mV -1: 991Mhz 900mV -2: 1084Mhz 950mV -3: 1138Mhz 1000mV -4: 1200Mhz 1050mV -5: 1401Mhz 1100mV -6: 1536Mhz 1150mV -7: 1630Mhz 1200mV -OD_MCLK: -0: 167Mhz 800mV -1: 500Mhz 800mV -2: 800Mhz 950mV -3: 945Mhz 1100mV -OD_RANGE: -SCLK: 852MHz 2400MHz -MCLK: 167MHz 1500MHz -VDDC: 800mV 1200mV -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_power_profile_mode -Lines: 8 -NUM MODE_NAME BUSY_SET_POINT FPS USE_RLC_BUSY MIN_ACTIVE_LEVEL - 0 BOOTUP_DEFAULT : 70 60 0 0 - 1 3D_FULL_SCREEN*: 70 60 1 3 - 2 POWER_SAVING : 90 60 0 0 - 3 VIDEO : 70 60 0 0 - 4 VR : 70 90 0 0 - 5 COMPUTE : 30 60 0 6 - 6 CUSTOM : 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/pp_sclk_od -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/product_name -Lines: 1 - -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/product_number -Lines: 1 - -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/resource -Lines: 13 -0x0000007c00000000 0x0000007dffffffff 0x000000000014220c -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000007e00000000 0x0000007e0fffffff 0x000000000014220c -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x000000000000d000 0x000000000000d0ff 0x0000000000040101 -0x00000000fcd00000 0x00000000fcd7ffff 0x0000000000040200 -0x00000000fcd80000 0x00000000fcd9ffff 0x0000000000046200 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/revision -Lines: 1 -0xc1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/serial_number -Lines: 1 - -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/subsystem_device -Lines: 1 -0x04c4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/subsystem_vendor -Lines: 1 -0x1043 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/thermal_throttling_logging -Lines: 1 -0000:09:00.0: thermal throttling logging enabled, with interval 60 seconds -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/uevent -Lines: 6 -DRIVER=amdgpu -PCI_CLASS=30000 -PCI_ID=1002:687F -PCI_SUBSYS_ID=1043:04C4 -PCI_SLOT_NAME=0000:09:00.0 -MODALIAS=pci:v00001002d0000687Fsv00001043sd000004C4bc03sc00i00 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/unique_id -Lines: 1 -0123456789abcdef -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/vbios_version -Lines: 1 -115-D050PIL-100 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/drm/card0/device/vendor -Lines: 1 -0x1002 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Directory: fixtures/sys/class/fc_host -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/fc_host/host0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/dev_loss_tmo -Lines: 1 -30 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/fabric_name -Lines: 1 -0x0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/node_name -Lines: 1 -0x2000e0071bce95f2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/port_id -Lines: 1 -0x000002 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/port_name -Lines: 1 -0x1000e0071bce95f2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/port_state -Lines: 1 -Online -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/port_type -Lines: 1 -Point-To-Point (direct nport connection) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/speed -Lines: 1 -16 Gbit -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/fc_host/host0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/dumped_frames -Lines: 1 -0xffffffffffffffff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/error_frames -Lines: 1 -0x0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/fcp_packet_aborts -Lines: 1 -0x13 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/invalid_crc_count -Lines: 1 -0x2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/invalid_tx_word_count -Lines: 1 -0x8 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/link_failure_count -Lines: 1 -0x9 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_signal_count -Lines: 1 -0x11 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_sync_count -Lines: 1 -0x10 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/nos_count -Lines: 1 -0x12 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/rx_frames -Lines: 1 -0x3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/rx_words 
-Lines: 1 -0x4 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/seconds_since_last_reset -Lines: 1 -0x7 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/tx_frames -Lines: 1 -0x5 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/tx_words -Lines: 1 -0x6 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/supported_classes -Lines: 1 -Class 3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/supported_speeds -Lines: 1 -4 Gbit, 8 Gbit, 16 Gbit -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/symbolic_name -Lines: 1 -Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/hfi1_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/board_id -Lines: 1 -HPE 100Gb 1-port OP101 QSFP28 x16 PCIe Gen3 with Intel Omni-Path Adapter -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/fw_ver -Lines: 1 -1.27.0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/hfi1_0/ports -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/hfi1_0/ports/1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/VL15_dropped -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/excessive_buffer_overrun_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/link_downed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/link_error_recovery -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/local_link_integrity_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_data -Lines: 1 -345091702026 -Mode: 644 -# ttar - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_packets -Lines: 1 -638036947 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_remote_physical_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_switch_relay_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_data -Lines: 1 -273558326543 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_discards -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_packets -Lines: 1 -568318856 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_wait -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/symbol_error -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/phys_state -Lines: 1 -5: LinkUp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/rate -Lines: 1 -100 Gb/sec (4X EDR) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/state -Lines: 1 -4: ACTIVE -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/board_id -Lines: 1 -SM_1141000001000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver -Lines: 1 -2.31.5050 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/hca_type -Lines: 1 -MT4099 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data -Lines: 1 -2221223609 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets -Lines: 1 -87169372 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data -Lines: 1 -26509113295 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets -Lines: 1 -85734114 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait -Lines: 1 -3599 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state -Lines: 1 -5: LinkUp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate -Lines: 1 -40 Gb/sec (4X QDR) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/class/infiniband/mlx4_0/ports/1/state -Lines: 1 -4: ACTIVE -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/local_link_integrity_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data -Lines: 1 -2460436784 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_packets -Lines: 1 -89332064 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_remote_physical_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_switch_relay_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data -Lines: 1 -26540356890 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_discards -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_packets -Lines: 1 -88622850 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_wait -Lines: 1 -3846 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/symbol_error 
-Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state -Lines: 1 -5: LinkUp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/rate -Lines: 1 -40 Gb/sec (4X QDR) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/state -Lines: 1 -4: ACTIVE -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net/eth0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_assign_type -Lines: 1 -3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_len -Lines: 1 -6 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/address -Lines: 1 -01:01:01:01:01:01 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/broadcast -Lines: 1 -ff:ff:ff:ff:ff:ff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_changes -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_down_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_up_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/dev_id -Lines: 1 -0x20 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/device -SymlinkTo: ../../../devices/pci0000:00/0000:00:1f.6/ -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/dormant -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/duplex -Lines: 1 -full -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/flags -Lines: 1 -0x1303 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifalias -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifindex -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/iflink -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/link_mode -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/mtu -Lines: 1 -1500 -Mode: 644 
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/name_assign_type -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/netdev_group -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/operstate -Lines: 1 -up -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_id -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_name -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_switch_id -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/speed -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/tx_queue_len -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/type -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/nvme -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/nvme/nvme0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/firmware_rev -Lines: 1 -1B2QEXP7 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/model -Lines: 1 -Samsung SSD 970 PRO 512GB -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/serial -Lines: 1 -S680HF8N190894I -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/state -Lines: 1 -live -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC -SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0 -SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap/intel-rapl -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl/enabled -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
fixtures/sys/class/powercap/intel-rapl:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw -Lines: 1 -95000000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_name -Lines: 1 -long_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_time_window_us -Lines: 1 -999424 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_name -Lines: 1 -short_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_time_window_us -Lines: 1 -2440 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/enabled -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/energy_uj -Lines: 1 -240422366267 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/max_energy_range_uj -Lines: 1 -262143328850 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/name -Lines: 1 -package-0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap/intel-rapl:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw -Lines: 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_name -Lines: 1 -long_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us -Lines: 1 -976 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/enabled -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/energy_uj -Lines: 1 -118821284256 
-Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/max_energy_range_uj -Lines: 1 -262143328850 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/name -Lines: 1 -core -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap/intel-rapl:a -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw -Lines: 1 -95000000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name -Lines: 1 -long_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us -Lines: 1 -999424 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name -Lines: 1 -short_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us -Lines: 1 -2440 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/enabled -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj -Lines: 1 -240422366267 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj -Lines: 1 -262143328850 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/name -Lines: 1 -package-10 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/scsi_tape -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0 -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0a -SymlinkTo: 
../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0l -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0m -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0 -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0a -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0l -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0m -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/cooling_device0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device0/cur_state -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device0/max_state -Lines: 1 -50 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device0/type -Lines: 1 -Processor -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/cooling_device1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device1/cur_state -Lines: 1 --1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device1/max_state -Lines: 1 -27 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device1/type -Lines: 1 -intel_powerclamp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/temp -Lines: 1 -49925 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/type 
-Lines: 1 -bcm2835_thermal -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/mode -Lines: 1 -enabled -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/passive -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/temp -Lines: 1 --44000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/type -Lines: 1 -acpitz -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device -SymlinkTo: ../../../ACPI0003:00 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async -Lines: 1 -disabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control -Lines: 1 -auto -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled -Lines: 1 -disabled -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status -Lines: 1 -unsupported -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup -Lines: 1 -enabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms -Lines: 1 -10598 -Mode: 444 -# ttar - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem -SymlinkTo: ../../../../../../../../../class/power_supply -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type -Lines: 1 -Mains -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent -Lines: 2 -POWER_SUPPLY_NAME=AC -POWER_SUPPLY_ONLINE=0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm -Lines: 1 -2369000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity -Lines: 1 -98 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level -Lines: 1 -Normal -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold -Lines: 1 -95 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device -SymlinkTo: ../../../PNP0C0A:00 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full -Lines: 1 -50060000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design -Lines: 1 -47520000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now -Lines: 1 -49450000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer -Lines: 1 -LGC -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name -Lines: 1 -LNV-45N1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async -Lines: 1 -disabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control -Lines: 1 -auto -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled -Lines: 1 -disabled -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status -Lines: 1 -unsupported -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now -Lines: 1 -4830000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number -Lines: 1 -38109 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status -Lines: 1 -Discharging -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem -SymlinkTo: ../../../../../../../../../class/power_supply -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology -Lines: 1 -Li-ion -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type -Lines: 1 -Battery -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent -Lines: 16 -POWER_SUPPLY_NAME=BAT0 -POWER_SUPPLY_STATUS=Discharging -POWER_SUPPLY_PRESENT=1 -POWER_SUPPLY_TECHNOLOGY=Li-ion -POWER_SUPPLY_CYCLE_COUNT=0 -POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 -POWER_SUPPLY_VOLTAGE_NOW=11750000 -POWER_SUPPLY_POWER_NOW=5064000 -POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 -POWER_SUPPLY_ENERGY_FULL=47390000 -POWER_SUPPLY_ENERGY_NOW=40730000 -POWER_SUPPLY_CAPACITY=85 -POWER_SUPPLY_CAPACITY_LEVEL=Normal -POWER_SUPPLY_MODEL_NAME=LNV-45N1 -POWER_SUPPLY_MANUFACTURER=LGC -POWER_SUPPLY_SERIAL_NUMBER=38109 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design -Lines: 1 -10800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now -Lines: 1 -12229000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats -Mode: 755 -# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_ns -Lines: 1 
-5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 
-Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written -Lines: 1 -512 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:1f.6 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/ari_enabled -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/broken_parity_status -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/class -Lines: 1 -0x020000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/consistent_dma_mask_bits -Lines: 1 -64 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/d3cold_allowed -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/device -Lines: 1 -0x15d7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/dma_mask_bits -Lines: 1 -64 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/driver_override -Lines: 1 -(null) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/irq -Lines: 1 -140 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpulist -Lines: 1 -0-7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpus -Lines: 1 -ff -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/modalias -Lines: 1 -pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/msi_bus -Lines: 1 -1 -Mode: 
644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/numa_node -Lines: 1 --1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/resource -Lines: 13 -0x00000000ec200000 0x00000000ec21ffff 0x0000000000040200 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -0x0000000000000000 0x0000000000000000 0x0000000000000000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/revision -Lines: 1 -0x21 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_device -Lines: 1 -0x225a -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_vendor -Lines: 1 -0x17aa -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/uevent -Lines: 6 -DRIVER=e1000e -PCI_CLASS=20000 -PCI_ID=8086:15D7 -PCI_SUBSYS_ID=17AA:225A -PCI_SLOT_NAME=0000:00:1f.6 -MODALIAS=pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/vendor -Lines: 1 -0x8086 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/rbd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/rbd/0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/0/name -Lines: 1 -demo -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/0/pool -Lines: 1 -iscsi-images -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/rbd/1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/1/name -Lines: 1 -wrong -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/1/pool -Lines: 1 -wrong-images -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/clocksource -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/clocksource/clocksource0 -Mode: 775 -# ttar - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource -Lines: 1 -tsc hpet acpi_pm -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource -Lines: 1 -tsc -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq -SymlinkTo: ../cpufreq/policy0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count -Lines: 1 -10084 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count -Lines: 1 -34818 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0/topology -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_id -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings -Lines: 1 -ff -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings_list -Lines: 1 -0-7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/physical_package_id -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings -Lines: 1 -11 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings_list -Lines: 1 -0,4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq -Lines: 1 -1200195 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency -Lines: 1 -4294967295 -Mode: 664 -# ttar 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus -Lines: 1 -1 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors -Lines: 1 -performance powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver -Lines: 1 -intel_pstate -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor -Lines: 1 -powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed -Lines: 1 - -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count -Lines: 1 -523 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count -Lines: 1 -34818 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/topology -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_id -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings -Lines: 1 -ff -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings_list -Lines: 1 -0-7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/physical_package_id -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings -Lines: 1 -22 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings_list -Lines: 1 -1,5 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq -Lines: 1 -2400000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq -Lines: 1 -800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors -Lines: 1 -performance powersave -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq -Lines: 1 -1219917 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver -Lines: 1 -intel_pstate -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor -Lines: 1 -powersave -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq -Lines: 1 -2400000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq -Lines: 1 -800000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed -Lines: 1 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/node -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/node/node1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/node/node1/vmstat -Lines: 6 -nr_free_pages 1 -nr_zone_inactive_anon 2 -nr_zone_active_anon 3 -nr_zone_inactive_file 4 -nr_zone_active_file 5 -nr_zone_unevictable 6 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/node/node2 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/node/node2/vmstat -Lines: 6 -nr_free_pages 7 -nr_zone_inactive_anon 8 -nr_zone_active_anon 9 -nr_zone_inactive_file 10 -nr_zone_active_file 11 -nr_zone_unevictable 12 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug -Lines: 7 -rate: 1.1M/sec -dirty: 20.4G -target: 20.4G -proportional: 427.5k -integral: 790.0k -change: 321.5k/sec -next io: 17ms -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written -Lines: 1 -512 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us -Lines: 1 -1305 -Mode: 644 -# ttar - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used -Lines: 1 -808189952 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used -Lines: 1 -808189952 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes -Lines: 1 -808189952 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly -Lines: 1 -131072 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used -Lines: 1 -933888 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used -Lines: 1 -1867776 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags -Lines: 1 -4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes -Lines: 1 -1073741824 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes -Lines: 1 -933888 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes -Lines: 1 -1073741824 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used -Lines: 1 -32768 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags -Lines: 1 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes -Lines: 1 -8388608 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes -Lines: 1 -8388608 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size -Lines: 1 -20971520 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size -Lines: 1 -20971520 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label -Lines: 1 -fixture -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid -Lines: 1 -0abb23a9-579b-43e6-ad30-227ef47fcb9d -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly -Lines: 1 -262144 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used -Lines: 
1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags -Lines: 1 -4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags -Lines: 1 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22 -SymlinkTo: ../../../../devices/virtual/block/loop22 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23 -SymlinkTo: ../../../../devices/virtual/block/loop23 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24 -SymlinkTo: ../../../../devices/virtual/block/loop24 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25 -SymlinkTo: ../../../../devices/virtual/block/loop25 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56 -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid -Lines: 1 -7f07c59f-6136-449c-ab87-e1cf2328731b -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sda1/stats/stats -Lines: 1 -extent_alloc 1 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sdb1/stats/stats -Lines: 1 -extent_alloc 2 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/udev_path -Lines: 1 -/home/iscsi/file_back_1G -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/iblock_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path -Lines: 1 -/dev/rbd1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rbd_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path -Lines: 1 -/dev/rbd/iscsi-images/demo -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d -SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -204950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -10325 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -40325 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026 -SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -104950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -20095 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -71235 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686 -SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -301950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -10195 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -30195 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893 -SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo -# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -1234 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -1504 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -4733 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 0040753b1c18..3c18c7610ef5 100644 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -26,7 +26,7 @@ const ( // DefaultSysMountPoint is the common mount point of the sys filesystem. DefaultSysMountPoint = "/sys" - // DefaultConfigfsMountPoint is the common mount point of the configfs + // DefaultConfigfsMountPoint is the common mount point of the configfs. DefaultConfigfsMountPoint = "/sys/kernel/config" ) diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 22cb07a6bbb5..b030951faf98 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -14,7 +14,7 @@ package util import ( - "io/ioutil" + "os" "strconv" "strings" ) @@ -66,7 +66,7 @@ func ParsePInt64s(ss []string) ([]*int64, error) { // ReadUintFromFile reads a file and attempts to parse a uint64 from it. func ReadUintFromFile(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } @@ -75,7 +75,7 @@ func ReadUintFromFile(path string) (uint64, error) { // ReadIntFromFile reads a file and attempts to parse a int64 from it. func ReadIntFromFile(path string) (int64, error) { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go index 8051161b2aa4..71b7a70ebd68 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -15,17 +15,16 @@ package util import ( "io" - "io/ioutil" "os" ) -// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file. -// This is similar to ioutil.ReadFile but without the call to os.Stat, because +// ReadFileNoStat uses io.ReadAll to read contents of entire file. 
+// This is similar to os.ReadFile but without the call to os.Stat, because // many files in /proc and /sys report incorrect file sizes (either 0 or 4096). -// Reads a max file size of 512kB. For files larger than this, a scanner +// Reads a max file size of 1024kB. For files larger than this, a scanner // should be used. func ReadFileNoStat(filename string) ([]byte, error) { - const maxBufferSize = 1024 * 512 + const maxBufferSize = 1024 * 1024 f, err := os.Open(filename) if err != nil { @@ -34,5 +33,5 @@ func ReadFileNoStat(filename string) ([]byte, error) { defer f.Close() reader := io.LimitReader(f, maxBufferSize) - return ioutil.ReadAll(reader) + return io.ReadAll(reader) } diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index c07de0b6c9c6..1ab875ceec6a 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -11,7 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux,!appengine +//go:build (linux || darwin) && !appengine +// +build linux darwin +// +build !appengine package util @@ -21,7 +23,7 @@ import ( "syscall" ) -// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly. // https://github.com/prometheus/node_exporter/pull/728/files // // Note that this function will not read files larger than 128 bytes. @@ -33,7 +35,7 @@ func SysReadFile(file string) (string, error) { defer f.Close() // On some machines, hwmon drivers are broken and return EAGAIN. This causes - // Go's ioutil.ReadFile implementation to poll forever. + // Go's os.ReadFile implementation to poll forever. // // Since we either want to read data or bail immediately, do the simplest // possible read using syscall directly. diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go index bd55b45377db..1d86f5e63f3c 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -11,7 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux,appengine !linux +//go:build (linux && appengine) || (!linux && !darwin) +// +build linux,appengine !linux,!darwin package util diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 89e447746cfe..391c07957e90 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -20,7 +20,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "os" "strconv" @@ -84,7 +83,7 @@ func parseIPVSStats(r io.Reader) (IPVSStats, error) { stats IPVSStats ) - statContent, err := ioutil.ReadAll(r) + statContent, err := io.ReadAll(r) if err != nil { return IPVSStats{}, err } diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go index da3a941d60b9..db88566bdf0a 100644 --- a/vendor/github.com/prometheus/procfs/kernel_random.go +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
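[Editor's note, not part of the patch] The hunks above replace the deprecated io/ioutil helpers with their os and io equivalents (os.ReadFile, io.ReadAll) and raise ReadFileNoStat's cap to 1 MiB. A rough standalone illustration of that capped-read pattern follows; readLimited and the /proc/loadavg path are chosen for the example and are not part of the library.

package main

import (
	"fmt"
	"io"
	"os"
)

// readLimited sketches the pattern the patched ReadFileNoStat settles on:
// open the file, cap the read with io.LimitReader, and drain it with
// io.ReadAll instead of the removed ioutil helpers.
func readLimited(path string, max int64) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return io.ReadAll(io.LimitReader(f, max))
}

func main() {
	// Many /proc and /sys files report a size of 0 or 4096 via stat, which is
	// why the library avoids sizing a buffer from os.Stat in the first place.
	b, err := readLimited("/proc/loadavg", 1024*1024)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("%s", b)
}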
+//go:build !windows // +build !windows package procfs diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 0cce190ec22b..0096cafbdf86 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -21,7 +21,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// LoadAvg represents an entry in /proc/loadavg +// LoadAvg represents an entry in /proc/loadavg. type LoadAvg struct { Load1 float64 Load5 float64 diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index f0b9e5f75a9e..a95c889cb9e2 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -15,7 +15,7 @@ package procfs import ( "fmt" - "io/ioutil" + "os" "regexp" "strconv" "strings" @@ -64,7 +64,7 @@ type MDStat struct { // structs containing the relevant info. More information available here: // https://raid.wiki.kernel.org/index.php/Mdstat func (fs FS) MDStat() ([]MDStat, error) { - data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) + data, err := os.ReadFile(fs.proc.Path("mdstat")) if err != nil { return nil, err } @@ -166,8 +166,12 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { } func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { + statusFields := strings.Fields(statusLine) + if len(statusFields) < 1 { + return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine) + } - sizeStr := strings.Fields(statusLine)[0] + sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index 9964a3600b4b..8300daca0545 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -25,7 +25,7 @@ import ( ) // A ConntrackStatEntry represents one line from net/stat/nf_conntrack -// and contains netfilter conntrack statistics at one CPU core +// and contains netfilter conntrack statistics at one CPU core. type ConntrackStatEntry struct { Entries uint64 Found uint64 @@ -38,12 +38,12 @@ type ConntrackStatEntry struct { SearchRestart uint64 } -// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores +// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores. func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) } -// Parses a slice of ConntrackStatEntries from the given filepath +// Parses a slice of ConntrackStatEntries from the given filepath. func readConntrackStat(path string) ([]ConntrackStatEntry, error) { // This file is small and can be read with one syscall. b, err := util.ReadFileNoStat(path) @@ -61,7 +61,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) { return stat, nil } -// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries +// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries. 
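[Editor's note, not part of the patch] Several of these files also gain the Go 1.17 //go:build syntax while keeping the legacy // +build lines for older toolchains. A minimal file showing how the two forms pair up; the demo package name is purely illustrative, and the constraint mirrors the sysreadfile.go hunk above.

//go:build (linux || darwin) && !appengine
// +build linux darwin
// +build !appengine

// Package demo only demonstrates the paired constraint syntax: the //go:build
// line uses boolean operators, while the legacy form ANDs separate // +build
// lines and ORs the space-separated terms within a single line.
package demo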
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { var entries []ConntrackStatEntry @@ -79,7 +79,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { return entries, nil } -// Parses a ConntrackStatEntry from given array of fields +// Parses a ConntrackStatEntry from given array of fields. func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { if len(fields) != 17 { return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") @@ -143,7 +143,7 @@ func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { return entry, nil } -// Parses a uint64 from given hex in string +// Parses a uint64 from given hex in string. func parseConntrackStatField(field string) (uint64, error) { val, err := strconv.ParseUint(field, 16, 64) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go index 47a710befb93..e66208aa05fe 100644 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -87,17 +87,17 @@ func newNetDev(file string) (NetDev, error) { // parseLine parses a single line from the /proc/net/dev file. Header lines // must be filtered prior to calling this method. func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { - parts := strings.SplitN(rawLine, ":", 2) - if len(parts) != 2 { + idx := strings.LastIndex(rawLine, ":") + if idx == -1 { return nil, errors.New("invalid net/dev line, missing colon") } - fields := strings.Fields(strings.TrimSpace(parts[1])) + fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:])) var err error line := &NetDevLine{} // Interface Name - line.Name = strings.TrimSpace(parts[0]) + line.Name = strings.TrimSpace(rawLine[:idx]) if line.Name == "" { return nil, errors.New("invalid net/dev line, empty interface name") } diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index 8c9ee3de8786..7fd57d7f463b 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -34,7 +34,7 @@ const ( readLimit = 4294967296 // Byte -> 4 GiB ) -// this contains generic data structures for both udp and tcp sockets +// This contains generic data structures for both udp and tcp sockets. type ( // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header. NetIPSocket []*netIPSocketLine diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index 8c6de3791baf..374b6f73f821 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// NetProtocolStats stores the contents from /proc/net/protocols +// NetProtocolStats stores the contents from /proc/net/protocols. type NetProtocolStats map[string]NetProtocolStatLine // NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We @@ -41,7 +41,7 @@ type NetProtocolStatLine struct { Capabilities NetProtocolCapabilities } -// NetProtocolCapabilities contains a list of capabilities for each protocol +// NetProtocolCapabilities contains a list of capabilities for each protocol. 
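[Editor's note, not part of the patch] The net_dev.go hunk above stops splitting a /proc/net/dev line at the first colon and takes the last one instead, so an interface name that itself contains a colon keeps its full name. A small standalone sketch of that splitting rule; splitNetDevLine is illustrative, not the library function.

package main

import (
	"fmt"
	"strings"
)

// splitNetDevLine splits a /proc/net/dev data line at the LAST colon, the
// same rule the patched parseLine uses, so a name like "eth0:1" stays intact.
func splitNetDevLine(raw string) (name, counters string, ok bool) {
	idx := strings.LastIndex(raw, ":")
	if idx == -1 {
		return "", "", false
	}
	return strings.TrimSpace(raw[:idx]), strings.TrimSpace(raw[idx+1:]), true
}

func main() {
	name, counters, _ := splitNetDevLine("  eth0:1:    1234      10    0    0")
	fmt.Printf("name=%q counters=%q\n", name, counters)
}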
type NetProtocolCapabilities struct { Close bool // 8 Connect bool // 9 diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index 46f12c61d3e9..a94f86dc4ae6 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -30,13 +30,13 @@ import ( // * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 // and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. -// SoftnetStat contains a single row of data from /proc/net/softnet_stat +// SoftnetStat contains a single row of data from /proc/net/softnet_stat. type SoftnetStat struct { - // Number of processed packets + // Number of processed packets. Processed uint32 - // Number of dropped packets + // Number of dropped packets. Dropped uint32 - // Number of times processing packets ran out of quota + // Number of times processing packets ran out of quota. TimeSqueezed uint32 } diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go similarity index 96% rename from vendor/github.com/prometheus/procfs/xfrm.go rename to vendor/github.com/prometheus/procfs/net_xfrm.go index eed07c7d7748..f9d9d243db38 100644 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ b/vendor/github.com/prometheus/procfs/net_xfrm.go @@ -79,10 +79,13 @@ type XfrmStat struct { // Policy is dead XfrmOutPolDead int // Policy Error - XfrmOutPolError int - XfrmFwdHdrError int + XfrmOutPolError int + // Forward routing of a packet is not allowed + XfrmFwdHdrError int + // State is invalid, perhaps expired XfrmOutStateInvalid int - XfrmAcquireError int + // State hasn’t been fully acquired before use + XfrmAcquireError int } // NewXfrmStat reads the xfrm_stat statistics. diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index 94d892f11348..dcea9c5a671f 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -21,13 +21,13 @@ import ( "strings" ) -// NetStat contains statistics for all the counters from one file +// NetStat contains statistics for all the counters from one file. type NetStat struct { - Filename string Stats map[string][]uint64 + Filename string } -// NetStat retrieves stats from /proc/net/stat/ +// NetStat retrieves stats from `/proc/net/stat/`. func (fs FS) NetStat() ([]NetStat, error) { statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*")) if err != nil { @@ -55,7 +55,7 @@ func (fs FS) NetStat() ([]NetStat, error) { // Other strings represent per-CPU counters for scanner.Scan() { for num, counter := range strings.Fields(scanner.Text()) { - value, err := strconv.ParseUint(counter, 16, 32) + value, err := strconv.ParseUint(counter, 16, 64) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 28f696803f6f..c30223af72ad 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -16,7 +16,7 @@ package procfs import ( "bytes" "fmt" - "io/ioutil" + "io" "os" "strconv" "strings" @@ -82,7 +82,7 @@ func (fs FS) Self() (Proc, error) { // NewProc returns a process for the given pid. // -// Deprecated: use fs.Proc() instead +// Deprecated: Use fs.Proc() instead. 
func (fs FS) NewProc(pid int) (Proc, error) { return fs.Proc(pid) } @@ -142,7 +142,7 @@ func (p Proc) Wchan() (string, error) { } defer f.Close() - data, err := ioutil.ReadAll(f) + data, err := io.ReadAll(f) if err != nil { return "", err } @@ -185,7 +185,7 @@ func (p Proc) Cwd() (string, error) { return wd, err } -// RootDir returns the absolute path to the process's root directory (as set by chroot) +// RootDir returns the absolute path to the process's root directory (as set by chroot). func (p Proc) RootDir() (string, error) { rdir, err := os.Readlink(p.path("root")) if os.IsNotExist(err) { @@ -311,7 +311,7 @@ func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { // Schedstat returns task scheduling information for the process. func (p Proc) Schedstat() (ProcSchedstat, error) { - contents, err := ioutil.ReadFile(p.path("schedstat")) + contents, err := os.ReadFile(p.path("schedstat")) if err != nil { return ProcSchedstat{}, err } diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index be45b798733b..cca03327c3fe 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -45,7 +45,7 @@ type Cgroup struct { } // parseCgroupString parses each line of the /proc/[pid]/cgroup file -// Line format is hierarchyID:[controller1,controller2]:path +// Line format is hierarchyID:[controller1,controller2]:path. func parseCgroupString(cgroupStr string) (*Cgroup, error) { var err error @@ -69,7 +69,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) { return cgroup, nil } -// parseCgroups reads each line of the /proc/[pid]/cgroup file +// parseCgroups reads each line of the /proc/[pid]/cgroup file. func parseCgroups(data []byte) ([]Cgroup, error) { var cgroups []Cgroup scanner := bufio.NewScanner(bytes.NewReader(data)) @@ -88,7 +88,7 @@ func parseCgroups(data []byte) ([]Cgroup, error) { // Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process // control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes, -// so the len of the returned struct is equal to the number of active hierarchies on this system +// so the len of the returned struct is equal to the number of active hierarchies on this system. func (p Proc) Cgroups() ([]Cgroup, error) { data, err := util.ReadFileNoStat(p.path("cgroup")) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go new file mode 100644 index 000000000000..24d4dce9cfc7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go @@ -0,0 +1,98 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CgroupSummary models one line from /proc/cgroups. 
+// This file contains information about the controllers that are compiled into the kernel. +// +// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html +type CgroupSummary struct { + // The name of the controller. controller is also known as subsystem. + SubsysName string + // The unique ID of the cgroup hierarchy on which this controller is mounted. + Hierarchy int + // The number of control groups in this hierarchy using this controller. + Cgroups int + // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled + Enabled int +} + +// parseCgroupSummary parses each line of the /proc/cgroup file +// Line format is `subsys_name hierarchy num_cgroups enabled`. +func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { + var err error + + fields := strings.Fields(CgroupSummaryStr) + // require at least 4 fields + if len(fields) < 4 { + return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr) + } + + CgroupSummary := &CgroupSummary{ + SubsysName: fields[0], + } + CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse hierarchy ID") + } + CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) + if err != nil { + return nil, fmt.Errorf("failed to parse Cgroup Num") + } + CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) + if err != nil { + return nil, fmt.Errorf("failed to parse Enabled") + } + return CgroupSummary, nil +} + +// parseCgroupSummary reads each line of the /proc/cgroup file. +func parseCgroupSummary(data []byte) ([]CgroupSummary, error) { + var CgroupSummarys []CgroupSummary + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + CgroupSummaryString := scanner.Text() + // ignore comment lines + if strings.HasPrefix(CgroupSummaryString, "#") { + continue + } + CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString) + if err != nil { + return nil, err + } + CgroupSummarys = append(CgroupSummarys, *CgroupSummary) + } + + err := scanner.Err() + return CgroupSummarys, err +} + +// CgroupSummarys returns information about current /proc/cgroups. +func (fs FS) CgroupSummarys() ([]CgroupSummary, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cgroups")) + if err != nil { + return nil, err + } + return parseCgroupSummary(data) +} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go index 6134b3580c45..57a89895d66a 100644 --- a/vendor/github.com/prometheus/procfs/proc_environ.go +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -19,7 +19,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Environ reads process environments from /proc//environ +// Environ reads process environments from `/proc//environ`. 
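[Editor's note, not part of the patch] proc_cgroups.go is new in this procfs version and exposes the /proc/cgroups controller table through FS.CgroupSummarys. A minimal usage sketch, assuming a Linux host with /proc mounted and this vendored version of github.com/prometheus/procfs:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	// NewDefaultFS opens the standard /proc mount point.
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	// CgroupSummarys returns one entry per cgroup controller compiled into the kernel.
	summaries, err := fs.CgroupSummarys()
	if err != nil {
		panic(err)
	}
	for _, s := range summaries {
		fmt.Printf("%-12s hierarchy=%d cgroups=%d enabled=%d\n",
			s.SubsysName, s.Hierarchy, s.Cgroups, s.Enabled)
	}
}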
func (p Proc) Environ() ([]string, error) { environments := make([]string, 0) diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index cf63227f064f..1bbdd4a8e998 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -22,7 +22,6 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Regexp variables var ( rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) @@ -122,7 +121,7 @@ func (p ProcFDInfos) Len() int { return len(p) } func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } -// InotifyWatchLen returns the total number of inotify watches +// InotifyWatchLen returns the total number of inotify watches. func (p ProcFDInfos) InotifyWatchLen() (int, error) { length := 0 for _, f := range p { diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index dd20f198a308..7a1388185a97 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -79,7 +79,7 @@ var ( // NewLimits returns the current soft limits of the process. // -// Deprecated: use p.Limits() instead +// Deprecated: Use p.Limits() instead. func (p Proc) NewLimits() (ProcLimits, error) { return p.Limits() } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index 1d7772d516a4..f1bcbf32bb3d 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -11,7 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +// +build !js package procfs @@ -25,7 +27,7 @@ import ( "golang.org/x/sys/unix" ) -// ProcMapPermissions contains permission settings read from /proc/[pid]/maps +// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`. type ProcMapPermissions struct { // mapping has the [R]ead flag set Read bool @@ -39,8 +41,8 @@ type ProcMapPermissions struct { Private bool } -// ProcMap contains the process memory-mappings of the process, -// read from /proc/[pid]/maps +// ProcMap contains the process memory-mappings of the process +// read from `/proc/[pid]/maps`. type ProcMap struct { // The start address of current mapping. StartAddr uintptr @@ -79,7 +81,7 @@ func parseDevice(s string) (uint64, error) { return unix.Mkdev(uint32(major), uint32(minor)), nil } -// parseAddress just converts a hex-string to a uintptr +// parseAddress converts a hex-string to a uintptr. func parseAddress(s string) (uintptr, error) { a, err := strconv.ParseUint(s, 16, 0) if err != nil { @@ -89,7 +91,7 @@ func parseAddress(s string) (uintptr, error) { return uintptr(a), nil } -// parseAddresses parses the start-end address +// parseAddresses parses the start-end address. 
func parseAddresses(s string) (uintptr, uintptr, error) { toks := strings.Split(s, "-") if len(toks) < 2 { diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go new file mode 100644 index 000000000000..48b5238194e8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -0,0 +1,440 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcNetstat models the content of /proc//net/netstat. +type ProcNetstat struct { + // The process ID. + PID int + TcpExt + IpExt +} + +type TcpExt struct { // nolint:revive + SyncookiesSent float64 + SyncookiesRecv float64 + SyncookiesFailed float64 + EmbryonicRsts float64 + PruneCalled float64 + RcvPruned float64 + OfoPruned float64 + OutOfWindowIcmps float64 + LockDroppedIcmps float64 + ArpFilter float64 + TW float64 + TWRecycled float64 + TWKilled float64 + PAWSActive float64 + PAWSEstab float64 + DelayedACKs float64 + DelayedACKLocked float64 + DelayedACKLost float64 + ListenOverflows float64 + ListenDrops float64 + TCPHPHits float64 + TCPPureAcks float64 + TCPHPAcks float64 + TCPRenoRecovery float64 + TCPSackRecovery float64 + TCPSACKReneging float64 + TCPSACKReorder float64 + TCPRenoReorder float64 + TCPTSReorder float64 + TCPFullUndo float64 + TCPPartialUndo float64 + TCPDSACKUndo float64 + TCPLossUndo float64 + TCPLostRetransmit float64 + TCPRenoFailures float64 + TCPSackFailures float64 + TCPLossFailures float64 + TCPFastRetrans float64 + TCPSlowStartRetrans float64 + TCPTimeouts float64 + TCPLossProbes float64 + TCPLossProbeRecovery float64 + TCPRenoRecoveryFail float64 + TCPSackRecoveryFail float64 + TCPRcvCollapsed float64 + TCPDSACKOldSent float64 + TCPDSACKOfoSent float64 + TCPDSACKRecv float64 + TCPDSACKOfoRecv float64 + TCPAbortOnData float64 + TCPAbortOnClose float64 + TCPAbortOnMemory float64 + TCPAbortOnTimeout float64 + TCPAbortOnLinger float64 + TCPAbortFailed float64 + TCPMemoryPressures float64 + TCPMemoryPressuresChrono float64 + TCPSACKDiscard float64 + TCPDSACKIgnoredOld float64 + TCPDSACKIgnoredNoUndo float64 + TCPSpuriousRTOs float64 + TCPMD5NotFound float64 + TCPMD5Unexpected float64 + TCPMD5Failure float64 + TCPSackShifted float64 + TCPSackMerged float64 + TCPSackShiftFallback float64 + TCPBacklogDrop float64 + PFMemallocDrop float64 + TCPMinTTLDrop float64 + TCPDeferAcceptDrop float64 + IPReversePathFilter float64 + TCPTimeWaitOverflow float64 + TCPReqQFullDoCookies float64 + TCPReqQFullDrop float64 + TCPRetransFail float64 + TCPRcvCoalesce float64 + TCPOFOQueue float64 + TCPOFODrop float64 + TCPOFOMerge float64 + TCPChallengeACK float64 + TCPSYNChallenge float64 + TCPFastOpenActive float64 + TCPFastOpenActiveFail float64 + TCPFastOpenPassive float64 + TCPFastOpenPassiveFail float64 + TCPFastOpenListenOverflow float64 + TCPFastOpenCookieReqd float64 + 
TCPFastOpenBlackhole float64 + TCPSpuriousRtxHostQueues float64 + BusyPollRxPackets float64 + TCPAutoCorking float64 + TCPFromZeroWindowAdv float64 + TCPToZeroWindowAdv float64 + TCPWantZeroWindowAdv float64 + TCPSynRetrans float64 + TCPOrigDataSent float64 + TCPHystartTrainDetect float64 + TCPHystartTrainCwnd float64 + TCPHystartDelayDetect float64 + TCPHystartDelayCwnd float64 + TCPACKSkippedSynRecv float64 + TCPACKSkippedPAWS float64 + TCPACKSkippedSeq float64 + TCPACKSkippedFinWait2 float64 + TCPACKSkippedTimeWait float64 + TCPACKSkippedChallenge float64 + TCPWinProbe float64 + TCPKeepAlive float64 + TCPMTUPFail float64 + TCPMTUPSuccess float64 + TCPWqueueTooBig float64 +} + +type IpExt struct { // nolint:revive + InNoRoutes float64 + InTruncatedPkts float64 + InMcastPkts float64 + OutMcastPkts float64 + InBcastPkts float64 + OutBcastPkts float64 + InOctets float64 + OutOctets float64 + InMcastOctets float64 + OutMcastOctets float64 + InBcastOctets float64 + OutBcastOctets float64 + InCsumErrors float64 + InNoECTPkts float64 + InECT1Pkts float64 + InECT0Pkts float64 + InCEPkts float64 + ReasmOverlaps float64 +} + +func (p Proc) Netstat() (ProcNetstat, error) { + filename := p.path("net/netstat") + data, err := util.ReadFileNoStat(filename) + if err != nil { + return ProcNetstat{PID: p.PID}, err + } + procNetstat, err := parseNetstat(bytes.NewReader(data), filename) + procNetstat.PID = p.PID + return procNetstat, err +} + +// parseNetstat parses the metrics from proc//net/netstat file +// and returns a ProcNetstat structure. +func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { + var ( + scanner = bufio.NewScanner(r) + procNetstat = ProcNetstat{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :. 
+ protocol := strings.TrimSuffix(nameParts[0], ":") + if len(nameParts) != len(valueParts) { + return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s", + fileName, protocol) + } + for i := 1; i < len(nameParts); i++ { + value, err := strconv.ParseFloat(valueParts[i], 64) + if err != nil { + return procNetstat, err + } + key := nameParts[i] + + switch protocol { + case "TcpExt": + switch key { + case "SyncookiesSent": + procNetstat.TcpExt.SyncookiesSent = value + case "SyncookiesRecv": + procNetstat.TcpExt.SyncookiesRecv = value + case "SyncookiesFailed": + procNetstat.TcpExt.SyncookiesFailed = value + case "EmbryonicRsts": + procNetstat.TcpExt.EmbryonicRsts = value + case "PruneCalled": + procNetstat.TcpExt.PruneCalled = value + case "RcvPruned": + procNetstat.TcpExt.RcvPruned = value + case "OfoPruned": + procNetstat.TcpExt.OfoPruned = value + case "OutOfWindowIcmps": + procNetstat.TcpExt.OutOfWindowIcmps = value + case "LockDroppedIcmps": + procNetstat.TcpExt.LockDroppedIcmps = value + case "ArpFilter": + procNetstat.TcpExt.ArpFilter = value + case "TW": + procNetstat.TcpExt.TW = value + case "TWRecycled": + procNetstat.TcpExt.TWRecycled = value + case "TWKilled": + procNetstat.TcpExt.TWKilled = value + case "PAWSActive": + procNetstat.TcpExt.PAWSActive = value + case "PAWSEstab": + procNetstat.TcpExt.PAWSEstab = value + case "DelayedACKs": + procNetstat.TcpExt.DelayedACKs = value + case "DelayedACKLocked": + procNetstat.TcpExt.DelayedACKLocked = value + case "DelayedACKLost": + procNetstat.TcpExt.DelayedACKLost = value + case "ListenOverflows": + procNetstat.TcpExt.ListenOverflows = value + case "ListenDrops": + procNetstat.TcpExt.ListenDrops = value + case "TCPHPHits": + procNetstat.TcpExt.TCPHPHits = value + case "TCPPureAcks": + procNetstat.TcpExt.TCPPureAcks = value + case "TCPHPAcks": + procNetstat.TcpExt.TCPHPAcks = value + case "TCPRenoRecovery": + procNetstat.TcpExt.TCPRenoRecovery = value + case "TCPSackRecovery": + procNetstat.TcpExt.TCPSackRecovery = value + case "TCPSACKReneging": + procNetstat.TcpExt.TCPSACKReneging = value + case "TCPSACKReorder": + procNetstat.TcpExt.TCPSACKReorder = value + case "TCPRenoReorder": + procNetstat.TcpExt.TCPRenoReorder = value + case "TCPTSReorder": + procNetstat.TcpExt.TCPTSReorder = value + case "TCPFullUndo": + procNetstat.TcpExt.TCPFullUndo = value + case "TCPPartialUndo": + procNetstat.TcpExt.TCPPartialUndo = value + case "TCPDSACKUndo": + procNetstat.TcpExt.TCPDSACKUndo = value + case "TCPLossUndo": + procNetstat.TcpExt.TCPLossUndo = value + case "TCPLostRetransmit": + procNetstat.TcpExt.TCPLostRetransmit = value + case "TCPRenoFailures": + procNetstat.TcpExt.TCPRenoFailures = value + case "TCPSackFailures": + procNetstat.TcpExt.TCPSackFailures = value + case "TCPLossFailures": + procNetstat.TcpExt.TCPLossFailures = value + case "TCPFastRetrans": + procNetstat.TcpExt.TCPFastRetrans = value + case "TCPSlowStartRetrans": + procNetstat.TcpExt.TCPSlowStartRetrans = value + case "TCPTimeouts": + procNetstat.TcpExt.TCPTimeouts = value + case "TCPLossProbes": + procNetstat.TcpExt.TCPLossProbes = value + case "TCPLossProbeRecovery": + procNetstat.TcpExt.TCPLossProbeRecovery = value + case "TCPRenoRecoveryFail": + procNetstat.TcpExt.TCPRenoRecoveryFail = value + case "TCPSackRecoveryFail": + procNetstat.TcpExt.TCPSackRecoveryFail = value + case "TCPRcvCollapsed": + procNetstat.TcpExt.TCPRcvCollapsed = value + case "TCPDSACKOldSent": + procNetstat.TcpExt.TCPDSACKOldSent = value + case "TCPDSACKOfoSent": + 
procNetstat.TcpExt.TCPDSACKOfoSent = value + case "TCPDSACKRecv": + procNetstat.TcpExt.TCPDSACKRecv = value + case "TCPDSACKOfoRecv": + procNetstat.TcpExt.TCPDSACKOfoRecv = value + case "TCPAbortOnData": + procNetstat.TcpExt.TCPAbortOnData = value + case "TCPAbortOnClose": + procNetstat.TcpExt.TCPAbortOnClose = value + case "TCPDeferAcceptDrop": + procNetstat.TcpExt.TCPDeferAcceptDrop = value + case "IPReversePathFilter": + procNetstat.TcpExt.IPReversePathFilter = value + case "TCPTimeWaitOverflow": + procNetstat.TcpExt.TCPTimeWaitOverflow = value + case "TCPReqQFullDoCookies": + procNetstat.TcpExt.TCPReqQFullDoCookies = value + case "TCPReqQFullDrop": + procNetstat.TcpExt.TCPReqQFullDrop = value + case "TCPRetransFail": + procNetstat.TcpExt.TCPRetransFail = value + case "TCPRcvCoalesce": + procNetstat.TcpExt.TCPRcvCoalesce = value + case "TCPOFOQueue": + procNetstat.TcpExt.TCPOFOQueue = value + case "TCPOFODrop": + procNetstat.TcpExt.TCPOFODrop = value + case "TCPOFOMerge": + procNetstat.TcpExt.TCPOFOMerge = value + case "TCPChallengeACK": + procNetstat.TcpExt.TCPChallengeACK = value + case "TCPSYNChallenge": + procNetstat.TcpExt.TCPSYNChallenge = value + case "TCPFastOpenActive": + procNetstat.TcpExt.TCPFastOpenActive = value + case "TCPFastOpenActiveFail": + procNetstat.TcpExt.TCPFastOpenActiveFail = value + case "TCPFastOpenPassive": + procNetstat.TcpExt.TCPFastOpenPassive = value + case "TCPFastOpenPassiveFail": + procNetstat.TcpExt.TCPFastOpenPassiveFail = value + case "TCPFastOpenListenOverflow": + procNetstat.TcpExt.TCPFastOpenListenOverflow = value + case "TCPFastOpenCookieReqd": + procNetstat.TcpExt.TCPFastOpenCookieReqd = value + case "TCPFastOpenBlackhole": + procNetstat.TcpExt.TCPFastOpenBlackhole = value + case "TCPSpuriousRtxHostQueues": + procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value + case "BusyPollRxPackets": + procNetstat.TcpExt.BusyPollRxPackets = value + case "TCPAutoCorking": + procNetstat.TcpExt.TCPAutoCorking = value + case "TCPFromZeroWindowAdv": + procNetstat.TcpExt.TCPFromZeroWindowAdv = value + case "TCPToZeroWindowAdv": + procNetstat.TcpExt.TCPToZeroWindowAdv = value + case "TCPWantZeroWindowAdv": + procNetstat.TcpExt.TCPWantZeroWindowAdv = value + case "TCPSynRetrans": + procNetstat.TcpExt.TCPSynRetrans = value + case "TCPOrigDataSent": + procNetstat.TcpExt.TCPOrigDataSent = value + case "TCPHystartTrainDetect": + procNetstat.TcpExt.TCPHystartTrainDetect = value + case "TCPHystartTrainCwnd": + procNetstat.TcpExt.TCPHystartTrainCwnd = value + case "TCPHystartDelayDetect": + procNetstat.TcpExt.TCPHystartDelayDetect = value + case "TCPHystartDelayCwnd": + procNetstat.TcpExt.TCPHystartDelayCwnd = value + case "TCPACKSkippedSynRecv": + procNetstat.TcpExt.TCPACKSkippedSynRecv = value + case "TCPACKSkippedPAWS": + procNetstat.TcpExt.TCPACKSkippedPAWS = value + case "TCPACKSkippedSeq": + procNetstat.TcpExt.TCPACKSkippedSeq = value + case "TCPACKSkippedFinWait2": + procNetstat.TcpExt.TCPACKSkippedFinWait2 = value + case "TCPACKSkippedTimeWait": + procNetstat.TcpExt.TCPACKSkippedTimeWait = value + case "TCPACKSkippedChallenge": + procNetstat.TcpExt.TCPACKSkippedChallenge = value + case "TCPWinProbe": + procNetstat.TcpExt.TCPWinProbe = value + case "TCPKeepAlive": + procNetstat.TcpExt.TCPKeepAlive = value + case "TCPMTUPFail": + procNetstat.TcpExt.TCPMTUPFail = value + case "TCPMTUPSuccess": + procNetstat.TcpExt.TCPMTUPSuccess = value + case "TCPWqueueTooBig": + procNetstat.TcpExt.TCPWqueueTooBig = value + } + case "IpExt": + switch key { + case "InNoRoutes": + 
procNetstat.IpExt.InNoRoutes = value + case "InTruncatedPkts": + procNetstat.IpExt.InTruncatedPkts = value + case "InMcastPkts": + procNetstat.IpExt.InMcastPkts = value + case "OutMcastPkts": + procNetstat.IpExt.OutMcastPkts = value + case "InBcastPkts": + procNetstat.IpExt.InBcastPkts = value + case "OutBcastPkts": + procNetstat.IpExt.OutBcastPkts = value + case "InOctets": + procNetstat.IpExt.InOctets = value + case "OutOctets": + procNetstat.IpExt.OutOctets = value + case "InMcastOctets": + procNetstat.IpExt.InMcastOctets = value + case "OutMcastOctets": + procNetstat.IpExt.OutMcastOctets = value + case "InBcastOctets": + procNetstat.IpExt.InBcastOctets = value + case "OutBcastOctets": + procNetstat.IpExt.OutBcastOctets = value + case "InCsumErrors": + procNetstat.IpExt.InCsumErrors = value + case "InNoECTPkts": + procNetstat.IpExt.InNoECTPkts = value + case "InECT1Pkts": + procNetstat.IpExt.InECT1Pkts = value + case "InECT0Pkts": + procNetstat.IpExt.InECT0Pkts = value + case "InCEPkts": + procNetstat.IpExt.InCEPkts = value + case "ReasmOverlaps": + procNetstat.IpExt.ReasmOverlaps = value + } + } + } + } + return procNetstat, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index dc6c14f0a4c1..a68fe15290a8 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -35,9 +35,10 @@ import ( const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" -// PSILine is a single line of values as returned by /proc/pressure/* -// The Avg entries are averages over n seconds, as a percentage -// The Total line is in microseconds +// PSILine is a single line of values as returned by `/proc/pressure/*`. +// +// The Avg entries are averages over n seconds, as a percentage. +// The Total line is in microseconds. type PSILine struct { Avg10 float64 Avg60 float64 @@ -46,8 +47,9 @@ type PSILine struct { } // PSIStats represent pressure stall information from /proc/pressure/* -// Some indicates the share of time in which at least some tasks are stalled -// Full indicates the share of time in which all non-idle tasks are stalled simultaneously +// +// "Some" indicates the share of time in which at least some tasks are stalled. +// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously. type PSIStats struct { Some *PSILine Full *PSILine @@ -65,7 +67,7 @@ func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { return parsePSIStats(resource, bytes.NewReader(data)) } -// parsePSIStats parses the specified file for pressure stall information +// parsePSIStats parses the specified file for pressure stall information. func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { psiStats := PSIStats{} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index a576a720a442..0e97d99575e7 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs @@ -28,30 +29,30 @@ import ( ) var ( - // match the header line before each mapped zone in /proc/pid/smaps + // match the header line before each mapped zone in `/proc/pid/smaps`. 
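For reference, the new proc_netstat.go collector above exposes the per-process /proc/<pid>/net/netstat counters through Proc.Netstat(). A minimal consumer sketch, assuming a standard /proc mount; the printed counters are chosen purely for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewDefaultFS uses the conventional /proc mount point.
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	// Self resolves the Proc entry for the current process.
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	// Netstat parses net/netstat into the TcpExt and IpExt counter structs.
	ns, err := p.Netstat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("TCPSynRetrans=%v ListenDrops=%v InOctets=%v\n",
		ns.TcpExt.TCPSynRetrans, ns.TcpExt.ListenDrops, ns.IpExt.InOctets)
}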
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) ) type ProcSMapsRollup struct { - // Amount of the mapping that is currently resident in RAM + // Amount of the mapping that is currently resident in RAM. Rss uint64 - // Process's proportional share of this mapping + // Process's proportional share of this mapping. Pss uint64 - // Size in bytes of clean shared pages + // Size in bytes of clean shared pages. SharedClean uint64 - // Size in bytes of dirty shared pages + // Size in bytes of dirty shared pages. SharedDirty uint64 - // Size in bytes of clean private pages + // Size in bytes of clean private pages. PrivateClean uint64 - // Size in bytes of dirty private pages + // Size in bytes of dirty private pages. PrivateDirty uint64 - // Amount of memory currently marked as referenced or accessed + // Amount of memory currently marked as referenced or accessed. Referenced uint64 - // Amount of memory that does not belong to any file + // Amount of memory that does not belong to any file. Anonymous uint64 - // Amount would-be-anonymous memory currently on swap + // Amount would-be-anonymous memory currently on swap. Swap uint64 - // Process's proportional memory on swap + // Process's proportional memory on swap. SwapPss uint64 } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go new file mode 100644 index 000000000000..ae191896cbd7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -0,0 +1,353 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp models the content of /proc//net/snmp. +type ProcSnmp struct { + // The process ID. 
+ PID int + Ip + Icmp + IcmpMsg + Tcp + Udp + UdpLite +} + +type Ip struct { // nolint:revive + Forwarding float64 + DefaultTTL float64 + InReceives float64 + InHdrErrors float64 + InAddrErrors float64 + ForwDatagrams float64 + InUnknownProtos float64 + InDiscards float64 + InDelivers float64 + OutRequests float64 + OutDiscards float64 + OutNoRoutes float64 + ReasmTimeout float64 + ReasmReqds float64 + ReasmOKs float64 + ReasmFails float64 + FragOKs float64 + FragFails float64 + FragCreates float64 +} + +type Icmp struct { + InMsgs float64 + InErrors float64 + InCsumErrors float64 + InDestUnreachs float64 + InTimeExcds float64 + InParmProbs float64 + InSrcQuenchs float64 + InRedirects float64 + InEchos float64 + InEchoReps float64 + InTimestamps float64 + InTimestampReps float64 + InAddrMasks float64 + InAddrMaskReps float64 + OutMsgs float64 + OutErrors float64 + OutDestUnreachs float64 + OutTimeExcds float64 + OutParmProbs float64 + OutSrcQuenchs float64 + OutRedirects float64 + OutEchos float64 + OutEchoReps float64 + OutTimestamps float64 + OutTimestampReps float64 + OutAddrMasks float64 + OutAddrMaskReps float64 +} + +type IcmpMsg struct { + InType3 float64 + OutType3 float64 +} + +type Tcp struct { // nolint:revive + RtoAlgorithm float64 + RtoMin float64 + RtoMax float64 + MaxConn float64 + ActiveOpens float64 + PassiveOpens float64 + AttemptFails float64 + EstabResets float64 + CurrEstab float64 + InSegs float64 + OutSegs float64 + RetransSegs float64 + InErrs float64 + OutRsts float64 + InCsumErrors float64 +} + +type Udp struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 + IgnoredMulti float64 +} + +type UdpLite struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 + IgnoredMulti float64 +} + +func (p Proc) Snmp() (ProcSnmp, error) { + filename := p.path("net/snmp") + data, err := util.ReadFileNoStat(filename) + if err != nil { + return ProcSnmp{PID: p.PID}, err + } + procSnmp, err := parseSnmp(bytes.NewReader(data), filename) + procSnmp.PID = p.PID + return procSnmp, err +} + +// parseSnmp parses the metrics from proc//net/snmp file +// and returns a map contains those metrics (e.g. {"Ip": {"Forwarding": 2}}). +func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp = ProcSnmp{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :. 
+ protocol := strings.TrimSuffix(nameParts[0], ":") + if len(nameParts) != len(valueParts) { + return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s", + fileName, protocol) + } + for i := 1; i < len(nameParts); i++ { + value, err := strconv.ParseFloat(valueParts[i], 64) + if err != nil { + return procSnmp, err + } + key := nameParts[i] + + switch protocol { + case "Ip": + switch key { + case "Forwarding": + procSnmp.Ip.Forwarding = value + case "DefaultTTL": + procSnmp.Ip.DefaultTTL = value + case "InReceives": + procSnmp.Ip.InReceives = value + case "InHdrErrors": + procSnmp.Ip.InHdrErrors = value + case "InAddrErrors": + procSnmp.Ip.InAddrErrors = value + case "ForwDatagrams": + procSnmp.Ip.ForwDatagrams = value + case "InUnknownProtos": + procSnmp.Ip.InUnknownProtos = value + case "InDiscards": + procSnmp.Ip.InDiscards = value + case "InDelivers": + procSnmp.Ip.InDelivers = value + case "OutRequests": + procSnmp.Ip.OutRequests = value + case "OutDiscards": + procSnmp.Ip.OutDiscards = value + case "OutNoRoutes": + procSnmp.Ip.OutNoRoutes = value + case "ReasmTimeout": + procSnmp.Ip.ReasmTimeout = value + case "ReasmReqds": + procSnmp.Ip.ReasmReqds = value + case "ReasmOKs": + procSnmp.Ip.ReasmOKs = value + case "ReasmFails": + procSnmp.Ip.ReasmFails = value + case "FragOKs": + procSnmp.Ip.FragOKs = value + case "FragFails": + procSnmp.Ip.FragFails = value + case "FragCreates": + procSnmp.Ip.FragCreates = value + } + case "Icmp": + switch key { + case "InMsgs": + procSnmp.Icmp.InMsgs = value + case "InErrors": + procSnmp.Icmp.InErrors = value + case "InCsumErrors": + procSnmp.Icmp.InCsumErrors = value + case "InDestUnreachs": + procSnmp.Icmp.InDestUnreachs = value + case "InTimeExcds": + procSnmp.Icmp.InTimeExcds = value + case "InParmProbs": + procSnmp.Icmp.InParmProbs = value + case "InSrcQuenchs": + procSnmp.Icmp.InSrcQuenchs = value + case "InRedirects": + procSnmp.Icmp.InRedirects = value + case "InEchos": + procSnmp.Icmp.InEchos = value + case "InEchoReps": + procSnmp.Icmp.InEchoReps = value + case "InTimestamps": + procSnmp.Icmp.InTimestamps = value + case "InTimestampReps": + procSnmp.Icmp.InTimestampReps = value + case "InAddrMasks": + procSnmp.Icmp.InAddrMasks = value + case "InAddrMaskReps": + procSnmp.Icmp.InAddrMaskReps = value + case "OutMsgs": + procSnmp.Icmp.OutMsgs = value + case "OutErrors": + procSnmp.Icmp.OutErrors = value + case "OutDestUnreachs": + procSnmp.Icmp.OutDestUnreachs = value + case "OutTimeExcds": + procSnmp.Icmp.OutTimeExcds = value + case "OutParmProbs": + procSnmp.Icmp.OutParmProbs = value + case "OutSrcQuenchs": + procSnmp.Icmp.OutSrcQuenchs = value + case "OutRedirects": + procSnmp.Icmp.OutRedirects = value + case "OutEchos": + procSnmp.Icmp.OutEchos = value + case "OutEchoReps": + procSnmp.Icmp.OutEchoReps = value + case "OutTimestamps": + procSnmp.Icmp.OutTimestamps = value + case "OutTimestampReps": + procSnmp.Icmp.OutTimestampReps = value + case "OutAddrMasks": + procSnmp.Icmp.OutAddrMasks = value + case "OutAddrMaskReps": + procSnmp.Icmp.OutAddrMaskReps = value + } + case "IcmpMsg": + switch key { + case "InType3": + procSnmp.IcmpMsg.InType3 = value + case "OutType3": + procSnmp.IcmpMsg.OutType3 = value + } + case "Tcp": + switch key { + case "RtoAlgorithm": + procSnmp.Tcp.RtoAlgorithm = value + case "RtoMin": + procSnmp.Tcp.RtoMin = value + case "RtoMax": + procSnmp.Tcp.RtoMax = value + case "MaxConn": + procSnmp.Tcp.MaxConn = value + case "ActiveOpens": + procSnmp.Tcp.ActiveOpens = value + case "PassiveOpens": + 
procSnmp.Tcp.PassiveOpens = value + case "AttemptFails": + procSnmp.Tcp.AttemptFails = value + case "EstabResets": + procSnmp.Tcp.EstabResets = value + case "CurrEstab": + procSnmp.Tcp.CurrEstab = value + case "InSegs": + procSnmp.Tcp.InSegs = value + case "OutSegs": + procSnmp.Tcp.OutSegs = value + case "RetransSegs": + procSnmp.Tcp.RetransSegs = value + case "InErrs": + procSnmp.Tcp.InErrs = value + case "OutRsts": + procSnmp.Tcp.OutRsts = value + case "InCsumErrors": + procSnmp.Tcp.InCsumErrors = value + } + case "Udp": + switch key { + case "InDatagrams": + procSnmp.Udp.InDatagrams = value + case "NoPorts": + procSnmp.Udp.NoPorts = value + case "InErrors": + procSnmp.Udp.InErrors = value + case "OutDatagrams": + procSnmp.Udp.OutDatagrams = value + case "RcvbufErrors": + procSnmp.Udp.RcvbufErrors = value + case "SndbufErrors": + procSnmp.Udp.SndbufErrors = value + case "InCsumErrors": + procSnmp.Udp.InCsumErrors = value + case "IgnoredMulti": + procSnmp.Udp.IgnoredMulti = value + } + case "UdpLite": + switch key { + case "InDatagrams": + procSnmp.UdpLite.InDatagrams = value + case "NoPorts": + procSnmp.UdpLite.NoPorts = value + case "InErrors": + procSnmp.UdpLite.InErrors = value + case "OutDatagrams": + procSnmp.UdpLite.OutDatagrams = value + case "RcvbufErrors": + procSnmp.UdpLite.RcvbufErrors = value + case "SndbufErrors": + procSnmp.UdpLite.SndbufErrors = value + case "InCsumErrors": + procSnmp.UdpLite.InCsumErrors = value + case "IgnoredMulti": + procSnmp.UdpLite.IgnoredMulti = value + } + } + } + } + return procSnmp, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go new file mode 100644 index 000000000000..f611992d52ca --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -0,0 +1,381 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "io" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp6 models the content of /proc//net/snmp6. +type ProcSnmp6 struct { + // The process ID. 
+ PID int + Ip6 + Icmp6 + Udp6 + UdpLite6 +} + +type Ip6 struct { // nolint:revive + InReceives float64 + InHdrErrors float64 + InTooBigErrors float64 + InNoRoutes float64 + InAddrErrors float64 + InUnknownProtos float64 + InTruncatedPkts float64 + InDiscards float64 + InDelivers float64 + OutForwDatagrams float64 + OutRequests float64 + OutDiscards float64 + OutNoRoutes float64 + ReasmTimeout float64 + ReasmReqds float64 + ReasmOKs float64 + ReasmFails float64 + FragOKs float64 + FragFails float64 + FragCreates float64 + InMcastPkts float64 + OutMcastPkts float64 + InOctets float64 + OutOctets float64 + InMcastOctets float64 + OutMcastOctets float64 + InBcastOctets float64 + OutBcastOctets float64 + InNoECTPkts float64 + InECT1Pkts float64 + InECT0Pkts float64 + InCEPkts float64 +} + +type Icmp6 struct { + InMsgs float64 + InErrors float64 + OutMsgs float64 + OutErrors float64 + InCsumErrors float64 + InDestUnreachs float64 + InPktTooBigs float64 + InTimeExcds float64 + InParmProblems float64 + InEchos float64 + InEchoReplies float64 + InGroupMembQueries float64 + InGroupMembResponses float64 + InGroupMembReductions float64 + InRouterSolicits float64 + InRouterAdvertisements float64 + InNeighborSolicits float64 + InNeighborAdvertisements float64 + InRedirects float64 + InMLDv2Reports float64 + OutDestUnreachs float64 + OutPktTooBigs float64 + OutTimeExcds float64 + OutParmProblems float64 + OutEchos float64 + OutEchoReplies float64 + OutGroupMembQueries float64 + OutGroupMembResponses float64 + OutGroupMembReductions float64 + OutRouterSolicits float64 + OutRouterAdvertisements float64 + OutNeighborSolicits float64 + OutNeighborAdvertisements float64 + OutRedirects float64 + OutMLDv2Reports float64 + InType1 float64 + InType134 float64 + InType135 float64 + InType136 float64 + InType143 float64 + OutType133 float64 + OutType135 float64 + OutType136 float64 + OutType143 float64 +} + +type Udp6 struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 + IgnoredMulti float64 +} + +type UdpLite6 struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 +} + +func (p Proc) Snmp6() (ProcSnmp6, error) { + filename := p.path("net/snmp6") + data, err := util.ReadFileNoStat(filename) + if err != nil { + // On systems with IPv6 disabled, this file won't exist. + // Do nothing. + if errors.Is(err, os.ErrNotExist) { + return ProcSnmp6{PID: p.PID}, nil + } + + return ProcSnmp6{PID: p.PID}, err + } + + procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data)) + procSnmp6.PID = p.PID + return procSnmp6, err +} + +// parseSnmp6 parses the metrics from proc//net/snmp6 file +// and returns a map contains those metrics. 
+func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp6 = ProcSnmp6{} + ) + + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + // Expect to have "6" in metric name, skip line otherwise + if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 { + protocol := stat[0][:sixIndex+1] + key := stat[0][sixIndex+1:] + value, err := strconv.ParseFloat(stat[1], 64) + if err != nil { + return procSnmp6, err + } + + switch protocol { + case "Ip6": + switch key { + case "InReceives": + procSnmp6.Ip6.InReceives = value + case "InHdrErrors": + procSnmp6.Ip6.InHdrErrors = value + case "InTooBigErrors": + procSnmp6.Ip6.InTooBigErrors = value + case "InNoRoutes": + procSnmp6.Ip6.InNoRoutes = value + case "InAddrErrors": + procSnmp6.Ip6.InAddrErrors = value + case "InUnknownProtos": + procSnmp6.Ip6.InUnknownProtos = value + case "InTruncatedPkts": + procSnmp6.Ip6.InTruncatedPkts = value + case "InDiscards": + procSnmp6.Ip6.InDiscards = value + case "InDelivers": + procSnmp6.Ip6.InDelivers = value + case "OutForwDatagrams": + procSnmp6.Ip6.OutForwDatagrams = value + case "OutRequests": + procSnmp6.Ip6.OutRequests = value + case "OutDiscards": + procSnmp6.Ip6.OutDiscards = value + case "OutNoRoutes": + procSnmp6.Ip6.OutNoRoutes = value + case "ReasmTimeout": + procSnmp6.Ip6.ReasmTimeout = value + case "ReasmReqds": + procSnmp6.Ip6.ReasmReqds = value + case "ReasmOKs": + procSnmp6.Ip6.ReasmOKs = value + case "ReasmFails": + procSnmp6.Ip6.ReasmFails = value + case "FragOKs": + procSnmp6.Ip6.FragOKs = value + case "FragFails": + procSnmp6.Ip6.FragFails = value + case "FragCreates": + procSnmp6.Ip6.FragCreates = value + case "InMcastPkts": + procSnmp6.Ip6.InMcastPkts = value + case "OutMcastPkts": + procSnmp6.Ip6.OutMcastPkts = value + case "InOctets": + procSnmp6.Ip6.InOctets = value + case "OutOctets": + procSnmp6.Ip6.OutOctets = value + case "InMcastOctets": + procSnmp6.Ip6.InMcastOctets = value + case "OutMcastOctets": + procSnmp6.Ip6.OutMcastOctets = value + case "InBcastOctets": + procSnmp6.Ip6.InBcastOctets = value + case "OutBcastOctets": + procSnmp6.Ip6.OutBcastOctets = value + case "InNoECTPkts": + procSnmp6.Ip6.InNoECTPkts = value + case "InECT1Pkts": + procSnmp6.Ip6.InECT1Pkts = value + case "InECT0Pkts": + procSnmp6.Ip6.InECT0Pkts = value + case "InCEPkts": + procSnmp6.Ip6.InCEPkts = value + + } + case "Icmp6": + switch key { + case "InMsgs": + procSnmp6.Icmp6.InMsgs = value + case "InErrors": + procSnmp6.Icmp6.InErrors = value + case "OutMsgs": + procSnmp6.Icmp6.OutMsgs = value + case "OutErrors": + procSnmp6.Icmp6.OutErrors = value + case "InCsumErrors": + procSnmp6.Icmp6.InCsumErrors = value + case "InDestUnreachs": + procSnmp6.Icmp6.InDestUnreachs = value + case "InPktTooBigs": + procSnmp6.Icmp6.InPktTooBigs = value + case "InTimeExcds": + procSnmp6.Icmp6.InTimeExcds = value + case "InParmProblems": + procSnmp6.Icmp6.InParmProblems = value + case "InEchos": + procSnmp6.Icmp6.InEchos = value + case "InEchoReplies": + procSnmp6.Icmp6.InEchoReplies = value + case "InGroupMembQueries": + procSnmp6.Icmp6.InGroupMembQueries = value + case "InGroupMembResponses": + procSnmp6.Icmp6.InGroupMembResponses = value + case "InGroupMembReductions": + procSnmp6.Icmp6.InGroupMembReductions = value + case "InRouterSolicits": + procSnmp6.Icmp6.InRouterSolicits = value + case "InRouterAdvertisements": + procSnmp6.Icmp6.InRouterAdvertisements = value + case "InNeighborSolicits": + 
procSnmp6.Icmp6.InNeighborSolicits = value + case "InNeighborAdvertisements": + procSnmp6.Icmp6.InNeighborAdvertisements = value + case "InRedirects": + procSnmp6.Icmp6.InRedirects = value + case "InMLDv2Reports": + procSnmp6.Icmp6.InMLDv2Reports = value + case "OutDestUnreachs": + procSnmp6.Icmp6.OutDestUnreachs = value + case "OutPktTooBigs": + procSnmp6.Icmp6.OutPktTooBigs = value + case "OutTimeExcds": + procSnmp6.Icmp6.OutTimeExcds = value + case "OutParmProblems": + procSnmp6.Icmp6.OutParmProblems = value + case "OutEchos": + procSnmp6.Icmp6.OutEchos = value + case "OutEchoReplies": + procSnmp6.Icmp6.OutEchoReplies = value + case "OutGroupMembQueries": + procSnmp6.Icmp6.OutGroupMembQueries = value + case "OutGroupMembResponses": + procSnmp6.Icmp6.OutGroupMembResponses = value + case "OutGroupMembReductions": + procSnmp6.Icmp6.OutGroupMembReductions = value + case "OutRouterSolicits": + procSnmp6.Icmp6.OutRouterSolicits = value + case "OutRouterAdvertisements": + procSnmp6.Icmp6.OutRouterAdvertisements = value + case "OutNeighborSolicits": + procSnmp6.Icmp6.OutNeighborSolicits = value + case "OutNeighborAdvertisements": + procSnmp6.Icmp6.OutNeighborAdvertisements = value + case "OutRedirects": + procSnmp6.Icmp6.OutRedirects = value + case "OutMLDv2Reports": + procSnmp6.Icmp6.OutMLDv2Reports = value + case "InType1": + procSnmp6.Icmp6.InType1 = value + case "InType134": + procSnmp6.Icmp6.InType134 = value + case "InType135": + procSnmp6.Icmp6.InType135 = value + case "InType136": + procSnmp6.Icmp6.InType136 = value + case "InType143": + procSnmp6.Icmp6.InType143 = value + case "OutType133": + procSnmp6.Icmp6.OutType133 = value + case "OutType135": + procSnmp6.Icmp6.OutType135 = value + case "OutType136": + procSnmp6.Icmp6.OutType136 = value + case "OutType143": + procSnmp6.Icmp6.OutType143 = value + } + case "Udp6": + switch key { + case "InDatagrams": + procSnmp6.Udp6.InDatagrams = value + case "NoPorts": + procSnmp6.Udp6.NoPorts = value + case "InErrors": + procSnmp6.Udp6.InErrors = value + case "OutDatagrams": + procSnmp6.Udp6.OutDatagrams = value + case "RcvbufErrors": + procSnmp6.Udp6.RcvbufErrors = value + case "SndbufErrors": + procSnmp6.Udp6.SndbufErrors = value + case "InCsumErrors": + procSnmp6.Udp6.InCsumErrors = value + case "IgnoredMulti": + procSnmp6.Udp6.IgnoredMulti = value + } + case "UdpLite6": + switch key { + case "InDatagrams": + procSnmp6.UdpLite6.InDatagrams = value + case "NoPorts": + procSnmp6.UdpLite6.NoPorts = value + case "InErrors": + procSnmp6.UdpLite6.InErrors = value + case "OutDatagrams": + procSnmp6.UdpLite6.OutDatagrams = value + case "RcvbufErrors": + procSnmp6.UdpLite6.RcvbufErrors = value + case "SndbufErrors": + procSnmp6.UdpLite6.SndbufErrors = value + case "InCsumErrors": + procSnmp6.UdpLite6.InCsumErrors = value + } + } + } + } + return procSnmp6, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 8c7b6e80a31f..06c556ef9623 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -81,10 +81,10 @@ type ProcStat struct { STime uint // Amount of time that this process's waited-for children have been // scheduled in user mode, measured in clock ticks. - CUTime uint + CUTime int // Amount of time that this process's waited-for children have been // scheduled in kernel mode, measured in clock ticks. 
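The companion proc_snmp.go and proc_snmp6.go files above do the same for /proc/<pid>/net/snmp and /proc/<pid>/net/snmp6. A rough usage sketch (counter selection is illustrative; as the error handling above shows, Snmp6 returns zero values when IPv6 is disabled):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	snmp, err := p.Snmp() // parses /proc/self/net/snmp
	if err != nil {
		log.Fatal(err)
	}
	snmp6, err := p.Snmp6() // parses /proc/self/net/snmp6
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Tcp.CurrEstab=%v Ip.InReceives=%v Ip6.InReceives=%v\n",
		snmp.Tcp.CurrEstab, snmp.Ip.InReceives, snmp6.Ip6.InReceives)
}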
- CSTime uint + CSTime int // For processes running a real-time scheduling policy, this is the negated // scheduling priority, minus one. Priority int @@ -115,7 +115,7 @@ type ProcStat struct { // NewStat returns the current status information of the process. // -// Deprecated: use p.Stat() instead +// Deprecated: Use p.Stat() instead. func (p Proc) NewStat() (ProcStat, error) { return p.Stat() } @@ -141,6 +141,11 @@ func (p Proc) Stat() (ProcStat, error) { } s.Comm = string(data[l+1 : r]) + + // Check the following resources for the details about the particular stat + // fields and their data types: + // * https://man7.org/linux/man-pages/man5/proc.5.html + // * https://man7.org/linux/man-pages/man3/scanf.3.html _, err = fmt.Fscan( bytes.NewBuffer(data[r+2:]), &s.State, diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 6edd8333b334..594022ded48a 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -33,37 +33,37 @@ type ProcStatus struct { TGID int // Peak virtual memory size. - VmPeak uint64 // nolint:golint + VmPeak uint64 // nolint:revive // Virtual memory size. - VmSize uint64 // nolint:golint + VmSize uint64 // nolint:revive // Locked memory size. - VmLck uint64 // nolint:golint + VmLck uint64 // nolint:revive // Pinned memory size. - VmPin uint64 // nolint:golint + VmPin uint64 // nolint:revive // Peak resident set size. - VmHWM uint64 // nolint:golint + VmHWM uint64 // nolint:revive // Resident set size (sum of RssAnnon RssFile and RssShmem). - VmRSS uint64 // nolint:golint + VmRSS uint64 // nolint:revive // Size of resident anonymous memory. - RssAnon uint64 // nolint:golint + RssAnon uint64 // nolint:revive // Size of resident file mappings. - RssFile uint64 // nolint:golint + RssFile uint64 // nolint:revive // Size of resident shared memory. - RssShmem uint64 // nolint:golint + RssShmem uint64 // nolint:revive // Size of data segments. - VmData uint64 // nolint:golint + VmData uint64 // nolint:revive // Size of stack segments. - VmStk uint64 // nolint:golint + VmStk uint64 // nolint:revive // Size of text segments. - VmExe uint64 // nolint:golint + VmExe uint64 // nolint:revive // Shared library code size. - VmLib uint64 // nolint:golint + VmLib uint64 // nolint:revive // Page table entries size. - VmPTE uint64 // nolint:golint + VmPTE uint64 // nolint:revive // Size of second-level page tables. - VmPMD uint64 // nolint:golint + VmPMD uint64 // nolint:revive // Swapped-out virtual memory size by anonymous private. - VmSwap uint64 // nolint:golint + VmSwap uint64 // nolint:revive // Size of hugetlb memory portions HugetlbPages uint64 diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go new file mode 100644 index 000000000000..d46533ebf419 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -0,0 +1,51 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +func sysctlToPath(sysctl string) string { + return strings.Replace(sysctl, ".", "/", -1) +} + +func (fs FS) SysctlStrings(sysctl string) ([]string, error) { + value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl))) + if err != nil { + return nil, err + } + return strings.Fields(value), nil + +} + +func (fs FS) SysctlInts(sysctl string) ([]int, error) { + fields, err := fs.SysctlStrings(sysctl) + if err != nil { + return nil, err + } + + values := make([]int, len(fields)) + for i, f := range fields { + vp := util.NewValueParser(f) + values[i] = vp.Int() + if err := vp.Err(); err != nil { + return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err) + } + } + return values, nil +} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go index 28228164efba..5f7f32dc83c6 100644 --- a/vendor/github.com/prometheus/procfs/schedstat.go +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -40,7 +40,7 @@ type Schedstat struct { CPUs []*SchedstatCPU } -// SchedstatCPU contains the values from one "cpu" line +// SchedstatCPU contains the values from one "cpu" line. type SchedstatCPU struct { CPUNum string @@ -49,14 +49,14 @@ type SchedstatCPU struct { RunTimeslices uint64 } -// ProcSchedstat contains the values from /proc//schedstat +// ProcSchedstat contains the values from `/proc//schedstat`. type ProcSchedstat struct { RunningNanoseconds uint64 WaitingNanoseconds uint64 RunTimeslices uint64 } -// Schedstat reads data from /proc/schedstat +// Schedstat reads data from `/proc/schedstat`. func (fs FS) Schedstat() (*Schedstat, error) { file, err := os.Open(fs.proc.Path("schedstat")) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go index 7896fd724283..bc9aaf5c2889 100644 --- a/vendor/github.com/prometheus/procfs/slab.go +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -137,7 +137,7 @@ func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { return s, nil } -// SlabInfo reads data from /proc/slabinfo +// SlabInfo reads data from `/proc/slabinfo`. func (fs FS) SlabInfo() (SlabInfo, error) { // TODO: Consider passing options to allow for parsing different // slabinfo versions. However, slabinfo 2.1 has been stable since diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go new file mode 100644 index 000000000000..559129cbca3c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
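The proc_sys.go helpers above translate dotted sysctl names into /proc/sys paths and parse the values. A short sketch of how they might be called; the sysctl names are examples only:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	// "net.ipv4.ip_local_port_range" maps to /proc/sys/net/ipv4/ip_local_port_range
	// and contains two whitespace-separated integers.
	ports, err := fs.SysctlInts("net.ipv4.ip_local_port_range")
	if err != nil {
		log.Fatal(err)
	}
	// String-valued settings go through SysctlStrings instead.
	host, err := fs.SysctlStrings("kernel.hostname")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ports, host)
}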
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Softirqs represents the softirq statistics. +type Softirqs struct { + Hi []uint64 + Timer []uint64 + NetTx []uint64 + NetRx []uint64 + Block []uint64 + IRQPoll []uint64 + Tasklet []uint64 + Sched []uint64 + HRTimer []uint64 + RCU []uint64 +} + +func (fs FS) Softirqs() (Softirqs, error) { + fileName := fs.proc.Path("softirqs") + data, err := util.ReadFileNoStat(fileName) + if err != nil { + return Softirqs{}, err + } + + reader := bytes.NewReader(data) + + return parseSoftirqs(reader) +} + +func parseSoftirqs(r io.Reader) (Softirqs, error) { + var ( + softirqs = Softirqs{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return Softirqs{}, fmt.Errorf("softirqs empty") + } + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + var err error + + // require at least one cpu + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "HI:": + perCPU := parts[1:] + softirqs.Hi = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err) + } + } + case parts[0] == "TIMER:": + perCPU := parts[1:] + softirqs.Timer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err) + } + } + case parts[0] == "NET_TX:": + perCPU := parts[1:] + softirqs.NetTx = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err) + } + } + case parts[0] == "NET_RX:": + perCPU := parts[1:] + softirqs.NetRx = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err) + } + } + case parts[0] == "BLOCK:": + perCPU := parts[1:] + softirqs.Block = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err) + } + } + case parts[0] == "IRQ_POLL:": + perCPU := parts[1:] + softirqs.IRQPoll = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err) + } + } + case parts[0] == "TASKLET:": + perCPU := parts[1:] + softirqs.Tasklet = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err) + } + } + case parts[0] == "SCHED:": + perCPU := parts[1:] + softirqs.Sched = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err) + } + } + case parts[0] == "HRTIMER:": + perCPU := parts[1:] + softirqs.HRTimer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if 
softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err) + } + } + case parts[0] == "RCU:": + perCPU := parts[1:] + softirqs.RCU = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err) + } + } + } + } + + if err := scanner.Err(); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err) + } + + return softirqs, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 6d8727541e40..33f97caa08da 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -41,7 +41,7 @@ type CPUStat struct { // SoftIRQStat represent the softirq statistics as exported in the procfs stat file. // A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html -// It is possible to get per-cpu stats by reading /proc/softirqs +// It is possible to get per-cpu stats by reading `/proc/softirqs`. type SoftIRQStat struct { Hi uint64 Timer uint64 @@ -145,7 +145,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { // NewStat returns information about current cpu/process statistics. // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt // -// Deprecated: use fs.Stat() instead +// Deprecated: Use fs.Stat() instead. func NewStat() (Stat, error) { fs, err := NewFS(fs.DefaultProcMountPoint) if err != nil { @@ -155,15 +155,15 @@ func NewStat() (Stat, error) { } // NewStat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt // -// Deprecated: use fs.Stat() instead +// Deprecated: Use fs.Stat() instead. func (fs FS) NewStat() (Stat, error) { return fs.Stat() } // Stat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt func (fs FS) Stat() (Stat, error) { fileName := fs.proc.Path("stat") data, err := util.ReadFileNoStat(fileName) diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index cb13891414b1..20ceb77e2df7 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -11,13 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs import ( "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -29,7 +29,7 @@ import ( // https://www.kernel.org/doc/Documentation/sysctl/vm.txt // Each setting is exposed as a single file. // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array -// and numa_zonelist_order (deprecated) which is a string +// and numa_zonelist_order (deprecated) which is a string. 
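FS.Softirqs(), added above, returns one counter per CPU for each softirq class. A small sketch that totals the NET_RX counters across CPUs (the aggregation is purely illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	s, err := fs.Softirqs()
	if err != nil {
		log.Fatal(err)
	}
	// Each slice holds one value per CPU, in CPU order.
	var netRx uint64
	for _, c := range s.NetRx {
		netRx += c
	}
	fmt.Printf("NET_RX softirqs across %d CPUs: %d\n", len(s.NetRx), netRx)
}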
type VM struct { AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes BlockDump *int64 // /proc/sys/vm/block_dump @@ -87,7 +87,7 @@ func (fs FS) VM() (*VM, error) { return nil, fmt.Errorf("%s is not a directory", path) } - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index 209e2ac98798..c745a4c04ff1 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs @@ -18,7 +19,7 @@ package procfs import ( "bytes" "fmt" - "io/ioutil" + "os" "regexp" "strings" @@ -72,7 +73,7 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) // structs containing the relevant info. More information available here: // https://www.kernel.org/doc/Documentation/sysctl/vm.txt func (fs FS) Zoneinfo() ([]Zoneinfo, error) { - data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) + data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) } diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index 5152b6aa406f..b042c896f25b 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -1,4 +1,4 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. @@ -341,7 +341,7 @@ import ( log "github.com/sirupsen/logrus" ) -init() { +func init() { // do something here to set environment depending on an environment variable // or command-line flag if Environment == "production" { diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go index 4545dec07d8b..c7787f77cbfa 100644 --- a/vendor/github.com/sirupsen/logrus/buffer_pool.go +++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -26,15 +26,6 @@ func (p *defaultPool) Get() *bytes.Buffer { return p.pool.Get().(*bytes.Buffer) } -func getBuffer() *bytes.Buffer { - return bufferPool.Get() -} - -func putBuffer(buf *bytes.Buffer) { - buf.Reset() - bufferPool.Put(buf) -} - // SetBufferPool allows to replace the default logrus buffer pool // to better meets the specific needs of an application. 
func SetBufferPool(bp BufferPool) { diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 07a1e5fa7249..71cdbbc35d21 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -232,6 +232,7 @@ func (entry *Entry) log(level Level, msg string) { newEntry.Logger.mu.Lock() reportCaller := newEntry.Logger.ReportCaller + bufPool := newEntry.getBufferPool() newEntry.Logger.mu.Unlock() if reportCaller { @@ -239,11 +240,11 @@ func (entry *Entry) log(level Level, msg string) { } newEntry.fireHooks() - - buffer = getBuffer() + buffer = bufPool.Get() defer func() { newEntry.Buffer = nil - putBuffer(buffer) + buffer.Reset() + bufPool.Put(buffer) }() buffer.Reset() newEntry.Buffer = buffer @@ -260,6 +261,13 @@ func (entry *Entry) log(level Level, msg string) { } } +func (entry *Entry) getBufferPool() (pool BufferPool) { + if entry.Logger.BufferPool != nil { + return entry.Logger.BufferPool + } + return bufferPool +} + func (entry *Entry) fireHooks() { var tmpHooks LevelHooks entry.Logger.mu.Lock() @@ -276,18 +284,21 @@ func (entry *Entry) fireHooks() { } func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) return } - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() if _, err := entry.Logger.Out.Write(serialized); err != nil { fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } } +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Entry.Panic or Entry.Fatal should be used instead. func (entry *Entry) Log(level Level, args ...interface{}) { if entry.Logger.IsLevelEnabled(level) { entry.log(level, fmt.Sprint(args...)) diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index 337704457a28..5ff0aef6d3f1 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -44,6 +44,9 @@ type Logger struct { entryPool sync.Pool // Function to exit the application, defaults to `os.Exit()` ExitFunc exitFunc + // The buffer pool used to format the log. If it is nil, the default global + // buffer pool will be used. + BufferPool BufferPool } type exitFunc func(int) @@ -192,6 +195,9 @@ func (logger *Logger) Panicf(format string, args ...interface{}) { logger.Logf(PanicLevel, format, args...) } +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Logger.Panic or Logger.Fatal should be used instead. func (logger *Logger) Log(level Level, args ...interface{}) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() @@ -402,3 +408,10 @@ func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { logger.mu.Unlock() return oldHooks } + +// SetBufferPool sets the logger buffer pool. 
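The logrus changes above retire the package-private getBuffer/putBuffer helpers in favour of a per-Logger BufferPool that can be swapped via SetBufferPool (package level or per Logger). A hedged sketch of plugging in a sync.Pool-backed pool; byteBufferPool and its constructor are hypothetical names, only BufferPool and SetBufferPool come from the library:

package main

import (
	"bytes"
	"sync"

	"github.com/sirupsen/logrus"
)

// byteBufferPool is a hypothetical BufferPool implementation backed by sync.Pool.
type byteBufferPool struct {
	pool sync.Pool
}

func (p *byteBufferPool) Get() *bytes.Buffer  { return p.pool.Get().(*bytes.Buffer) }
func (p *byteBufferPool) Put(b *bytes.Buffer) { p.pool.Put(b) } // logrus resets the buffer before returning it

func newByteBufferPool() *byteBufferPool {
	p := &byteBufferPool{}
	p.pool.New = func() interface{} { return new(bytes.Buffer) }
	return p
}

func main() {
	logger := logrus.New()
	logger.SetBufferPool(newByteBufferPool())
	logger.WithField("component", "demo").Info("formatted through the custom buffer pool")
}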
+func (logger *Logger) SetBufferPool(pool BufferPool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.BufferPool = pool +} diff --git a/vendor/github.com/stretchr/objx/.travis.yml b/vendor/github.com/stretchr/objx/.travis.yml deleted file mode 100644 index cde6eb2affdf..000000000000 --- a/vendor/github.com/stretchr/objx/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -language: go -go: - - "1.10.x" - - "1.11.x" - - "1.12.x" - - master - -matrix: - allow_failures: - - go: master -fast_finish: true - -env: - global: - - CC_TEST_REPORTER_ID=68feaa3410049ce73e145287acbcdacc525087a30627f96f04e579e75bd71c00 - -before_script: - - curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter - - chmod +x ./cc-test-reporter - - ./cc-test-reporter before-build - -install: - - curl -sL https://taskfile.dev/install.sh | sh - -script: - - diff -u <(echo -n) <(./bin/task lint) - - ./bin/task test-coverage - -after_script: - - ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml index a749ac5492e5..7746f516da20 100644 --- a/vendor/github.com/stretchr/objx/Taskfile.yml +++ b/vendor/github.com/stretchr/objx/Taskfile.yml @@ -25,6 +25,6 @@ tasks: - go test -race ./... test-coverage: - desc: Runs go tests and calucates test coverage + desc: Runs go tests and calculates test coverage cmds: - go test -race -coverprofile=c.out ./... diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go index 676316281154..4c6045588637 100644 --- a/vendor/github.com/stretchr/objx/accessors.go +++ b/vendor/github.com/stretchr/objx/accessors.go @@ -1,6 +1,7 @@ package objx import ( + "reflect" "regexp" "strconv" "strings" @@ -16,11 +17,18 @@ const ( // arrayAccesRegexString is the regex used to extract the array number // from the access path arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + + // mapAccessRegexString is the regex used to extract the map key + // from the access path + mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$` ) // arrayAccesRegex is the compiled arrayAccesRegexString var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) +// mapAccessRegex is the compiled mapAccessRegexString +var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) + // Get gets the value using the specified selector and // returns it inside a new Obj object. // @@ -70,15 +78,53 @@ func getIndex(s string) (int, string) { return -1, s } +// getKey returns the key which is held in s by two brackets. +// It also returns the next selector. +func getKey(s string) (string, string) { + selSegs := strings.SplitN(s, PathSeparator, 2) + thisSel := selSegs[0] + nextSel := "" + + if len(selSegs) > 1 { + nextSel = selSegs[1] + } + + mapMatches := mapAccessRegex.FindStringSubmatch(s) + if len(mapMatches) > 0 { + if _, err := strconv.Atoi(mapMatches[2]); err != nil { + thisSel = mapMatches[1] + nextSel = "[" + mapMatches[2] + "]" + mapMatches[3] + + if thisSel == "" { + thisSel = mapMatches[2] + nextSel = mapMatches[3] + } + + if nextSel == "" { + selSegs = []string{"", ""} + } else if nextSel[0] == '.' { + nextSel = nextSel[1:] + } + } + } + + return thisSel, nextSel +} + // access accesses the object using the selector and performs the // appropriate action. 
func access(current interface{}, selector string, value interface{}, isSet bool) interface{} { - selSegs := strings.SplitN(selector, PathSeparator, 2) - thisSel := selSegs[0] - index := -1 + thisSel, nextSel := getKey(selector) - if strings.Contains(thisSel, "[") { + indexes := []int{} + for strings.Contains(thisSel, "[") { + prevSel := thisSel + index := -1 index, thisSel = getIndex(thisSel) + indexes = append(indexes, index) + if prevSel == thisSel { + break + } } if curMap, ok := current.(Map); ok { @@ -88,13 +134,17 @@ func access(current interface{}, selector string, value interface{}, isSet bool) switch current.(type) { case map[string]interface{}: curMSI := current.(map[string]interface{}) - if len(selSegs) <= 1 && isSet { + if nextSel == "" && isSet { curMSI[thisSel] = value return nil } _, ok := curMSI[thisSel].(map[string]interface{}) - if (curMSI[thisSel] == nil || !ok) && index == -1 && isSet { + if !ok { + _, ok = curMSI[thisSel].(Map) + } + + if (curMSI[thisSel] == nil || !ok) && len(indexes) == 0 && isSet { curMSI[thisSel] = map[string]interface{}{} } @@ -102,18 +152,46 @@ func access(current interface{}, selector string, value interface{}, isSet bool) default: current = nil } + // do we need to access the item of an array? - if index > -1 { - if array, ok := current.([]interface{}); ok { - if index < len(array) { - current = array[index] - } else { - current = nil + if len(indexes) > 0 { + num := len(indexes) + for num > 0 { + num-- + index := indexes[num] + indexes = indexes[:num] + if array, ok := interSlice(current); ok { + if index < len(array) { + current = array[index] + } else { + current = nil + break + } } } } - if len(selSegs) > 1 { - current = access(current, selSegs[1], value, isSet) + + if nextSel != "" { + current = access(current, nextSel, value, isSet) } return current } + +func interSlice(slice interface{}) ([]interface{}, bool) { + if array, ok := slice.([]interface{}); ok { + return array, ok + } + + s := reflect.ValueOf(slice) + if s.Kind() != reflect.Slice { + return nil, false + } + + ret := make([]interface{}, s.Len()) + + for i := 0; i < s.Len(); i++ { + ret[i] = s.Index(i).Interface() + } + + return ret, true +} diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go index 95149c06a6d3..a64712a08b50 100644 --- a/vendor/github.com/stretchr/objx/map.go +++ b/vendor/github.com/stretchr/objx/map.go @@ -92,6 +92,18 @@ func MustFromJSON(jsonString string) Map { return o } +// MustFromJSONSlice creates a new slice of Map containing the data specified in the +// jsonString. Works with jsons with a top level array +// +// Panics if the JSON is invalid. +func MustFromJSONSlice(jsonString string) []Map { + slice, err := FromJSONSlice(jsonString) + if err != nil { + panic("objx: MustFromJSONSlice failed with error: " + err.Error()) + } + return slice +} + // FromJSON creates a new Map containing the data specified in the // jsonString. 
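objx gains FromJSONSlice/MustFromJSONSlice for JSON documents whose top level is an array, and (in type_specific_codegen.go below) Value.Int now coerces whole float64 values instead of FromJSON eagerly rewriting them. A small sketch with made-up JSON:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	// A top-level JSON array becomes []objx.Map; the Must variant panics on invalid JSON.
	users := objx.MustFromJSONSlice(`[{"name":"ada","id":1},{"name":"bob","id":2}]`)
	for _, u := range users {
		// Int tolerates the float64 that encoding/json produces for numeric literals.
		fmt.Println(u.Get("name").Str(), u.Get("id").Int())
	}
}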
// @@ -102,45 +114,20 @@ func FromJSON(jsonString string) (Map, error) { if err != nil { return Nil, err } - m.tryConvertFloat64() return m, nil } -func (m Map) tryConvertFloat64() { - for k, v := range m { - switch v.(type) { - case float64: - f := v.(float64) - if float64(int(f)) == f { - m[k] = int(f) - } - case map[string]interface{}: - t := New(v) - t.tryConvertFloat64() - m[k] = t - case []interface{}: - m[k] = tryConvertFloat64InSlice(v.([]interface{})) - } - } -} - -func tryConvertFloat64InSlice(s []interface{}) []interface{} { - for k, v := range s { - switch v.(type) { - case float64: - f := v.(float64) - if float64(int(f)) == f { - s[k] = int(f) - } - case map[string]interface{}: - t := New(v) - t.tryConvertFloat64() - s[k] = t - case []interface{}: - s[k] = tryConvertFloat64InSlice(v.([]interface{})) - } +// FromJSONSlice creates a new slice of Map containing the data specified in the +// jsonString. Works with jsons with a top level array +// +// Returns an error if the JSON is invalid. +func FromJSONSlice(jsonString string) ([]Map, error) { + var slice []Map + err := json.Unmarshal([]byte(jsonString), &slice) + if err != nil { + return nil, err } - return s + return slice, nil } // FromBase64 creates a new Obj containing the data specified diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go index 9859b407f029..45850456e17f 100644 --- a/vendor/github.com/stretchr/objx/type_specific_codegen.go +++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -385,6 +385,11 @@ func (v *Value) Int(optionalDefault ...int) int { if s, ok := v.data.(int); ok { return s } + if s, ok := v.data.(float64); ok { + if float64(int(s)) == s { + return int(s) + } + } if len(optionalDefault) == 1 { return optionalDefault[0] } @@ -395,6 +400,11 @@ func (v *Value) Int(optionalDefault ...int) int { // // Panics if the object is not a int. func (v *Value) MustInt() int { + if s, ok := v.data.(float64); ok { + if float64(int(s)) == s { + return int(s) + } + } return v.data.(int) } diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 41649d267924..95d8e59da69b 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -1,8 +1,10 @@ package assert import ( + "bytes" "fmt" "reflect" + "time" ) type CompareType int @@ -30,6 +32,9 @@ var ( float64Type = reflect.TypeOf(float64(1)) stringType = reflect.TypeOf("") + + timeType = reflect.TypeOf(time.Time{}) + bytesType = reflect.TypeOf([]byte{}) ) func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { @@ -299,6 +304,47 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return compareLess, true } } + // Check for known struct types we can check for compare results. + case reflect.Struct: + { + // All structs enter here. We're not interested in most types. + if !canConvert(obj1Value, timeType) { + break + } + + // time.Time can compared! + timeObj1, ok := obj1.(time.Time) + if !ok { + timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) + } + + timeObj2, ok := obj2.(time.Time) + if !ok { + timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) + } + + return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + } + case reflect.Slice: + { + // We only care about the []byte type. 
+ if !canConvert(obj1Value, bytesType) { + break + } + + // []byte can be compared! + bytesObj1, ok := obj1.([]byte) + if !ok { + bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte) + + } + bytesObj2, ok := obj2.([]byte) + if !ok { + bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) + } + + return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + } } return compareEqual, false @@ -310,7 +356,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { // assert.Greater(t, float64(2), float64(1)) // assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -320,7 +369,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // assert.GreaterOrEqual(t, "b", "a") // assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -329,7 +381,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // assert.Less(t, float64(1), float64(2)) // assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -339,7 +394,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // assert.LessOrEqual(t, "a", "b") // assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) 
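With the new Struct and Slice cases above, testify's ordering assertions can compare time.Time and []byte values directly (on Go 1.17 and later, where the canConvert shim is functional). A small sketch:

    package example

    import (
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestOrderedComparisons(t *testing.T) {
        start := time.Now()
        assert.Greater(t, start.Add(time.Second), start) // time.Time ordering
        assert.Less(t, []byte("abc"), []byte("abd"))     // bytes.Compare ordering
    }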
} // Positive asserts that the specified element is positive @@ -347,8 +405,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // assert.Positive(t, 1) // assert.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) } // Negative asserts that the specified element is negative @@ -356,8 +417,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { // assert.Negative(t, -1) // assert.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go new file mode 100644 index 000000000000..da867903e2fa --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go @@ -0,0 +1,16 @@ +//go:build go1.17 +// +build go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_legacy.go + +package assert + +import "reflect" + +// Wrapper around reflect.Value.CanConvert, for compatibility +// reasons. +func canConvert(value reflect.Value, to reflect.Type) bool { + return value.CanConvert(to) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go new file mode 100644 index 000000000000..1701af2a3c89 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go @@ -0,0 +1,16 @@ +//go:build !go1.17 +// +build !go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_can_convert.go + +package assert + +import "reflect" + +// Older versions of Go does not have the reflect.Value.CanConvert +// method. +func canConvert(value reflect.Value, to reflect.Type) bool { + return false +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 4dfd1229a861..7880b8f94333 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) } +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. 
+// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...) +} + // ErrorIsf asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { @@ -724,6 +736,16 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...) +} + // YAMLEqf asserts that two YAML strings are equivalent. func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 25337a6f07e6..339515b8bfb9 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. return ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { @@ -1437,6 +1461,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta return WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). 
+// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1c3b47182a72..759448783585 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) 
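The `msgAndArgs...` changes in this and the surrounding hunks forward user-supplied messages as true variadic arguments; previously the slice was passed as a single value, so a format string and its arguments were not expanded. A hedged example of the intended usage (the names are invented):

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestLatenciesIncrease(t *testing.T) {
        latencies := []int{1, 2, 3}
        // With the variadic fix, a failure would render the message as
        // "latencies for backfill should be increasing" instead of dumping
        // a raw []interface{} value.
        assert.IsIncreasing(t, latencies, "latencies for %s should be increasing", "backfill")
    }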
} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index bcac4401f57f..fa1245b18973 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "os" + "path/filepath" "reflect" "regexp" "runtime" @@ -144,7 +145,8 @@ func CallerInfo() []string { if len(parts) > 1 { dir := parts[len(parts)-2] if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + path, _ := filepath.Abs(file) + callers = append(callers, fmt.Sprintf("%s:%d", path, line)) } } @@ -563,16 +565,17 @@ func isEmpty(object interface{}) bool { switch objValue.Kind() { // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: if objValue.IsNil() { return true } deref := objValue.Elem().Interface() return isEmpty(deref) - // for all other types, compare against the zero value + // for all other types, compare against the zero value + // array types are empty when they match their zero-initialized state default: zero := reflect.Zero(objValue.Type()) return reflect.DeepEqual(object, zero.Interface()) @@ -718,10 +721,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (false, false) if impossible. // return (true, false) if element was not found. // return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { +func containsElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() + listType := reflect.TypeOf(list) + if listType == nil { + return false, false + } + listKind := listType.Kind() defer func() { if e := recover(); e != nil { ok = false @@ -764,7 +771,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) } @@ -787,7 +794,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) } @@ -811,7 +818,6 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -821,17 +827,35 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) 
} - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + } + } + + return true + } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -852,10 +876,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) h.Helper() } if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -865,17 +888,35 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) + } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -1000,27 +1041,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { type PanicTestFunc func() // didPanic returns true if the function passed to it panics. Otherwise, it returns false. 
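Subset and NotSubset above now accept maps as well as slices and arrays: every key in the subset map must be present in the candidate map with an equal value. A short sketch:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestMapSubset(t *testing.T) {
        have := map[string]int{"a": 1, "b": 2, "c": 3}
        assert.Subset(t, have, map[string]int{"a": 1, "b": 2}) // contained
        assert.NotSubset(t, have, map[string]int{"a": 99})     // value differs
    }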
-func didPanic(f PanicTestFunc) (bool, interface{}, string) { - - didPanic := false - var message interface{} - var stack string - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - stack = string(debug.Stack()) - } - }() - - // call the target function - f() +func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) { + didPanic = true + defer func() { + message = recover() + if didPanic { + stack = string(debug.Stack()) + } }() - return didPanic, message, stack + // call the target function + f() + didPanic = false + return } // Panics asserts that the code inside the specified PanicTestFunc panics. @@ -1111,6 +1146,27 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, return true } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if end.Before(start) { + return Fail(t, "Start should be before end", msgAndArgs...) + } + + if actual.Before(start) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...) + } else if actual.After(end) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...) + } + + return true +} + func toFloat(x interface{}) (float64, bool) { var xf float64 xok := true @@ -1161,11 +1217,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs bf, bok := toFloat(actual) if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + return Fail(t, "Parameters must be numerical", msgAndArgs...) + } + + if math.IsNaN(af) && math.IsNaN(bf) { + return true } if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + return Fail(t, "Expected must not be NaN", msgAndArgs...) } if math.IsNaN(bf) { @@ -1188,7 +1248,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) 
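This hunk adds the WithinRange assertion and relaxes InDelta so that two NaN values are treated as within delta rather than failing outright. A small sketch of both:

    package example

    import (
        "math"
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestWithinRangeAndNaN(t *testing.T) {
        now := time.Now()
        assert.WithinRange(t, now, now.Add(-time.Minute), now.Add(time.Minute))

        // Both values being NaN now counts as "within delta".
        assert.InDelta(t, math.NaN(), math.NaN(), 0.001)
    }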
} actualSlice := reflect.ValueOf(actual) @@ -1250,8 +1310,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + bf, bok := toFloat(actual) + if !aok || !bok { + return 0, fmt.Errorf("Parameters must be numerical") + } + if math.IsNaN(af) && math.IsNaN(bf) { + return 0, nil } if math.IsNaN(af) { return 0, errors.New("expected value must not be NaN") @@ -1259,10 +1323,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if af == 0 { return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } if math.IsNaN(bf) { return 0, errors.New("actual value must not be NaN") } @@ -1298,7 +1358,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1375,6 +1435,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte return true } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) { + return false + } + + actual := theError.Error() + if !strings.Contains(actual, contains) { + return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...) + } + + return true +} + // matchRegexp return true if a specified regexp matches a string. 
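ErrorContains above asserts both that an error is non-nil and that its message contains a substring, a pattern that previously required combining Error and Contains. For example:

    package example

    import (
        "fmt"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestErrorContains(t *testing.T) {
        err := fmt.Errorf("open config.yaml: permission denied")
        assert.ErrorContains(t, err, "permission denied")
    }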
func matchRegexp(rx interface{}, str interface{}) bool { @@ -1588,12 +1669,17 @@ func diff(expected interface{}, actual interface{}) string { } var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { + + switch et { + case reflect.TypeOf(""): e = reflect.ValueOf(expected).String() a = reflect.ValueOf(actual).String() + case reflect.TypeOf(time.Time{}): + e = spewConfigStringerEnabled.Sdump(expected) + a = spewConfigStringerEnabled.Sdump(actual) + default: + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1625,6 +1711,14 @@ var spewConfig = spew.ConfigState{ MaxDepth: 10, } +var spewConfigStringerEnabled = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + MaxDepth: 10, +} + type tHelper interface { Helper() } diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index e2e6a2d237df..f0af8246cfc9 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -70,6 +70,9 @@ type Call struct { // if the PanicMsg is set to a non nil string the function call will panic // irrespective of other settings PanicMsg *string + + // Calls which must be satisfied before this call can be + requires []*Call } func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { @@ -199,6 +202,64 @@ func (c *Call) On(methodName string, arguments ...interface{}) *Call { return c.Parent.On(methodName, arguments...) } +// Unset removes a mock handler from being called. +// test.On("func", mock.Anything).Unset() +func (c *Call) Unset() *Call { + var unlockOnce sync.Once + + for _, arg := range c.Arguments { + if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { + panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) + } + } + + c.lock() + defer unlockOnce.Do(c.unlock) + + foundMatchingCall := false + + for i, call := range c.Parent.ExpectedCalls { + if call.Method == c.Method { + _, diffCount := call.Arguments.Diff(c.Arguments) + if diffCount == 0 { + foundMatchingCall = true + // Remove from ExpectedCalls + c.Parent.ExpectedCalls = append(c.Parent.ExpectedCalls[:i], c.Parent.ExpectedCalls[i+1:]...) + } + } + } + + if !foundMatchingCall { + unlockOnce.Do(c.unlock) + c.Parent.fail("\n\nmock: Could not find expected call\n-----------------------------\n\n%s\n\n", + callString(c.Method, c.Arguments, true), + ) + } + + return c +} + +// NotBefore indicates that the mock should only be called after the referenced +// calls have been called as expected. The referenced calls may be from the +// same mock instance and/or other mock instances. +// +// Mock.On("Do").Return(nil).Notbefore( +// Mock.On("Init").Return(nil) +// ) +func (c *Call) NotBefore(calls ...*Call) *Call { + c.lock() + defer c.unlock() + + for _, call := range calls { + if call.Parent == nil { + panic("not before calls must be created with Mock.On()") + } + } + + c.requires = append(c.requires, calls...) + return c +} + // Mock is the workhorse used to track activity on another object. // For an example of its usage, refer to the "Example Usage" section at the top // of this document. @@ -221,10 +282,17 @@ type Mock struct { mutex sync.Mutex } +// String provides a %v format string for Mock. 
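The mock changes above add Call.Unset for withdrawing a registered expectation and Call.NotBefore for enforcing call ordering. A hedged sketch with a hypothetical Service mock (not part of the patch):

    package example

    import (
        "testing"

        "github.com/stretchr/testify/mock"
    )

    // Service is a hypothetical mock used only for illustration.
    type Service struct{ mock.Mock }

    func (s *Service) Init() error          { return s.Called().Error(0) }
    func (s *Service) Do(name string) error { return s.Called(name).Error(0) }

    func TestOrderingAndUnset(t *testing.T) {
        s := new(Service)

        // Do("job") may only be called once Init has been observed.
        initCall := s.On("Init").Return(nil)
        s.On("Do", "job").Return(nil).NotBefore(initCall)

        // An expectation can be withdrawn again with Unset.
        extra := s.On("Do", "cleanup").Return(nil)
        extra.Unset()

        _ = s.Init()
        _ = s.Do("job")
        s.AssertExpectations(t)
    }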
+// Note: this is used implicitly by Arguments.Diff if a Mock is passed. +// It exists because go's default %v formatting traverses the struct +// without acquiring the mutex, which is detected by go test -race. +func (m *Mock) String() string { + return fmt.Sprintf("%[1]T<%[1]p>", m) +} + // TestData holds any data that might be useful for testing. Testify ignores // this data completely allowing you to do whatever you like with it. func (m *Mock) TestData() objx.Map { - if m.testData == nil { m.testData = make(objx.Map) } @@ -346,7 +414,6 @@ func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, } func callString(method string, arguments Arguments, includeArgumentValues bool) string { - var argValsString string if includeArgumentValues { var argVals []string @@ -370,10 +437,10 @@ func (m *Mock) Called(arguments ...interface{}) Arguments { panic("Couldn't get the caller information") } functionPath := runtime.FuncForPC(pc).Name() - //Next four lines are required to use GCCGO function naming conventions. - //For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock - //uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree - //With GCCGO we need to remove interface information starting from pN

<dd>. + // Next four lines are required to use GCCGO function naming conventions. + // For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock + // uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree + // With GCCGO we need to remove interface information starting from pN<dd>
. re := regexp.MustCompile("\\.pN\\d+_") if re.MatchString(functionPath) { functionPath = re.Split(functionPath, -1)[0] @@ -389,7 +456,7 @@ func (m *Mock) Called(arguments ...interface{}) Arguments { // If Call.WaitFor is set, blocks until the channel is closed or receives a message. func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments { m.mutex.Lock() - //TODO: could combine expected and closes in single loop + // TODO: could combine expected and closes in single loop found, call := m.findExpectedCall(methodName, arguments...) if found < 0 { @@ -419,6 +486,25 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen } } + for _, requirement := range call.requires { + if satisfied, _ := requirement.Parent.checkExpectation(requirement); !satisfied { + m.mutex.Unlock() + m.fail("mock: Unexpected Method Call\n-----------------------------\n\n%s\n\nMust not be called before%s:\n\n%s", + callString(call.Method, call.Arguments, true), + func() (s string) { + if requirement.totalCalls > 0 { + s = " another call of" + } + if call.Parent != requirement.Parent { + s += " method from another mock instance" + } + return + }(), + callString(requirement.Method, requirement.Arguments, true), + ) + } + } + if call.Repeatability == 1 { call.Repeatability = -1 } else if call.Repeatability > 1 { @@ -476,9 +562,9 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { h.Helper() } for _, obj := range testObjects { - if m, ok := obj.(Mock); ok { + if m, ok := obj.(*Mock); ok { t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") - obj = &m + obj = m } m := obj.(assertExpectationser) if !m.AssertExpectations(t) { @@ -495,34 +581,36 @@ func (m *Mock) AssertExpectations(t TestingT) bool { if h, ok := t.(tHelper); ok { h.Helper() } + m.mutex.Lock() defer m.mutex.Unlock() - var somethingMissing bool var failedExpectations int // iterate through each expectation expectedCalls := m.expectedCalls() for _, expectedCall := range expectedCalls { - if !expectedCall.optional && !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { - somethingMissing = true + satisfied, reason := m.checkExpectation(expectedCall) + if !satisfied { failedExpectations++ - t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) - } else { - if expectedCall.Repeatability > 0 { - somethingMissing = true - failedExpectations++ - t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) - } else { - t.Logf("PASS:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) - } } + t.Logf(reason) } - if somethingMissing { + if failedExpectations != 0 { t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) } - return !somethingMissing + return failedExpectations == 0 +} + +func (m *Mock) checkExpectation(call *Call) (bool, string) { + if !call.optional && !m.methodWasCalled(call.Method, call.Arguments) && call.totalCalls == 0 { + return false, fmt.Sprintf("FAIL:\t%s(%s)\n\t\tat: %s", call.Method, call.Arguments.String(), call.callerInfo) + } + if call.Repeatability > 0 { + return false, fmt.Sprintf("FAIL:\t%s(%s)\n\t\tat: %s", call.Method, call.Arguments.String(), call.callerInfo) 
+ } + return true, fmt.Sprintf("PASS:\t%s(%s)", call.Method, call.Arguments.String()) } // AssertNumberOfCalls asserts that the method was called expectedCalls times. @@ -720,7 +808,7 @@ func (f argumentMatcher) Matches(argument interface{}) bool { } func (f argumentMatcher) String() string { - return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name()) + return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).String()) } // MatchedBy can be used to match a mock call based on only certain properties @@ -773,12 +861,12 @@ func (args Arguments) Is(objects ...interface{}) bool { // // Returns the diff string and number of differences found. func (args Arguments) Diff(objects []interface{}) (string, int) { - //TODO: could return string as error and nil for No difference + // TODO: could return string as error and nil for No difference - var output = "\n" + output := "\n" var differences int - var maxArgCount = len(args) + maxArgCount := len(args) if len(objects) > maxArgCount { maxArgCount = len(objects) } @@ -804,21 +892,28 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { } if matcher, ok := expected.(argumentMatcher); ok { - if matcher.Matches(actual) { + var matches bool + func() { + defer func() { + if r := recover(); r != nil { + actualFmt = fmt.Sprintf("panic in argument matcher: %v", r) + } + }() + matches = matcher.Matches(actual) + }() + if matches { output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher) } else { differences++ output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher) } } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { - // type checking if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { // not match differences++ output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*IsTypeArgument)(nil)) { t := expected.(*IsTypeArgument).t if reflect.TypeOf(t) != reflect.TypeOf(actual) { @@ -826,7 +921,6 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt) } } else { - // normal checking if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { @@ -846,7 +940,6 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { } return output, differences - } // Assert compares the arguments with the specified objects and fails if @@ -868,7 +961,6 @@ func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { t.Errorf("%sArguments do not match.", assert.CallerInfo()) return false - } // String gets the argument at the specified index. Panics if there is no argument, or @@ -877,7 +969,6 @@ func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { // If no index is provided, String() returns a complete string representation // of the arguments. 
func (args Arguments) String(indexOrNil ...int) string { - if len(indexOrNil) == 0 { // normal String() method - return a string representation of the args var argsStr []string @@ -887,7 +978,7 @@ func (args Arguments) String(indexOrNil ...int) string { return strings.Join(argsStr, ",") } else if len(indexOrNil) == 1 { // Index has been specified - get the argument at that index - var index = indexOrNil[0] + index := indexOrNil[0] var s string var ok bool if s, ok = args.Get(index).(string); !ok { @@ -897,7 +988,6 @@ func (args Arguments) String(indexOrNil ...int) string { } panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) - } // Int gets the argument at the specified index. Panics if there is no argument, or diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 51820df2e672..880853f5a2c5 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int t.FailNow() } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContains(t, theError, contains, msgAndArgs...) { + return + } + t.FailNow() +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContainsf(t, theError, contains, msg, args...) { + return + } + t.FailNow() +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { @@ -1834,6 +1864,32 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim t.FailNow() } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRange(t, actual, start, end, msgAndArgs...) { + return + } + t.FailNow() +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRangef(t, actual, start, end, msg, args...) { + return + } + t.FailNow() +} + // YAMLEq asserts that two YAML strings are equivalent. 
func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index ed54a9d83f35..960bf6f2cabf 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) { @@ -1438,6 +1462,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). +// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/xdg-go/scram/CHANGELOG.md b/vendor/github.com/xdg-go/scram/CHANGELOG.md index 425c122fad32..21828db14918 100644 --- a/vendor/github.com/xdg-go/scram/CHANGELOG.md +++ b/vendor/github.com/xdg-go/scram/CHANGELOG.md @@ -1,5 +1,13 @@ # CHANGELOG +## v1.1.1 - 2022-03-03 + +- Bump stringprep dependency to v1.0.3 for upstream CVE fix. + +## v1.1.0 - 2022-01-16 + +- Add SHA-512 hash generator function for convenience. 
+ ## v1.0.2 - 2021-03-28 - Switch PBKDF2 dependency to github.com/xdg-go/pbkdf2 to diff --git a/vendor/github.com/xdg-go/scram/doc.go b/vendor/github.com/xdg-go/scram/doc.go index d43bee6071ce..82e8aeed8794 100644 --- a/vendor/github.com/xdg-go/scram/doc.go +++ b/vendor/github.com/xdg-go/scram/doc.go @@ -10,14 +10,16 @@ // // Usage // -// The scram package provides two variables, `SHA1` and `SHA256`, that are -// used to construct Client or Server objects. +// The scram package provides variables, `SHA1`, `SHA256`, and `SHA512`, that +// are used to construct Client or Server objects. // // clientSHA1, err := scram.SHA1.NewClient(username, password, authID) // clientSHA256, err := scram.SHA256.NewClient(username, password, authID) +// clientSHA512, err := scram.SHA512.NewClient(username, password, authID) // // serverSHA1, err := scram.SHA1.NewServer(credentialLookupFcn) // serverSHA256, err := scram.SHA256.NewServer(credentialLookupFcn) +// serverSHA512, err := scram.SHA512.NewServer(credentialLookupFcn) // // These objects are used to construct ClientConversation or // ServerConversation objects that are used to carry out authentication. diff --git a/vendor/github.com/xdg-go/scram/scram.go b/vendor/github.com/xdg-go/scram/scram.go index 927659969b67..a7b366027e27 100644 --- a/vendor/github.com/xdg-go/scram/scram.go +++ b/vendor/github.com/xdg-go/scram/scram.go @@ -9,6 +9,7 @@ package scram import ( "crypto/sha1" "crypto/sha256" + "crypto/sha512" "fmt" "hash" @@ -29,6 +30,10 @@ var SHA1 HashGeneratorFcn = func() hash.Hash { return sha1.New() } // to create Client objects configured for SHA-256 hashing. var SHA256 HashGeneratorFcn = func() hash.Hash { return sha256.New() } +// SHA512 is a function that returns a crypto/sha512 hasher and should be used +// to create Client objects configured for SHA-512 hashing. +var SHA512 HashGeneratorFcn = func() hash.Hash { return sha512.New() } + // NewClient constructs a SCRAM client component based on a given hash.Hash // factory receiver. This constructor will normalize the username, password // and authzID via the SASLprep algorithm, as recommended by RFC-5802. 
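SHA512 above extends xdg-go/scram with a SHA-512 hash generator alongside SHA1 and SHA256. A minimal client-side sketch; the credentials are placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/xdg-go/scram"
    )

    func main() {
        client, err := scram.SHA512.NewClient("user", "pencil", "")
        if err != nil {
            log.Fatal(err)
        }

        // The first Step produces the client-first SCRAM message; later
        // Steps would consume the server's challenges.
        conv := client.NewConversation()
        first, err := conv.Step("")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(first)
    }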
If diff --git a/vendor/github.com/xdg-go/stringprep/CHANGELOG.md b/vendor/github.com/xdg-go/stringprep/CHANGELOG.md index 2849637ca6ad..e06787fba385 100644 --- a/vendor/github.com/xdg-go/stringprep/CHANGELOG.md +++ b/vendor/github.com/xdg-go/stringprep/CHANGELOG.md @@ -1,5 +1,12 @@ # CHANGELOG + +## [v1.0.3] - 2022-03-01 + +### Maintenance + +- Bump golang.org/x/text to v0.3.7 due to CVE-2021-38561 + ## [v1.0.2] - 2021-03-27 diff --git a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go index 5ea2cf88dd76..ae112ae131ba 100644 --- a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go +++ b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go @@ -65,6 +65,7 @@ var ( ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err() ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err() ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err() + ErrGRPCAuthOldRevision = status.New(codes.InvalidArgument, "etcdserver: revision of auth store is old").Err() ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err() ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err() @@ -74,6 +75,7 @@ var ( ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err() ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err() ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err() + ErrGRPCTimeoutWaitAppliedIndex = status.New(codes.Unavailable, "etcdserver: request timed out, waiting for the applied index took too long").Err() ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err() ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err() ErrGPRCNotSupportedForLearner = status.New(codes.Unavailable, "etcdserver: rpc not supported for learner").Err() @@ -131,6 +133,7 @@ var ( ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, + ErrorDesc(ErrGRPCAuthOldRevision): ErrGRPCAuthOldRevision, ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, @@ -195,6 +198,7 @@ var ( ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) + ErrAuthOldRevision = Error(ErrGRPCAuthOldRevision) ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) ErrNoLeader = Error(ErrGRPCNoLeader) @@ -205,6 +209,7 @@ var ( ErrTimeout = Error(ErrGRPCTimeout) ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail) ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) + ErrTimeoutWaitAppliedIndex = Error(ErrGRPCTimeoutWaitAppliedIndex) ErrUnhealthy = Error(ErrGRPCUnhealthy) ErrCorrupt = Error(ErrGRPCCorrupt) ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee) diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go index 95b62fd8b2ff..f833c76d661f 100644 --- a/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,7 +26,7 @@ import ( var ( // 
MinClusterVersion is the min cluster version this etcd binary is compatible with. MinClusterVersion = "3.0.0" - Version = "3.5.0" + Version = "3.5.4" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go index 0c739dd0ac4c..a0d818582611 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bson.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bson.go @@ -7,8 +7,6 @@ // Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer // See THIRD-PARTY-NOTICES for original license terms. -// +build go1.9 - package bson // import "go.mongodb.org/mongo-driver/bson" import ( @@ -29,7 +27,7 @@ type Zeroer interface { // // Example usage: // -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} type D = primitive.D // E represents a BSON element for a D. It is usually used inside a D. @@ -41,12 +39,12 @@ type E = primitive.E // // Example usage: // -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} type M = primitive.M // An A is an ordered representation of a BSON array. // // Example usage: // -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} type A = primitive.A diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go b/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go deleted file mode 100644 index bbe7792848f0..000000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// +build !go1.9 - -package bson // import "go.mongodb.org/mongo-driver/bson" - -import ( - "math" - "strconv" - "strings" -) - -// Zeroer allows custom struct types to implement a report of zero -// state. All struct types that don't implement Zeroer or where IsZero -// returns false are considered to be not zero. -type Zeroer interface { - IsZero() bool -} - -// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters, -// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead. -// -// Example usage: -// -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} -type D []E - -// Map creates a map from the elements of the D. -func (d D) Map() M { - m := make(M, len(d)) - for _, e := range d { - m[e.Key] = e.Value - } - return m -} - -// E represents a BSON element for a D. It is usually used inside a D. -type E struct { - Key string - Value interface{} -} - -// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not -// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be -// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead. -// -// Example usage: -// -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} -type M map[string]interface{} - -// An A is an ordered representation of a BSON array. 
-// -// Example usage: -// -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} -type A []interface{} - -func formatDouble(f float64) string { - var s string - if math.IsInf(f, 1) { - s = "Infinity" - } else if math.IsInf(f, -1) { - s = "-Infinity" - } else if math.IsNaN(f) { - s = "NaN" - } else { - // Print exactly one decimalType place for integers; otherwise, print as many are necessary to - // perfectly represent it. - s = strconv.FormatFloat(f, 'G', -1, 64) - if !strings.ContainsRune(s, '.') { - s += ".0" - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go index 2c861b5cd3b9..098ed69f98c1 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go @@ -13,6 +13,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" ) var ( @@ -43,7 +44,7 @@ type Unmarshaler interface { } // ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representaiton of themselves. The BSON bytes and type can be +// BSON value representation of themselves. The BSON bytes and type can be // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. type ValueUnmarshaler interface { @@ -118,11 +119,32 @@ type EncodeContext struct { type DecodeContext struct { *Registry Truncate bool + // Ancestor is the type of a containing document. This is mainly used to determine what type // should be used when decoding an embedded document into an empty interface. For example, if // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface // will be decoded into a bson.M. + // + // Deprecated: Use DefaultDocumentM or DefaultDocumentD instead. Ancestor reflect.Type + + // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the + // usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is + // set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an + // error. DocumentType overrides the Ancestor field. + defaultDocumentType reflect.Type +} + +// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". +func (dc *DecodeContext) DefaultDocumentM() { + dc.defaultDocumentType = reflect.TypeOf(primitive.M{}) +} + +// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". 
+func (dc *DecodeContext) DefaultDocumentD() { + dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) } // ValueCodec is the interface that groups the methods to encode and decode diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index 32fd1427874e..e95cab585fb6 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -53,7 +53,7 @@ type DefaultValueDecoders struct{} // RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with // the provided RegistryBuilder. // -// There is no support for decoding map[string]interface{} becuase there is no decoder for +// There is no support for decoding map[string]interface{} because there is no decoder for // interface{}, so users must either register this decoder themselves or use the // EmptyInterfaceDecoder available in the bson package. func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { @@ -1463,7 +1463,7 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr if !val.CanAddr() { return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} } - val = val.Addr() // If they type doesn't implement the interface, a pointer to it must. + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. } t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) @@ -1492,16 +1492,28 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bson val.Set(reflect.New(val.Type().Elem())) } + _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) + if err != nil { + return err + } + + // If the target Go value is a pointer and the BSON field value is empty, set the value to the + // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to + // change the pointer value from within the function (only the value at the pointer address), + // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON + // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches + // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and + // the JSON field value is "null". + if val.Kind() == reflect.Ptr && len(src) == 0 { + val.Set(reflect.Zero(val.Type())) + return nil + } + if !val.Type().Implements(tUnmarshaler) { if !val.CanAddr() { return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} } - val = val.Addr() // If they type doesn't implement the interface, a pointer to it must. - } - - _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. 
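The DefaultDocumentM and DefaultDocumentD methods added to bsoncodec.DecodeContext above choose whether BSON documents decoded into interface{} (or map[string]interface{}) destinations come back as primitive.M or primitive.D. A minimal usage sketch, assuming the vendored driver at this version; "data" stands in for BSON bytes obtained elsewhere:

    // Decode with nested documents as primitive.M instead of the default primitive.D.
    dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
    if err != nil {
        return err
    }

    dc := bsoncodec.DecodeContext{Registry: bson.DefaultRegistry}
    dc.DefaultDocumentM()
    if err := dec.SetContext(dc); err != nil {
        return err
    }

    var out interface{}
    if err := dec.Decode(&out); err != nil {
        return err
    }
    // out, and any documents nested inside it, is now a primitive.M.

The decoder.go hunk later in this patch adds Decoder.DefaultDocumentM and Decoder.DefaultDocumentD, which flip the same switch without building a DecodeContext by hand.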
} fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go index 49a0c3f14bf6..6bdb43cb4368 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -30,7 +30,7 @@ var errInvalidValue = errors.New("cannot encode invalid element") var sliceWriterPool = sync.Pool{ New: func() interface{} { - sw := make(bsonrw.SliceWriter, 0, 0) + sw := make(bsonrw.SliceWriter, 0) return &sw }, } @@ -333,14 +333,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum continue } - if enc, ok := currEncoder.(ValueEncoder); ok { - err = enc.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - continue - } - err = encoder.EncodeValue(ec, vw, currVal) + err = currEncoder.EncodeValue(ec, vw, currVal) if err != nil { return err } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go index c1e20f9489e6..5f903ebea6c9 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -1,3 +1,9 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + // Package bsoncodec provides a system for encoding values to BSON representations and decoding // values from BSON representations. This package considers both binary BSON and ExtendedJSON as // BSON representations. The types in this package enable a flexible system for handling this @@ -11,7 +17,7 @@ // 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for // retrieving them. // -// ValueEncoders and ValueDecoders +// # ValueEncoders and ValueDecoders // // The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON. // The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the @@ -25,7 +31,7 @@ // allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext // instance is provided and serves similar functionality to the EncodeContext. // -// Registry and RegistryBuilder +// # Registry and RegistryBuilder // // A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type // documentation for examples of registering various custom encoders and decoders. A Registry can be constructed using a @@ -47,15 +53,15 @@ // values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. The following code would // change the behavior so these values decode as Go int instances instead: // -// intType := reflect.TypeOf(int(0)) -// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) +// intType := reflect.TypeOf(int(0)) +// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) // // 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder // methods. 
The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the // registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first. // These methods should be used to change the behavior for all values for a specific kind. // -// Registry Lookup Procedure +// # Registry Lookup Procedure // // When looking up an encoder in a Registry, the precedence rules are as follows: // @@ -73,7 +79,7 @@ // rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is // found. // -// DefaultValueEncoders and DefaultValueDecoders +// # DefaultValueEncoders and DefaultValueDecoders // // The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and // ValueDecoders for handling a wide range of Go types, including all of the types within the diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go index a15636d0a8c1..eda417cff8ae 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go @@ -57,11 +57,18 @@ func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWrit func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) { isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument - if isDocument && dc.Ancestor != nil { - // Using ancestor information rather than looking up the type map entry forces consistent decoding. - // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry - // has been registered. - return dc.Ancestor, nil + if isDocument { + if dc.defaultDocumentType != nil { + // If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return + // that type. + return dc.defaultDocumentType, nil + } + if dc.Ancestor != nil { + // Using ancestor information rather than looking up the type map entry forces consistent decoding. + // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry + // has been registered. + return dc.Ancestor, nil + } } rtype, err := dc.LookupTypeMapEntry(valueType) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go index fbb8ef427ce9..e1fbef9c6c77 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -7,6 +7,7 @@ package bsoncodec import ( + "encoding" "fmt" "reflect" "strconv" @@ -126,14 +127,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v continue } - if enc, ok := currEncoder.(ValueEncoder); ok { - err = enc.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - continue - } - err = encoder.EncodeValue(ec, vw, currVal) + err = currEncoder.EncodeValue(ec, vw, currVal) if err != nil { return err } @@ -237,6 +231,19 @@ func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { } return "", err } + // keys implement encoding.TextMarshaler are marshaled. 
+ if km, ok := val.Interface().(encoding.TextMarshaler); ok { + if val.Kind() == reflect.Ptr && val.IsNil() { + return "", nil + } + + buf, err := km.MarshalText() + if err != nil { + return "", err + } + + return string(buf), nil + } switch val.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: @@ -248,6 +255,7 @@ func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { } var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem() +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) { keyVal := reflect.ValueOf(key) @@ -259,23 +267,27 @@ func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, v := keyVal.Interface().(KeyUnmarshaler) err = v.UnmarshalKey(key) keyVal = keyVal.Elem() + // Try to decode encoding.TextUnmarshalers. + case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + keyVal = reflect.New(keyType) + v := keyVal.Interface().(encoding.TextUnmarshaler) + err = v.UnmarshalText([]byte(key)) + keyVal = keyVal.Elem() // Otherwise, go to type specific behavior default: switch keyType.Kind() { case reflect.String: keyVal = reflect.ValueOf(key).Convert(keyType) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - s := string(key) - n, parseErr := strconv.ParseInt(s, 10, 64) + n, parseErr := strconv.ParseInt(key, 10, 64) if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) { - err = fmt.Errorf("failed to unmarshal number key %v", s) + err = fmt.Errorf("failed to unmarshal number key %v", key) } keyVal = reflect.ValueOf(n).Convert(keyType) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - s := string(key) - n, parseErr := strconv.ParseUint(s, 10, 64) + n, parseErr := strconv.ParseUint(key, 10, 64) if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) { - err = fmt.Errorf("failed to unmarshal number key %v", s) + err = fmt.Errorf("failed to unmarshal number key %v", key) break } keyVal = reflect.ValueOf(n).Convert(keyType) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index 60abffb24836..80644023c249 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -54,12 +54,6 @@ func (entme ErrNoTypeMapEntry) Error() string { // ErrNotInterface is returned when the provided type is not an interface. var ErrNotInterface = errors.New("The provided type is not an interface") -var defaultRegistry *Registry - -func init() { - defaultRegistry = buildDefaultRegistry() -} - // A RegistryBuilder is used to build a Registry. This type is not goroutine // safe. type RegistryBuilder struct { @@ -260,6 +254,7 @@ func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDe // By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON // documents, a type map entry for bsontype.EmbeddedDocument should be registered. 
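With the map_codec.go changes above, map keys that implement encoding.TextMarshaler are encoded via MarshalText, and keys whose pointer type implements encoding.TextUnmarshaler are decoded via UnmarshalText, so composite key types no longer need the driver-specific KeyMarshaler/KeyUnmarshaler. A sketch with a hypothetical key type (tag is illustrative, not a driver type; imports of fmt, strings, and the bson package are assumed):

    // tag is an illustrative composite map key.
    type tag struct{ NS, Name string }

    func (t tag) MarshalText() ([]byte, error) { return []byte(t.NS + "/" + t.Name), nil }

    func (t *tag) UnmarshalText(b []byte) error {
        parts := strings.SplitN(string(b), "/", 2)
        if len(parts) != 2 {
            return fmt.Errorf("malformed tag key %q", b)
        }
        t.NS, t.Name = parts[0], parts[1]
        return nil
    }

    // Keys are written with MarshalText and read back with UnmarshalText.
    raw, err := bson.Marshal(map[tag]int64{{NS: "host", Name: "cpu"}: 42})
    if err != nil {
        return err
    }
    var back map[tag]int64
    err = bson.Unmarshal(raw, &back)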
For example, to force BSON documents // to decode to bson.Raw, use the following code: +// // rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { rb.typeMap[bt] = rt @@ -304,7 +299,7 @@ func (rb *RegistryBuilder) Build() *Registry { return registry } -// LookupEncoder inspects the registry for an encoder for the given type. The lookup precendence works as follows: +// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows: // // 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using // RegisterTypeEncoder for the interface will be selected. @@ -374,7 +369,7 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value // in interfaceEncoders defaultEnc, found := r.lookupInterfaceEncoder(t, false) if !found { - defaultEnc, _ = r.kindEncoders[t.Kind()] + defaultEnc = r.kindEncoders[t.Kind()] } return newCondAddrEncoder(ienc.ve, defaultEnc), true } @@ -382,7 +377,7 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value return nil, false } -// LookupDecoder inspects the registry for an decoder for the given type. The lookup precendence works as follows: +// LookupDecoder inspects the registry for an decoder for the given type. The lookup precedence works as follows: // // 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using // RegisterTypeDecoder for the interface will be selected. @@ -445,7 +440,7 @@ func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (Value // in interfaceDecoders defaultDec, found := r.lookupInterfaceDecoder(t, false) if !found { - defaultDec, _ = r.kindDecoders[t.Kind()] + defaultDec = r.kindDecoders[t.Kind()] } return newCondAddrDecoder(idec.vd, defaultDec), true } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go index 9ce901782f7e..be3f2081e9a7 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -331,14 +331,7 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) } - if decoder, ok := fd.decoder.(ValueDecoder); ok { - err = decoder.DecodeValue(dctx, vr, field.Elem()) - if err != nil { - return newDecodeError(fd.name, err) - } - continue - } - err = fd.decoder.DecodeValue(dctx, vr, field) + err = fd.decoder.DecodeValue(dctx, vr, field.Elem()) if err != nil { return newDecodeError(fd.name, err) } @@ -567,7 +560,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr } dominant, ok := dominantField(fields[i : i+advance]) if !ok || !sc.OverwriteDuplicatedInlinedFields { - return nil, fmt.Errorf("struct %s) duplicated key %s", t.String(), name) + return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) } sd.fl = append(sd.fl, dominant) sd.fm[name] = dominant diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go index 6f406c162327..62708c5c745e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ 
b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go @@ -34,21 +34,21 @@ func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructT // // The properties are defined below: // -// OmitEmpty Only include the field if it's not set to the zero value for the type or to -// empty slices or maps. +// OmitEmpty Only include the field if it's not set to the zero value for the type or to +// empty slices or maps. // -// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's -// feasible while preserving the numeric value. +// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's +// feasible while preserving the numeric value. // -// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within -// a float32. +// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within +// a float32. // -// Inline Inline the field, which must be a struct or a map, causing all of its fields -// or keys to be processed as if they were part of the outer struct. For maps, -// keys must not conflict with the bson keys of other struct fields. +// Inline Inline the field, which must be a struct or a map, causing all of its fields +// or keys to be processed as if they were part of the outer struct. For maps, +// keys must not conflict with the bson keys of other struct fields. // -// Skip This struct field should be skipped. This is usually denoted by parsing a "-" -// for the name. +// Skip This struct field should be skipped. This is usually denoted by parsing a "-" +// for the name. // // TODO(skriptble): Add tags for undefined as nil and for null as nil. type StructTags struct { @@ -67,20 +67,20 @@ type StructTags struct { // If there is no name in the struct tag fields, the struct field name is lowercased. // The tag formats accepted are: // -// "[][,[,]]" +// "[][,[,]]" // -// `(...) bson:"[][,[,]]" (...)` +// `(...) 
bson:"[][,[,]]" (...)` // // An example: // -// type T struct { -// A bool -// B int "myb" -// C string "myc,omitempty" -// D string `bson:",omitempty" json:"jsonkey"` -// E int64 ",minsize" -// F int64 "myf,omitempty,minsize" -// } +// type T struct { +// A bool +// B int "myb" +// C string "myc,omitempty" +// D string `bson:",omitempty" json:"jsonkey"` +// E int64 ",minsize" +// F int64 "myf,omitempty,minsize" +// } // // A struct tag either consisting entirely of '-' or with a bson key with a // value consisting entirely of '-' will return a StructTags with Skip true and diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go index fb5b51084da2..07f4b70e6d54 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go @@ -16,36 +16,12 @@ import ( "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) -var ptBool = reflect.TypeOf((*bool)(nil)) -var ptInt8 = reflect.TypeOf((*int8)(nil)) -var ptInt16 = reflect.TypeOf((*int16)(nil)) -var ptInt32 = reflect.TypeOf((*int32)(nil)) -var ptInt64 = reflect.TypeOf((*int64)(nil)) -var ptInt = reflect.TypeOf((*int)(nil)) -var ptUint8 = reflect.TypeOf((*uint8)(nil)) -var ptUint16 = reflect.TypeOf((*uint16)(nil)) -var ptUint32 = reflect.TypeOf((*uint32)(nil)) -var ptUint64 = reflect.TypeOf((*uint64)(nil)) -var ptUint = reflect.TypeOf((*uint)(nil)) -var ptFloat32 = reflect.TypeOf((*float32)(nil)) -var ptFloat64 = reflect.TypeOf((*float64)(nil)) -var ptString = reflect.TypeOf((*string)(nil)) - var tBool = reflect.TypeOf(false) -var tFloat32 = reflect.TypeOf(float32(0)) var tFloat64 = reflect.TypeOf(float64(0)) -var tInt = reflect.TypeOf(int(0)) -var tInt8 = reflect.TypeOf(int8(0)) -var tInt16 = reflect.TypeOf(int16(0)) var tInt32 = reflect.TypeOf(int32(0)) var tInt64 = reflect.TypeOf(int64(0)) var tString = reflect.TypeOf("") var tTime = reflect.TypeOf(time.Time{}) -var tUint = reflect.TypeOf(uint(0)) -var tUint8 = reflect.TypeOf(uint8(0)) -var tUint16 = reflect.TypeOf(uint16(0)) -var tUint32 = reflect.TypeOf(uint32(0)) -var tUint64 = reflect.TypeOf(uint64(0)) var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() var tByteSlice = reflect.TypeOf([]byte(nil)) @@ -74,7 +50,6 @@ var tDecimal = reflect.TypeOf(primitive.Decimal128{}) var tMinKey = reflect.TypeOf(primitive.MinKey{}) var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) var tD = reflect.TypeOf(primitive.D{}) -var tM = reflect.TypeOf(primitive.M{}) var tA = reflect.TypeOf(primitive.A{}) var tE = reflect.TypeOf(primitive.E{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go new file mode 100644 index 000000000000..c40973c8d436 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go @@ -0,0 +1,8 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsonoptions defines the optional configurations for the BSON codecs. 
+package bsonoptions diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go index 8a690e37ce31..54c76bf746a7 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go @@ -423,7 +423,7 @@ func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { if ejp.canonical { return nil, invalidJSONErrorForType("object", t) } - return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as decribed in RFC-3339", t) + return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t) } ejp.advanceState() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go index 3af6fcf01090..9695704246dd 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go @@ -19,7 +19,7 @@ import ( ) func wrapperKeyBSONType(key string) bsontype.Type { - switch string(key) { + switch key { case "$numberInt": return bsontype.Int32 case "$numberLong": @@ -269,7 +269,7 @@ func (ejv *extJSONValue) parseDouble() (float64, error) { return 0, fmt.Errorf("$numberDouble value should be string, but instead is %s", ejv.t) } - switch string(ejv.v.(string)) { + switch ejv.v.(string) { case "Infinity": return math.Inf(1), nil case "-Infinity": @@ -364,7 +364,7 @@ func (ejv *extJSONValue) parseRegex() (pattern, options string, err error) { for i, key := range regexObj.keys { val := regexObj.values[i] - switch string(key) { + switch key { case "pattern": if patFound { return "", "", errors.New("duplicate pattern key in $regularExpression") diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go index 605e41a138ca..99ed524b77ed 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go @@ -10,7 +10,6 @@ import ( "bytes" "encoding/base64" "fmt" - "go.mongodb.org/mongo-driver/bson/primitive" "io" "math" "sort" @@ -19,13 +18,9 @@ import ( "sync" "time" "unicode/utf8" -) -var ejvwPool = sync.Pool{ - New: func() interface{} { - return new(extJSONValueWriter) - }, -} + "go.mongodb.org/mongo-driver/bson/primitive" +) // ExtJSONValueWriterPool is a pool for ExtJSON ValueWriters. type ExtJSONValueWriterPool struct { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index 55378093a23e..ef5d837c2f82 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -86,12 +86,11 @@ type valueReader struct { // NewBSONDocumentReader returns a ValueReader using b for the underlying BSON // representation. Parameter b must be a BSON Document. -// -// TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes -// a []byte while the writer takes an io.Writer. We should have two versions of each, one that takes -// a []byte and one that takes an io.Reader or io.Writer. The []byte version will need to return a -// thing that can return the finished []byte since it might be reallocated when appended to. 
func NewBSONDocumentReader(b []byte) ValueReader { + // TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes a []byte while the + // TODO writer takes an io.Writer. We should have two versions of each, one that takes a []byte and one that takes an + // TODO io.Reader or io.Writer. The []byte version will need to return a thing that can return the finished []byte since + // TODO it might be reallocated when appended to. return newValueReader(b) } @@ -384,9 +383,13 @@ func (vr *valueReader) ReadBinary() (b []byte, btype byte, err error) { if err != nil { return nil, 0, err } + // Make a copy of the returned byte slice because it's just a subslice from the valueReader's + // buffer and is not safe to return in the unmarshaled value. + cp := make([]byte, len(b)) + copy(cp, b) vr.pop() - return b, btype, nil + return cp, btype, nil } func (vr *valueReader) ReadBoolean() (bool, error) { @@ -737,6 +740,9 @@ func (vr *valueReader) ReadValue() (ValueReader, error) { return vr, nil } +// readBytes reads length bytes from the valueReader starting at the current offset. Note that the +// returned byte slice is a subslice from the valueReader buffer and must be converted or copied +// before returning in an unmarshaled value. func (vr *valueReader) readBytes(length int32) ([]byte, error) { if length < 0 { return nil, fmt.Errorf("invalid length: %d", length) @@ -748,6 +754,7 @@ func (vr *valueReader) readBytes(length int32) ([]byte, error) { start := vr.offset vr.offset += int64(length) + return vr.d[start : start+int64(length)], nil } @@ -790,16 +797,6 @@ func (vr *valueReader) readCString() (string, error) { return string(vr.d[start : start+int64(idx)]), nil } -func (vr *valueReader) skipCString() error { - idx := bytes.IndexByte(vr.d[vr.offset:], 0x00) - if idx < 0 { - return io.EOF - } - // idx does not include the null byte - vr.offset += int64(idx) + 1 - return nil -} - func (vr *valueReader) readString() (string, error) { length, err := vr.readLength() if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go index a39c4ea4cb81..f95a08afd5d7 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go @@ -529,7 +529,7 @@ func (vw *valueWriter) WriteDocumentEnd() error { vw.pop() if vw.stack[vw.frame].mode == mCodeWithScope { - // We ignore the error here because of the gaurantee of writeLength. + // We ignore the error here because of the guarantee of writeLength. // See the docs for writeLength for more info. _ = vw.writeLength() vw.pop() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go index 7644df129098..dff65f87f711 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go @@ -76,27 +76,3 @@ func (sw *SliceWriter) Write(p []byte) (int, error) { *sw = append(*sw, p...) 
return written, nil } - -type writer []byte - -func (w *writer) Write(p []byte) (int, error) { - index := len(*w) - return w.WriteAt(p, int64(index)) -} - -func (w *writer) WriteAt(p []byte, off int64) (int, error) { - newend := off + int64(len(p)) - if newend < int64(len(*w)) { - newend = int64(len(*w)) - } - - if newend > int64(cap(*w)) { - buf := make([]byte, int64(2*cap(*w))+newend) - copy(buf, *w) - *w = buf - } - - *w = []byte(*w)[:newend] - copy([]byte(*w)[off:], p) - return len(p), nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go index 63a59ca08f34..7c91ae5186fb 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go @@ -38,6 +38,8 @@ const ( BinaryUUIDOld byte = 0x03 BinaryUUID byte = 0x04 BinaryMD5 byte = 0x05 + BinaryEncrypted byte = 0x06 + BinaryColumn byte = 0x07 BinaryUserDefined byte = 0x80 ) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go index 7f6b7694f9c6..6e189fa58996 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go @@ -33,6 +33,11 @@ var decPool = sync.Pool{ type Decoder struct { dc bsoncodec.DecodeContext vr bsonrw.ValueReader + + // We persist defaultDocumentM and defaultDocumentD on the Decoder to prevent overwriting from + // (*Decoder).SetContext. + defaultDocumentM bool + defaultDocumentD bool } // NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr. @@ -95,6 +100,12 @@ func (d *Decoder) Decode(val interface{}) error { if err != nil { return err } + if d.defaultDocumentM { + d.dc.DefaultDocumentM() + } + if d.defaultDocumentD { + d.dc.DefaultDocumentD() + } return decoder.DecodeValue(d.dc, d.vr, rval) } @@ -116,3 +127,15 @@ func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error { d.dc = dc return nil } + +// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". +func (d *Decoder) DefaultDocumentM() { + d.defaultDocumentM = true +} + +// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". +func (d *Decoder) DefaultDocumentD() { + d.defaultDocumentD = true +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go index 094be934f094..0134006d8eaf 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go @@ -9,21 +9,22 @@ // The BSON library handles marshalling and unmarshalling of values through a configurable codec system. For a description // of the codec system and examples of registering custom codecs, see the bsoncodec package. // -// Raw BSON +// # Raw BSON // // The Raw family of types is used to validate and retrieve elements from a slice of bytes. This // type is most useful when you want do lookups on BSON bytes without unmarshaling it into another // type. // // Example: -// var raw bson.Raw = ... // bytes from somewhere -// err := raw.Validate() -// if err != nil { return err } -// val := raw.Lookup("foo") -// i32, ok := val.Int32OK() -// // do something with i32... // -// Native Go Types +// var raw bson.Raw = ... 
// bytes from somewhere +// err := raw.Validate() +// if err != nil { return err } +// val := raw.Lookup("foo") +// i32, ok := val.Int32OK() +// // do something with i32... +// +// # Native Go Types // // The D and M types defined in this package can be used to build representations of BSON using native Go types. D is a // slice and M is a map. For more information about the use cases for these types, see the documentation on the type @@ -32,63 +33,64 @@ // Note that a D should not be constructed with duplicate key names, as that can cause undefined server behavior. // // Example: -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} // // When decoding BSON to a D or M, the following type mappings apply when unmarshalling: // -// 1. BSON int32 unmarshals to an int32. -// 2. BSON int64 unmarshals to an int64. -// 3. BSON double unmarshals to a float64. -// 4. BSON string unmarshals to a string. -// 5. BSON boolean unmarshals to a bool. -// 6. BSON embedded document unmarshals to the parent type (i.e. D for a D, M for an M). -// 7. BSON array unmarshals to a bson.A. -// 8. BSON ObjectId unmarshals to a primitive.ObjectID. -// 9. BSON datetime unmarshals to a primitive.DateTime. -// 10. BSON binary unmarshals to a primitive.Binary. -// 11. BSON regular expression unmarshals to a primitive.Regex. -// 12. BSON JavaScript unmarshals to a primitive.JavaScript. -// 13. BSON code with scope unmarshals to a primitive.CodeWithScope. -// 14. BSON timestamp unmarshals to an primitive.Timestamp. -// 15. BSON 128-bit decimal unmarshals to an primitive.Decimal128. -// 16. BSON min key unmarshals to an primitive.MinKey. -// 17. BSON max key unmarshals to an primitive.MaxKey. -// 18. BSON undefined unmarshals to a primitive.Undefined. -// 19. BSON null unmarshals to nil. -// 20. BSON DBPointer unmarshals to a primitive.DBPointer. -// 21. BSON symbol unmarshals to a primitive.Symbol. +// 1. BSON int32 unmarshals to an int32. +// 2. BSON int64 unmarshals to an int64. +// 3. BSON double unmarshals to a float64. +// 4. BSON string unmarshals to a string. +// 5. BSON boolean unmarshals to a bool. +// 6. BSON embedded document unmarshals to the parent type (i.e. D for a D, M for an M). +// 7. BSON array unmarshals to a bson.A. +// 8. BSON ObjectId unmarshals to a primitive.ObjectID. +// 9. BSON datetime unmarshals to a primitive.DateTime. +// 10. BSON binary unmarshals to a primitive.Binary. +// 11. BSON regular expression unmarshals to a primitive.Regex. +// 12. BSON JavaScript unmarshals to a primitive.JavaScript. +// 13. BSON code with scope unmarshals to a primitive.CodeWithScope. +// 14. BSON timestamp unmarshals to an primitive.Timestamp. +// 15. BSON 128-bit decimal unmarshals to an primitive.Decimal128. +// 16. BSON min key unmarshals to an primitive.MinKey. +// 17. BSON max key unmarshals to an primitive.MaxKey. +// 18. BSON undefined unmarshals to a primitive.Undefined. +// 19. BSON null unmarshals to nil. +// 20. BSON DBPointer unmarshals to a primitive.DBPointer. +// 21. BSON symbol unmarshals to a primitive.Symbol. // // The above mappings also apply when marshalling a D or M to BSON. Some other useful marshalling mappings are: // -// 1. time.Time marshals to a BSON datetime. -// 2. int8, int16, and int32 marshal to a BSON int32. -// 3. 
int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64 -// otherwise. -// 4. int64 marshals to BSON int64. -// 5. uint8 and uint16 marshal to a BSON int32. -// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, -// inclusive, and BSON int64 otherwise. -// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or -// undefined value into a string will yield the empty string.). +// 1. time.Time marshals to a BSON datetime. +// 2. int8, int16, and int32 marshal to a BSON int32. +// 3. int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64 +// otherwise. +// 4. int64 marshals to BSON int64. +// 5. uint8 and uint16 marshal to a BSON int32. +// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, +// inclusive, and BSON int64 otherwise. +// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or +// undefined value into a string will yield the empty string.). // -// Structs +// # Structs // // Structs can be marshalled/unmarshalled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended // JSON, the following rules apply: // -// 1. Only exported fields in structs will be marshalled or unmarshalled. +// 1. Only exported fields in structs will be marshalled or unmarshalled. // -// 2. When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element. +// 2. When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element. // For example, a struct field named "Foo" will generate key "foo". This can be overridden via a struct tag (e.g. // `bson:"fooField"` to generate key "fooField" instead). // -// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type. +// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type. // -// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is +// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is // marshalled as a BSON null value. // -// 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents +// 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents // unmarshalled into an interface{} field will be unmarshalled as a D. // // The encoding of each struct field can be customized by the "bson" struct tag. @@ -98,13 +100,14 @@ // are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: // // Example: -// structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser) +// +// structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser) // // The bson tag gives the name of the field, possibly followed by a comma-separated list of options. // The name may be empty in order to specify options without overriding the default field name. The following options can be used // to configure behavior: // -// 1. 
omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to +// 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to // the zero value. Fields with language primitive types such as integers, booleans, and strings are considered empty if // their value is equal to the zero value for the type (i.e. 0 for integers, false for booleans, and "" for strings). // Slices, maps, and arrays are considered empty if they are of length zero. Interfaces and pointers are considered @@ -113,16 +116,16 @@ // never considered empty and will be marshalled as embedded documents. // NOTE: It is recommended that this tag be used for all slice and map fields. // -// 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of +// 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of // the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For other // types, this tag is ignored. // -// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled -// into that field will be trucated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int, +// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled +// into that field will be truncated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int, // it will be unmarshalled as 3. If this tag is not specified, the decoder will throw an error if the value cannot be // decoded without losing precision. For float64 or non-numeric types, this tag is ignored. // -// 4. inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when +// 4. inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when // marshalling and "un-flattened" when unmarshalling. This means that all of the fields in that struct/map will be // pulled up one level and will become top-level fields rather than being fields in a nested document. For example, if a // map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will be @@ -132,7 +135,7 @@ // This tag can be used with fields that are pointers to structs. If an inlined pointer field is nil, it will not be // marshalled. For fields that are not maps or structs, this tag is ignored. // -// Marshalling and Unmarshalling +// # Marshalling and Unmarshalling // // Manually marshalling and unmarshalling can be done with the Marshal and Unmarshal family of functions. package bson diff --git a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go index 8d8fc8ce1b8b..db8d8ee92b5e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go @@ -7,6 +7,9 @@ package bson import ( + "bytes" + "encoding/json" + "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/bsontype" @@ -221,3 +224,25 @@ func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val return *sw, nil } + +// IndentExtJSON will prefix and indent the provided extended JSON src and append it to dst. 
+func IndentExtJSON(dst *bytes.Buffer, src []byte, prefix, indent string) error { + return json.Indent(dst, src, prefix, indent) +} + +// MarshalExtJSONIndent returns the extended JSON encoding of val with each line with prefixed +// and indented. +func MarshalExtJSONIndent(val interface{}, canonical, escapeHTML bool, prefix, indent string) ([]byte, error) { + marshaled, err := MarshalExtJSON(val, canonical, escapeHTML) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + err = IndentExtJSON(&buf, marshaled, prefix, indent) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go index a57e1d698777..ba7c9112e9b2 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go @@ -133,11 +133,9 @@ Loop: } // BigInt returns significand as big.Int and exponent, bi * 10 ^ exp. -func (d Decimal128) BigInt() (bi *big.Int, exp int, err error) { +func (d Decimal128) BigInt() (*big.Int, int, error) { high, low := d.GetBytes() - var posSign bool // positive sign - - posSign = high>>63&1 == 0 + posSign := high>>63&1 == 0 // positive sign switch high >> 58 & (1<<5 - 1) { case 0x1F: @@ -149,6 +147,7 @@ func (d Decimal128) BigInt() (bi *big.Int, exp int, err error) { return nil, 0, ErrParseNegInf } + var exp int if high>>61&3 == 3 { // Bits: 1*sign 2*ignored 14*exponent 111*significand. // Implicit 0b100 prefix in significand. @@ -171,7 +170,7 @@ func (d Decimal128) BigInt() (bi *big.Int, exp int, err error) { return new(big.Int), 0, nil } - bi = big.NewInt(0) + bi := big.NewInt(0) const host32bit = ^uint(0)>>32 == 0 if host32bit { bi.SetBits([]big.Word{big.Word(low), big.Word(low >> 32), big.Word(high), big.Word(high >> 32)}) @@ -182,7 +181,7 @@ func (d Decimal128) BigInt() (bi *big.Int, exp int, err error) { if !posSign { return bi.Neg(bi), exp, nil } - return + return bi, exp, nil } // IsNaN returns whether d is NaN. @@ -192,10 +191,9 @@ func (d Decimal128) IsNaN() bool { // IsInf returns: // -// +1 d == Infinity -// 0 other case -// -1 d == -Infinity -// +// +1 d == Infinity +// 0 other case +// -1 d == -Infinity func (d Decimal128) IsInf() int { if d.h>>58&(1<<5-1) != 0x1E { return 0 diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index a5a7d7c69878..ded3673165d2 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -10,8 +10,8 @@ package primitive import ( - "bytes" "crypto/rand" + "encoding" "encoding/binary" "encoding/hex" "encoding/json" @@ -34,6 +34,9 @@ var NilObjectID ObjectID var objectIDCounter = readRandomUint32() var processUnique = processUniqueBytes() +var _ encoding.TextMarshaler = ObjectID{} +var _ encoding.TextUnmarshaler = &ObjectID{} + // NewObjectID generates a new ObjectID. func NewObjectID() ObjectID { return NewObjectIDFromTimestamp(time.Now()) @@ -58,7 +61,9 @@ func (id ObjectID) Timestamp() time.Time { // Hex returns the hex encoding of the ObjectID as a string. func (id ObjectID) Hex() string { - return hex.EncodeToString(id[:]) + var buf [24]byte + hex.Encode(buf[:], id[:]) + return string(buf[:]) } func (id ObjectID) String() string { @@ -67,7 +72,7 @@ func (id ObjectID) String() string { // IsZero returns true if id is the empty ObjectID. 
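marshal.go gains IndentExtJSON, a thin wrapper over json.Indent, and MarshalExtJSONIndent, which marshals to extended JSON and indents in one call. A short sketch of the new helper; the document literal is arbitrary and an import of fmt is assumed:

    doc := bson.D{{"name", "example"}, {"pi", 3.14159}}

    // Relaxed (non-canonical) extended JSON, no HTML escaping, two-space indent.
    out, err := bson.MarshalExtJSONIndent(doc, false, false, "", "  ")
    if err != nil {
        return err
    }
    fmt.Println(string(out))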
func (id ObjectID) IsZero() bool { - return bytes.Equal(id[:], NilObjectID[:]) + return id == NilObjectID } // ObjectIDFromHex creates a new ObjectID from a hex string. It returns an error if the hex string is not a @@ -83,7 +88,7 @@ func ObjectIDFromHex(s string) (ObjectID, error) { } var oid [12]byte - copy(oid[:], b[:]) + copy(oid[:], b) return oid, nil } @@ -94,6 +99,23 @@ func IsValidObjectID(s string) bool { return err == nil } +// MarshalText returns the ObjectID as UTF-8-encoded text. Implementing this allows us to use ObjectID +// as a map key when marshalling JSON. See https://pkg.go.dev/encoding#TextMarshaler +func (id ObjectID) MarshalText() ([]byte, error) { + return []byte(id.Hex()), nil +} + +// UnmarshalText populates the byte slice with the ObjectID. Implementing this allows us to use ObjectID +// as a map key when unmarshalling JSON. See https://pkg.go.dev/encoding#TextUnmarshaler +func (id *ObjectID) UnmarshalText(b []byte) error { + oid, err := ObjectIDFromHex(string(b)) + if err != nil { + return err + } + *id = oid + return nil +} + // MarshalJSON returns the ObjectID as a string func (id ObjectID) MarshalJSON() ([]byte, error) { return json.Marshal(id.Hex()) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go index 51984e223e64..c72ccc1c4d49 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go @@ -111,7 +111,7 @@ func (d DBPointer) String() string { // Equal compares d to d2 and returns true if they are equal. func (d DBPointer) Equal(d2 DBPointer) bool { - return d.DB == d2.DB && bytes.Equal(d.Pointer[:], d2.Pointer[:]) + return d == d2 } // IsZero returns if d is the empty DBPointer. @@ -182,7 +182,7 @@ type MaxKey struct{} // // Example usage: // -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} type D []E // Map creates a map from the elements of the D. @@ -206,12 +206,12 @@ type E struct { // // Example usage: // -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} type M map[string]interface{} // An A is an ordered representation of a BSON array. 
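objectid.go now implements encoding.TextMarshaler and encoding.TextUnmarshaler on ObjectID, so the type can act as a map key for encoding/json and, together with the MapCodec changes earlier in this patch, for BSON maps as well. A sketch assuming the standard encoding/json package and the vendored primitive package:

    counts := map[primitive.ObjectID]int{primitive.NewObjectID(): 3}

    // encoding/json calls MarshalText for non-string map keys, producing
    // 24-character hex strings.
    data, err := json.Marshal(counts)
    if err != nil {
        return err
    }

    var decoded map[primitive.ObjectID]int
    err = json.Unmarshal(data, &decoded) // keys are parsed back through UnmarshalText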
// // Example usage: // -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} type A []interface{} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go index f397fa2d58a5..1cbe3884d192 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go @@ -14,6 +14,9 @@ import ( "go.mongodb.org/mongo-driver/bson/bsonrw" ) +var tRawValue = reflect.TypeOf(RawValue{}) +var tRaw = reflect.TypeOf(Raw(nil)) + var primitiveCodecs PrimitiveCodecs // PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types @@ -87,25 +90,3 @@ func (PrimitiveCodecs) RawDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.Valu val.Set(reflect.ValueOf(rdr)) return err } - -func (pc PrimitiveCodecs) encodeRaw(ec bsoncodec.EncodeContext, dw bsonrw.DocumentWriter, raw Raw) error { - var copier bsonrw.Copier - elems, err := raw.Elements() - if err != nil { - return err - } - for _, elem := range elems { - dvw, err := dw.WriteDocumentElement(elem.Key()) - if err != nil { - return err - } - - val := elem.Value() - err = copier.CopyValueFromBytes(dvw, val.Type, val.Value) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw.go b/vendor/go.mongodb.org/mongo-driver/bson/raw.go index 2aae9f56ab56..efd705daaa5d 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw.go @@ -15,7 +15,6 @@ import ( // ErrNilReader indicates that an operation was attempted on a nil bson.Reader. var ErrNilReader = errors.New("nil reader") -var errValidateDone = errors.New("validation loop complete") // Raw is a wrapper around a byte slice. It will interpret the slice as a // BSON document. This type is a wrapper around a bsoncore.Document. Errors returned from the @@ -84,9 +83,3 @@ func (r Raw) IndexErr(index uint) (RawElement, error) { // String implements the fmt.Stringer interface. func (r Raw) String() string { return bsoncore.Document(r).String() } - -// readi32 is a helper function for reading an int32 from slice of bytes. -func readi32(b []byte) int32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go index 09062d20854e..16d7573e7536 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go @@ -13,7 +13,7 @@ import "go.mongodb.org/mongo-driver/bson/bsoncodec" var DefaultRegistry = NewRegistryBuilder().Build() // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and -// deocders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the +// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the // PrimitiveCodecs type in this package. 
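The registry documentation above describes overriding the default type map so embedded documents decode into something other than bson.D. A sketch that leaves documents landing in interface{} struct fields as raw bytes; "data" is placeholder BSON from elsewhere:

    rb := bson.NewRegistryBuilder()
    rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
    reg := rb.Build()

    type envelope struct {
        Payload interface{} `bson:"payload"` // holds a bson.Raw after decoding
    }

    var e envelope
    err := bson.UnmarshalWithRegistry(reg, data, &e)

Per the ancestor logic shown earlier, D and M destinations still decode nested documents as D and M respectively; the type map entry takes effect where no such ancestor applies, such as plain interface{} struct fields.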
func NewRegistryBuilder() *bsoncodec.RegistryBuilder { rb := bsoncodec.NewRegistryBuilder() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/types.go b/vendor/go.mongodb.org/mongo-driver/bson/types.go index bf91f691adea..13a1c35cf6fd 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/types.go @@ -7,11 +7,7 @@ package bson import ( - "reflect" - "time" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" ) // These constants uniquely refer to each BSON type. @@ -38,48 +34,3 @@ const ( TypeMinKey = bsontype.MinKey TypeMaxKey = bsontype.MaxKey ) - -var tBinary = reflect.TypeOf(primitive.Binary{}) -var tBool = reflect.TypeOf(false) -var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) -var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) -var tDecimal = reflect.TypeOf(primitive.Decimal128{}) -var tD = reflect.TypeOf(D{}) -var tA = reflect.TypeOf(A{}) -var tDateTime = reflect.TypeOf(primitive.DateTime(0)) -var tUndefined = reflect.TypeOf(primitive.Undefined{}) -var tNull = reflect.TypeOf(primitive.Null{}) -var tRawValue = reflect.TypeOf(RawValue{}) -var tFloat32 = reflect.TypeOf(float32(0)) -var tFloat64 = reflect.TypeOf(float64(0)) -var tInt = reflect.TypeOf(int(0)) -var tInt8 = reflect.TypeOf(int8(0)) -var tInt16 = reflect.TypeOf(int16(0)) -var tInt32 = reflect.TypeOf(int32(0)) -var tInt64 = reflect.TypeOf(int64(0)) -var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) -var tOID = reflect.TypeOf(primitive.ObjectID{}) -var tRaw = reflect.TypeOf(Raw(nil)) -var tRegex = reflect.TypeOf(primitive.Regex{}) -var tString = reflect.TypeOf("") -var tSymbol = reflect.TypeOf(primitive.Symbol("")) -var tTime = reflect.TypeOf(time.Time{}) -var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) -var tUint = reflect.TypeOf(uint(0)) -var tUint8 = reflect.TypeOf(uint8(0)) -var tUint16 = reflect.TypeOf(uint16(0)) -var tUint32 = reflect.TypeOf(uint32(0)) -var tUint64 = reflect.TypeOf(uint64(0)) -var tMinKey = reflect.TypeOf(primitive.MinKey{}) -var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) - -var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() -var tEmptySlice = reflect.TypeOf([]interface{}(nil)) - -var zeroVal reflect.Value - -// this references the quantity of milliseconds between zero time and -// the unix epoch. useful for making sure that we convert time.Time -// objects correctly to match the legacy bson library's handling of -// time.Time values. -const zeroEpochMs = int64(62135596800000) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go index 6f9ca04d3ce8..f936ba183635 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go @@ -23,7 +23,7 @@ type Unmarshaler interface { } // ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representaiton of themselves. The BSON bytes and type can be +// BSON value representation of themselves. The BSON bytes and type can be // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. 
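The ValueUnmarshaler contract spelled out above requires implementations to copy the value bytes if they keep them past the call. A sketch of a type that stores the raw value verbatim; rawField is a hypothetical name, not a driver type, and an import of the bsontype package is assumed:

    // rawField keeps whatever BSON value was on the wire.
    type rawField struct {
        Type bsontype.Type
        Data []byte
    }

    func (rf *rawField) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
        rf.Type = t
        rf.Data = append([]byte(nil), data...) // copy: the driver may reuse the buffer
        return nil
    }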
type ValueUnmarshaler interface { diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go index b0d45212dbe4..52162f8aa02a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go @@ -45,7 +45,7 @@ func (db *DocumentBuilder) AppendInt32(key string, i32 int32) *DocumentBuilder { return db } -// AppendDocument will append a bson embeded document element using key +// AppendDocument will append a bson embedded document element using key // and doc to DocumentBuilder.doc func (db *DocumentBuilder) AppendDocument(key string, doc []byte) *DocumentBuilder { db.doc = AppendDocumentElement(db.doc, key, doc) diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go index b8db838a5fbe..17aad6d71ea1 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go @@ -15,7 +15,7 @@ // enough bytes. This library attempts to do no validation, it will only return // false if there are not enough bytes for an item to be read. For example, the // ReadDocument function checks the length, if that length is larger than the -// number of bytes availble, it will return false, if there are enough bytes, it +// number of bytes available, it will return false, if there are enough bytes, it // will return those bytes and true. It is the consumers responsibility to // validate those bytes. // @@ -65,11 +65,10 @@ func AppendHeader(dst []byte, t bsontype.Type, key string) []byte { // return append(AppendType(dst, t), key+string(0x00)...) } -// TODO(skriptble): All of the Read* functions should return src resliced to start just after what -// was read. +// TODO(skriptble): All of the Read* functions should return src resliced to start just after what was read. // ReadType will return the first byte of the provided []byte as a type. If -// there is no availble byte, false is returned. +// there is no available byte, false is returned. func ReadType(src []byte) (bsontype.Type, []byte, bool) { if len(src) < 1 { return 0, src, false @@ -197,12 +196,13 @@ func ReadString(src []byte) (string, []byte, bool) { // AppendDocumentStart reserves a document's length and returns the index where the length begins. // This index can later be used to write the length of the document. -// -// TODO(skriptble): We really need AppendDocumentStart and AppendDocumentEnd. -// AppendDocumentStart would handle calling ReserveLength and providing the index of the start of -// the document. AppendDocumentEnd would handle taking that start index, adding the null byte, -// calculating the length, and filling in the length at the start of the document. -func AppendDocumentStart(dst []byte) (index int32, b []byte) { return ReserveLength(dst) } +func AppendDocumentStart(dst []byte) (index int32, b []byte) { + // TODO(skriptble): We really need AppendDocumentStart and AppendDocumentEnd. AppendDocumentStart would handle calling + // TODO ReserveLength and providing the index of the start of the document. AppendDocumentEnd would handle taking that + // TODO start index, adding the null byte, calculating the length, and filling in the length at the start of the + // TODO document. 
+ return ReserveLength(dst) +} // AppendDocumentStartInline functions the same as AppendDocumentStart but takes a pointer to the // index int32 which allows this function to be used inline. @@ -231,7 +231,7 @@ func AppendDocumentEnd(dst []byte, index int32) ([]byte, error) { // AppendDocument will append doc to dst and return the extended buffer. func AppendDocument(dst []byte, doc []byte) []byte { return append(dst, doc...) } -// AppendDocumentElement will append a BSON embeded document element using key +// AppendDocumentElement will append a BSON embedded document element using key // and doc to dst and return the extended buffer. func AppendDocumentElement(dst []byte, key string, doc []byte) []byte { return AppendDocument(AppendHeader(dst, bsontype.EmbeddedDocument, key), doc) diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go index b687b5a8066c..d6e4bb069f04 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go @@ -13,7 +13,6 @@ import ( "io" "strconv" - "github.com/go-stack/stack" "go.mongodb.org/mongo-driver/bson/bsontype" ) @@ -37,13 +36,12 @@ func lengthError(bufferType string, length, rem int) error { type InsufficientBytesError struct { Source []byte Remaining []byte - Stack stack.CallStack } -// NewInsufficientBytesError creates a new InsufficientBytesError with the given Document, remaining -// bytes, and the current stack. +// NewInsufficientBytesError creates a new InsufficientBytesError with the given Document and +// remaining bytes. func NewInsufficientBytesError(src, rem []byte) InsufficientBytesError { - return InsufficientBytesError{Source: src, Remaining: rem, Stack: stack.Trace().TrimRuntime()} + return InsufficientBytesError{Source: src, Remaining: rem} } // Error implements the error interface. @@ -51,28 +49,6 @@ func (ibe InsufficientBytesError) Error() string { return "too few bytes to read next component" } -// ErrorStack returns a string representing the stack at the point where the error occurred. -func (ibe InsufficientBytesError) ErrorStack() string { - s := bytes.NewBufferString("too few bytes to read next component: [") - - for i, call := range ibe.Stack { - if i != 0 { - s.WriteString(", ") - } - - // go vet doesn't like %k even though it's part of stack's API, so we move the format - // string so it doesn't complain. (We also can't make it a constant, or go vet still - // complains.) - callFormat := "%k.%n %v" - - s.WriteString(fmt.Sprintf(callFormat, call, call, call)) - } - - s.WriteRune(']') - - return s.String() -} - // Equal checks that err2 also is an ErrTooSmall. func (ibe InsufficientBytesError) Equal(err2 error) bool { switch err2.(type) { diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go index 04d162faa19b..e35bd0cd9ad3 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go @@ -1,3 +1,9 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + package bsoncore import ( @@ -90,8 +96,8 @@ func (ds *DocumentSequence) Empty() bool { } } -//ResetIterator resets the iteration point for the Next method to the beginning of the document -//sequence. +// ResetIterator resets the iteration point for the Next method to the beginning of the document +// sequence. func (ds *DocumentSequence) ResetIterator() { if ds == nil { return diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go index ed95233d969e..54aa617cfd53 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go @@ -602,7 +602,7 @@ func (v Value) Time() time.Time { if !ok { panic(NewInsufficientBytesError(v.Data, v.Data)) } - return time.Unix(int64(dt)/1000, int64(dt)%1000*1000000) + return time.Unix(dt/1000, dt%1000*1000000) } // TimeOK is the same as Time, except it returns a boolean instead of @@ -615,7 +615,7 @@ func (v Value) TimeOK() (time.Time, bool) { if !ok { return time.Time{}, false } - return time.Unix(int64(dt)/1000, int64(dt)%1000*1000000), true + return time.Unix(dt/1000, dt%1000*1000000), true } // Regex returns the BSON regex value the Value represents. It panics if the value is a BSON diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile index b3ce3df3032d..d896edc99681 100644 --- a/vendor/go.opencensus.io/Makefile +++ b/vendor/go.opencensus.io/Makefile @@ -91,7 +91,7 @@ embedmd: .PHONY: install-tools install-tools: - go get -u golang.org/x/lint/golint - go get -u golang.org/x/tools/cmd/cover - go get -u golang.org/x/tools/cmd/goimports - go get -u github.com/rakyll/embedmd + go install golang.org/x/lint/golint@latest + go install golang.org/x/tools/cmd/cover@latest + go install golang.org/x/tools/cmd/goimports@latest + go install github.com/rakyll/embedmd@latest diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go index e5e4b4368c1a..11e31f421c5d 100644 --- a/vendor/go.opencensus.io/opencensus.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. 
func Version() string { - return "0.23.0" + return "0.24.0" } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go index 49fde3d8c826..fb3c19d6b646 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -28,6 +28,7 @@ var ( ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) + ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless) ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) ) @@ -70,6 +71,14 @@ var ( Aggregation: view.Count(), } + ClientStartedRPCsView = &view.View{ + Measure: ClientStartedRPCs, + Name: "grpc.io/client/started_rpcs", + Description: "Number of started client RPCs.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: view.Count(), + } + ClientSentMessagesPerRPCView = &view.View{ Measure: ClientSentMessagesPerRPC, Name: "grpc.io/client/sent_messages_per_rpc", diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go index b2059824a85f..fe0e971086e6 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -27,6 +27,7 @@ var ( ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. 
Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) + ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless) ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) ) @@ -73,6 +74,14 @@ var ( Aggregation: view.Count(), } + ServerStartedRPCsView = &view.View{ + Measure: ServerStartedRPCs, + Name: "grpc.io/server/started_rpcs", + Description: "Number of started server RPCs.", + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: view.Count(), + } + ServerReceivedMessagesPerRPCView = &view.View{ Name: "grpc.io/server/received_messages_per_rpc", Description: "Distribution of messages received count per RPC, by method.", diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go index 89cac9c4ec0b..9cb27320ca13 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go @@ -82,8 +82,10 @@ func methodName(fullname string) string { // statsHandleRPC processes the RPC events. func statsHandleRPC(ctx context.Context, s stats.RPCStats) { switch st := s.(type) { - case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: + case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: // do nothing for client + case *stats.Begin: + handleRPCBegin(ctx, st) case *stats.OutPayload: handleRPCOutPayload(ctx, st) case *stats.InPayload: @@ -95,6 +97,25 @@ func statsHandleRPC(ctx context.Context, s stats.RPCStats) { } } +func handleRPCBegin(ctx context.Context, s *stats.Begin) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + } + + if s.IsClient() { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), + ocstats.WithMeasurements(ClientStartedRPCs.M(1))) + } else { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), + ocstats.WithMeasurements(ServerStartedRPCs.M(1))) + } +} + func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { d, ok := ctx.Value(rpcDataKey).(*rpcData) if !ok { diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index c7ea64235726..f7c8434be06c 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -31,14 +31,14 @@ import ( // Handler is an http.Handler wrapper to instrument your HTTP server with // OpenCensus. It supports both stats and tracing. // -// Tracing +// # Tracing // // This handler is aware of the incoming request's span, reading it from request // headers as configured using the Propagation field. // The extracted span can be accessed from the incoming request's // context. // -// span := trace.FromContext(r.Context()) +// span := trace.FromContext(r.Context()) // // The server span will be automatically ended at the end of ServeHTTP. 
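// A short usage sketch (hypothetical, not part of this package): wrap any
// http.Handler in an ochttp.Handler and the incoming span is available from
// the request context as described above.
//
//	mux := http.NewServeMux()
//	mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
//		span := trace.FromContext(r.Context())
//		span.Annotate(nil, "handling /hello")
//		fmt.Fprintln(w, "hello")
//	})
//	log.Fatal(http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux}))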
type Handler struct { @@ -224,7 +224,9 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) { } // wrappedResponseWriter returns a wrapped version of the original -// ResponseWriter and only implements the same combination of additional +// +// ResponseWriter and only implements the same combination of additional +// // interfaces as the original. // This implementation is based on https://github.com/felixge/httpsnoop. func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go index 00d473ee0298..31477a464fd9 100644 --- a/vendor/go.opencensus.io/stats/doc.go +++ b/vendor/go.opencensus.io/stats/doc.go @@ -19,7 +19,7 @@ Package stats contains support for OpenCensus stats recording. OpenCensus allows users to create typed measures, record measurements, aggregate the collected data, and export the aggregated data. -Measures +# Measures A measure represents a type of data point to be tracked and recorded. For example, latency, request Mb/s, and response Mb/s are measures @@ -33,7 +33,7 @@ Libraries can define and export measures. Application authors can then create views and collect and break down measures by the tags they are interested in. -Recording measurements +# Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms @@ -49,7 +49,7 @@ Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. -Exemplars +# Exemplars For a given recorded measurement, the associated exemplar is a diagnostic map that gives more information about the measurement. @@ -64,6 +64,5 @@ then the trace span will be added to the exemplar associated with the measuremen When exported to a supporting back end, you should be able to easily navigate to example traces that fell into each bucket in the Distribution. - */ package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go index 36935e629b66..436dc791f834 100644 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -21,5 +21,11 @@ import ( // DefaultRecorder will be called for each Record call. var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) +// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but +// avoids interface{} conversion. +// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, +// but is interface{} here to avoid import loops +var MeasurementRecorder interface{} + // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index 2b97283462e2..8b5b99803ce3 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -86,10 +86,29 @@ func createRecordOption(ros ...Options) *recordOptions { return o } +type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) + // Record records one or multiple measurements with the same context at once. 
// If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { - RecordWithOptions(ctx, WithMeasurements(ms...)) + // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality + // (RecordOptions) we can reduce some allocations to speed up this hot path + if len(ms) == 0 { + return + } + recorder := internal.MeasurementRecorder.(measurementRecorder) + record := false + for _, m := range ms { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return + } + recorder(tag.FromContext(ctx), ms, nil) + return } // RecordWithTags records one or multiple measurements at once. diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go index 748bd568cda0..61f72d20da33 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -90,9 +90,9 @@ func Sum() *Aggregation { // // If len(bounds) >= 2 then the boundaries for bucket index i are: // -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length // // If len(bounds) is 0 then there is no histogram associated with the // distribution. There will be a single bucket with boundaries diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index ac22c93a2b57..bcd6e08c7481 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -59,8 +59,15 @@ func (c *collector) clearRows() { // encodeWithKeys encodes the map by using values // only associated with the keys provided. func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + // Compute the buffer length we will need ahead of time to avoid resizing later + reqLen := 0 + for _, k := range keys { + s, _ := m.Value(k) + // We will store each key + its length + reqLen += len(s) + 1 + } vb := &tagencoding.Values{ - Buffer: make([]byte, len(keys)), + Buffer: make([]byte, reqLen), } for _, k := range keys { v, _ := m.Value(k) diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go index 7bbedfe1ff23..60bf0e392540 100644 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -34,7 +34,7 @@ // Libraries can define views but it is recommended that in most cases registering // views be left up to applications. // -// Exporting +// # Exporting // // Collected and aggregated data can be exported to a metric collection // backend by registering its exporter. 
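For context on the stats changes above: the new fast path in Record only forwards measurements whose measure is subscribed to by at least one registered view, and encodeWithKeys now pre-sizes its tag buffer. A minimal recording setup looks roughly like the sketch below (the measure and view names are illustrative, not taken from this repository):

	package main

	import (
		"context"
		"log"

		"go.opencensus.io/stats"
		"go.opencensus.io/stats/view"
	)

	var latencyMs = stats.Float64("example.com/latency", "request latency", stats.UnitMilliseconds)

	func main() {
		// Registering a view subscribes the measure; without at least one
		// subscribed view, stats.Record drops the measurement early.
		v := &view.View{
			Name:        "example.com/latency_distribution",
			Description: "distribution of request latencies",
			Measure:     latencyMs,
			Aggregation: view.Distribution(0, 25, 50, 100, 250),
		}
		if err := view.Register(v); err != nil {
			log.Fatal(err)
		}

		// Measurements for subscribed measures are handed to the worker and,
		// from there, to any registered exporters.
		stats.Record(context.Background(), latencyMs.M(17.3))
	}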
diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 6e8d18b7f6d3..6a79cd8a34c3 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -33,6 +33,7 @@ func init() { defaultWorker = NewMeter().(*worker) go defaultWorker.start() internal.DefaultRecorder = record + internal.MeasurementRecorder = recordMeasurement } type measureRef struct { @@ -199,11 +200,21 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { defaultWorker.Record(tags, ms, attachments) } +func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { + defaultWorker.recordMeasurement(tags, ms, attachments) +} + // Record records a set of measurements ms associated with the given tags and attachments. func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + w.recordMeasurement(tags, ms.([]stats.Measurement), attachments) +} + +// recordMeasurement records a set of measurements ms associated with the given tags and attachments. +// This is the same as Record but without an interface{} type to avoid allocations +func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { req := &recordReq{ tm: tags, - ms: ms.([]stats.Measurement), + ms: ms, attachments: attachments, t: time.Now(), } @@ -221,6 +232,11 @@ func SetReportingPeriod(d time.Duration) { defaultWorker.SetReportingPeriod(d) } +// Stop stops the default worker. +func Stop() { + defaultWorker.Stop() +} + // SetReportingPeriod sets the interval between reporting aggregated views in // the program. If duration is less than or equal to zero, it enables the // default behavior. @@ -281,7 +297,7 @@ func (w *worker) start() { case <-w.quit: w.timer.Stop() close(w.c) - w.done <- true + close(w.done) return } } @@ -290,8 +306,11 @@ func (w *worker) start() { func (w *worker) Stop() { prodMgr := metricproducer.GlobalManager() prodMgr.DeleteProducer(w) - - w.quit <- true + select { + case <-w.quit: + default: + close(w.quit) + } <-w.done } diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go index b34d95e34a2c..8fb17226fe3c 100644 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.9 // +build go1.9 package tag diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go index 83adbce56b72..e28cf13cde97 100644 --- a/vendor/go.opencensus.io/tag/profile_not19.go +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package tag diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go index 04b1ee4f38ea..7a1616a55c5e 100644 --- a/vendor/go.opencensus.io/trace/doc.go +++ b/vendor/go.opencensus.io/trace/doc.go @@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing. The following assumes a basic familiarity with OpenCensus concepts. See http://opencensus.io - -Exporting Traces +# Exporting Traces To export collected tracing data, register at least one exporter. You can use one of the provided exporters or write your own. 
- trace.RegisterExporter(exporter) + trace.RegisterExporter(exporter) By default, traces will be sampled relatively rarely. To change the sampling frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) Be careful about using trace.AlwaysSample in a production application with significant traffic: a new trace will be started and exported for every request. -Adding Spans to a Trace +# Adding Spans to a Trace A trace consists of a tree of spans. In Go, the current span is carried in a context.Context. @@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F this to work, the function must take a context.Context as a parameter. Add these two lines to the top of the function: - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() StartSpan will create a new top-level span if the context doesn't contain another span, otherwise it will create a child span. diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go index 908c2497ed5b..80095a5f6c03 100644 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -44,7 +44,7 @@ func (lm lruMap) len() int { } func (lm lruMap) keys() []interface{} { - keys := make([]interface{}, len(lm.cacheKeys)) + keys := make([]interface{}, 0, len(lm.cacheKeys)) for k := range lm.cacheKeys { keys = append(keys, k) } diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go index b7d8aaf28477..b8fc1e495a9c 100644 --- a/vendor/go.opencensus.io/trace/trace_go11.go +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.11 // +build go1.11 package trace diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go index e25419859c02..da488fc87401 100644 --- a/vendor/go.opencensus.io/trace/trace_nongo11.go +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.11 // +build !go1.11 package trace diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 759cf53e0023..0b605b3d67d4 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -11,11 +11,11 @@ coverage.* gen/ /example/fib/fib +/example/fib/traces.txt /example/jaeger/jaeger /example/namedtracer/namedtracer /example/opencensus/opencensus /example/passthrough/passthrough /example/prometheus/prometheus -/example/prom-collector/prom-collector /example/zipkin/zipkin /example/otel-collector/otel-collector diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 7a5fdc07abf5..0f099f575959 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,8 +9,9 @@ linters: disable-all: true # Specifically enable linters we want to use. 
enable: - - deadcode + - depguard - errcheck + - godot - gofmt - goimports - gosimple @@ -19,29 +20,225 @@ linters: - misspell - revive - staticcheck - - structcheck - typecheck - unused - - varcheck - issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + # Setting to unlimited so the linter only is run once to debug all issues. + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + # Setting to unlimited so the linter only is run once to debug all issues. + max-same-issues: 0 + # Excluding configuration per-path, per-linter, per-text and per-source. exclude-rules: - # helpers in tests often (rightfully) pass a *testing.T as their first argument - - path: _test\.go - text: "context.Context should be the first parameter of a function" + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. + - path: '.*internal/.*' + text: "exported (method|function|type|const) (.+) should have comment or be unexported" linters: - revive - # Yes, they are, but it's okay in a test + # Yes, they are, but it's okay in a test. - path: _test\.go text: "exported func.*returns unexported type.*which can be annoying to use" linters: - revive + # Example test functions should be treated like main. + - path: example.*_test\.go + text: "calls to (.+) only in main[(][)] or init[(][)] functions" + linters: + - revive + include: + # revive exported should have comment or be unexported. + - EXC0012 + # revive package comment should be of the form ... + - EXC0013 linters-settings: + depguard: + # Check the list against standard lib. + # Default: false + include-go-root: true + # A list of packages for the list type specified. + # Default: [] + packages: + - "crypto/md5" + - "crypto/sha1" + - "crypto/**/pkix" + ignore-file-rules: + - "**/*_test.go" + additional-guards: + # Do not allow testing packages in non-test files. + - list-type: denylist + include-go-root: true + packages: + - testing + - github.com/stretchr/testify + ignore-file-rules: + - "**/*_test.go" + - "**/*test/*.go" + - "**/internal/matchers/*.go" + godot: + exclude: + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. + - ':$' + goimports: + local-prefixes: go.opentelemetry.io misspell: locale: US ignore-words: - cancelled - goimports: - local-prefixes: go.opentelemetry.io + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. + # Default: 0.8 + confidence: 0.01 + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports + - name: blank-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) reenable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument + disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + disabled: false + arguments: + - ["call-chain", "loop"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + disabled: false + arguments: + - "sayRepetitiveInsteadOfStutters" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + disabled: false + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + disabled: false diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 000000000000..32e4812755ee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,4 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects diff --git a/vendor/go.opentelemetry.io/otel/.markdown-link.json b/vendor/go.opentelemetry.io/otel/.markdown-link.json deleted file mode 100644 index f222ad89c310..000000000000 --- a/vendor/go.opentelemetry.io/otel/.markdown-link.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "ignorePatterns": [ - { - "pattern": "^http(s)?://localhost" - } - ], - "replacementPatterns": [ - { - "pattern": "^/registry", - "replacement": "https://opentelemetry.io/registry" - } - ], - "retryOn429": true, - "retryCount": 5, - "fallbackRetryDelay": "30s" -} diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 42dada4905bc..2bfe0a9411ba 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,537 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.12.0/0.35.0] 2023-01-28 + +### Added + +- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `int64` Observer callbacks during their creation. (#3507) +- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `float64` Observer callbacks during their creation. (#3507) +- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`. + These additions are used to enable external metric Producers. 
(#3524) +- The `Callback` function type to `go.opentelemetry.io/otel/metric`. + This new named function type is registered with a `Meter`. (#3564) +- The `go.opentelemetry.io/otel/semconv/v1.13.0` package. + The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499) + - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`. + - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`. +- The `go.opentelemetry.io/otel/semconv/v1.14.0` package. + The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566) +- The `go.opentelemetry.io/otel/semconv/v1.15.0` package. + The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578) +- The `go.opentelemetry.io/otel/semconv/v1.16.0` package. + The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579) +- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`. 
+ These instruments are use as replacements of the depreacted `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages.(#3575, #3586) + - `Float64ObservableCounter` replaces the `asyncfloat64.Counter` + - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter` + - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge` + - `Int64ObservableCounter` replaces the `asyncint64.Counter` + - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter` + - `Int64ObservableGauge` replaces the `asyncint64.Gauge` + - `Float64Counter` replaces the `syncfloat64.Counter` + - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter` + - `Float64Histogram` replaces the `syncfloat64.Histogram` + - `Int64Counter` replaces the `syncint64.Counter` + - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter` + - `Int64Histogram` replaces the `syncint64.Histogram` +- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`. + This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116) +- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`. + This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487) +- The `go.opentelemetry.io/otel/semconv/v1.17.0` package. + The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599) + +### Changed + +- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500) +- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and confguration based on the instrument type. (#3507) + - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`. + - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`. + - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`. + - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`. +- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package. + This `Registration` can be used to unregister callbacks. (#3522) +- Global error handler uses an atomic value instead of a mutex. (#3543) +- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541) +- Global logger uses an atomic value instead of a mutex. (#3545) +- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551) +- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions. + This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557) +- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in erros identifying their signal name. + Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. 
(#3516) +- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514) +- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562) + - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter` + - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter` + - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram` + - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter` + - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter` + - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge` +- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed. + - The named `Callback` replaces the inline function parameter. (#3564) + - `Callback` is required to return an error. (#3576) + - `Callback` accepts the added `Observer` parameter added. + This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584) + - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587) +- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions. + This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint. + Instead it uses the `net.sock.peer` attributes. (#3581) +- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487) + +### Fixed + +- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549) +- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter. + Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584) + +### Deprecated + +- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated. + Use `NewMetricProducer` instead. (#3541) +- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated. + Use `NewTracerProvider` instead. (#3116) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520) +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. 
(#3530) + - The `Counter` method is replaced by `Meter.Int64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64Counter` + - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Int64Histogram` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64Counter` + - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Float64Histogram` + +## [1.11.2/0.34.0] 2022-12-05 + +### Added + +- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. + This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) +- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. + This can be disabled using the `WithoutScopeInfo()` option added to that package.(#3273, #3357) +- OTLP exporters now recognize: (#3363) + - `OTEL_EXPORTER_OTLP_INSECURE` + - `OTEL_EXPORTER_OTLP_TRACES_INSECURE` + - `OTEL_EXPORTER_OTLP_METRICS_INSECURE` + - `OTEL_EXPORTER_OTLP_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` +- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459) +- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487) + +### Changed + +- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`. + Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option. + The views registered with the `MeterProvider` apply to all `Reader`s. 
(#3387) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260) +- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260) +- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260) + +### Fixed + +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369) +- Remove comparable requirement for `Reader`s. (#3387) +- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389) +- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398) +- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) +- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) +- Reenabled Attribute Filters in the Metric SDK. (#3396) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) +- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) +- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) +- Prevent duplicate Prometheus description, unit, and type. (#3469) +- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489) + +### Removed + +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486) + +### Deprecated + +- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated. + Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476) + +## [1.11.1/0.33.0] 2022-10-19 + +### Added + +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation. + By default, it will register with the default Prometheus registerer. + A non-default registerer can be used by passing the `WithRegisterer` option. (#3239) +- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285) + +### Changed + +- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error. + It will return an error if the exporter fails to register with Prometheus. 
(#3239) + +### Fixed + +- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963) +- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it. + This fixes the implementation to be compliant with the W3C specification. (#3226) +- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252) +- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281) +- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293) +- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278) +- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup. + Instead the exporter is defined as an "unchecked" collector for Prometheus. + This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names. + This can be disabled using the `WithoutUnits()` option added to that package. (#3352) + +## [1.11.0/0.32.3] 2022-10-12 + +### Added + +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261) + +### Changed + +- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214) +- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`. + This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235) + +## [0.32.2] Metric SDK (Alpha) - 2022-10-11 + +### Added + +- Added an example of using metric views to customize instruments. (#3177) +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261) + +### Changed + +- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220) +- Update histogram default bounds to match the requirements of the latest specification. (#3222) +- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265) + +### Fixed + +- Use default view if instrument does not match any registered view of a reader. (#3224, #3237) +- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251) +- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251) +- Log a warning when a conflicting instrument (e.g. 
description, unit, data-type) is created instead of returning an error. (#3251) +- The OpenCensus bridge no longer sends empty batches of metrics. (#3263) + +## [0.32.1] Metric SDK (Alpha) - 2022-09-22 + +### Changed + +- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting. + Invalid characters are replaced with `_`. (#3212) + +### Added + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192) +- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206) + +### Fixed + +- Updated go.mods to point to valid versions of the sdk. (#3216) +- Set the `MeterProvider` resource on all exported metric data. (#3218) + +## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18 + +### Changed + +- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification. + Please see the package documentation for how the new SDK is initialized and configured. (#3175) +- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179) + +### Removed + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed. + A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed. + A replacement package that supports the new metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. 
(#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175) +- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175) + +## [1.10.0] - 2022-09-09 + +### Added + +- Support Go 1.19. (#3077) + Include compatibility testing and document support. (#3077) +- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106) +- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0. (#3107) + +### Changed + +- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096) +- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110) +- All exporters will be shut down even if one reports an error. (#3091) +- Ensure valid UTF-8 when truncating over-length attribute values. (#3156) + +## [1.9.0/0.0.3] - 2022-08-01 + +### Added + +- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999) +- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package. + The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009) +- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package. + The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010) +- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018) + +### Fixed + +- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029) + +## [1.8.0/0.31.0] - 2022-07-08 + +### Added + +- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods +of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911) + +### Changed + +- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886) +- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976) +- Move metric no-op implementation from `nonrecording` to `metric` package. (#2866) + +### Removed + +- Support for go1.16. Support is now only for go1.17 and go1.18. (#2917) + +### Deprecated + +- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated. + Use the equivalent `Scope` struct instead. (#2977) +- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated. + Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977) + +## [1.7.0/0.30.0] - 2022-04-28 + +### Added + +- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package. + The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763) +- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package. + The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. 
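
As a rough illustration of how these generated `semconv` packages are commonly consumed (the service name and version below are placeholders, not values from this changelog), a resource can be built from the typed constants:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
)

func main() {
	// Each semconv package exposes typed attribute keys generated from one
	// version of the specification, plus the matching SchemaURL constant.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceNameKey.String("checkout-service"), // placeholder
		semconv.ServiceVersionKey.String("0.1.0"),         // placeholder
	)
	fmt.Println(res.SchemaURL(), res.Attributes())
}
```
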
(#2792) +- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package. + The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842) +- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776) + +### Fixed + +- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784) +- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786) + +### Changed + +- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790) +- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`. + The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790) +- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`. + Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790) + +### Deprecated + +- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `Iterator.Attribute` method instead. (#2790) +- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790) +- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `MergeIterator.Attribute` method instead. (#2790) + +### Removed + +- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) +- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) + +## [0.29.0] - 2022-04-11 + +### Added + +- The metrics global package was added back into several test files. (#2764) +- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package. + This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750) + +### Removed + +- Removed module the `go.opentelemetry.io/otel/sdk/export/metric`. + Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720) + +### Changed + +- Don't panic anymore when setting a global MeterProvider to itself. (#2749) +- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`. + This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748) + +## [1.6.3] - 2022-04-07 + +### Fixed + +- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773) + +## [1.6.2] - 2022-04-06 + +### Changed + +- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749) +- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`. + This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. 
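
The attribute iterator rename noted above (`Label` and `IndexedLabel` deprecated in favor of `Attribute` and `IndexedAttribute`) looks like this in practice; a small sketch using only the public `attribute` package:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("service", "checkout"),
		attribute.Int("retries", 3),
	)

	// Iterate with the non-deprecated accessors: Attribute replaces Label
	// and IndexedAttribute replaces IndexedLabel.
	iter := set.Iter()
	for iter.Next() {
		i, kv := iter.IndexedAttribute()
		fmt.Printf("%d: %s=%v\n", i, kv.Key, kv.Value.AsInterface())
	}
}
```
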
(#2748) + +## [1.6.1] - 2022-03-28 + +### Fixed + +- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant. + Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744) + +### Security + +- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`. + This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728) + +## [1.6.0/0.28.0] - 2022-03-23 + +### ⚠️ Notice ⚠️ + +This update is a breaking change of the unstable Metrics API. +Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified. + +### Added + +- Add metrics exponential histogram support. + New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502) +- Add Go 1.18 to our compatibility tests. (#2679) +- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517) +- Add the `metric/global` for obtaining and setting the global `MeterProvider`. (#2660) + +### Changed + +- The metrics API has been significantly changed to match the revised OpenTelemetry specification. + High-level changes include: + + - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s. + These `InstrumentProvider`s are managed with a `Meter`. + - Synchronous and asynchronous instruments are grouped into their own packages based on value types. + - Asynchronous callbacks can now be registered with a `Meter`. + + Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660) + +### Fixed + +- Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677) + +## [1.5.0] - 2022-03-16 + +### Added + +- Log the Exporters configuration in the TracerProviders message. (#2578) +- Added support to configure the span limits with environment variables. + The following environment variables are supported. (#2606, #2637) + - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` + - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_EVENT_COUNT_LIMIT` + - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_LINK_COUNT_LIMIT` + - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` + + If the provided environment variables are invalid (negative), the default values would be used. +- Rename the `gc` runtime name to `go` (#2560) +- Add resource container ID detection. (#2418) +- Add span attribute value length limit. + The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`. + The default limit for this resource is "unlimited". (#2637) +- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`. + This option replaces the `WithSpanLimits` option. + Zero or negative values will not be changed to the default value like `WithSpanLimits` does. + Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited. + Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. 
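
To make the span-limit options above concrete, here is a small sketch of configuring a `TracerProvider` with `WithRawSpanLimits`; the limit values are arbitrary examples, not recommendations:

```go
package main

import (
	"context"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// With WithRawSpanLimits, zero and negative values are honored as-is:
	// zero disables the limited resource and a negative value means unlimited.
	limits := sdktrace.NewSpanLimits()      // start from the defaults
	limits.AttributeValueLengthLimit = 1024 // example value
	limits.AttributeCountLimit = 64         // example value
	limits.EventCountLimit = -1             // unlimited events

	tp := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
	defer func() { _ = tp.Shutdown(context.Background()) }()

	_ = tp.Tracer("example")
}
```
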
(#2637) + +### Changed + +- Drop oldest tracestate `Member` when capacity is reached. (#2592) +- Add event and link drop counts to the exported data from the `oltptrace` exporter. (#2601) +- Unify path cleaning functionally in the `otlpmetric` and `otlptrace` configuration. (#2639) +- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640) +- Introduce new internal `envconfig` package for OTLP exporters. (#2608) +- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661) + +### Fixed + +- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616) +- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625) +- Unlimited span limits are now supported (negative values). (#2636, #2637) + +### Deprecated + +- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`. + Use `WithRawSpanLimits` instead. + That option allows setting unlimited and zero limits, this option does not. + This option will be kept until the next major version incremented release. (#2637) + ## [1.4.1] - 2022-02-16 ### Fixed @@ -1689,7 +2220,24 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.4.1...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.12.0...HEAD +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 +[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 +[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 +[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 +[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 +[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 +[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 +[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 +[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 +[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 +[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 +[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 +[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 +[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 +[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 [1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 [1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 [1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 808755fe20cf..c4012ed6ca1c 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # 
https://help.github.com/en/articles/about-code-owners # -* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo @MadVikingGod @pellared +* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 7dead7084d3e..9371a481ab12 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -506,8 +506,9 @@ Approvers: - [Josh MacDonald](https://github.com/jmacd), LightStep - [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [David Ashpole](https://github.com/dashpole), Google -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Robert Pająk](https://github.com/pellared), Splunk +- [Chester Cheung](https://github.com/hanyuancheung), Tencent +- [Damien Mathieu](https://github.com/dmathieu), Auth0/Okta Maintainers: @@ -515,6 +516,10 @@ Maintainers: - [Anthony Mirabella](https://github.com/Aneurysm9), AWS - [Tyler Yahn](https://github.com/MrAlias), Splunk +Emeritus: + +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep + ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index b085561dbaaf..befb040a77b3 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -17,7 +17,7 @@ TOOLS_MOD_DIR := ./internal/tools ALL_DOCS := $(shell find . -name '*.md' -type f | sort) ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) -ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort) +ALL_COVERAGE_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) GO = go TIMEOUT = 60 @@ -25,7 +25,7 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: license-check misspell go-mod-tidy golangci-lint-fix test-default +precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage # Tools @@ -45,7 +45,13 @@ SEMCONVGEN = $(TOOLS)/semconvgen $(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen CROSSLINK = $(TOOLS)/crosslink -$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink + +SEMCONVKIT = $(TOOLS)/semconvkit +$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit + +DBOTCONF = $(TOOLS)/dbotconf +$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint @@ -66,7 +72,7 @@ GOJQ = $(TOOLS)/gojq $(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) +tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) # Build @@ -123,6 +129,7 @@ test-coverage: | $(GOCOVMERGE) (cd "$${dir}" && \ $(GO) list ./... \ | grep -v third_party \ + | grep -v 'semconv/v.*' \ | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ $(GO) tool cover -html=coverage.out -o coverage.html); \ done; \ @@ -140,8 +147,8 @@ golangci-lint/%: | $(GOLANGCI_LINT) .PHONY: crosslink crosslink: | $(CROSSLINK) - @echo "cross-linking all go modules" \ - && $(CROSSLINK) + @echo "Updating intra-repository dependencies in all go modules" \ + && $(CROSSLINK) --root=$(shell pwd) --prune .PHONY: go-mod-tidy go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) @@ -149,7 +156,7 @@ go-mod-tidy/%: DIR=$* go-mod-tidy/%: | crosslink @echo "$(GO) mod tidy in $(DIR)" \ && cd $(DIR) \ - && $(GO) mod tidy + && $(GO) mod tidy -compat=1.18 .PHONY: lint-modules lint-modules: go-mod-tidy @@ -159,7 +166,11 @@ lint: misspell lint-modules golangci-lint .PHONY: vanity-import-check vanity-import-check: | $(PORTO) - @$(PORTO) --include-internal -l . + @$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)" + +.PHONY: vanity-import-fix +vanity-import-fix: | $(PORTO) + @$(PORTO) --include-internal -w . .PHONY: misspell misspell: | $(MISSPELL) @@ -167,7 +178,7 @@ misspell: | $(MISSPELL) .PHONY: license-check license-check: - @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*') ; do \ + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \ done); \ if [ -n "$${licRes}" ]; then \ @@ -175,20 +186,14 @@ license-check: exit 1; \ fi -DEPENDABOT_PATH=./.github/dependabot.yml +DEPENDABOT_CONFIG = .github/dependabot.yml .PHONY: dependabot-check -dependabot-check: - @result=$$( \ - for f in $$( find . 
-type f -name go.mod -exec dirname {} \; | sed 's/^.//' ); \ - do grep -q "directory: \+$$f" $(DEPENDABOT_PATH) \ - || echo "$$f"; \ - done; \ - ); \ - if [ -n "$$result" ]; then \ - echo "missing dependabot entry:"; echo "$$result"; \ - echo "new modules need to be added to the $(DEPENDABOT_PATH) file"; \ - exit 1; \ - fi +dependabot-check: | $(DBOTCONF) + @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)" + +.PHONY: dependabot-generate +dependabot-generate: | $(DBOTCONF) + @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) .PHONY: check-clean-work-tree check-clean-work-tree: @@ -200,6 +205,15 @@ check-clean-work-tree: exit 1; \ fi +SEMCONVPKG ?= "semconv/" +.PHONY: semconv-generate +semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 ) + [ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 ) + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + .PHONY: prerelease prerelease: | $(MULTIMOD) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 21c2a71612e4..1b2ee21fbf53 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -30,27 +30,36 @@ Project versioning information and stability guarantees can be found in the ### Compatibility -OpenTelemetry-Go attempts to track the current supported versions of the -[Go language](https://golang.org/doc/devel/release#policy). The release -schedule after a new minor version of go is as follows: +OpenTelemetry-Go ensures compatibility with the current supported versions of +the [Go language](https://golang.org/doc/devel/release#policy): -- The first release or one month, which ever is sooner, will add build steps for the new go version. -- The first release after three months will remove support for the oldest go version. +> Each major Go release is supported until there are two newer major releases. +> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. -This project is tested on the following systems. +For versions of Go that are no longer supported upstream, opentelemetry-go will +stop ensuring compatibility with these versions in the following manner: + +- A minor release of opentelemetry-go will be made to add support for the new + supported release of Go. +- The following minor release of opentelemetry-go will remove compatibility + testing for the oldest (now archived upstream) version of Go. This, and + future, releases of opentelemetry-go may include features only supported by + the currently supported versions of Go. + +Currently, this project supports the following environments. 
| OS | Go Version | Architecture | | ------- | ---------- | ------------ | -| Ubuntu | 1.17 | amd64 | -| Ubuntu | 1.16 | amd64 | -| Ubuntu | 1.17 | 386 | -| Ubuntu | 1.16 | 386 | -| MacOS | 1.17 | amd64 | -| MacOS | 1.16 | amd64 | -| Windows | 1.17 | amd64 | -| Windows | 1.16 | amd64 | -| Windows | 1.17 | 386 | -| Windows | 1.16 | 386 | +| Ubuntu | 1.19 | amd64 | +| Ubuntu | 1.18 | amd64 | +| Ubuntu | 1.19 | 386 | +| Ubuntu | 1.18 | 386 | +| MacOS | 1.19 | amd64 | +| MacOS | 1.18 | amd64 | +| Windows | 1.19 | amd64 | +| Windows | 1.18 | amd64 | +| Windows | 1.19 | 386 | +| Windows | 1.18 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -76,7 +85,7 @@ libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the -[go.opentelemetry.io/otel/api](https://pkg.go.dev/go.opentelemetry.io/otel/api) +[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) package. The included [examples](./example/) are a good way to see some practical uses of this process. @@ -95,8 +104,6 @@ All officially supported exporters for the OpenTelemetry project are contained i | [stdout](./exporters/stdout/) | ✓ | ✓ | | [Zipkin](./exporters/zipkin/) | | ✓ | -Additionally, OpenTelemetry community supported exporters can be found in the [contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/exporters). - ## Contributing See the [contributing documentation](CONTRIBUTING.md). diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index e3bff66c6a95..77d56c936515 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -2,35 +2,28 @@ ## Semantic Convention Generation -If a new version of the OpenTelemetry Specification has been released it will be necessary to generate a new -semantic convention package from the YAML definitions in the specification repository. There is a `semconvgen` utility -installed by `make tools` that can be used to generate the a package with the name matching the specification -version number under the `semconv` package. This will ideally be done soon after the specification release is -tagged. Make sure that the specification repo contains a checkout of the the latest tagged release so that the -generated files match the released semantic conventions. +New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated. +The `semconv-generate` make target is used for this. -There are currently two categories of semantic conventions that must be generated, `resource` and `trace`. +1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag. +2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` +3. Run the `make semconv-generate ...` target from this repository. -``` -.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/resource -t semconv/template.j2 -.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/trace -t semconv/template.j2 -``` - -Using default values for all options other than `input` will result in using the `template.j2` template to -generate `resource.go` and `trace.go` in `/path/to/otelgo/repo/semconv/`. 
+For example, -There are several ancillary files that are not generated and should be copied into the new package from the -prior package, with updates made as appropriate to canonical import path statements and constant values. -These files include: +```sh +export TAG="v1.13.0" # Change to the release version you are generating. +export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification" +git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" -b "$TAG" +docker pull otel/semconvgen:latest +make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO. +``` -* doc.go -* exception.go -* http(_test)?.go -* schema.go +This should create a new sub-package of [`semconv`](./semconv). +Ensure things look correct before submitting a pull request to include the addition. -Uses of the previous schema version in this repository should be updated to use the newly generated version. -No tooling for this exists at present, so use find/replace in your editor of choice or craft a `grep | sed` -pipeline if you like living on the edge. +**Note**, the generation code was changed to generate versions >= 1.13. +To generate versions prior to this, checkout the old release of this repository (i.e. [2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)). ## Pre-Release @@ -130,3 +123,5 @@ Once verified be sure to [make a release for the `contrib` repository](https://g Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/). Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. + +[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 8b940f78dc4a..fe2bc5766cff 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -21,19 +21,17 @@ import ( ) type ( - // Encoder is a mechanism for serializing a label set into a - // specific string representation that supports caching, to - // avoid repeated serialization. An example could be an - // exporter encoding the label set into a wire representation. + // Encoder is a mechanism for serializing an attribute set into a specific + // string representation that supports caching, to avoid repeated + // serialization. An example could be an exporter encoding the attribute + // set into a wire representation. Encoder interface { - // Encode returns the serialized encoding of the label - // set using its Iterator. This result may be cached - // by a attribute.Set. + // Encode returns the serialized encoding of the attribute set using + // its Iterator. This result may be cached by a attribute.Set. Encode(iterator Iterator) string - // ID returns a value that is unique for each class of - // label encoder. Label encoders allocate these using - // `NewEncoderID`. + // ID returns a value that is unique for each class of attribute + // encoder. Attribute encoders allocate these using `NewEncoderID`. ID() EncoderID } @@ -43,54 +41,53 @@ type ( value uint64 } - // defaultLabelEncoder uses a sync.Pool of buffers to reduce - // the number of allocations used in encoding labels. This - // implementation encodes a comma-separated list of key=value, - // with '/'-escaping of '=', ',', and '\'. 
- defaultLabelEncoder struct { - // pool is a pool of labelset builders. The buffers in this - // pool grow to a size that most label encodings will not - // allocate new memory. + // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of + // allocations used in encoding attributes. This implementation encodes a + // comma-separated list of key=value, with '/'-escaping of '=', ',', and + // '\'. + defaultAttrEncoder struct { + // pool is a pool of attribute set builders. The buffers in this pool + // grow to a size that most attribute encodings will not allocate new + // memory. pool sync.Pool // *bytes.Buffer } ) -// escapeChar is used to ensure uniqueness of the label encoding where -// keys or values contain either '=' or ','. Since there is no parser -// needed for this encoding and its only requirement is to be unique, -// this choice is arbitrary. Users will see these in some exporters -// (e.g., stdout), so the backslash ('\') is used as a conventional choice. +// escapeChar is used to ensure uniqueness of the attribute encoding where +// keys or values contain either '=' or ','. Since there is no parser needed +// for this encoding and its only requirement is to be unique, this choice is +// arbitrary. Users will see these in some exporters (e.g., stdout), so the +// backslash ('\') is used as a conventional choice. const escapeChar = '\\' var ( - _ Encoder = &defaultLabelEncoder{} + _ Encoder = &defaultAttrEncoder{} - // encoderIDCounter is for generating IDs for other label - // encoders. + // encoderIDCounter is for generating IDs for other attribute encoders. encoderIDCounter uint64 defaultEncoderOnce sync.Once defaultEncoderID = NewEncoderID() - defaultEncoderInstance *defaultLabelEncoder + defaultEncoderInstance *defaultAttrEncoder ) -// NewEncoderID returns a unique label encoder ID. It should be -// called once per each type of label encoder. Preferably in init() or -// in var definition. +// NewEncoderID returns a unique attribute encoder ID. It should be called +// once per each type of attribute encoder. Preferably in init() or in var +// definition. func NewEncoderID() EncoderID { return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} } -// DefaultEncoder returns a label encoder that encodes labels -// in such a way that each escaped label's key is followed by an equal -// sign and then by an escaped label's value. All key-value pairs are -// separated by a comma. +// DefaultEncoder returns an attribute encoder that encodes attributes in such +// a way that each escaped attribute's key is followed by an equal sign and +// then by an escaped attribute's value. All key-value pairs are separated by +// a comma. // -// Escaping is done by prepending a backslash before either a -// backslash, equal sign or a comma. +// Escaping is done by prepending a backslash before either a backslash, equal +// sign or a comma. func DefaultEncoder() Encoder { defaultEncoderOnce.Do(func() { - defaultEncoderInstance = &defaultLabelEncoder{ + defaultEncoderInstance = &defaultAttrEncoder{ pool: sync.Pool{ New: func() interface{} { return &bytes.Buffer{} @@ -101,15 +98,14 @@ func DefaultEncoder() Encoder { return defaultEncoderInstance } -// Encode is a part of an implementation of the LabelEncoder -// interface. -func (d *defaultLabelEncoder) Encode(iter Iterator) string { +// Encode is a part of an implementation of the AttributeEncoder interface. 
+func (d *defaultAttrEncoder) Encode(iter Iterator) string { buf := d.pool.Get().(*bytes.Buffer) defer d.pool.Put(buf) buf.Reset() for iter.Next() { - i, keyValue := iter.IndexedLabel() + i, keyValue := iter.IndexedAttribute() if i > 0 { _, _ = buf.WriteRune(',') } @@ -126,8 +122,8 @@ func (d *defaultLabelEncoder) Encode(iter Iterator) string { return buf.String() } -// ID is a part of an implementation of the LabelEncoder interface. -func (*defaultLabelEncoder) ID() EncoderID { +// ID is a part of an implementation of the AttributeEncoder interface. +func (*defaultAttrEncoder) ID() EncoderID { return defaultEncoderID } @@ -137,9 +133,9 @@ func copyAndEscape(buf *bytes.Buffer, val string) { for _, ch := range val { switch ch { case '=', ',', escapeChar: - buf.WriteRune(escapeChar) + _, _ = buf.WriteRune(escapeChar) } - buf.WriteRune(ch) + _, _ = buf.WriteRune(ch) } } diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index e03aabb62bd3..841b271fb7dd 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -14,16 +14,16 @@ package attribute // import "go.opentelemetry.io/otel/attribute" -// Iterator allows iterating over the set of labels in order, -// sorted by key. +// Iterator allows iterating over the set of attributes in order, sorted by +// key. type Iterator struct { storage *Set idx int } -// MergeIterator supports iterating over two sets of labels while -// eliminating duplicate values from the combined set. The first -// iterator value takes precedence. +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. type MergeIterator struct { one oneIterator two oneIterator @@ -31,13 +31,13 @@ type MergeIterator struct { } type oneIterator struct { - iter Iterator - done bool - label KeyValue + iter Iterator + done bool + attr KeyValue } -// Next moves the iterator to the next position. Returns false if there -// are no more labels. +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. func (i *Iterator) Next() bool { i.idx++ return i.idx < i.Len() @@ -45,30 +45,41 @@ func (i *Iterator) Next() bool { // Label returns current KeyValue. Must be called only after Next returns // true. +// +// Deprecated: Use Attribute instead. func (i *Iterator) Label() KeyValue { - kv, _ := i.storage.Get(i.idx) - return kv + return i.Attribute() } -// Attribute is a synonym for Label(). +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. func (i *Iterator) Attribute() KeyValue { - return i.Label() + kv, _ := i.storage.Get(i.idx) + return kv } // IndexedLabel returns current index and attribute. Must be called only // after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. func (i *Iterator) IndexedLabel() (int, KeyValue) { - return i.idx, i.Label() + return i.idx, i.Attribute() } -// Len returns a number of labels in the iterator's `*Set`. +// IndexedAttribute returns current index and attribute. Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. 
func (i *Iterator) Len() int { return i.storage.Len() } -// ToSlice is a convenience function that creates a slice of labels -// from the passed iterator. The iterator is set up to start from the -// beginning before creating the slice. +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. func (i *Iterator) ToSlice() []KeyValue { l := i.Len() if l == 0 { @@ -77,12 +88,12 @@ func (i *Iterator) ToSlice() []KeyValue { i.idx = -1 slice := make([]KeyValue, 0, l) for i.Next() { - slice = append(slice, i.Label()) + slice = append(slice, i.Attribute()) } return slice } -// NewMergeIterator returns a MergeIterator for merging two label sets +// NewMergeIterator returns a MergeIterator for merging two attribute sets. // Duplicates are resolved by taking the value from the first set. func NewMergeIterator(s1, s2 *Set) MergeIterator { mi := MergeIterator{ @@ -102,42 +113,49 @@ func makeOne(iter Iterator) oneIterator { func (oi *oneIterator) advance() { if oi.done = !oi.iter.Next(); !oi.done { - oi.label = oi.iter.Label() + oi.attr = oi.iter.Attribute() } } -// Next returns true if there is another label available. +// Next returns true if there is another attribute available. func (m *MergeIterator) Next() bool { if m.one.done && m.two.done { return false } if m.one.done { - m.current = m.two.label + m.current = m.two.attr m.two.advance() return true } if m.two.done { - m.current = m.one.label + m.current = m.one.attr m.one.advance() return true } - if m.one.label.Key == m.two.label.Key { - m.current = m.one.label // first iterator label value wins + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins m.one.advance() m.two.advance() return true } - if m.one.label.Key < m.two.label.Key { - m.current = m.one.label + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr m.one.advance() return true } - m.current = m.two.label + m.current = m.two.attr m.two.advance() return true } // Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. func (m *MergeIterator) Label() KeyValue { return m.current } + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index a28f1435cb9c..26be5983223b 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -21,49 +21,42 @@ import ( ) type ( - // Set is the representation for a distinct label set. It - // manages an immutable set of labels, with an internal cache - // for storing label encodings. + // Set is the representation for a distinct attribute set. It manages an + // immutable set of attributes, with an internal cache for storing + // attribute encodings. // - // This type supports the `Equivalent` method of comparison - // using values of type `Distinct`. - // - // This type is used to implement: - // 1. Metric labels - // 2. Resource sets - // 3. Correlation map (TODO) + // This type supports the Equivalent method of comparison using values of + // type Distinct. Set struct { equivalent Distinct } - // Distinct wraps a variable-size array of `KeyValue`, - // constructed with keys in sorted order. This can be used as - // a map key or for equality checking between Sets. 
+ // Distinct wraps a variable-size array of KeyValue, constructed with keys + // in sorted order. This can be used as a map key or for equality checking + // between Sets. Distinct struct { iface interface{} } - // Filter supports removing certain labels from label sets. - // When the filter returns true, the label will be kept in - // the filtered label set. When the filter returns false, the - // label is excluded from the filtered label set, and the - // label instead appears in the `removed` list of excluded labels. + // Filter supports removing certain attributes from attribute sets. When + // the filter returns true, the attribute will be kept in the filtered + // attribute set. When the filter returns false, the attribute is excluded + // from the filtered attribute set, and the attribute instead appears in + // the removed list of excluded attributes. Filter func(KeyValue) bool - // Sortable implements `sort.Interface`, used for sorting - // `KeyValue`. This is an exported type to support a - // memory optimization. A pointer to one of these is needed - // for the call to `sort.Stable()`, which the caller may - // provide in order to avoid an allocation. See - // `NewSetWithSortable()`. + // Sortable implements sort.Interface, used for sorting KeyValue. This is + // an exported type to support a memory optimization. A pointer to one of + // these is needed for the call to sort.Stable(), which the caller may + // provide in order to avoid an allocation. See NewSetWithSortable(). Sortable []KeyValue ) var ( - // keyValueType is used in `computeDistinctReflect`. + // keyValueType is used in computeDistinctReflect. keyValueType = reflect.TypeOf(KeyValue{}) - // emptySet is returned for empty label sets. + // emptySet is returned for empty attribute sets. emptySet = &Set{ equivalent: Distinct{ iface: [0]KeyValue{}, @@ -78,30 +71,30 @@ func EmptySet() *Set { return emptySet } -// reflect abbreviates `reflect.ValueOf`. -func (d Distinct) reflect() reflect.Value { +// reflectValue abbreviates reflect.ValueOf(d). +func (d Distinct) reflectValue() reflect.Value { return reflect.ValueOf(d.iface) } -// Valid returns true if this value refers to a valid `*Set`. +// Valid returns true if this value refers to a valid Set. func (d Distinct) Valid() bool { return d.iface != nil } -// Len returns the number of labels in this set. +// Len returns the number of attributes in this set. func (l *Set) Len() int { if l == nil || !l.equivalent.Valid() { return 0 } - return l.equivalent.reflect().Len() + return l.equivalent.reflectValue().Len() } -// Get returns the KeyValue at ordered position `idx` in this set. +// Get returns the KeyValue at ordered position idx in this set. func (l *Set) Get(idx int) (KeyValue, bool) { if l == nil { return KeyValue{}, false } - value := l.equivalent.reflect() + value := l.equivalent.reflectValue() if idx >= 0 && idx < value.Len() { // Note: The Go compiler successfully avoids an allocation for @@ -117,7 +110,7 @@ func (l *Set) Value(k Key) (Value, bool) { if l == nil { return Value{}, false } - rValue := l.equivalent.reflect() + rValue := l.equivalent.reflectValue() vlen := rValue.Len() idx := sort.Search(vlen, func(idx int) bool { @@ -142,7 +135,7 @@ func (l *Set) HasValue(k Key) bool { return ok } -// Iter returns an iterator for visiting the labels in this set. +// Iter returns an iterator for visiting the attributes in this set. 
func (l *Set) Iter() Iterator { return Iterator{ storage: l, @@ -150,18 +143,17 @@ func (l *Set) Iter() Iterator { } } -// ToSlice returns the set of labels belonging to this set, sorted, -// where keys appear no more than once. +// ToSlice returns the set of attributes belonging to this set, sorted, where +// keys appear no more than once. func (l *Set) ToSlice() []KeyValue { iter := l.Iter() return iter.ToSlice() } -// Equivalent returns a value that may be used as a map key. The -// Distinct type guarantees that the result will equal the equivalent -// Distinct value of any label set with the same elements as this, -// where sets are made unique by choosing the last value in the input -// for any given key. +// Equivalent returns a value that may be used as a map key. The Distinct type +// guarantees that the result will equal the equivalent. Distinct value of any +// attribute set with the same elements as this, where sets are made unique by +// choosing the last value in the input for any given key. func (l *Set) Equivalent() Distinct { if l == nil || !l.equivalent.Valid() { return emptySet.equivalent @@ -174,8 +166,7 @@ func (l *Set) Equals(o *Set) bool { return l.Equivalent() == o.Equivalent() } -// Encoded returns the encoded form of this set, according to -// `encoder`. +// Encoded returns the encoded form of this set, according to encoder. func (l *Set) Encoded(encoder Encoder) string { if l == nil || encoder == nil { return "" @@ -190,11 +181,11 @@ func empty() Set { } } -// NewSet returns a new `Set`. See the documentation for -// `NewSetWithSortableFiltered` for more details. +// NewSet returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. // -// Except for empty sets, this method adds an additional allocation -// compared with calls that include a `*Sortable`. +// Except for empty sets, this method adds an additional allocation compared +// with calls that include a Sortable. func NewSet(kvs ...KeyValue) Set { // Check for empty set. if len(kvs) == 0 { @@ -204,10 +195,10 @@ func NewSet(kvs ...KeyValue) Set { return s } -// NewSetWithSortable returns a new `Set`. See the documentation for -// `NewSetWithSortableFiltered` for more details. +// NewSetWithSortable returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. // -// This call includes a `*Sortable` option as a memory optimization. +// This call includes a Sortable option as a memory optimization. func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { // Check for empty set. if len(kvs) == 0 { @@ -217,12 +208,11 @@ func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { return s } -// NewSetWithFiltered returns a new `Set`. See the documentation for -// `NewSetWithSortableFiltered` for more details. +// NewSetWithFiltered returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. // -// This call includes a `Filter` to include/exclude label keys from -// the return value. Excluded keys are returned as a slice of label -// values. +// This call includes a Filter to include/exclude attribute keys from the +// return value. Excluded keys are returned as a slice of attribute values. func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // Check for empty set. 
if len(kvs) == 0 { @@ -231,7 +221,7 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { return NewSetWithSortableFiltered(kvs, new(Sortable), filter) } -// NewSetWithSortableFiltered returns a new `Set`. +// NewSetWithSortableFiltered returns a new Set. // // Duplicate keys are eliminated by taking the last value. This // re-orders the input slice so that unique last-values are contiguous @@ -243,17 +233,16 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // - Caller sees the reordering, but doesn't lose values // - Repeated call preserve last-value wins. // -// Note that methods are defined on `*Set`, although this returns `Set`. -// Callers can avoid memory allocations by: +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: // -// - allocating a `Sortable` for use as a temporary in this method -// - allocating a `Set` for storing the return value of this -// constructor. +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. // -// The result maintains a cache of encoded labels, by attribute.EncoderID. +// The result maintains a cache of encoded attributes, by attribute.EncoderID. // This value should not be copied after its first use. // -// The second `[]KeyValue` return value is a list of labels that were +// The second []KeyValue return value is a list of attributes that were // excluded by the Filter (if non-nil). func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { // Check for empty set. @@ -293,13 +282,13 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S }, nil } -// filterSet reorders `kvs` so that included keys are contiguous at -// the end of the slice, while excluded keys precede the included keys. +// filterSet reorders kvs so that included keys are contiguous at the end of +// the slice, while excluded keys precede the included keys. func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { var excluded []KeyValue - // Move labels that do not match the filter so - // they're adjacent before calling computeDistinct(). + // Move attributes that do not match the filter so they're adjacent before + // calling computeDistinct(). distinctPosition := len(kvs) // Swap indistinct keys forward and distinct keys toward the @@ -319,8 +308,8 @@ func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { }, excluded } -// Filter returns a filtered copy of this `Set`. See the -// documentation for `NewSetWithSortableFiltered` for more details. +// Filter returns a filtered copy of this Set. See the documentation for +// NewSetWithSortableFiltered for more details. func (l *Set) Filter(re Filter) (Set, []KeyValue) { if re == nil { return Set{ @@ -333,9 +322,9 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { return filterSet(l.ToSlice(), re) } -// computeDistinct returns a `Distinct` using either the fixed- or -// reflect-oriented code path, depending on the size of the input. -// The input slice is assumed to already be sorted and de-duplicated. +// computeDistinct returns a Distinct using either the fixed- or +// reflect-oriented code path, depending on the size of the input. The input +// slice is assumed to already be sorted and de-duplicated. 
func computeDistinct(kvs []KeyValue) Distinct { iface := computeDistinctFixed(kvs) if iface == nil { @@ -346,8 +335,8 @@ func computeDistinct(kvs []KeyValue) Distinct { } } -// computeDistinctFixed computes a `Distinct` for small slices. It -// returns nil if the input is too large for this code path. +// computeDistinctFixed computes a Distinct for small slices. It returns nil +// if the input is too large for this code path. func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: @@ -395,8 +384,8 @@ func computeDistinctFixed(kvs []KeyValue) interface{} { } } -// computeDistinctReflect computes a `Distinct` using reflection, -// works for any size input. +// computeDistinctReflect computes a Distinct using reflection, works for any +// size input. func computeDistinctReflect(kvs []KeyValue) interface{} { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { @@ -405,7 +394,7 @@ func computeDistinctReflect(kvs []KeyValue) interface{} { return at.Interface() } -// MarshalJSON returns the JSON encoding of the `*Set`. +// MarshalJSON returns the JSON encoding of the Set. func (l *Set) MarshalJSON() ([]byte, error) { return json.Marshal(l.equivalent.iface) } @@ -419,17 +408,17 @@ func (l Set) MarshalLog() interface{} { return kvs } -// Len implements `sort.Interface`. +// Len implements sort.Interface. func (l *Sortable) Len() int { return len(*l) } -// Swap implements `sort.Interface`. +// Swap implements sort.Interface. func (l *Sortable) Swap(i, j int) { (*l)[i], (*l)[j] = (*l)[j], (*l)[i] } -// Less implements `sort.Interface`. +// Less implements sort.Interface. func (l *Sortable) Less(i, j int) bool { return (*l)[i].Key < (*l)[j].Key } diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 6ec5cb290dfa..34a4e548dd1d 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -17,15 +17,17 @@ package attribute // import "go.opentelemetry.io/otel/attribute" import ( "encoding/json" "fmt" + "reflect" "strconv" "go.opentelemetry.io/otel/internal" + "go.opentelemetry.io/otel/internal/attribute" ) //go:generate stringer -type=Type // Type describes the type of the data Value holds. -type Type int +type Type int // nolint: revive // redefines builtin Type. // Value represents the value part in key-value pairs. type Value struct { @@ -66,12 +68,7 @@ func BoolValue(v bool) Value { // BoolSliceValue creates a BOOLSLICE Value. func BoolSliceValue(v []bool) Value { - cp := make([]bool, len(v)) - copy(cp, v) - return Value{ - vtype: BOOLSLICE, - slice: &cp, - } + return Value{vtype: BOOLSLICE, slice: attribute.SliceValue(v)} } // IntValue creates an INT64 Value. @@ -81,13 +78,14 @@ func IntValue(v int) Value { // IntSliceValue creates an INTSLICE Value. func IntSliceValue(v []int) Value { - cp := make([]int64, 0, len(v)) - for _, i := range v { - cp = append(cp, int64(i)) + var int64Val int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) + for i, val := range v { + cp.Elem().Index(i).SetInt(int64(val)) } return Value{ vtype: INT64SLICE, - slice: &cp, + slice: cp.Elem().Interface(), } } @@ -101,12 +99,7 @@ func Int64Value(v int64) Value { // Int64SliceValue creates an INT64SLICE Value. 
func Int64SliceValue(v []int64) Value { - cp := make([]int64, len(v)) - copy(cp, v) - return Value{ - vtype: INT64SLICE, - slice: &cp, - } + return Value{vtype: INT64SLICE, slice: attribute.SliceValue(v)} } // Float64Value creates a FLOAT64 Value. @@ -119,12 +112,7 @@ func Float64Value(v float64) Value { // Float64SliceValue creates a FLOAT64SLICE Value. func Float64SliceValue(v []float64) Value { - cp := make([]float64, len(v)) - copy(cp, v) - return Value{ - vtype: FLOAT64SLICE, - slice: &cp, - } + return Value{vtype: FLOAT64SLICE, slice: attribute.SliceValue(v)} } // StringValue creates a STRING Value. @@ -137,12 +125,7 @@ func StringValue(v string) Value { // StringSliceValue creates a STRINGSLICE Value. func StringSliceValue(v []string) Value { - cp := make([]string, len(v)) - copy(cp, v) - return Value{ - vtype: STRINGSLICE, - slice: &cp, - } + return Value{vtype: STRINGSLICE, slice: attribute.SliceValue(v)} } // Type returns a type of the Value. @@ -159,10 +142,14 @@ func (v Value) AsBool() bool { // AsBoolSlice returns the []bool value. Make sure that the Value's type is // BOOLSLICE. func (v Value) AsBoolSlice() []bool { - if s, ok := v.slice.(*[]bool); ok { - return *s + if v.vtype != BOOLSLICE { + return nil } - return nil + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { + return attribute.AsSlice[bool](v.slice) } // AsInt64 returns the int64 value. Make sure that the Value's type is @@ -174,10 +161,14 @@ func (v Value) AsInt64() int64 { // AsInt64Slice returns the []int64 value. Make sure that the Value's type is // INT64SLICE. func (v Value) AsInt64Slice() []int64 { - if s, ok := v.slice.(*[]int64); ok { - return *s + if v.vtype != INT64SLICE { + return nil } - return nil + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { + return attribute.AsSlice[int64](v.slice) } // AsFloat64 returns the float64 value. Make sure that the Value's @@ -189,10 +180,14 @@ func (v Value) AsFloat64() float64 { // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is // FLOAT64SLICE. func (v Value) AsFloat64Slice() []float64 { - if s, ok := v.slice.(*[]float64); ok { - return *s + if v.vtype != FLOAT64SLICE { + return nil } - return nil + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { + return attribute.AsSlice[float64](v.slice) } // AsString returns the string value. Make sure that the Value's type @@ -204,10 +199,14 @@ func (v Value) AsString() string { // AsStringSlice returns the []string value. Make sure that the Value's type is // STRINGSLICE. 
func (v Value) AsStringSlice() []string { - if s, ok := v.slice.(*[]string); ok { - return *s + if v.vtype != STRINGSLICE { + return nil } - return nil + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsSlice[string](v.slice) } type unknownValueType struct{} @@ -218,19 +217,19 @@ func (v Value) AsInterface() interface{} { case BOOL: return v.AsBool() case BOOLSLICE: - return v.AsBoolSlice() + return v.asBoolSlice() case INT64: return v.AsInt64() case INT64SLICE: - return v.AsInt64Slice() + return v.asInt64Slice() case FLOAT64: return v.AsFloat64() case FLOAT64SLICE: - return v.AsFloat64Slice() + return v.asFloat64Slice() case STRING: return v.stringly case STRINGSLICE: - return v.AsStringSlice() + return v.asStringSlice() } return unknownValueType{} } @@ -239,19 +238,19 @@ func (v Value) AsInterface() interface{} { func (v Value) Emit() string { switch v.Type() { case BOOLSLICE: - return fmt.Sprint(*(v.slice.(*[]bool))) + return fmt.Sprint(v.asBoolSlice()) case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: - return fmt.Sprint(*(v.slice.(*[]int64))) + return fmt.Sprint(v.asInt64Slice()) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: - return fmt.Sprint(*(v.slice.(*[]float64))) + return fmt.Sprint(v.asFloat64Slice()) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: - return fmt.Sprint(*(v.slice.(*[]string))) + return fmt.Sprint(v.asStringSlice()) case STRING: return v.stringly default: diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 824c67b27a32..a36db8f8d85d 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -68,6 +68,9 @@ type Property struct { hasData bool } +// NewKeyProperty returns a new Property for key. +// +// If key is invalid, an error will be returned. func NewKeyProperty(key string) (Property, error) { if !keyRe.MatchString(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) @@ -77,6 +80,9 @@ func NewKeyProperty(key string) (Property, error) { return p, nil } +// NewKeyValueProperty returns a new Property for key with value. +// +// If key or value are invalid, an error will be returned. func NewKeyValueProperty(key, value string) (Property, error) { if !keyRe.MatchString(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) @@ -151,7 +157,7 @@ func (p Property) Key() string { return p.key } -// Value returns the Property value. Additionally a boolean value is returned +// Value returns the Property value. Additionally, a boolean value is returned // indicating if the returned value is the empty if the Property has a value // that is empty or if the value is not set. func (p Property) Value() (string, bool) { @@ -244,8 +250,9 @@ type Member struct { hasData bool } -// NewMember returns a new Member from the passed arguments. An error is -// returned if the created Member would be invalid according to the W3C +// NewMember returns a new Member from the passed arguments. The key will be +// used directly while the value will be url decoded after validation. An error +// is returned if the created Member would be invalid according to the W3C // Baggage specification. 
func NewMember(key, value string, props ...Property) (Member, error) { m := Member{ @@ -257,7 +264,11 @@ func NewMember(key, value string, props ...Property) (Member, error) { if err := m.validate(); err != nil { return newInvalidMember(), err } - + decodedValue, err := url.QueryUnescape(value) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + m.value = decodedValue return m, nil } @@ -322,8 +333,9 @@ func parseMember(member string) (Member, error) { return Member{key: key, value: value, properties: props, hasData: true}, nil } -// validate ensures m conforms to the W3C Baggage specification, returning an -// error otherwise. +// validate ensures m conforms to the W3C Baggage specification. +// A key is just an ASCII string, but a value must be URL encoded UTF-8, +// returning an error otherwise. func (m Member) validate() error { if !m.hasData { return fmt.Errorf("%w: %q", errInvalidMember, m) @@ -386,7 +398,7 @@ func New(members ...Member) (Baggage, error) { } } - // Check member numbers after deduplicating. + // Check member numbers after deduplication. if len(b) > maxMembers { return Baggage{}, errMemberNumber } @@ -448,7 +460,7 @@ func Parse(bStr string) (Baggage, error) { func (b Baggage) Member(key string) Member { v, ok := b.list[key] if !ok { - // We do not need to worry about distiguising between the situation + // We do not need to worry about distinguishing between the situation // where a zero-valued Member is included in the Baggage because a // zero-valued Member is invalid according to the W3C Baggage // specification (it has an empty key). @@ -459,6 +471,7 @@ func (b Baggage) Member(key string) Member { key: key, value: v.Value, properties: fromInternalProperties(v.Properties), + hasData: true, } } @@ -478,6 +491,7 @@ func (b Baggage) Members() []Member { key: k, value: v.Value, properties: fromInternalProperties(v.Properties), + hasData: true, }) } return members diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 064a9279fd14..587ebae4e30e 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -23,10 +23,20 @@ import ( const ( // Unset is the default status code. Unset Code = 0 + // Error indicates the operation contains an error. + // + // NOTE: The error code in OTLP is 2. + // The value of this enum is only relevant to the internals + // of the Go SDK. Error Code = 1 + // Ok indicates operation has been validated by an Application developers // or Operator to have completed successfully, or contain no error. + // + // NOTE: The Ok code in OTLP is 1. + // The value of this enum is only relevant to the internals + // of the Go SDK. 
Ok Code = 2 maxCode = 3 diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go index 35263e01ac26..ecd363ab5165 100644 --- a/vendor/go.opentelemetry.io/otel/handler.go +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -17,7 +17,8 @@ package otel // import "go.opentelemetry.io/otel" import ( "log" "os" - "sync" + "sync/atomic" + "unsafe" ) var ( @@ -34,29 +35,26 @@ var ( ) type delegator struct { - lock *sync.RWMutex - eh ErrorHandler + delegate unsafe.Pointer } func (d *delegator) Handle(err error) { - d.lock.RLock() - defer d.lock.RUnlock() - d.eh.Handle(err) + d.getDelegate().Handle(err) +} + +func (d *delegator) getDelegate() ErrorHandler { + return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate)) } // setDelegate sets the ErrorHandler delegate. func (d *delegator) setDelegate(eh ErrorHandler) { - d.lock.Lock() - defer d.lock.Unlock() - d.eh = eh + atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh)) } func defaultErrorHandler() *delegator { - return &delegator{ - lock: &sync.RWMutex{}, - eh: &errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}, - } - + d := &delegator{} + d.setDelegate(&errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) + return d } // errLogger logs errors if no delegate is set, otherwise they are delegated. @@ -92,7 +90,7 @@ func SetErrorHandler(h ErrorHandler) { globalErrorHandler.setDelegate(h) } -// Handle is a convenience function for ErrorHandler().Handle(err) +// Handle is a convenience function for ErrorHandler().Handle(err). func Handle(err error) { GetErrorHandler().Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go new file mode 100644 index 000000000000..220348944735 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package attribute provide several helper functions for some commonly used +logic of processing attributes. +*/ +package attribute // import "go.opentelemetry.io/otel/internal/attribute" + +import ( + "reflect" +) + +// SliceValue convert a slice into an array with same elements as slice. +func SliceValue[T bool | int64 | float64 | string](v []T) any { + var zero T + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]T), v) + return cp.Elem().Interface() +} + +// AsSlice convert an array into a slice into with same elements as array. 
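The handler.go rewrite above swaps the RWMutex-guarded delegate for an atomic pointer; the public entry points are unchanged. Installing a custom handler is still done through otel.SetErrorHandler, roughly as in this sketch:

package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

// logHandler is a hypothetical ErrorHandler that forwards OpenTelemetry
// errors to the application's logger.
type logHandler struct{ l *log.Logger }

func (h logHandler) Handle(err error) { h.l.Printf("otel error: %v", err) }

func main() {
	otel.SetErrorHandler(logHandler{l: log.Default()})

	// Handle is the convenience for GetErrorHandler().Handle(err); SDK and
	// instrumentation code report errors through it.
	otel.Handle(errors.New("example error"))
}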
+func AsSlice[T bool | int64 | float64 | string](v any) []T { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero T + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]T) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go index 3c2784eea33e..4469700d9cbb 100644 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -39,8 +39,7 @@ type baggageState struct { // Passing nil SetHookFunc creates a context with no set hook to call. func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { var s baggageState - switch v := parent.Value(baggageKey).(type) { - case baggageState: + if v, ok := parent.Value(baggageKey).(baggageState); ok { s = v } @@ -54,8 +53,7 @@ func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Contex // Passing nil GetHookFunc creates a context with no get hook to call. func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { var s baggageState - switch v := parent.Value(baggageKey).(type) { - case baggageState: + if v, ok := parent.Value(baggageKey).(baggageState); ok { s = v } @@ -67,8 +65,7 @@ func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Contex // returns a context without any baggage. func ContextWithList(parent context.Context, list List) context.Context { var s baggageState - switch v := parent.Value(baggageKey).(type) { - case baggageState: + if v, ok := parent.Value(baggageKey).(baggageState); ok { s = v } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index 0a378476b0fd..293c08961fbc 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -17,7 +17,8 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" "os" - "sync" + "sync/atomic" + "unsafe" "github.com/go-logr/logr" "github.com/go-logr/stdr" @@ -27,37 +28,36 @@ import ( // // The default logger uses stdr which is backed by the standard `log.Logger` // interface. This logger will only show messages at the Error Level. -var globalLogger logr.Logger = stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) -var globalLoggerLock = &sync.RWMutex{} +var globalLogger unsafe.Pointer + +func init() { + SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +} // SetLogger overrides the globalLogger with l. // // To see Info messages use a logger with `l.V(1).Enabled() == true` -// To see Debug messages use a logger with `l.V(5).Enabled() == true` +// To see Debug messages use a logger with `l.V(5).Enabled() == true`. func SetLogger(l logr.Logger) { - globalLoggerLock.Lock() - defer globalLoggerLock.Unlock() - globalLogger = l + atomic.StorePointer(&globalLogger, unsafe.Pointer(&l)) +} + +func getLogger() logr.Logger { + return *(*logr.Logger)(atomic.LoadPointer(&globalLogger)) } // Info prints messages about the general state of the API or SDK. -// This should usually be less then 5 messages a minute +// This should usually be less then 5 messages a minute. 
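The internal_logging.go hunk above stores the global logr.Logger behind an atomic pointer and installs the stdr default from init(). The public way to surface Info (V(1)) and Debug (V(5)) output is unchanged; a sketch using otel.SetLogger and stdr, both assumed available at the vendored versions:

package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
	"go.opentelemetry.io/otel"
)

func main() {
	stdr.SetVerbosity(5) // let V(1) Info and V(5) Debug messages through
	otel.SetLogger(stdr.New(log.New(os.Stdout, "otel: ", log.LstdFlags)))
}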
func Info(msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.V(1).Info(msg, keysAndValues...) + getLogger().V(1).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. func Error(err error, msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.Error(err, msg, keysAndValues...) + getLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. func Debug(msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.V(5).Info(msg, keysAndValues...) + getLogger().V(5).Info(msg, keysAndValues...) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go index d6b3e900cd45..1ad38f828ecd 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -15,6 +15,7 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( + "errors" "sync" "sync/atomic" @@ -47,17 +48,24 @@ func TracerProvider() trace.TracerProvider { // SetTracerProvider is the internal implementation for global.SetTracerProvider. func SetTracerProvider(tp trace.TracerProvider) { + current := TracerProvider() + + if _, cOk := current.(*tracerProvider); cOk { + if _, tpOk := tp.(*tracerProvider); tpOk && current == tp { + // Do not assign the default delegating TracerProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in tracer provider"), + "Setting tracer provider to it's current value. No delegate will be configured", + ) + return + } + } + delegateTraceOnce.Do(func() { - current := TracerProvider() - if current == tp { - // Setting the provider to the prior default is nonsense, panic. - // Panic is acceptable because we are likely still early in the - // process lifetime. - panic("invalid TracerProvider, the global instance cannot be reinstalled") - } else if def, ok := current.(*tracerProvider); ok { + if def, ok := current.(*tracerProvider); ok { def.setDelegate(tp) } - }) globalTracer.Store(tracerProviderHolder{tp: tp}) } @@ -69,15 +77,24 @@ func TextMapPropagator() propagation.TextMapPropagator { // SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. func SetTextMapPropagator(p propagation.TextMapPropagator) { + current := TextMapPropagator() + + if _, cOk := current.(*textMapPropagator); cOk { + if _, pOk := p.(*textMapPropagator); pOk && current == p { + // Do not assign the default delegating TextMapPropagator to + // delegate to itself. + Error( + errors.New("no delegate configured in text map propagator"), + "Setting text map propagator to it's current value. No delegate will be configured", + ) + return + } + } + // For the textMapPropagator already returned by TextMapPropagator // delegate to p. delegateTextMapPropagatorOnce.Do(func() { - if current := TextMapPropagator(); current == p { - // Setting the provider to the prior default is nonsense, panic. - // Panic is acceptable because we are likely still early in the - // process lifetime. 
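The state.go change above (its propagator half continues just below) replaces the old panic with a logged error when the default delegating provider or propagator would be registered over itself. A sketch of the now-tolerated call, next to the normal setup:

package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Re-installing the default delegating provider used to panic inside the
	// sync.Once; it is now reported via the global error handler and ignored.
	otel.SetTracerProvider(otel.GetTracerProvider())

	// Normal setup is unchanged: register a concrete propagator or provider once.
	otel.SetTextMapPropagator(propagation.TraceContext{})
}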
- panic("invalid TextMapPropagator, the global instance cannot be reinstalled") - } else if def, ok := current.(*textMapPropagator); ok { + if def, ok := current.(*textMapPropagator); ok { def.SetDelegate(p) } }) @@ -96,11 +113,3 @@ func defaultPropagatorsValue() *atomic.Value { v.Store(propagatorsHolder{tm: newTextMapPropagator()}) return v } - -// ResetForTest restores the initial global state, for testing purposes. -func ResetForTest() { - globalTracer = defaultTracerValue() - globalPropagators = defaultPropagatorsValue() - delegateTraceOnce = sync.Once{} - delegateTextMapPropagatorOnce = sync.Once{} -} diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go deleted file mode 100644 index 77781ead118c..000000000000 --- a/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/metric/global" - -import ( - "context" - "sync" - "sync/atomic" - "unsafe" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/internal/metric/registry" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// This file contains the forwarding implementation of MeterProvider used as -// the default global instance. Metric events using instruments provided by -// this implementation are no-ops until the first Meter implementation is set -// as the global provider. -// -// The implementation here uses Mutexes to maintain a list of active Meters in -// the MeterProvider and Instruments in each Meter, under the assumption that -// these interfaces are not performance-critical. -// -// We have the invariant that setDelegate() will be called before a new -// MeterProvider implementation is registered as the global provider. Mutexes -// in the MeterProvider and Meters ensure that each instrument has a delegate -// before the global provider is set. -// -// Metric uniqueness checking is implemented by calling the exported -// methods of the api/metric/registry package. - -type meterKey struct { - InstrumentationName string - InstrumentationVersion string - SchemaURL string -} - -type meterProvider struct { - delegate metric.MeterProvider - - // lock protects `delegate` and `meters`. - lock sync.Mutex - - // meters maintains a unique entry for every named Meter - // that has been registered through the global instance. 
- meters map[meterKey]*meterEntry -} - -type meterImpl struct { - delegate unsafe.Pointer // (*metric.MeterImpl) - - lock sync.Mutex - syncInsts []*syncImpl - asyncInsts []*asyncImpl -} - -type meterEntry struct { - unique sdkapi.MeterImpl - impl meterImpl -} - -type instrument struct { - descriptor sdkapi.Descriptor -} - -type syncImpl struct { - delegate unsafe.Pointer // (*sdkapi.SyncImpl) - - instrument -} - -type asyncImpl struct { - delegate unsafe.Pointer // (*sdkapi.AsyncImpl) - - instrument - - runner sdkapi.AsyncRunner -} - -var _ metric.MeterProvider = &meterProvider{} -var _ sdkapi.MeterImpl = &meterImpl{} -var _ sdkapi.InstrumentImpl = &syncImpl{} -var _ sdkapi.AsyncImpl = &asyncImpl{} - -func (inst *instrument) Descriptor() sdkapi.Descriptor { - return inst.descriptor -} - -// MeterProvider interface and delegation - -func newMeterProvider() *meterProvider { - return &meterProvider{ - meters: map[meterKey]*meterEntry{}, - } -} - -func (p *meterProvider) setDelegate(provider metric.MeterProvider) { - p.lock.Lock() - defer p.lock.Unlock() - - p.delegate = provider - for key, entry := range p.meters { - entry.impl.setDelegate(key, provider) - } - p.meters = nil -} - -func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { - p.lock.Lock() - defer p.lock.Unlock() - - if p.delegate != nil { - return p.delegate.Meter(instrumentationName, opts...) - } - - cfg := metric.NewMeterConfig(opts...) - key := meterKey{ - InstrumentationName: instrumentationName, - InstrumentationVersion: cfg.InstrumentationVersion(), - SchemaURL: cfg.SchemaURL(), - } - entry, ok := p.meters[key] - if !ok { - entry = &meterEntry{} - // Note: This code implements its own MeterProvider - // name-uniqueness logic because there is - // synchronization required at the moment of - // delegation. We use the same instrument-uniqueness - // checking the real SDK uses here: - entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl) - p.meters[key] = entry - } - return metric.WrapMeterImpl(entry.unique) -} - -// Meter interface and delegation - -func (m *meterImpl) setDelegate(key meterKey, provider metric.MeterProvider) { - m.lock.Lock() - defer m.lock.Unlock() - - d := new(sdkapi.MeterImpl) - *d = provider.Meter( - key.InstrumentationName, - metric.WithInstrumentationVersion(key.InstrumentationVersion), - metric.WithSchemaURL(key.SchemaURL), - ).MeterImpl() - m.delegate = unsafe.Pointer(d) - - for _, inst := range m.syncInsts { - inst.setDelegate(*d) - } - m.syncInsts = nil - for _, obs := range m.asyncInsts { - obs.setDelegate(*d) - } - m.asyncInsts = nil -} - -func (m *meterImpl) NewSyncInstrument(desc sdkapi.Descriptor) (sdkapi.SyncImpl, error) { - m.lock.Lock() - defer m.lock.Unlock() - - if meterPtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { - return (*meterPtr).NewSyncInstrument(desc) - } - - inst := &syncImpl{ - instrument: instrument{ - descriptor: desc, - }, - } - m.syncInsts = append(m.syncInsts, inst) - return inst, nil -} - -// Synchronous delegation - -func (inst *syncImpl) setDelegate(d sdkapi.MeterImpl) { - implPtr := new(sdkapi.SyncImpl) - - var err error - *implPtr, err = d.NewSyncInstrument(inst.descriptor) - - if err != nil { - // TODO: There is no standard way to deliver this error to the user. - // See https://github.com/open-telemetry/opentelemetry-go/issues/514 - // Note that the default SDK will not generate any errors yet, this is - // only for added safety. 
- panic(err) - } - - atomic.StorePointer(&inst.delegate, unsafe.Pointer(implPtr)) -} - -func (inst *syncImpl) Implementation() interface{} { - if implPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil { - return (*implPtr).Implementation() - } - return inst -} - -// Async delegation - -func (m *meterImpl) NewAsyncInstrument( - desc sdkapi.Descriptor, - runner sdkapi.AsyncRunner, -) (sdkapi.AsyncImpl, error) { - - m.lock.Lock() - defer m.lock.Unlock() - - if meterPtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { - return (*meterPtr).NewAsyncInstrument(desc, runner) - } - - inst := &asyncImpl{ - instrument: instrument{ - descriptor: desc, - }, - runner: runner, - } - m.asyncInsts = append(m.asyncInsts, inst) - return inst, nil -} - -func (obs *asyncImpl) Implementation() interface{} { - if implPtr := (*sdkapi.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil { - return (*implPtr).Implementation() - } - return obs -} - -func (obs *asyncImpl) setDelegate(d sdkapi.MeterImpl) { - implPtr := new(sdkapi.AsyncImpl) - - var err error - *implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner) - - if err != nil { - // TODO: There is no standard way to deliver this error to the user. - // See https://github.com/open-telemetry/opentelemetry-go/issues/514 - // Note that the default SDK will not generate any errors yet, this is - // only for added safety. - panic(err) - } - - atomic.StorePointer(&obs.delegate, unsafe.Pointer(implPtr)) -} - -// Metric updates - -func (m *meterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurements ...sdkapi.Measurement) { - if delegatePtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil { - (*delegatePtr).RecordBatch(ctx, labels, measurements...) - } -} - -func (inst *syncImpl) RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) { - if instPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil { - (*instPtr).RecordOne(ctx, number, labels) - } -} - -func AtomicFieldOffsets() map[string]uintptr { - return map[string]uintptr{ - "meterProvider.delegate": unsafe.Offsetof(meterProvider{}.delegate), - "meterImpl.delegate": unsafe.Offsetof(meterImpl{}.delegate), - "syncImpl.delegate": unsafe.Offsetof(syncImpl{}.delegate), - "asyncImpl.delegate": unsafe.Offsetof(asyncImpl{}.delegate), - } -} diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go b/vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go deleted file mode 100644 index 2f17562f0eb5..000000000000 --- a/vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package registry provides a non-standalone implementation of -MeterProvider that adds uniqueness checking for instrument descriptors -on top of other MeterProvider it wraps. 
- -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. -*/ -package registry // import "go.opentelemetry.io/otel/internal/metric/registry" diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go b/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go deleted file mode 100644 index c929bf45c85c..000000000000 --- a/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package registry // import "go.opentelemetry.io/otel/internal/metric/registry" - -import ( - "context" - "fmt" - "sync" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// UniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding -// uniqueness checking for instrument descriptors. -type UniqueInstrumentMeterImpl struct { - lock sync.Mutex - impl sdkapi.MeterImpl - state map[string]sdkapi.InstrumentImpl -} - -var _ sdkapi.MeterImpl = (*UniqueInstrumentMeterImpl)(nil) - -// ErrMetricKindMismatch is the standard error for mismatched metric -// instrument definitions. -var ErrMetricKindMismatch = fmt.Errorf( - "a metric was already registered by this name with another kind or number type") - -// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl -// with the addition of instrument name uniqueness checking. -func NewUniqueInstrumentMeterImpl(impl sdkapi.MeterImpl) *UniqueInstrumentMeterImpl { - return &UniqueInstrumentMeterImpl{ - impl: impl, - state: map[string]sdkapi.InstrumentImpl{}, - } -} - -// MeterImpl gives the caller access to the underlying MeterImpl -// used by this UniqueInstrumentMeterImpl. -func (u *UniqueInstrumentMeterImpl) MeterImpl() sdkapi.MeterImpl { - return u.impl -} - -// RecordBatch implements sdkapi.MeterImpl. -func (u *UniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, ms ...sdkapi.Measurement) { - u.impl.RecordBatch(ctx, labels, ms...) -} - -// NewMetricKindMismatchError formats an error that describes a -// mismatched metric instrument definition. -func NewMetricKindMismatchError(desc sdkapi.Descriptor) error { - return fmt.Errorf("metric %s registered as %s %s: %w", - desc.Name(), - desc.NumberKind(), - desc.InstrumentKind(), - ErrMetricKindMismatch) -} - -// Compatible determines whether two sdkapi.Descriptors are considered -// the same for the purpose of uniqueness checking. -func Compatible(candidate, existing sdkapi.Descriptor) bool { - return candidate.InstrumentKind() == existing.InstrumentKind() && - candidate.NumberKind() == existing.NumberKind() -} - -// checkUniqueness returns an ErrMetricKindMismatch error if there is -// a conflict between a descriptor that was already registered and the -// `descriptor` argument. 
If there is an existing compatible -// registration, this returns the already-registered instrument. If -// there is no conflict and no prior registration, returns (nil, nil). -func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor sdkapi.Descriptor) (sdkapi.InstrumentImpl, error) { - impl, ok := u.state[descriptor.Name()] - if !ok { - return nil, nil - } - - if !Compatible(descriptor, impl.Descriptor()) { - return nil, NewMetricKindMismatchError(impl.Descriptor()) - } - - return impl, nil -} - -// NewSyncInstrument implements sdkapi.MeterImpl. -func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) { - u.lock.Lock() - defer u.lock.Unlock() - - impl, err := u.checkUniqueness(descriptor) - - if err != nil { - return nil, err - } else if impl != nil { - return impl.(sdkapi.SyncImpl), nil - } - - syncInst, err := u.impl.NewSyncInstrument(descriptor) - if err != nil { - return nil, err - } - u.state[descriptor.Name()] = syncInst - return syncInst, nil -} - -// NewAsyncInstrument implements sdkapi.MeterImpl. -func (u *UniqueInstrumentMeterImpl) NewAsyncInstrument( - descriptor sdkapi.Descriptor, - runner sdkapi.AsyncRunner, -) (sdkapi.AsyncImpl, error) { - u.lock.Lock() - defer u.lock.Unlock() - - impl, err := u.checkUniqueness(descriptor) - - if err != nil { - return nil, err - } else if impl != nil { - return impl.(sdkapi.AsyncImpl), nil - } - - asyncInst, err := u.impl.NewAsyncInstrument(descriptor, runner) - if err != nil { - return nil, err - } - u.state[descriptor.Name()] = asyncInst - return asyncInst, nil -} diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index ce7afaa18805..e07e7940004c 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -19,7 +19,7 @@ import ( "unsafe" ) -func BoolToRaw(b bool) uint64 { +func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. if b { return 1 } diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go index 3f722344fa7c..621e4c5fcb83 100644 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -14,65 +14,6 @@ package metric // import "go.opentelemetry.io/otel/metric" -import ( - "go.opentelemetry.io/otel/metric/unit" -) - -// InstrumentConfig contains options for metric instrument descriptors. -type InstrumentConfig struct { - description string - unit unit.Unit -} - -// Description describes the instrument in human-readable terms. -func (cfg *InstrumentConfig) Description() string { - return cfg.description -} - -// Unit describes the measurement unit for a instrument. -func (cfg *InstrumentConfig) Unit() unit.Unit { - return cfg.unit -} - -// InstrumentOption is an interface for applying metric instrument options. -type InstrumentOption interface { - // ApplyMeter is used to set a InstrumentOption value of a - // InstrumentConfig. - applyInstrument(InstrumentConfig) InstrumentConfig -} - -// NewInstrumentConfig creates a new InstrumentConfig -// and applies all the given options. 
-func NewInstrumentConfig(opts ...InstrumentOption) InstrumentConfig { - var config InstrumentConfig - for _, o := range opts { - config = o.applyInstrument(config) - } - return config -} - -type instrumentOptionFunc func(InstrumentConfig) InstrumentConfig - -func (fn instrumentOptionFunc) applyInstrument(cfg InstrumentConfig) InstrumentConfig { - return fn(cfg) -} - -// WithDescription applies provided description. -func WithDescription(desc string) InstrumentOption { - return instrumentOptionFunc(func(cfg InstrumentConfig) InstrumentConfig { - cfg.description = desc - return cfg - }) -} - -// WithUnit applies provided unit. -func WithUnit(unit unit.Unit) InstrumentOption { - return instrumentOptionFunc(func(cfg InstrumentConfig) InstrumentConfig { - cfg.unit = unit - return cfg - }) -} - // MeterConfig contains options for Meters. type MeterConfig struct { instrumentationVersion string @@ -80,18 +21,18 @@ type MeterConfig struct { } // InstrumentationVersion is the version of the library providing instrumentation. -func (cfg *MeterConfig) InstrumentationVersion() string { +func (cfg MeterConfig) InstrumentationVersion() string { return cfg.instrumentationVersion } // SchemaURL is the schema_url of the library providing instrumentation. -func (cfg *MeterConfig) SchemaURL() string { +func (cfg MeterConfig) SchemaURL() string { return cfg.schemaURL } // MeterOption is an interface for applying Meter options. type MeterOption interface { - // ApplyMeter is used to set a MeterOption value of a MeterConfig. + // applyMeter is used to set a MeterOption value of a MeterConfig. applyMeter(MeterConfig) MeterConfig } diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index 4baf0719fcc1..bd6f43437208 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -19,49 +19,5 @@ OpenTelemetry API. This package is currently in a pre-GA phase. Backwards incompatible changes may be introduced in subsequent minor version releases as we work to track the evolving OpenTelemetry specification and user feedback. - -Measurements can be made about an operation being performed or the state of a -system in general. These measurements can be crucial to the reliable operation -of code and provide valuable insights about the inner workings of a system. - -Measurements are made using instruments provided by this package. The type of -instrument used will depend on the type of measurement being made and of what -part of a system is being measured. - -Instruments are categorized as Synchronous or Asynchronous and independently -as Adding or Grouping. Synchronous instruments are called by the user with a -Context. Asynchronous instruments are called by the SDK during collection. -Adding instruments are semantically intended for capturing a sum. Grouping -instruments are intended for capturing a distribution. - -Adding instruments may be monotonic, in which case they are non-decreasing -and naturally define a rate. - -The synchronous instrument names are: - - Counter: adding, monotonic - UpDownCounter: adding - Histogram: grouping - -and the asynchronous instruments are: - - CounterObserver: adding, monotonic - UpDownCounterObserver: adding - GaugeObserver: grouping - -All instruments are provided with support for either float64 or int64 input -values. - -An instrument is created using a Meter. Additionally, a Meter is used to -record batches of synchronous measurements or asynchronous observations. 
A -Meter is obtained using a MeterProvider. A Meter, like a Tracer, is unique to -the instrumentation it instruments and must be named and versioned when -created with a MeterProvider with the name and version of the instrumentation -library. - -Instrumentation should be designed to accept a MeterProvider from which it can -create its own unique Meter. Alternatively, the registered global -MeterProvider from the go.opentelemetry.io/otel package can be used as a -default. */ package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/global/global.go b/vendor/go.opentelemetry.io/otel/metric/global/global.go new file mode 100644 index 000000000000..05a67c2e9995 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/global/global.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/metric/global" + +import ( + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/internal/global" +) + +// Meter returns a Meter from the global MeterProvider. The +// instrumentationName must be the name of the library providing +// instrumentation. This name may be the same as the instrumented code only if +// that code provides built-in instrumentation. If the instrumentationName is +// empty, then a implementation defined default name will be used instead. +// +// This is short for MeterProvider().Meter(name). +func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { + return MeterProvider().Meter(instrumentationName, opts...) +} + +// MeterProvider returns the registered global trace provider. +// If none is registered then a No-op MeterProvider is returned. +func MeterProvider() metric.MeterProvider { + return global.MeterProvider() +} + +// SetMeterProvider registers `mp` as the global meter provider. +func SetMeterProvider(mp metric.MeterProvider) { + global.SetMeterProvider(mp) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/global/metric.go b/vendor/go.opentelemetry.io/otel/metric/global/metric.go deleted file mode 100644 index 14ba862002a2..000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/global/metric.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
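The new metric/global package above replaces GetMeterProvider with MeterProvider and keeps Meter and SetMeterProvider. Obtaining a named, versioned Meter might look like this sketch (the name and schema URL are placeholders):

package main

import (
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/global"
)

func main() {
	meter := global.Meter(
		"example.com/instrumentation", // hypothetical instrumentation name
		metric.WithInstrumentationVersion("v0.1.0"),
		metric.WithSchemaURL("https://example.com/schema/1.0"), // placeholder URL
	)
	_ = meter
}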
- -package global // import "go.opentelemetry.io/otel/metric/global" - -import ( - "go.opentelemetry.io/otel/internal/metric/global" - "go.opentelemetry.io/otel/metric" -) - -// Meter creates an implementation of the Meter interface from the global -// MeterProvider. The instrumentationName must be the name of the library -// providing instrumentation. This name may be the same as the instrumented -// code only if that code provides built-in instrumentation. If the -// instrumentationName is empty, then a implementation defined default name -// will be used instead. -// -// This is short for MeterProvider().Meter(name) -func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { - return GetMeterProvider().Meter(instrumentationName, opts...) -} - -// GetMeterProvider returns the registered global meter provider. If -// none is registered then a default meter provider is returned that -// forwards the Meter interface to the first registered Meter. -// -// Use the meter provider to create a named meter. E.g. -// meter := global.MeterProvider().Meter("example.com/foo") -// or -// meter := global.Meter("example.com/foo") -func GetMeterProvider() metric.MeterProvider { - return global.MeterProvider() -} - -// SetMeterProvider registers `mp` as the global meter provider. -func SetMeterProvider(mp metric.MeterProvider) { - global.SetMeterProvider(mp) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go new file mode 100644 index 000000000000..2f624bfe0057 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go @@ -0,0 +1,131 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/unit" +) + +// Float64Observable describes a set of instruments used asynchronously to +// record float64 measurements once per collection cycle. Observations of +// these instruments are only made within a callback. +// +// Warning: methods may be added to this interface in minor releases. +type Float64Observable interface { + Asynchronous + + float64Observable() +} + +// Float64ObservableCounter is an instrument used to asynchronously record +// increasing float64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed the to be the cumulative sum of the count. +// +// Warning: methods may be added to this interface in minor releases. +type Float64ObservableCounter interface{ Float64Observable } + +// Float64ObservableUpDownCounter is an instrument used to asynchronously +// record float64 measurements once per collection cycle. Observations are only +// made within a callback for this instrument. 
The value observed is assumed +// the to be the cumulative sum of the count. +// +// Warning: methods may be added to this interface in minor releases. +type Float64ObservableUpDownCounter interface{ Float64Observable } + +// Float64ObservableGauge is an instrument used to asynchronously record +// instantaneous float64 measurements once per collection cycle. Observations +// are only made within a callback for this instrument. +// +// Warning: methods may be added to this interface in minor releases. +type Float64ObservableGauge interface{ Float64Observable } + +// Float64Observer is a recorder of float64 measurements. +// +// Warning: methods may be added to this interface in minor releases. +type Float64Observer interface { + Observe(value float64, attributes ...attribute.KeyValue) +} + +// Float64Callback is a function registered with a Meter that makes +// observations for a Float64Observerable instrument it is registered with. +// Calls to the Float64Observer record measurement values for the +// Float64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Float64Callbacks. Meaning, it should not report measurements with the same +// attributes as another Float64Callbacks also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Float64Callback func(context.Context, Float64Observer) error + +// Float64ObserverConfig contains options for Asynchronous instruments that +// observe float64 values. +type Float64ObserverConfig struct { + description string + unit unit.Unit + callbacks []Float64Callback +} + +// NewFloat64ObserverConfig returns a new Float64ObserverConfig with all opts +// applied. +func NewFloat64ObserverConfig(opts ...Float64ObserverOption) Float64ObserverConfig { + var config Float64ObserverConfig + for _, o := range opts { + config = o.applyFloat64Observer(config) + } + return config +} + +// Description returns the Config description. +func (c Float64ObserverConfig) Description() string { + return c.description +} + +// Unit returns the Config unit. +func (c Float64ObserverConfig) Unit() unit.Unit { + return c.unit +} + +// Callbacks returns the Config callbacks. +func (c Float64ObserverConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObserverOption applies options to float64 Observer instruments. +type Float64ObserverOption interface { + applyFloat64Observer(Float64ObserverConfig) Float64ObserverConfig +} + +type float64ObserverOptionFunc func(Float64ObserverConfig) Float64ObserverConfig + +func (fn float64ObserverOptionFunc) applyFloat64Observer(cfg Float64ObserverConfig) Float64ObserverConfig { + return fn(cfg) +} + +// WithFloat64Callback adds callback to be called for an instrument. 
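WithFloat64Callback, defined next, attaches the callback when the instrument is created. Registering an observable gauge through a Meter from this API could look like the following sketch; the instrument name and the heap-reading helper are illustrative only:

package main

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel/metric/global"
	"go.opentelemetry.io/otel/metric/instrument"
)

// readHeapBytes is a placeholder measurement source.
func readHeapBytes() float64 {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	return float64(ms.HeapAlloc)
}

func main() {
	meter := global.Meter("example.com/instrumentation")
	_, err := meter.Float64ObservableGauge(
		"process.heap.bytes", // hypothetical instrument name
		instrument.WithDescription("allocated heap bytes"),
		instrument.WithFloat64Callback(func(ctx context.Context, o instrument.Float64Observer) error {
			o.Observe(readHeapBytes())
			return nil
		}),
	)
	if err != nil {
		panic(err)
	}
}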
+func WithFloat64Callback(callback Float64Callback) Float64ObserverOption { + return float64ObserverOptionFunc(func(cfg Float64ObserverConfig) Float64ObserverConfig { + cfg.callbacks = append(cfg.callbacks, callback) + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go new file mode 100644 index 000000000000..ac0d09d90cd6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go @@ -0,0 +1,131 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/unit" +) + +// Int64Observable describes a set of instruments used asynchronously to record +// int64 measurements once per collection cycle. Observations of these +// instruments are only made within a callback. +// +// Warning: methods may be added to this interface in minor releases. +type Int64Observable interface { + Asynchronous + + int64Observable() +} + +// Int64ObservableCounter is an instrument used to asynchronously record +// increasing int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed the to be the cumulative sum of the count. +// +// Warning: methods may be added to this interface in minor releases. +type Int64ObservableCounter interface{ Int64Observable } + +// Int64ObservableUpDownCounter is an instrument used to asynchronously record +// int64 measurements once per collection cycle. Observations are only made +// within a callback for this instrument. The value observed is assumed the to +// be the cumulative sum of the count. +// +// Warning: methods may be added to this interface in minor releases. +type Int64ObservableUpDownCounter interface{ Int64Observable } + +// Int64ObservableGauge is an instrument used to asynchronously record +// instantaneous int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. +// +// Warning: methods may be added to this interface in minor releases. +type Int64ObservableGauge interface{ Int64Observable } + +// Int64Observer is a recorder of int64 measurements. +// +// Warning: methods may be added to this interface in minor releases. +type Int64Observer interface { + Observe(value int64, attributes ...attribute.KeyValue) +} + +// Int64Callback is a function registered with a Meter that makes +// observations for a Int64Observerable instrument it is registered with. +// Calls to the Int64Observer record measurement values for the +// Int64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Int64Callback. 
Meaning, it should not report measurements with the same +// attributes as another Int64Callbacks also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Int64Callback func(context.Context, Int64Observer) error + +// Int64ObserverConfig contains options for Asynchronous instruments that +// observe int64 values. +type Int64ObserverConfig struct { + description string + unit unit.Unit + callbacks []Int64Callback +} + +// NewInt64ObserverConfig returns a new Int64ObserverConfig with all opts +// applied. +func NewInt64ObserverConfig(opts ...Int64ObserverOption) Int64ObserverConfig { + var config Int64ObserverConfig + for _, o := range opts { + config = o.applyInt64Observer(config) + } + return config +} + +// Description returns the Config description. +func (c Int64ObserverConfig) Description() string { + return c.description +} + +// Unit returns the Config unit. +func (c Int64ObserverConfig) Unit() unit.Unit { + return c.unit +} + +// Callbacks returns the Config callbacks. +func (c Int64ObserverConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObserverOption applies options to int64 Observer instruments. +type Int64ObserverOption interface { + applyInt64Observer(Int64ObserverConfig) Int64ObserverConfig +} + +type int64ObserverOptionFunc func(Int64ObserverConfig) Int64ObserverConfig + +func (fn int64ObserverOptionFunc) applyInt64Observer(cfg Int64ObserverConfig) Int64ObserverConfig { + return fn(cfg) +} + +// WithInt64Callback adds callback to be called for an instrument. +func WithInt64Callback(callback Int64Callback) Int64ObserverOption { + return int64ObserverOptionFunc(func(cfg Int64ObserverConfig) Int64ObserverConfig { + cfg.callbacks = append(cfg.callbacks, callback) + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go new file mode 100644 index 000000000000..c583be6fbf15 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import "go.opentelemetry.io/otel/metric/unit" + +// Asynchronous instruments are instruments that are updated within a Callback. +// If an instrument is observed outside of it's callback it should be an error. +// +// This interface is used as a grouping mechanism. +type Asynchronous interface { + asynchronous() +} + +// Synchronous instruments are updated in line with application code. +// +// This interface is used as a grouping mechanism. +type Synchronous interface { + synchronous() +} + +// Option applies options to all instruments. 
+type Option interface { + Float64ObserverOption + Int64ObserverOption + Float64Option + Int64Option +} + +type descOpt string + +func (o descOpt) applyFloat64(c Float64Config) Float64Config { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64(c Int64Config) Int64Config { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Observer(c Float64ObserverConfig) Float64ObserverConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Observer(c Int64ObserverConfig) Int64ObserverConfig { + c.description = string(o) + return c +} + +// WithDescription sets the instrument description. +func WithDescription(desc string) Option { return descOpt(desc) } + +type unitOpt unit.Unit + +func (o unitOpt) applyFloat64(c Float64Config) Float64Config { + c.unit = unit.Unit(o) + return c +} + +func (o unitOpt) applyInt64(c Int64Config) Int64Config { + c.unit = unit.Unit(o) + return c +} + +func (o unitOpt) applyFloat64Observer(c Float64ObserverConfig) Float64ObserverConfig { + c.unit = unit.Unit(o) + return c +} + +func (o unitOpt) applyInt64Observer(c Int64ObserverConfig) Int64ObserverConfig { + c.unit = unit.Unit(o) + return c +} + +// WithUnit sets the instrument unit. +func WithUnit(u unit.Unit) Option { return unitOpt(u) } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go new file mode 100644 index 000000000000..d8f6ba9f4387 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go @@ -0,0 +1,86 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/unit" +) + +// Float64Counter is an instrument that records increasing float64 values. +// +// Warning: methods may be added to this interface in minor releases. +type Float64Counter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Float64UpDownCounter is an instrument that records increasing or decreasing +// float64 values. +// +// Warning: methods may be added to this interface in minor releases. +type Float64UpDownCounter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Float64Histogram is an instrument that records a distribution of float64 +// values. +// +// Warning: methods may be added to this interface in minor releases. +type Float64Histogram interface { + // Record adds an additional value to the distribution. + Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Float64Config contains options for Asynchronous instruments that +// observe float64 values. 
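WithDescription and WithUnit above return an Option that satisfies every per-instrument option interface, so the same two helpers configure sync and async, int64 and float64 instruments alike. A sketch creating a counter with both (the instrument name is a placeholder and unit.Bytes is assumed to be defined in metric/unit):

package main

import (
	"context"

	"go.opentelemetry.io/otel/metric/global"
	"go.opentelemetry.io/otel/metric/instrument"
	"go.opentelemetry.io/otel/metric/unit"
)

func main() {
	meter := global.Meter("example.com/instrumentation")

	sent, err := meter.Int64Counter(
		"network.bytes.sent", // hypothetical instrument name
		instrument.WithDescription("bytes written to the network"),
		instrument.WithUnit(unit.Bytes),
	)
	if err != nil {
		panic(err)
	}
	sent.Add(context.Background(), 512)
}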
+type Float64Config struct { + description string + unit unit.Unit +} + +// Float64Config contains options for Synchronous instruments that record +// float64 values. +func NewFloat64Config(opts ...Float64Option) Float64Config { + var config Float64Config + for _, o := range opts { + config = o.applyFloat64(config) + } + return config +} + +// Description returns the Config description. +func (c Float64Config) Description() string { + return c.description +} + +// Unit returns the Config unit. +func (c Float64Config) Unit() unit.Unit { + return c.unit +} + +// Float64Option applies options to synchronous float64 instruments. +type Float64Option interface { + applyFloat64(Float64Config) Float64Config +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go new file mode 100644 index 000000000000..96bf730e4b34 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go @@ -0,0 +1,86 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/unit" +) + +// Int64Counter is an instrument that records increasing int64 values. +// +// Warning: methods may be added to this interface in minor releases. +type Int64Counter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Int64UpDownCounter is an instrument that records increasing or decreasing +// int64 values. +// +// Warning: methods may be added to this interface in minor releases. +type Int64UpDownCounter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Int64Histogram is an instrument that records a distribution of int64 +// values. +// +// Warning: methods may be added to this interface in minor releases. +type Int64Histogram interface { + // Record adds an additional value to the distribution. + Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Int64Config contains options for Synchronous instruments that record int64 +// values. +type Int64Config struct { + description string + unit unit.Unit +} + +// NewInt64Config returns a new Int64Config with all opts +// applied. +func NewInt64Config(opts ...Int64Option) Int64Config { + var config Int64Config + for _, o := range opts { + config = o.applyInt64(config) + } + return config +} + +// Description returns the Config description. +func (c Int64Config) Description() string { + return c.description +} + +// Unit returns the Config unit. +func (c Int64Config) Unit() unit.Unit { + return c.unit +} + +// Int64Option applies options to synchronous int64 instruments. 
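The synchronous histogram interfaces above record one value per call, and attributes qualify each measurement. A sketch timing an operation with a Float64Histogram (names are placeholders; unit.Milliseconds is assumed to exist in metric/unit):

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric/global"
	"go.opentelemetry.io/otel/metric/instrument"
	"go.opentelemetry.io/otel/metric/unit"
)

func main() {
	meter := global.Meter("example.com/instrumentation")
	latency, err := meter.Float64Histogram(
		"request.duration", // hypothetical instrument name
		instrument.WithUnit(unit.Milliseconds),
	)
	if err != nil {
		panic(err)
	}

	start := time.Now()
	// ... the work being measured ...
	latency.Record(context.Background(),
		float64(time.Since(start).Milliseconds()),
		attribute.String("http.route", "/items"), // illustrative attribute
	)
}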
+type Int64Option interface { + applyInt64(Int64Config) Int64Config +} diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go new file mode 100644 index 000000000000..d1480fa5f3ed --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go @@ -0,0 +1,355 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/metric/internal/global" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/instrument" +) + +// unwrapper unwraps to return the underlying instrument implementation. +type unwrapper interface { + Unwrap() instrument.Asynchronous +} + +type afCounter struct { + instrument.Float64Observable + + name string + opts []instrument.Float64ObserverOption + + delegate atomic.Value //instrument.Float64ObservableCounter +} + +var _ unwrapper = (*afCounter)(nil) +var _ instrument.Float64ObservableCounter = (*afCounter)(nil) + +func (i *afCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afCounter) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Float64ObservableCounter) + } + return nil +} + +type afUpDownCounter struct { + instrument.Float64Observable + + name string + opts []instrument.Float64ObserverOption + + delegate atomic.Value //instrument.Float64ObservableUpDownCounter +} + +var _ unwrapper = (*afUpDownCounter)(nil) +var _ instrument.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) + +func (i *afUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afUpDownCounter) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Float64ObservableUpDownCounter) + } + return nil +} + +type afGauge struct { + instrument.Float64Observable + + name string + opts []instrument.Float64ObserverOption + + delegate atomic.Value //instrument.Float64ObservableGauge +} + +var _ unwrapper = (*afGauge)(nil) +var _ instrument.Float64ObservableGauge = (*afGauge)(nil) + +func (i *afGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableGauge(i.name, i.opts...) 
+ if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afGauge) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Float64ObservableGauge) + } + return nil +} + +type aiCounter struct { + instrument.Int64Observable + + name string + opts []instrument.Int64ObserverOption + + delegate atomic.Value //instrument.Int64ObservableCounter +} + +var _ unwrapper = (*aiCounter)(nil) +var _ instrument.Int64ObservableCounter = (*aiCounter)(nil) + +func (i *aiCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiCounter) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Int64ObservableCounter) + } + return nil +} + +type aiUpDownCounter struct { + instrument.Int64Observable + + name string + opts []instrument.Int64ObserverOption + + delegate atomic.Value //instrument.Int64ObservableUpDownCounter +} + +var _ unwrapper = (*aiUpDownCounter)(nil) +var _ instrument.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) + +func (i *aiUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiUpDownCounter) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Int64ObservableUpDownCounter) + } + return nil +} + +type aiGauge struct { + instrument.Int64Observable + + name string + opts []instrument.Int64ObserverOption + + delegate atomic.Value //instrument.Int64ObservableGauge +} + +var _ unwrapper = (*aiGauge)(nil) +var _ instrument.Int64ObservableGauge = (*aiGauge)(nil) + +func (i *aiGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableGauge(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiGauge) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Int64ObservableGauge) + } + return nil +} + +// Sync Instruments. +type sfCounter struct { + name string + opts []instrument.Float64Option + + delegate atomic.Value //instrument.Float64Counter + + instrument.Synchronous +} + +var _ instrument.Float64Counter = (*sfCounter)(nil) + +func (i *sfCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64Counter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Float64Counter).Add(ctx, incr, attrs...) + } +} + +type sfUpDownCounter struct { + name string + opts []instrument.Float64Option + + delegate atomic.Value //instrument.Float64UpDownCounter + + instrument.Synchronous +} + +var _ instrument.Float64UpDownCounter = (*sfUpDownCounter)(nil) + +func (i *sfUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64UpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Float64UpDownCounter).Add(ctx, incr, attrs...) 
+ } +} + +type sfHistogram struct { + name string + opts []instrument.Float64Option + + delegate atomic.Value //instrument.Float64Histogram + + instrument.Synchronous +} + +var _ instrument.Float64Histogram = (*sfHistogram)(nil) + +func (i *sfHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Float64Histogram(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfHistogram) Record(ctx context.Context, x float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Float64Histogram).Record(ctx, x, attrs...) + } +} + +type siCounter struct { + name string + opts []instrument.Int64Option + + delegate atomic.Value //instrument.Int64Counter + + instrument.Synchronous +} + +var _ instrument.Int64Counter = (*siCounter)(nil) + +func (i *siCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64Counter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Int64Counter).Add(ctx, x, attrs...) + } +} + +type siUpDownCounter struct { + name string + opts []instrument.Int64Option + + delegate atomic.Value //instrument.Int64UpDownCounter + + instrument.Synchronous +} + +var _ instrument.Int64UpDownCounter = (*siUpDownCounter)(nil) + +func (i *siUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64UpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siUpDownCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Int64UpDownCounter).Add(ctx, x, attrs...) + } +} + +type siHistogram struct { + name string + opts []instrument.Int64Option + + delegate atomic.Value //instrument.Int64Histogram + + instrument.Synchronous +} + +var _ instrument.Int64Histogram = (*siHistogram)(nil) + +func (i *siHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Int64Histogram(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siHistogram) Record(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Int64Histogram).Record(ctx, x, attrs...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go new file mode 100644 index 000000000000..8acf632863cb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go @@ -0,0 +1,354 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
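The delegating types above (afCounter, sfCounter, and the rest) all follow the same pattern: a placeholder instrument keeps its eventual SDK delegate in an atomic.Value, drops measurements while no delegate is configured, and forwards every call once one is installed. The standalone sketch below only illustrates that pattern; it is not part of the vendored code, and names such as delegatingCounter and realCounter are invented for the example.

package main

import (
	"fmt"
	"sync/atomic"
)

// counter is the behaviour shared by the placeholder and the real instrument.
type counter interface {
	Add(incr int64)
}

// realCounter stands in for an SDK-provided instrument.
type realCounter struct{ total int64 }

func (c *realCounter) Add(incr int64) { c.total += incr }

// delegatingCounter mirrors the structure of siCounter above: it holds the
// eventual delegate in an atomic.Value and forwards only once it is set.
type delegatingCounter struct {
	delegate atomic.Value // counter
}

func (c *delegatingCounter) setDelegate(d counter) { c.delegate.Store(d) }

func (c *delegatingCounter) Add(incr int64) {
	if d := c.delegate.Load(); d != nil {
		d.(counter).Add(incr)
		return
	}
	// Before a delegate is configured the measurement is dropped, matching
	// the no-op behaviour of the global instruments.
}

func main() {
	var c delegatingCounter
	c.Add(5) // dropped: no delegate yet

	sdk := &realCounter{}
	c.setDelegate(sdk) // installing an SDK MeterProvider triggers this
	c.Add(7)           // forwarded to the delegate

	fmt.Println(sdk.total) // 7
}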
+ +package global // import "go.opentelemetry.io/otel/metric/internal/global" + +import ( + "container/list" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/instrument" +) + +// meterProvider is a placeholder for a configured SDK MeterProvider. +// +// All MeterProvider functionality is forwarded to a delegate once +// configured. +type meterProvider struct { + mtx sync.Mutex + meters map[il]*meter + + delegate metric.MeterProvider +} + +type il struct { + name string + version string +} + +// setDelegate configures p to delegate all MeterProvider functionality to +// provider. +// +// All Meters provided prior to this function call are switched out to be +// Meters provided by provider. All instruments and callbacks are recreated and +// delegated. +// +// It is guaranteed by the caller that this happens only once. +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.meters) == 0 { + return + } + + for _, meter := range p.meters { + meter.setDelegate(provider) + } + + p.meters = nil +} + +// Meter implements MeterProvider. +func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. + + c := metric.NewMeterConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.meters == nil { + p.meters = make(map[il]*meter) + } + + if val, ok := p.meters[key]; ok { + return val + } + + t := &meter{name: name, opts: opts} + p.meters[key] = t + return t +} + +// meter is a placeholder for a metric.Meter. +// +// All Meter functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopMeter. +type meter struct { + name string + opts []metric.MeterOption + + mtx sync.Mutex + instruments []delegatedInstrument + + registry list.List + + delegate atomic.Value // metric.Meter +} + +type delegatedInstrument interface { + setDelegate(metric.Meter) +} + +// setDelegate configures m to delegate all Meter functionality to Meters +// created by provider. +// +// All subsequent calls to the Meter methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (m *meter) setDelegate(provider metric.MeterProvider) { + meter := provider.Meter(m.name, m.opts...) + m.delegate.Store(meter) + + m.mtx.Lock() + defer m.mtx.Unlock() + + for _, inst := range m.instruments { + inst.setDelegate(meter) + } + + for e := m.registry.Front(); e != nil; e = e.Next() { + r := e.Value.(*registration) + r.setDelegate(meter) + m.registry.Remove(e) + } + + m.instruments = nil + m.registry.Init() +} + +func (m *meter) Int64Counter(name string, options ...instrument.Int64Option) (instrument.Int64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64UpDownCounter(name string, options ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64UpDownCounter(name, options...) 
+ } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64Histogram(name string, options ...instrument.Int64Option) (instrument.Int64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableUpDownCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableGauge(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Counter(name string, options ...instrument.Float64Option) (instrument.Float64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64UpDownCounter(name string, options ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64UpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Histogram(name string, options ...instrument.Float64Option) (instrument.Float64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableCounter(name, options...) 
+ } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableUpDownCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableGauge(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +// RegisterCallback captures the function that will be called during Collect. +func (m *meter) RegisterCallback(f metric.Callback, insts ...instrument.Asynchronous) (metric.Registration, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + insts = unwrapInstruments(insts) + return del.RegisterCallback(f, insts...) + } + + m.mtx.Lock() + defer m.mtx.Unlock() + + reg := &registration{instruments: insts, function: f} + e := m.registry.PushBack(reg) + reg.unreg = func() error { + m.mtx.Lock() + _ = m.registry.Remove(e) + m.mtx.Unlock() + return nil + } + return reg, nil +} + +func unwrapInstruments(instruments []instrument.Asynchronous) []instrument.Asynchronous { + out := make([]instrument.Asynchronous, 0, len(instruments)) + + for _, inst := range instruments { + if in, ok := inst.(unwrapper); ok { + out = append(out, in.Unwrap()) + } else { + out = append(out, inst) + } + } + + return out +} + +type registration struct { + instruments []instrument.Asynchronous + function metric.Callback + + unreg func() error + unregMu sync.Mutex +} + +func (c *registration) setDelegate(m metric.Meter) { + insts := unwrapInstruments(c.instruments) + + c.unregMu.Lock() + defer c.unregMu.Unlock() + + if c.unreg == nil { + // Unregister already called. + return + } + + reg, err := m.RegisterCallback(c.function, insts...) + if err != nil { + otel.Handle(err) + } + + c.unreg = reg.Unregister +} + +func (c *registration) Unregister() error { + c.unregMu.Lock() + defer c.unregMu.Unlock() + if c.unreg == nil { + // Unregister already called. + return nil + } + + var err error + err, c.unreg = c.unreg(), nil + return err +} diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go similarity index 54% rename from vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go rename to vendor/go.opentelemetry.io/otel/metric/internal/global/state.go index 896c6dd1c30b..47c0d787d8a6 100644 --- a/vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go +++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go @@ -4,7 +4,7 @@ // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,55 +12,57 @@ // See the License for the specific language governing permissions and // limitations under the License. -package global // import "go.opentelemetry.io/otel/internal/metric/global" +package global // import "go.opentelemetry.io/otel/metric/internal/global" import ( + "errors" "sync" "sync/atomic" + "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/metric" ) -type meterProviderHolder struct { - mp metric.MeterProvider -} - var ( - globalMeter = defaultMeterValue() + globalMeterProvider = defaultMeterProvider() delegateMeterOnce sync.Once ) +type meterProviderHolder struct { + mp metric.MeterProvider +} + // MeterProvider is the internal implementation for global.MeterProvider. func MeterProvider() metric.MeterProvider { - return globalMeter.Load().(meterProviderHolder).mp + return globalMeterProvider.Load().(meterProviderHolder).mp } // SetMeterProvider is the internal implementation for global.SetMeterProvider. func SetMeterProvider(mp metric.MeterProvider) { - delegateMeterOnce.Do(func() { - current := MeterProvider() + current := MeterProvider() + if _, cOk := current.(*meterProvider); cOk { + if _, mpOk := mp.(*meterProvider); mpOk && current == mp { + // Do not assign the default delegating MeterProvider to delegate + // to itself. + global.Error( + errors.New("no delegate configured in meter provider"), + "Setting meter provider to its current value. No delegate will be configured", + ) + return + } + } - if current == mp { - // Setting the provider to the prior default is nonsense, panic. - // Panic is acceptable because we are likely still early in the - // process lifetime. - panic("invalid MeterProvider, the global instance cannot be reinstalled") - } else if def, ok := current.(*meterProvider); ok { + delegateMeterOnce.Do(func() { + if def, ok := current.(*meterProvider); ok { def.setDelegate(mp) } }) - globalMeter.Store(meterProviderHolder{mp: mp}) + globalMeterProvider.Store(meterProviderHolder{mp: mp}) } -func defaultMeterValue() *atomic.Value { +func defaultMeterProvider() *atomic.Value { v := &atomic.Value{} - v.Store(meterProviderHolder{mp: newMeterProvider()}) + v.Store(meterProviderHolder{mp: &meterProvider{}}) return v } - -// ResetForTest restores the initial global state, for testing purposes. -func ResetForTest() { - globalMeter = defaultMeterValue() - delegateMeterOnce = sync.Once{} -} diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go new file mode 100644 index 000000000000..f1e917e9329f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -0,0 +1,138 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
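The file added next defines the instrument-based Meter interface that replaces the old Meter struct. The sketch below shows roughly how application code is expected to use it, relying only on types visible in this patch; the meter is assumed to come from whatever MeterProvider the application configures, and the instrument names and attributes are illustrative rather than taken from the library.

package app

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/instrument"
)

// instrumentApp creates one synchronous and one asynchronous instrument from
// the given meter. queueDepth is any function that reports the current depth.
func instrumentApp(meter metric.Meter, queueDepth func() float64) (instrument.Int64Counter, error) {
	// Synchronous counter, recorded at call sites with Add.
	requests, err := meter.Int64Counter("app.requests")
	if err != nil {
		return nil, err
	}

	// Asynchronous gauge, observed once per collection cycle via a callback.
	depth, err := meter.Float64ObservableGauge("app.queue.depth")
	if err != nil {
		return nil, err
	}
	_, err = meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
		o.ObserveFloat64(depth, queueDepth(), attribute.String("queue", "default"))
		return nil
	}, depth)
	if err != nil {
		return nil, err
	}

	// Record a measurement synchronously.
	requests.Add(context.Background(), 1, attribute.String("handler", "/"))
	return requests, nil
}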
+ +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/instrument" +) + +// MeterProvider provides access to named Meter instances, for instrumenting +// an application or library. +// +// Warning: methods may be added to this interface in minor releases. +type MeterProvider interface { + // Meter creates an instance of a `Meter` interface. The instrumentationName + // must be the name of the library providing instrumentation. This name may + // be the same as the instrumented code only if that code provides built-in + // instrumentation. If the instrumentationName is empty, then a + // implementation defined default name will be used instead. + Meter(instrumentationName string, opts ...MeterOption) Meter +} + +// Meter provides access to instrument instances for recording metrics. +// +// Warning: methods may be added to this interface in minor releases. +type Meter interface { + // Int64Counter returns a new instrument identified by name and configured + // with options. The instrument is used to synchronously record increasing + // int64 measurements during a computational operation. + Int64Counter(name string, options ...instrument.Int64Option) (instrument.Int64Counter, error) + // Int64UpDownCounter returns a new instrument identified by name and + // configured with options. The instrument is used to synchronously record + // int64 measurements during a computational operation. + Int64UpDownCounter(name string, options ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) + // Int64Histogram returns a new instrument identified by name and + // configured with options. The instrument is used to synchronously record + // the distribution of int64 measurements during a computational operation. + Int64Histogram(name string, options ...instrument.Int64Option) (instrument.Int64Histogram, error) + // Int64ObservableCounter returns a new instrument identified by name and + // configured with options. The instrument is used to asynchronously record + // increasing int64 measurements once per a measurement collection cycle. + Int64ObservableCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new instrument identified by name + // and configured with options. The instrument is used to asynchronously + // record int64 measurements once per a measurement collection cycle. + Int64ObservableUpDownCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new instrument identified by name and + // configured with options. The instrument is used to asynchronously record + // instantaneous int64 measurements once per a measurement collection + // cycle. + Int64ObservableGauge(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) + + // Float64Counter returns a new instrument identified by name and + // configured with options. The instrument is used to synchronously record + // increasing float64 measurements during a computational operation. + Float64Counter(name string, options ...instrument.Float64Option) (instrument.Float64Counter, error) + // Float64UpDownCounter returns a new instrument identified by name and + // configured with options. The instrument is used to synchronously record + // float64 measurements during a computational operation. 
+ Float64UpDownCounter(name string, options ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) + // Float64Histogram returns a new instrument identified by name and + // configured with options. The instrument is used to synchronously record + // the distribution of float64 measurements during a computational + // operation. + Float64Histogram(name string, options ...instrument.Float64Option) (instrument.Float64Histogram, error) + // Float64ObservableCounter returns a new instrument identified by name and + // configured with options. The instrument is used to asynchronously record + // increasing float64 measurements once per a measurement collection cycle. + Float64ObservableCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new instrument identified by + // name and configured with options. The instrument is used to + // asynchronously record float64 measurements once per a measurement + // collection cycle. + Float64ObservableUpDownCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new instrument identified by name and + // configured with options. The instrument is used to asynchronously record + // instantaneous float64 measurements once per a measurement collection + // cycle. + Float64ObservableGauge(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) + + // RegisterCallback registers f to be called during the collection of a + // measurement cycle. + // + // If Unregister of the returned Registration is called, f needs to be + // unregistered and not called during collection. + // + // The instruments f is registered with are the only instruments that f may + // observe values for. + // + // If no instruments are passed, f should not be registered nor called + // during collection. + RegisterCallback(f Callback, instruments ...instrument.Asynchronous) (Registration, error) +} + +// Callback is a function registered with a Meter that makes observations for +// the set of instruments it is registered with. The Observer parameter is used +// to record measurment observations for these instruments. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Callbacks. Meaning, it should not report measurements for an instrument with +// the same attributes as another Callback will report. +// +// The function needs to be concurrent safe. +type Callback func(context.Context, Observer) error + +// Observer records measurements for multiple instruments in a Callback. +type Observer interface { + // ObserveFloat64 records the float64 value with attributes for obsrv. + ObserveFloat64(obsrv instrument.Float64Observable, value float64, attributes ...attribute.KeyValue) + // ObserveInt64 records the int64 value with attributes for obsrv. + ObserveInt64(obsrv instrument.Int64Observable, value int64, attributes ...attribute.KeyValue) +} + +// Registration is an token representing the unique registration of a callback +// for a set of instruments with a Meter. +type Registration interface { + // Unregister removes the callback registration from a Meter. + // + // This method needs to be idempotent and concurrent safe. 
+ Unregister() error +} diff --git a/vendor/go.opentelemetry.io/otel/metric/metric.go b/vendor/go.opentelemetry.io/otel/metric/metric.go deleted file mode 100644 index d8c5a6b3f353..000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/metric.go +++ /dev/null @@ -1,538 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// MeterProvider supports named Meter instances. -type MeterProvider interface { - // Meter creates an implementation of the Meter interface. - // The instrumentationName must be the name of the library providing - // instrumentation. This name may be the same as the instrumented code - // only if that code provides built-in instrumentation. If the - // instrumentationName is empty, then a implementation defined default - // name will be used instead. - Meter(instrumentationName string, opts ...MeterOption) Meter -} - -// Meter is the creator of metric instruments. -// -// An uninitialized Meter is a no-op implementation. -type Meter struct { - impl sdkapi.MeterImpl -} - -// WrapMeterImpl constructs a `Meter` implementation from a -// `MeterImpl` implementation. -func WrapMeterImpl(impl sdkapi.MeterImpl) Meter { - return Meter{ - impl: impl, - } -} - -// Measurement is used for reporting a synchronous batch of metric -// values. Instances of this type should be created by synchronous -// instruments (e.g., Int64Counter.Measurement()). -// -// Note: This is an alias because it is a first-class member of the -// API but is also part of the lower-level sdkapi interface. -type Measurement = sdkapi.Measurement - -// Observation is used for reporting an asynchronous batch of metric -// values. Instances of this type should be created by asynchronous -// instruments (e.g., Int64GaugeObserver.Observation()). -// -// Note: This is an alias because it is a first-class member of the -// API but is also part of the lower-level sdkapi interface. -type Observation = sdkapi.Observation - -// RecordBatch atomically records a batch of measurements. -func (m Meter) RecordBatch(ctx context.Context, ls []attribute.KeyValue, ms ...Measurement) { - if m.impl == nil { - return - } - m.impl.RecordBatch(ctx, ls, ms...) -} - -// NewBatchObserver creates a new BatchObserver that supports -// making batches of observations for multiple instruments. -func (m Meter) NewBatchObserver(callback BatchObserverFunc) BatchObserver { - return BatchObserver{ - meter: m, - runner: newBatchAsyncRunner(callback), - } -} - -// NewInt64Counter creates a new integer Counter instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). 
-func (m Meter) NewInt64Counter(name string, options ...InstrumentOption) (Int64Counter, error) { - return wrapInt64CounterInstrument( - m.newSync(name, sdkapi.CounterInstrumentKind, number.Int64Kind, options)) -} - -// NewFloat64Counter creates a new floating point Counter with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewFloat64Counter(name string, options ...InstrumentOption) (Float64Counter, error) { - return wrapFloat64CounterInstrument( - m.newSync(name, sdkapi.CounterInstrumentKind, number.Float64Kind, options)) -} - -// NewInt64UpDownCounter creates a new integer UpDownCounter instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewInt64UpDownCounter(name string, options ...InstrumentOption) (Int64UpDownCounter, error) { - return wrapInt64UpDownCounterInstrument( - m.newSync(name, sdkapi.UpDownCounterInstrumentKind, number.Int64Kind, options)) -} - -// NewFloat64UpDownCounter creates a new floating point UpDownCounter with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewFloat64UpDownCounter(name string, options ...InstrumentOption) (Float64UpDownCounter, error) { - return wrapFloat64UpDownCounterInstrument( - m.newSync(name, sdkapi.UpDownCounterInstrumentKind, number.Float64Kind, options)) -} - -// NewInt64Histogram creates a new integer Histogram instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewInt64Histogram(name string, opts ...InstrumentOption) (Int64Histogram, error) { - return wrapInt64HistogramInstrument( - m.newSync(name, sdkapi.HistogramInstrumentKind, number.Int64Kind, opts)) -} - -// NewFloat64Histogram creates a new floating point Histogram with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewFloat64Histogram(name string, opts ...InstrumentOption) (Float64Histogram, error) { - return wrapFloat64HistogramInstrument( - m.newSync(name, sdkapi.HistogramInstrumentKind, number.Float64Kind, opts)) -} - -// NewInt64GaugeObserver creates a new integer GaugeObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64GaugeObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64GaugeObserver, error) { - if callback == nil { - return wrapInt64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64GaugeObserverInstrument( - m.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64GaugeObserver creates a new floating point GaugeObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). 
-func (m Meter) NewFloat64GaugeObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64GaugeObserver, error) { - if callback == nil { - return wrapFloat64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64GaugeObserverInstrument( - m.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64CounterObserver creates a new integer CounterObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64CounterObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64CounterObserver, error) { - if callback == nil { - return wrapInt64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64CounterObserverInstrument( - m.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64CounterObserver creates a new floating point CounterObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewFloat64CounterObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64CounterObserver, error) { - if callback == nil { - return wrapFloat64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64CounterObserverInstrument( - m.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64UpDownCounterObserver creates a new integer UpDownCounterObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64UpDownCounterObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64UpDownCounterObserver, error) { - if callback == nil { - return wrapInt64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64UpDownCounterObserverInstrument( - m.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64UpDownCounterObserver creates a new floating point UpDownCounterObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewFloat64UpDownCounterObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64UpDownCounterObserver, error) { - if callback == nil { - return wrapFloat64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64UpDownCounterObserverInstrument( - m.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64GaugeObserver creates a new integer GaugeObserver instrument -// with the given name, running in a batch callback, and customized with -// options. 
May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewInt64GaugeObserver(name string, opts ...InstrumentOption) (Int64GaugeObserver, error) { - if b.runner == nil { - return wrapInt64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64GaugeObserverInstrument( - b.meter.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64GaugeObserver creates a new floating point GaugeObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewFloat64GaugeObserver(name string, opts ...InstrumentOption) (Float64GaugeObserver, error) { - if b.runner == nil { - return wrapFloat64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64GaugeObserverInstrument( - b.meter.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// NewInt64CounterObserver creates a new integer CounterObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewInt64CounterObserver(name string, opts ...InstrumentOption) (Int64CounterObserver, error) { - if b.runner == nil { - return wrapInt64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64CounterObserverInstrument( - b.meter.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64CounterObserver creates a new floating point CounterObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewFloat64CounterObserver(name string, opts ...InstrumentOption) (Float64CounterObserver, error) { - if b.runner == nil { - return wrapFloat64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64CounterObserverInstrument( - b.meter.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// NewInt64UpDownCounterObserver creates a new integer UpDownCounterObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewInt64UpDownCounterObserver(name string, opts ...InstrumentOption) (Int64UpDownCounterObserver, error) { - if b.runner == nil { - return wrapInt64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64UpDownCounterObserverInstrument( - b.meter.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64UpDownCounterObserver creates a new floating point UpDownCounterObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). 
-func (b BatchObserver) NewFloat64UpDownCounterObserver(name string, opts ...InstrumentOption) (Float64UpDownCounterObserver, error) { - if b.runner == nil { - return wrapFloat64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64UpDownCounterObserverInstrument( - b.meter.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// MeterImpl returns the underlying MeterImpl of this Meter. -func (m Meter) MeterImpl() sdkapi.MeterImpl { - return m.impl -} - -// newAsync constructs one new asynchronous instrument. -func (m Meter) newAsync( - name string, - mkind sdkapi.InstrumentKind, - nkind number.Kind, - opts []InstrumentOption, - runner sdkapi.AsyncRunner, -) ( - sdkapi.AsyncImpl, - error, -) { - if m.impl == nil { - return sdkapi.NewNoopAsyncInstrument(), nil - } - cfg := NewInstrumentConfig(opts...) - desc := sdkapi.NewDescriptor(name, mkind, nkind, cfg.description, cfg.unit) - return m.impl.NewAsyncInstrument(desc, runner) -} - -// newSync constructs one new synchronous instrument. -func (m Meter) newSync( - name string, - metricKind sdkapi.InstrumentKind, - numberKind number.Kind, - opts []InstrumentOption, -) ( - sdkapi.SyncImpl, - error, -) { - if m.impl == nil { - return sdkapi.NewNoopSyncInstrument(), nil - } - cfg := NewInstrumentConfig(opts...) - desc := sdkapi.NewDescriptor(name, metricKind, numberKind, cfg.description, cfg.unit) - return m.impl.NewSyncInstrument(desc) -} - -// MeterMust is a wrapper for Meter interfaces that panics when any -// instrument constructor encounters an error. -type MeterMust struct { - meter Meter -} - -// BatchObserverMust is a wrapper for BatchObserver that panics when -// any instrument constructor encounters an error. -type BatchObserverMust struct { - batch BatchObserver -} - -// Must constructs a MeterMust implementation from a Meter, allowing -// the application to panic when any instrument constructor yields an -// error. -func Must(meter Meter) MeterMust { - return MeterMust{meter: meter} -} - -// NewInt64Counter calls `Meter.NewInt64Counter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64Counter(name string, cos ...InstrumentOption) Int64Counter { - if inst, err := mm.meter.NewInt64Counter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64Counter calls `Meter.NewFloat64Counter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64Counter(name string, cos ...InstrumentOption) Float64Counter { - if inst, err := mm.meter.NewFloat64Counter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownCounter calls `Meter.NewInt64UpDownCounter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64UpDownCounter(name string, cos ...InstrumentOption) Int64UpDownCounter { - if inst, err := mm.meter.NewInt64UpDownCounter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownCounter calls `Meter.NewFloat64UpDownCounter` and returns the -// instrument, panicking if it encounters an error. 
-func (mm MeterMust) NewFloat64UpDownCounter(name string, cos ...InstrumentOption) Float64UpDownCounter { - if inst, err := mm.meter.NewFloat64UpDownCounter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64Histogram calls `Meter.NewInt64Histogram` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64Histogram(name string, mos ...InstrumentOption) Int64Histogram { - if inst, err := mm.meter.NewInt64Histogram(name, mos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64Histogram calls `Meter.NewFloat64Histogram` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64Histogram(name string, mos ...InstrumentOption) Float64Histogram { - if inst, err := mm.meter.NewFloat64Histogram(name, mos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64GaugeObserver calls `Meter.NewInt64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64GaugeObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64GaugeObserver { - if inst, err := mm.meter.NewInt64GaugeObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64GaugeObserver calls `Meter.NewFloat64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64GaugeObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64GaugeObserver { - if inst, err := mm.meter.NewFloat64GaugeObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64CounterObserver calls `Meter.NewInt64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64CounterObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64CounterObserver { - if inst, err := mm.meter.NewInt64CounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64CounterObserver calls `Meter.NewFloat64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64CounterObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64CounterObserver { - if inst, err := mm.meter.NewFloat64CounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownCounterObserver calls `Meter.NewInt64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64UpDownCounterObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64UpDownCounterObserver { - if inst, err := mm.meter.NewInt64UpDownCounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownCounterObserver calls `Meter.NewFloat64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64UpDownCounterObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64UpDownCounterObserver { - if inst, err := mm.meter.NewFloat64UpDownCounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewBatchObserver returns a wrapper around BatchObserver that panics -// when any instrument constructor returns an error. 
-func (mm MeterMust) NewBatchObserver(callback BatchObserverFunc) BatchObserverMust { - return BatchObserverMust{ - batch: mm.meter.NewBatchObserver(callback), - } -} - -// NewInt64GaugeObserver calls `BatchObserver.NewInt64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewInt64GaugeObserver(name string, oos ...InstrumentOption) Int64GaugeObserver { - if inst, err := bm.batch.NewInt64GaugeObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64GaugeObserver calls `BatchObserver.NewFloat64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewFloat64GaugeObserver(name string, oos ...InstrumentOption) Float64GaugeObserver { - if inst, err := bm.batch.NewFloat64GaugeObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64CounterObserver calls `BatchObserver.NewInt64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewInt64CounterObserver(name string, oos ...InstrumentOption) Int64CounterObserver { - if inst, err := bm.batch.NewInt64CounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64CounterObserver calls `BatchObserver.NewFloat64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewFloat64CounterObserver(name string, oos ...InstrumentOption) Float64CounterObserver { - if inst, err := bm.batch.NewFloat64CounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownCounterObserver calls `BatchObserver.NewInt64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewInt64UpDownCounterObserver(name string, oos ...InstrumentOption) Int64UpDownCounterObserver { - if inst, err := bm.batch.NewInt64UpDownCounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownCounterObserver calls `BatchObserver.NewFloat64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewFloat64UpDownCounterObserver(name string, oos ...InstrumentOption) Float64UpDownCounterObserver { - if inst, err := bm.batch.NewFloat64UpDownCounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} diff --git a/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go b/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go deleted file mode 100644 index 2da24c8f211e..000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
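The MeterMust and BatchObserverMust wrappers deleted above have no one-for-one replacement in the new API: instrument constructors now return an error that callers handle directly. Where the old panic-on-error style is still wanted, a small generic helper along the lines of the following sketch can stand in for metric.Must; the helper and the function using it are illustrative, not part of the library.

package app

import (
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/instrument"
)

// must panics if an instrument constructor returned an error, mirroring the
// behaviour of the removed metric.Must(meter) wrapper.
func must[T any](inst T, err error) T {
	if err != nil {
		panic(err)
	}
	return inst
}

// newInstruments shows the helper applied to the new constructors.
func newInstruments(meter metric.Meter) (instrument.Int64Counter, instrument.Float64Histogram) {
	requests := must(meter.Int64Counter("app.requests.total"))
	latency := must(meter.Float64Histogram("app.request.duration"))
	return requests, latency
}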
- -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - "errors" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// ErrSDKReturnedNilImpl is returned when a new `MeterImpl` returns nil. -var ErrSDKReturnedNilImpl = errors.New("SDK returned a nil implementation") - -// Int64ObserverFunc is a type of callback that integral -// observers run. -type Int64ObserverFunc func(context.Context, Int64ObserverResult) - -// Float64ObserverFunc is a type of callback that floating point -// observers run. -type Float64ObserverFunc func(context.Context, Float64ObserverResult) - -// BatchObserverFunc is a callback argument for use with any -// Observer instrument that will be reported as a batch of -// observations. -type BatchObserverFunc func(context.Context, BatchObserverResult) - -// Int64ObserverResult is passed to an observer callback to capture -// observations for one asynchronous integer metric instrument. -type Int64ObserverResult struct { - instrument sdkapi.AsyncImpl - function func([]attribute.KeyValue, ...Observation) -} - -// Float64ObserverResult is passed to an observer callback to capture -// observations for one asynchronous floating point metric instrument. -type Float64ObserverResult struct { - instrument sdkapi.AsyncImpl - function func([]attribute.KeyValue, ...Observation) -} - -// BatchObserverResult is passed to a batch observer callback to -// capture observations for multiple asynchronous instruments. -type BatchObserverResult struct { - function func([]attribute.KeyValue, ...Observation) -} - -// Observe captures a single integer value from the associated -// instrument callback, with the given labels. -func (ir Int64ObserverResult) Observe(value int64, labels ...attribute.KeyValue) { - ir.function(labels, sdkapi.NewObservation(ir.instrument, number.NewInt64Number(value))) -} - -// Observe captures a single floating point value from the associated -// instrument callback, with the given labels. -func (fr Float64ObserverResult) Observe(value float64, labels ...attribute.KeyValue) { - fr.function(labels, sdkapi.NewObservation(fr.instrument, number.NewFloat64Number(value))) -} - -// Observe captures a multiple observations from the associated batch -// instrument callback, with the given labels. -func (br BatchObserverResult) Observe(labels []attribute.KeyValue, obs ...Observation) { - br.function(labels, obs...) -} - -var _ sdkapi.AsyncSingleRunner = (*Int64ObserverFunc)(nil) -var _ sdkapi.AsyncSingleRunner = (*Float64ObserverFunc)(nil) -var _ sdkapi.AsyncBatchRunner = (*BatchObserverFunc)(nil) - -// newInt64AsyncRunner returns a single-observer callback for integer Observer instruments. -func newInt64AsyncRunner(c Int64ObserverFunc) sdkapi.AsyncSingleRunner { - return &c -} - -// newFloat64AsyncRunner returns a single-observer callback for floating point Observer instruments. -func newFloat64AsyncRunner(c Float64ObserverFunc) sdkapi.AsyncSingleRunner { - return &c -} - -// newBatchAsyncRunner returns a batch-observer callback use with multiple Observer instruments. -func newBatchAsyncRunner(c BatchObserverFunc) sdkapi.AsyncBatchRunner { - return &c -} - -// AnyRunner implements AsyncRunner. -func (*Int64ObserverFunc) AnyRunner() {} - -// AnyRunner implements AsyncRunner. -func (*Float64ObserverFunc) AnyRunner() {} - -// AnyRunner implements AsyncRunner. -func (*BatchObserverFunc) AnyRunner() {} - -// Run implements AsyncSingleRunner. 
-func (i *Int64ObserverFunc) Run(ctx context.Context, impl sdkapi.AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { - (*i)(ctx, Int64ObserverResult{ - instrument: impl, - function: function, - }) -} - -// Run implements AsyncSingleRunner. -func (f *Float64ObserverFunc) Run(ctx context.Context, impl sdkapi.AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { - (*f)(ctx, Float64ObserverResult{ - instrument: impl, - function: function, - }) -} - -// Run implements AsyncBatchRunner. -func (b *BatchObserverFunc) Run(ctx context.Context, function func([]attribute.KeyValue, ...Observation)) { - (*b)(ctx, BatchObserverResult{ - function: function, - }) -} - -// wrapInt64GaugeObserverInstrument converts an AsyncImpl into Int64GaugeObserver. -func wrapInt64GaugeObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64GaugeObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64GaugeObserver{asyncInstrument: common}, err -} - -// wrapFloat64GaugeObserverInstrument converts an AsyncImpl into Float64GaugeObserver. -func wrapFloat64GaugeObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64GaugeObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64GaugeObserver{asyncInstrument: common}, err -} - -// wrapInt64CounterObserverInstrument converts an AsyncImpl into Int64CounterObserver. -func wrapInt64CounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64CounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64CounterObserver{asyncInstrument: common}, err -} - -// wrapFloat64CounterObserverInstrument converts an AsyncImpl into Float64CounterObserver. -func wrapFloat64CounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64CounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64CounterObserver{asyncInstrument: common}, err -} - -// wrapInt64UpDownCounterObserverInstrument converts an AsyncImpl into Int64UpDownCounterObserver. -func wrapInt64UpDownCounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64UpDownCounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64UpDownCounterObserver{asyncInstrument: common}, err -} - -// wrapFloat64UpDownCounterObserverInstrument converts an AsyncImpl into Float64UpDownCounterObserver. -func wrapFloat64UpDownCounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64UpDownCounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64UpDownCounterObserver{asyncInstrument: common}, err -} - -// BatchObserver represents an Observer callback that can report -// observations for multiple instruments. -type BatchObserver struct { - meter Meter - runner sdkapi.AsyncBatchRunner -} - -// Int64GaugeObserver is a metric that captures a set of int64 values at a -// point in time. -type Int64GaugeObserver struct { - asyncInstrument -} - -// Float64GaugeObserver is a metric that captures a set of float64 values -// at a point in time. -type Float64GaugeObserver struct { - asyncInstrument -} - -// Int64CounterObserver is a metric that captures a precomputed sum of -// int64 values at a point in time. -type Int64CounterObserver struct { - asyncInstrument -} - -// Float64CounterObserver is a metric that captures a precomputed sum of -// float64 values at a point in time. 
-type Float64CounterObserver struct { - asyncInstrument -} - -// Int64UpDownCounterObserver is a metric that captures a precomputed sum of -// int64 values at a point in time. -type Int64UpDownCounterObserver struct { - asyncInstrument -} - -// Float64UpDownCounterObserver is a metric that captures a precomputed sum of -// float64 values at a point in time. -type Float64UpDownCounterObserver struct { - asyncInstrument -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64GaugeObserver) Observation(v int64) Observation { - return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64GaugeObserver) Observation(v float64) Observation { - return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64CounterObserver) Observation(v int64) Observation { - return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64CounterObserver) Observation(v float64) Observation { - return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64UpDownCounterObserver) Observation(v int64) Observation { - return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64UpDownCounterObserver) Observation(v float64) Observation { - return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) -} - -// syncInstrument contains a SyncImpl. -type syncInstrument struct { - instrument sdkapi.SyncImpl -} - -// asyncInstrument contains a AsyncImpl. -type asyncInstrument struct { - instrument sdkapi.AsyncImpl -} - -// AsyncImpl implements AsyncImpl. -func (a asyncInstrument) AsyncImpl() sdkapi.AsyncImpl { - return a.instrument -} - -// SyncImpl returns the implementation object for synchronous instruments. 
-func (s syncInstrument) SyncImpl() sdkapi.SyncImpl { - return s.instrument -} - -func (s syncInstrument) float64Measurement(value float64) Measurement { - return sdkapi.NewMeasurement(s.instrument, number.NewFloat64Number(value)) -} - -func (s syncInstrument) int64Measurement(value int64) Measurement { - return sdkapi.NewMeasurement(s.instrument, number.NewInt64Number(value)) -} - -func (s syncInstrument) directRecord(ctx context.Context, number number.Number, labels []attribute.KeyValue) { - s.instrument.RecordOne(ctx, number, labels) -} - -// checkNewAsync receives an AsyncImpl and potential -// error, and returns the same types, checking for and ensuring that -// the returned interface is not nil. -func checkNewAsync(instrument sdkapi.AsyncImpl, err error) (asyncInstrument, error) { - if instrument == nil { - if err == nil { - err = ErrSDKReturnedNilImpl - } - instrument = sdkapi.NewNoopAsyncInstrument() - } - return asyncInstrument{ - instrument: instrument, - }, err -} - -// checkNewSync receives an SyncImpl and potential -// error, and returns the same types, checking for and ensuring that -// the returned interface is not nil. -func checkNewSync(instrument sdkapi.SyncImpl, err error) (syncInstrument, error) { - if instrument == nil { - if err == nil { - err = ErrSDKReturnedNilImpl - } - // Note: an alternate behavior would be to synthesize a new name - // or group all duplicately-named instruments of a certain type - // together and use a tag for the original name, e.g., - // name = 'invalid.counter.int64' - // label = 'original-name=duplicate-counter-name' - instrument = sdkapi.NewNoopSyncInstrument() - } - return syncInstrument{ - instrument: instrument, - }, err -} - -// wrapInt64CounterInstrument converts a SyncImpl into Int64Counter. -func wrapInt64CounterInstrument(syncInst sdkapi.SyncImpl, err error) (Int64Counter, error) { - common, err := checkNewSync(syncInst, err) - return Int64Counter{syncInstrument: common}, err -} - -// wrapFloat64CounterInstrument converts a SyncImpl into Float64Counter. -func wrapFloat64CounterInstrument(syncInst sdkapi.SyncImpl, err error) (Float64Counter, error) { - common, err := checkNewSync(syncInst, err) - return Float64Counter{syncInstrument: common}, err -} - -// wrapInt64UpDownCounterInstrument converts a SyncImpl into Int64UpDownCounter. -func wrapInt64UpDownCounterInstrument(syncInst sdkapi.SyncImpl, err error) (Int64UpDownCounter, error) { - common, err := checkNewSync(syncInst, err) - return Int64UpDownCounter{syncInstrument: common}, err -} - -// wrapFloat64UpDownCounterInstrument converts a SyncImpl into Float64UpDownCounter. -func wrapFloat64UpDownCounterInstrument(syncInst sdkapi.SyncImpl, err error) (Float64UpDownCounter, error) { - common, err := checkNewSync(syncInst, err) - return Float64UpDownCounter{syncInstrument: common}, err -} - -// wrapInt64HistogramInstrument converts a SyncImpl into Int64Histogram. -func wrapInt64HistogramInstrument(syncInst sdkapi.SyncImpl, err error) (Int64Histogram, error) { - common, err := checkNewSync(syncInst, err) - return Int64Histogram{syncInstrument: common}, err -} - -// wrapFloat64HistogramInstrument converts a SyncImpl into Float64Histogram. -func wrapFloat64HistogramInstrument(syncInst sdkapi.SyncImpl, err error) (Float64Histogram, error) { - common, err := checkNewSync(syncInst, err) - return Float64Histogram{syncInstrument: common}, err -} - -// Float64Counter is a metric that accumulates float64 values. 
-type Float64Counter struct { - syncInstrument -} - -// Int64Counter is a metric that accumulates int64 values. -type Int64Counter struct { - syncInstrument -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64Counter) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64Counter) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Float64Counter) Add(ctx context.Context, value float64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Int64Counter) Add(ctx context.Context, value int64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} - -// Float64UpDownCounter is a metric instrument that sums floating -// point values. -type Float64UpDownCounter struct { - syncInstrument -} - -// Int64UpDownCounter is a metric instrument that sums integer values. -type Int64UpDownCounter struct { - syncInstrument -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64UpDownCounter) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64UpDownCounter) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Float64UpDownCounter) Add(ctx context.Context, value float64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Int64UpDownCounter) Add(ctx context.Context, value int64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} - -// Float64Histogram is a metric that records float64 values. -type Float64Histogram struct { - syncInstrument -} - -// Int64Histogram is a metric that records int64 values. -type Int64Histogram struct { - syncInstrument -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64Histogram) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64Histogram) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Record adds a new value to the list of Histogram's records. The -// labels should contain the keys and values to be associated with -// this value. -func (c Float64Histogram) Record(ctx context.Context, value float64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Record adds a new value to the Histogram's distribution. The -// labels should contain the keys and values to be associated with -// this value. 
-func (c Int64Histogram) Record(ctx context.Context, value int64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop.go index 37c653f51a1e..c586627aef87 100644 --- a/vendor/go.opentelemetry.io/otel/metric/noop.go +++ b/vendor/go.opentelemetry.io/otel/metric/noop.go @@ -14,17 +14,185 @@ package metric // import "go.opentelemetry.io/otel/metric" -type noopMeterProvider struct{} +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/instrument" +) -// NewNoopMeterProvider returns an implementation of MeterProvider that -// performs no operations. The Meter and Instrument created from the returned -// MeterProvider also perform no operations. +// NewNoopMeterProvider creates a MeterProvider that does not record any metrics. func NewNoopMeterProvider() MeterProvider { return noopMeterProvider{} } -var _ MeterProvider = noopMeterProvider{} +type noopMeterProvider struct{} + +func (noopMeterProvider) Meter(string, ...MeterOption) Meter { + return noopMeter{} +} + +// NewNoopMeter creates a Meter that does not record any metrics. +func NewNoopMeter() Meter { + return noopMeter{} +} + +type noopMeter struct{} + +func (noopMeter) Int64Counter(string, ...instrument.Int64Option) (instrument.Int64Counter, error) { + return nonrecordingSyncInt64Instrument{}, nil +} + +func (noopMeter) Int64UpDownCounter(string, ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) { + return nonrecordingSyncInt64Instrument{}, nil +} + +func (noopMeter) Int64Histogram(string, ...instrument.Int64Option) (instrument.Int64Histogram, error) { + return nonrecordingSyncInt64Instrument{}, nil +} + +func (noopMeter) Int64ObservableCounter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) { + return nonrecordingAsyncInt64Instrument{}, nil +} + +func (noopMeter) Int64ObservableUpDownCounter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) { + return nonrecordingAsyncInt64Instrument{}, nil +} + +func (noopMeter) Int64ObservableGauge(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) { + return nonrecordingAsyncInt64Instrument{}, nil +} + +func (noopMeter) Float64Counter(string, ...instrument.Float64Option) (instrument.Float64Counter, error) { + return nonrecordingSyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64UpDownCounter(string, ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) { + return nonrecordingSyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64Histogram(string, ...instrument.Float64Option) (instrument.Float64Histogram, error) { + return nonrecordingSyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64ObservableCounter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) { + return nonrecordingAsyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64ObservableUpDownCounter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) { + return nonrecordingAsyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64ObservableGauge(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) { + return nonrecordingAsyncFloat64Instrument{}, nil +} + +// RegisterCallback creates a register callback that does not record any metrics. 
+func (noopMeter) RegisterCallback(Callback, ...instrument.Asynchronous) (Registration, error) { + return noopReg{}, nil +} + +type noopReg struct{} -func (noopMeterProvider) Meter(instrumentationName string, opts ...MeterOption) Meter { - return Meter{} +func (noopReg) Unregister() error { return nil } + +type nonrecordingAsyncFloat64Instrument struct { + instrument.Float64Observable +} + +var ( + _ instrument.Float64ObservableCounter = nonrecordingAsyncFloat64Instrument{} + _ instrument.Float64ObservableUpDownCounter = nonrecordingAsyncFloat64Instrument{} + _ instrument.Float64ObservableGauge = nonrecordingAsyncFloat64Instrument{} +) + +func (n nonrecordingAsyncFloat64Instrument) Counter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) { + return n, nil +} + +func (n nonrecordingAsyncFloat64Instrument) UpDownCounter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) { + return n, nil +} + +func (n nonrecordingAsyncFloat64Instrument) Gauge(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) { + return n, nil +} + +type nonrecordingAsyncInt64Instrument struct { + instrument.Int64Observable +} + +var ( + _ instrument.Int64ObservableCounter = nonrecordingAsyncInt64Instrument{} + _ instrument.Int64ObservableUpDownCounter = nonrecordingAsyncInt64Instrument{} + _ instrument.Int64ObservableGauge = nonrecordingAsyncInt64Instrument{} +) + +func (n nonrecordingAsyncInt64Instrument) Counter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) { + return n, nil +} + +func (n nonrecordingAsyncInt64Instrument) UpDownCounter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) { + return n, nil +} + +func (n nonrecordingAsyncInt64Instrument) Gauge(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) { + return n, nil +} + +type nonrecordingSyncFloat64Instrument struct { + instrument.Synchronous +} + +var ( + _ instrument.Float64Counter = nonrecordingSyncFloat64Instrument{} + _ instrument.Float64UpDownCounter = nonrecordingSyncFloat64Instrument{} + _ instrument.Float64Histogram = nonrecordingSyncFloat64Instrument{} +) + +func (n nonrecordingSyncFloat64Instrument) Counter(string, ...instrument.Float64Option) (instrument.Float64Counter, error) { + return n, nil +} + +func (n nonrecordingSyncFloat64Instrument) UpDownCounter(string, ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) { + return n, nil +} + +func (n nonrecordingSyncFloat64Instrument) Histogram(string, ...instrument.Float64Option) (instrument.Float64Histogram, error) { + return n, nil +} + +func (nonrecordingSyncFloat64Instrument) Add(context.Context, float64, ...attribute.KeyValue) { + +} + +func (nonrecordingSyncFloat64Instrument) Record(context.Context, float64, ...attribute.KeyValue) { + +} + +type nonrecordingSyncInt64Instrument struct { + instrument.Synchronous +} + +var ( + _ instrument.Int64Counter = nonrecordingSyncInt64Instrument{} + _ instrument.Int64UpDownCounter = nonrecordingSyncInt64Instrument{} + _ instrument.Int64Histogram = nonrecordingSyncInt64Instrument{} +) + +func (n nonrecordingSyncInt64Instrument) Counter(string, ...instrument.Int64Option) (instrument.Int64Counter, error) { + return n, nil +} + +func (n nonrecordingSyncInt64Instrument) UpDownCounter(string, ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) { + return n, nil +} + +func (n 
nonrecordingSyncInt64Instrument) Histogram(string, ...instrument.Int64Option) (instrument.Int64Histogram, error) { + return n, nil +} + +func (nonrecordingSyncInt64Instrument) Add(context.Context, int64, ...attribute.KeyValue) { +} +func (nonrecordingSyncInt64Instrument) Record(context.Context, int64, ...attribute.KeyValue) { } diff --git a/vendor/go.opentelemetry.io/otel/metric/number/doc.go b/vendor/go.opentelemetry.io/otel/metric/number/doc.go deleted file mode 100644 index 0649ff875e70..000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/number/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package number provides a number abstraction for instruments that -either support int64 or float64 input values. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. -*/ -package number // import "go.opentelemetry.io/otel/metric/number" diff --git a/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go b/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go deleted file mode 100644 index 6288c7ea295f..000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by "stringer -type=Kind"; DO NOT EDIT. - -package number - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[Int64Kind-0] - _ = x[Float64Kind-1] -} - -const _Kind_name = "Int64KindFloat64Kind" - -var _Kind_index = [...]uint8{0, 9, 20} - -func (i Kind) String() string { - if i < 0 || i >= Kind(len(_Kind_index)-1) { - return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] -} diff --git a/vendor/go.opentelemetry.io/otel/metric/number/number.go b/vendor/go.opentelemetry.io/otel/metric/number/number.go deleted file mode 100644 index 3ec95e2014d7..000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/number/number.go +++ /dev/null @@ -1,538 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package number // import "go.opentelemetry.io/otel/metric/number" - -//go:generate stringer -type=Kind - -import ( - "fmt" - "math" - "sync/atomic" - - "go.opentelemetry.io/otel/internal" -) - -// Kind describes the data type of the Number. -type Kind int8 - -const ( - // Int64Kind means that the Number stores int64. - Int64Kind Kind = iota - // Float64Kind means that the Number stores float64. - Float64Kind -) - -// Zero returns a zero value for a given Kind -func (k Kind) Zero() Number { - switch k { - case Int64Kind: - return NewInt64Number(0) - case Float64Kind: - return NewFloat64Number(0.) - default: - return Number(0) - } -} - -// Minimum returns the minimum representable value -// for a given Kind -func (k Kind) Minimum() Number { - switch k { - case Int64Kind: - return NewInt64Number(math.MinInt64) - case Float64Kind: - return NewFloat64Number(-1. * math.MaxFloat64) - default: - return Number(0) - } -} - -// Maximum returns the maximum representable value -// for a given Kind -func (k Kind) Maximum() Number { - switch k { - case Int64Kind: - return NewInt64Number(math.MaxInt64) - case Float64Kind: - return NewFloat64Number(math.MaxFloat64) - default: - return Number(0) - } -} - -// Number represents either an integral or a floating point value. It -// needs to be accompanied with a source of Kind that describes -// the actual type of the value stored within Number. -type Number uint64 - -// - constructors - -// NewNumberFromRaw creates a new Number from a raw value. -func NewNumberFromRaw(r uint64) Number { - return Number(r) -} - -// NewInt64Number creates an integral Number. -func NewInt64Number(i int64) Number { - return NewNumberFromRaw(internal.Int64ToRaw(i)) -} - -// NewFloat64Number creates a floating point Number. -func NewFloat64Number(f float64) Number { - return NewNumberFromRaw(internal.Float64ToRaw(f)) -} - -// NewNumberSignChange returns a number with the same magnitude and -// the opposite sign. `kind` must describe the kind of number in `nn`. -func NewNumberSignChange(kind Kind, nn Number) Number { - switch kind { - case Int64Kind: - return NewInt64Number(-nn.AsInt64()) - case Float64Kind: - return NewFloat64Number(-nn.AsFloat64()) - } - return nn -} - -// - as x - -// AsNumber gets the Number. -func (n *Number) AsNumber() Number { - return *n -} - -// AsRaw gets the uninterpreted raw value. Might be useful for some -// atomic operations. -func (n *Number) AsRaw() uint64 { - return uint64(*n) -} - -// AsInt64 assumes that the value contains an int64 and returns it as -// such. -func (n *Number) AsInt64() int64 { - return internal.RawToInt64(n.AsRaw()) -} - -// AsFloat64 assumes that the measurement value contains a float64 and -// returns it as such. -func (n *Number) AsFloat64() float64 { - return internal.RawToFloat64(n.AsRaw()) -} - -// - as x atomic - -// AsNumberAtomic gets the Number atomically. -func (n *Number) AsNumberAtomic() Number { - return NewNumberFromRaw(n.AsRawAtomic()) -} - -// AsRawAtomic gets the uninterpreted raw value atomically. Might be -// useful for some atomic operations. -func (n *Number) AsRawAtomic() uint64 { - return atomic.LoadUint64(n.AsRawPtr()) -} - -// AsInt64Atomic assumes that the number contains an int64 and returns -// it as such atomically. -func (n *Number) AsInt64Atomic() int64 { - return atomic.LoadInt64(n.AsInt64Ptr()) -} - -// AsFloat64Atomic assumes that the measurement value contains a -// float64 and returns it as such atomically. 
-func (n *Number) AsFloat64Atomic() float64 { - return internal.RawToFloat64(n.AsRawAtomic()) -} - -// - as x ptr - -// AsRawPtr gets the pointer to the raw, uninterpreted raw -// value. Might be useful for some atomic operations. -func (n *Number) AsRawPtr() *uint64 { - return (*uint64)(n) -} - -// AsInt64Ptr assumes that the number contains an int64 and returns a -// pointer to it. -func (n *Number) AsInt64Ptr() *int64 { - return internal.RawPtrToInt64Ptr(n.AsRawPtr()) -} - -// AsFloat64Ptr assumes that the number contains a float64 and returns a -// pointer to it. -func (n *Number) AsFloat64Ptr() *float64 { - return internal.RawPtrToFloat64Ptr(n.AsRawPtr()) -} - -// - coerce - -// CoerceToInt64 casts the number to int64. May result in -// data/precision loss. -func (n *Number) CoerceToInt64(kind Kind) int64 { - switch kind { - case Int64Kind: - return n.AsInt64() - case Float64Kind: - return int64(n.AsFloat64()) - default: - // you get what you deserve - return 0 - } -} - -// CoerceToFloat64 casts the number to float64. May result in -// data/precision loss. -func (n *Number) CoerceToFloat64(kind Kind) float64 { - switch kind { - case Int64Kind: - return float64(n.AsInt64()) - case Float64Kind: - return n.AsFloat64() - default: - // you get what you deserve - return 0 - } -} - -// - set - -// SetNumber sets the number to the passed number. Both should be of -// the same kind. -func (n *Number) SetNumber(nn Number) { - *n.AsRawPtr() = nn.AsRaw() -} - -// SetRaw sets the number to the passed raw value. Both number and the -// raw number should represent the same kind. -func (n *Number) SetRaw(r uint64) { - *n.AsRawPtr() = r -} - -// SetInt64 assumes that the number contains an int64 and sets it to -// the passed value. -func (n *Number) SetInt64(i int64) { - *n.AsInt64Ptr() = i -} - -// SetFloat64 assumes that the number contains a float64 and sets it -// to the passed value. -func (n *Number) SetFloat64(f float64) { - *n.AsFloat64Ptr() = f -} - -// - set atomic - -// SetNumberAtomic sets the number to the passed number -// atomically. Both should be of the same kind. -func (n *Number) SetNumberAtomic(nn Number) { - atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw()) -} - -// SetRawAtomic sets the number to the passed raw value -// atomically. Both number and the raw number should represent the -// same kind. -func (n *Number) SetRawAtomic(r uint64) { - atomic.StoreUint64(n.AsRawPtr(), r) -} - -// SetInt64Atomic assumes that the number contains an int64 and sets -// it to the passed value atomically. -func (n *Number) SetInt64Atomic(i int64) { - atomic.StoreInt64(n.AsInt64Ptr(), i) -} - -// SetFloat64Atomic assumes that the number contains a float64 and -// sets it to the passed value atomically. -func (n *Number) SetFloat64Atomic(f float64) { - atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f)) -} - -// - swap - -// SwapNumber sets the number to the passed number and returns the old -// number. Both this number and the passed number should be of the -// same kind. -func (n *Number) SwapNumber(nn Number) Number { - old := *n - n.SetNumber(nn) - return old -} - -// SwapRaw sets the number to the passed raw value and returns the old -// raw value. Both number and the raw number should represent the same -// kind. -func (n *Number) SwapRaw(r uint64) uint64 { - old := n.AsRaw() - n.SetRaw(r) - return old -} - -// SwapInt64 assumes that the number contains an int64, sets it to the -// passed value and returns the old int64 value. 
-func (n *Number) SwapInt64(i int64) int64 { - old := n.AsInt64() - n.SetInt64(i) - return old -} - -// SwapFloat64 assumes that the number contains an float64, sets it to -// the passed value and returns the old float64 value. -func (n *Number) SwapFloat64(f float64) float64 { - old := n.AsFloat64() - n.SetFloat64(f) - return old -} - -// - swap atomic - -// SwapNumberAtomic sets the number to the passed number and returns -// the old number atomically. Both this number and the passed number -// should be of the same kind. -func (n *Number) SwapNumberAtomic(nn Number) Number { - return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw())) -} - -// SwapRawAtomic sets the number to the passed raw value and returns -// the old raw value atomically. Both number and the raw number should -// represent the same kind. -func (n *Number) SwapRawAtomic(r uint64) uint64 { - return atomic.SwapUint64(n.AsRawPtr(), r) -} - -// SwapInt64Atomic assumes that the number contains an int64, sets it -// to the passed value and returns the old int64 value atomically. -func (n *Number) SwapInt64Atomic(i int64) int64 { - return atomic.SwapInt64(n.AsInt64Ptr(), i) -} - -// SwapFloat64Atomic assumes that the number contains an float64, sets -// it to the passed value and returns the old float64 value -// atomically. -func (n *Number) SwapFloat64Atomic(f float64) float64 { - return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f))) -} - -// - add - -// AddNumber assumes that this and the passed number are of the passed -// kind and adds the passed number to this number. -func (n *Number) AddNumber(kind Kind, nn Number) { - switch kind { - case Int64Kind: - n.AddInt64(nn.AsInt64()) - case Float64Kind: - n.AddFloat64(nn.AsFloat64()) - } -} - -// AddRaw assumes that this number and the passed raw value are of the -// passed kind and adds the passed raw value to this number. -func (n *Number) AddRaw(kind Kind, r uint64) { - n.AddNumber(kind, NewNumberFromRaw(r)) -} - -// AddInt64 assumes that the number contains an int64 and adds the -// passed int64 to it. -func (n *Number) AddInt64(i int64) { - *n.AsInt64Ptr() += i -} - -// AddFloat64 assumes that the number contains a float64 and adds the -// passed float64 to it. -func (n *Number) AddFloat64(f float64) { - *n.AsFloat64Ptr() += f -} - -// - add atomic - -// AddNumberAtomic assumes that this and the passed number are of the -// passed kind and adds the passed number to this number atomically. -func (n *Number) AddNumberAtomic(kind Kind, nn Number) { - switch kind { - case Int64Kind: - n.AddInt64Atomic(nn.AsInt64()) - case Float64Kind: - n.AddFloat64Atomic(nn.AsFloat64()) - } -} - -// AddRawAtomic assumes that this number and the passed raw value are -// of the passed kind and adds the passed raw value to this number -// atomically. -func (n *Number) AddRawAtomic(kind Kind, r uint64) { - n.AddNumberAtomic(kind, NewNumberFromRaw(r)) -} - -// AddInt64Atomic assumes that the number contains an int64 and adds -// the passed int64 to it atomically. -func (n *Number) AddInt64Atomic(i int64) { - atomic.AddInt64(n.AsInt64Ptr(), i) -} - -// AddFloat64Atomic assumes that the number contains a float64 and -// adds the passed float64 to it atomically. -func (n *Number) AddFloat64Atomic(f float64) { - for { - o := n.AsFloat64Atomic() - if n.CompareAndSwapFloat64(o, o+f) { - break - } - } -} - -// - compare and swap (atomic only) - -// CompareAndSwapNumber does the atomic CAS operation on this -// number. 
This number and passed old and new numbers should be of the -// same kind. -func (n *Number) CompareAndSwapNumber(on, nn Number) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw()) -} - -// CompareAndSwapRaw does the atomic CAS operation on this -// number. This number and passed old and new raw values should be of -// the same kind. -func (n *Number) CompareAndSwapRaw(or, nr uint64) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr) -} - -// CompareAndSwapInt64 assumes that this number contains an int64 and -// does the atomic CAS operation on it. -func (n *Number) CompareAndSwapInt64(oi, ni int64) bool { - return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni) -} - -// CompareAndSwapFloat64 assumes that this number contains a float64 and -// does the atomic CAS operation on it. -func (n *Number) CompareAndSwapFloat64(of, nf float64) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf)) -} - -// - compare - -// CompareNumber compares two Numbers given their kind. Both numbers -// should have the same kind. This returns: -// 0 if the numbers are equal -// -1 if the subject `n` is less than the argument `nn` -// +1 if the subject `n` is greater than the argument `nn` -func (n *Number) CompareNumber(kind Kind, nn Number) int { - switch kind { - case Int64Kind: - return n.CompareInt64(nn.AsInt64()) - case Float64Kind: - return n.CompareFloat64(nn.AsFloat64()) - default: - // you get what you deserve - return 0 - } -} - -// CompareRaw compares two numbers, where one is input as a raw -// uint64, interpreting both values as a `kind` of number. -func (n *Number) CompareRaw(kind Kind, r uint64) int { - return n.CompareNumber(kind, NewNumberFromRaw(r)) -} - -// CompareInt64 assumes that the Number contains an int64 and performs -// a comparison between the value and the other value. It returns the -// typical result of the compare function: -1 if the value is less -// than the other, 0 if both are equal, 1 if the value is greater than -// the other. -func (n *Number) CompareInt64(i int64) int { - this := n.AsInt64() - if this < i { - return -1 - } else if this > i { - return 1 - } - return 0 -} - -// CompareFloat64 assumes that the Number contains a float64 and -// performs a comparison between the value and the other value. It -// returns the typical result of the compare function: -1 if the value -// is less than the other, 0 if both are equal, 1 if the value is -// greater than the other. -// -// Do not compare NaN values. -func (n *Number) CompareFloat64(f float64) int { - this := n.AsFloat64() - if this < f { - return -1 - } else if this > f { - return 1 - } - return 0 -} - -// - relations to zero - -// IsPositive returns true if the actual value is greater than zero. -func (n *Number) IsPositive(kind Kind) bool { - return n.compareWithZero(kind) > 0 -} - -// IsNegative returns true if the actual value is less than zero. -func (n *Number) IsNegative(kind Kind) bool { - return n.compareWithZero(kind) < 0 -} - -// IsZero returns true if the actual value is equal to zero. -func (n *Number) IsZero(kind Kind) bool { - return n.compareWithZero(kind) == 0 -} - -// - misc - -// Emit returns a string representation of the raw value of the -// Number. A %d is used for integral values, %f for floating point -// values. 
-func (n *Number) Emit(kind Kind) string { - switch kind { - case Int64Kind: - return fmt.Sprintf("%d", n.AsInt64()) - case Float64Kind: - return fmt.Sprintf("%f", n.AsFloat64()) - default: - return "" - } -} - -// AsInterface returns the number as an interface{}, typically used -// for Kind-correct JSON conversion. -func (n *Number) AsInterface(kind Kind) interface{} { - switch kind { - case Int64Kind: - return n.AsInt64() - case Float64Kind: - return n.AsFloat64() - default: - return math.NaN() - } -} - -// - private stuff - -func (n *Number) compareWithZero(kind Kind) int { - switch kind { - case Int64Kind: - return n.CompareInt64(0) - case Float64Kind: - return n.CompareFloat64(0.) - default: - // you get what you deserve - return 0 - } -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go deleted file mode 100644 index 14eb0532e45a..000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -import ( - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/unit" -) - -// Descriptor contains all the settings that describe an instrument, -// including its name, metric kind, number kind, and the configurable -// options. -type Descriptor struct { - name string - instrumentKind InstrumentKind - numberKind number.Kind - description string - unit unit.Unit -} - -// NewDescriptor returns a Descriptor with the given contents. -func NewDescriptor(name string, ikind InstrumentKind, nkind number.Kind, description string, unit unit.Unit) Descriptor { - return Descriptor{ - name: name, - instrumentKind: ikind, - numberKind: nkind, - description: description, - unit: unit, - } -} - -// Name returns the metric instrument's name. -func (d Descriptor) Name() string { - return d.name -} - -// InstrumentKind returns the specific kind of instrument. -func (d Descriptor) InstrumentKind() InstrumentKind { - return d.instrumentKind -} - -// Description provides a human-readable description of the metric -// instrument. -func (d Descriptor) Description() string { - return d.description -} - -// Unit describes the units of the metric instrument. Unitless -// metrics return the empty string. -func (d Descriptor) Unit() unit.Unit { - return d.unit -} - -// NumberKind returns whether this instrument is declared over int64, -// float64, or uint64 values. 
-func (d Descriptor) NumberKind() number.Kind { - return d.numberKind -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go deleted file mode 100644 index 64aa5ead123c..000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate stringer -type=InstrumentKind - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -// InstrumentKind describes the kind of instrument. -type InstrumentKind int8 - -const ( - // HistogramInstrumentKind indicates a Histogram instrument. - HistogramInstrumentKind InstrumentKind = iota - // GaugeObserverInstrumentKind indicates an GaugeObserver instrument. - GaugeObserverInstrumentKind - - // CounterInstrumentKind indicates a Counter instrument. - CounterInstrumentKind - // UpDownCounterInstrumentKind indicates a UpDownCounter instrument. - UpDownCounterInstrumentKind - - // CounterObserverInstrumentKind indicates a CounterObserver instrument. - CounterObserverInstrumentKind - // UpDownCounterObserverInstrumentKind indicates a UpDownCounterObserver - // instrument. - UpDownCounterObserverInstrumentKind -) - -// Synchronous returns whether this is a synchronous kind of instrument. -func (k InstrumentKind) Synchronous() bool { - switch k { - case CounterInstrumentKind, UpDownCounterInstrumentKind, HistogramInstrumentKind: - return true - } - return false -} - -// Asynchronous returns whether this is an asynchronous kind of instrument. -func (k InstrumentKind) Asynchronous() bool { - return !k.Synchronous() -} - -// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping). -func (k InstrumentKind) Adding() bool { - switch k { - case CounterInstrumentKind, UpDownCounterInstrumentKind, CounterObserverInstrumentKind, UpDownCounterObserverInstrumentKind: - return true - } - return false -} - -// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding). -func (k InstrumentKind) Grouping() bool { - return !k.Adding() -} - -// Monotonic returns whether this kind of instrument exposes a non-decreasing sum. -func (k InstrumentKind) Monotonic() bool { - switch k { - case CounterInstrumentKind, CounterObserverInstrumentKind: - return true - } - return false -} - -// PrecomputedSum returns whether this kind of instrument receives precomputed sums. -func (k InstrumentKind) PrecomputedSum() bool { - return k.Adding() && k.Asynchronous() -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go deleted file mode 100644 index 3a2e79d823ef..000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type=InstrumentKind"; DO NOT EDIT. 
- -package sdkapi - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[HistogramInstrumentKind-0] - _ = x[GaugeObserverInstrumentKind-1] - _ = x[CounterInstrumentKind-2] - _ = x[UpDownCounterInstrumentKind-3] - _ = x[CounterObserverInstrumentKind-4] - _ = x[UpDownCounterObserverInstrumentKind-5] -} - -const _InstrumentKind_name = "HistogramInstrumentKindGaugeObserverInstrumentKindCounterInstrumentKindUpDownCounterInstrumentKindCounterObserverInstrumentKindUpDownCounterObserverInstrumentKind" - -var _InstrumentKind_index = [...]uint8{0, 23, 50, 71, 98, 127, 162} - -func (i InstrumentKind) String() string { - if i < 0 || i >= InstrumentKind(len(_InstrumentKind_index)-1) { - return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go deleted file mode 100644 index f22895dae6fd..000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" -) - -type noopInstrument struct { - descriptor Descriptor -} -type noopSyncInstrument struct{ noopInstrument } -type noopAsyncInstrument struct{ noopInstrument } - -var _ SyncImpl = noopSyncInstrument{} -var _ AsyncImpl = noopAsyncInstrument{} - -// NewNoopSyncInstrument returns a No-op implementation of the -// synchronous instrument interface. -func NewNoopSyncInstrument() SyncImpl { - return noopSyncInstrument{ - noopInstrument{ - descriptor: Descriptor{ - instrumentKind: CounterInstrumentKind, - }, - }, - } -} - -// NewNoopAsyncInstrument returns a No-op implementation of the -// asynchronous instrument interface. 
-func NewNoopAsyncInstrument() AsyncImpl { - return noopAsyncInstrument{ - noopInstrument{ - descriptor: Descriptor{ - instrumentKind: CounterObserverInstrumentKind, - }, - }, - } -} - -func (noopInstrument) Implementation() interface{} { - return nil -} - -func (n noopInstrument) Descriptor() Descriptor { - return n.descriptor -} - -func (noopSyncInstrument) RecordOne(context.Context, number.Number, []attribute.KeyValue) { -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go deleted file mode 100644 index 36836364bdce..000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" -) - -// MeterImpl is the interface an SDK must implement to supply a Meter -// implementation. -type MeterImpl interface { - // RecordBatch atomically records a batch of measurements. - RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurement ...Measurement) - - // NewSyncInstrument returns a newly constructed - // synchronous instrument implementation or an error, should - // one occur. - NewSyncInstrument(descriptor Descriptor) (SyncImpl, error) - - // NewAsyncInstrument returns a newly constructed - // asynchronous instrument implementation or an error, should - // one occur. - NewAsyncInstrument( - descriptor Descriptor, - runner AsyncRunner, - ) (AsyncImpl, error) -} - -// InstrumentImpl is a common interface for synchronous and -// asynchronous instruments. -type InstrumentImpl interface { - // Implementation returns the underlying implementation of the - // instrument, which allows the implementation to gain access - // to its own representation especially from a `Measurement`. - Implementation() interface{} - - // Descriptor returns a copy of the instrument's Descriptor. - Descriptor() Descriptor -} - -// SyncImpl is the implementation-level interface to a generic -// synchronous instrument (e.g., Histogram and Counter instruments). -type SyncImpl interface { - InstrumentImpl - - // RecordOne captures a single synchronous metric event. - RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) -} - -// AsyncImpl is an implementation-level interface to an -// asynchronous instrument (e.g., Observer instruments). -type AsyncImpl interface { - InstrumentImpl -} - -// AsyncRunner is expected to convert into an AsyncSingleRunner or an -// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner -// does not satisfy one of these interfaces. -type AsyncRunner interface { - // AnyRunner is a non-exported method with no functional use - // other than to make this a non-empty interface. 
- AnyRunner() -} - -// AsyncSingleRunner is an interface implemented by single-observer -// callbacks. -type AsyncSingleRunner interface { - // Run accepts a single instrument and function for capturing - // observations of that instrument. Each call to the function - // receives one captured observation. (The function accepts - // multiple observations so the same implementation can be - // used for batch runners.) - Run(ctx context.Context, single AsyncImpl, capture func([]attribute.KeyValue, ...Observation)) - - AsyncRunner -} - -// AsyncBatchRunner is an interface implemented by batch-observer -// callbacks. -type AsyncBatchRunner interface { - // Run accepts a function for capturing observations of - // multiple instruments. - Run(ctx context.Context, capture func([]attribute.KeyValue, ...Observation)) - - AsyncRunner -} - -// NewMeasurement constructs a single observation, a binding between -// an asynchronous instrument and a number. -func NewMeasurement(instrument SyncImpl, number number.Number) Measurement { - return Measurement{ - instrument: instrument, - number: number, - } -} - -// Measurement is a low-level type used with synchronous instruments -// as a direct interface to the SDK via `RecordBatch`. -type Measurement struct { - // number needs to be aligned for 64-bit atomic operations. - number number.Number - instrument SyncImpl -} - -// SyncImpl returns the instrument that created this measurement. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Measurement) SyncImpl() SyncImpl { - return m.instrument -} - -// Number returns a number recorded in this measurement. -func (m Measurement) Number() number.Number { - return m.number -} - -// NewObservation constructs a single observation, a binding between -// an asynchronous instrument and a number. -func NewObservation(instrument AsyncImpl, number number.Number) Observation { - return Observation{ - instrument: instrument, - number: number, - } -} - -// Observation is a low-level type used with asynchronous instruments -// as a direct interface to the SDK via `BatchObserver`. -type Observation struct { - // number needs to be aligned for 64-bit atomic operations. - number number.Number - instrument AsyncImpl -} - -// AsyncImpl returns the instrument that created this observation. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Observation) AsyncImpl() AsyncImpl { - return m.instrument -} - -// Number returns a number recorded in this observation. -func (m Observation) Number() number.Number { - return m.number -} diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go b/vendor/go.opentelemetry.io/otel/metric/unit/unit.go index 4615eb16f699..647d77302de2 100644 --- a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go +++ b/vendor/go.opentelemetry.io/otel/metric/unit/unit.go @@ -14,6 +14,7 @@ package unit // import "go.opentelemetry.io/otel/metric/unit" +// Unit is a determinate standard quantity of measurement. type Unit string // Units defined by OpenTelemetry. 
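A minimal usage sketch of the reworked metric API whose no-op implementation is added in noop.go earlier in this patch, assuming the go.opentelemetry.io/otel/metric and go.opentelemetry.io/otel/attribute packages at the vendored version; the instrument name and attribute below are illustrative and not taken from this patch:

package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	// NewNoopMeter returns a Meter that satisfies the interface but records
	// nothing, backed by the nonrecording instruments defined in noop.go.
	meter := metric.NewNoopMeter()

	// Int64Counter returns an instrument.Int64Counter; with the no-op meter
	// the returned error is always nil.
	counter, err := meter.Int64Counter("requests.count")
	if err != nil {
		panic(err)
	}

	// Add is a no-op here; an SDK-provided Meter would record the value
	// together with the supplied attributes.
	counter.Add(context.Background(), 1, attribute.String("route", "/health"))
}

The no-op constructors return their receiver and implement Add, Record, and the observable variants as empty methods, which is what lets instrumented code run unchanged when no metrics SDK is configured.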
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go new file mode 100644 index 000000000000..b580eedeff7f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go @@ -0,0 +1,336 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/semconv/internal" + +import ( + "fmt" + "net" + "net/http" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +// SemanticConventions are the semantic convention values defined for a +// version of the OpenTelemetry specification. +type SemanticConventions struct { + EnduserIDKey attribute.Key + HTTPClientIPKey attribute.Key + HTTPFlavorKey attribute.Key + HTTPHostKey attribute.Key + HTTPMethodKey attribute.Key + HTTPRequestContentLengthKey attribute.Key + HTTPRouteKey attribute.Key + HTTPSchemeHTTP attribute.KeyValue + HTTPSchemeHTTPS attribute.KeyValue + HTTPServerNameKey attribute.Key + HTTPStatusCodeKey attribute.Key + HTTPTargetKey attribute.Key + HTTPURLKey attribute.Key + HTTPUserAgentKey attribute.Key + NetHostIPKey attribute.Key + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerIPKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetTransportIP attribute.KeyValue + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportUnix attribute.KeyValue +} + +// NetAttributesFromHTTPRequest generates attributes of the net +// namespace as specified by the OpenTelemetry specification for a +// span. The network parameter is a string that net.Dial function +// from standard library can understand. 
+func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + + switch network { + case "tcp", "tcp4", "tcp6": + attrs = append(attrs, sc.NetTransportTCP) + case "udp", "udp4", "udp6": + attrs = append(attrs, sc.NetTransportUDP) + case "ip", "ip4", "ip6": + attrs = append(attrs, sc.NetTransportIP) + case "unix", "unixgram", "unixpacket": + attrs = append(attrs, sc.NetTransportUnix) + default: + attrs = append(attrs, sc.NetTransportOther) + } + + peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) + if peerIP != "" { + attrs = append(attrs, sc.NetPeerIPKey.String(peerIP)) + } + if peerName != "" { + attrs = append(attrs, sc.NetPeerNameKey.String(peerName)) + } + if peerPort != 0 { + attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort)) + } + + hostIP, hostName, hostPort := "", "", 0 + for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { + hostIP, hostName, hostPort = hostIPNamePort(someHost) + if hostIP != "" || hostName != "" || hostPort != 0 { + break + } + } + if hostIP != "" { + attrs = append(attrs, sc.NetHostIPKey.String(hostIP)) + } + if hostName != "" { + attrs = append(attrs, sc.NetHostNameKey.String(hostName)) + } + if hostPort != 0 { + attrs = append(attrs, sc.NetHostPortKey.Int(hostPort)) + } + + return attrs +} + +// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. +// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized +// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the +// host portion will instead be returned in `name`. +func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { + var ( + hostPart, portPart string + parsedPort uint64 + err error + ) + if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { + hostPart, portPart = hostWithPort, "" + } + if parsedIP := net.ParseIP(hostPart); parsedIP != nil { + ip = parsedIP.String() + } else { + name = hostPart + } + if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { + port = int(parsedPort) + } + return +} + +// EndUserAttributesFromHTTPRequest generates attributes of the +// enduser namespace as specified by the OpenTelemetry specification +// for a span. +func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + if username, _, ok := request.BasicAuth(); ok { + return []attribute.KeyValue{sc.EnduserIDKey.String(username)} + } + return nil +} + +// HTTPClientAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the client side. +func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + + // remove any username/password info that may be in the URL + // before adding it to the attributes + userinfo := request.URL.User + request.URL.User = nil + + attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String())) + + // restore any username/password info that was removed + request.URL.User = userinfo + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) 
+} + +func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if ua := request.UserAgent(); ua != "" { + attrs = append(attrs, sc.HTTPUserAgentKey.String(ua)) + } + if request.ContentLength > 0 { + attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength)) + } + + return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) +} + +func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality + attrs := []attribute.KeyValue{} + + if request.TLS != nil { + attrs = append(attrs, sc.HTTPSchemeHTTPS) + } else { + attrs = append(attrs, sc.HTTPSchemeHTTP) + } + + if request.Host != "" { + attrs = append(attrs, sc.HTTPHostKey.String(request.Host)) + } else if request.URL != nil && request.URL.Host != "" { + attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host)) + } + + flavor := "" + if request.ProtoMajor == 1 { + flavor = fmt.Sprintf("1.%d", request.ProtoMinor) + } else if request.ProtoMajor == 2 { + flavor = "2" + } + if flavor != "" { + attrs = append(attrs, sc.HTTPFlavorKey.String(flavor)) + } + + if request.Method != "" { + attrs = append(attrs, sc.HTTPMethodKey.String(request.Method)) + } else { + attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet)) + } + + return attrs +} + +// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes +// to be used with server-side HTTP metrics. +func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if serverName != "" { + attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) + } + return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) +} + +// HTTPServerAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the server side. Currently, only basic authentication is +// supported. +func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{ + sc.HTTPTargetKey.String(request.RequestURI), + } + + if serverName != "" { + attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) + } + if route != "" { + attrs = append(attrs, sc.HTTPRouteKey.String(route)) + } + if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 { + if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 { + attrs = append(attrs, sc.HTTPClientIPKey.String(addresses[0])) + } + } + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) +} + +// HTTPAttributesFromHTTPStatusCode generates attributes of the http +// namespace as specified by the OpenTelemetry specification for a +// span. 
+func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { + attrs := []attribute.KeyValue{ + sc.HTTPStatusCodeKey.Int(code), + } + return attrs +} + +type codeRange struct { + fromInclusive int + toInclusive int +} + +func (r codeRange) contains(code int) bool { + return r.fromInclusive <= code && code <= r.toInclusive +} + +var validRangesPerCategory = map[int][]codeRange{ + 1: { + {http.StatusContinue, http.StatusEarlyHints}, + }, + 2: { + {http.StatusOK, http.StatusAlreadyReported}, + {http.StatusIMUsed, http.StatusIMUsed}, + }, + 3: { + {http.StatusMultipleChoices, http.StatusUseProxy}, + {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, + }, + 4: { + {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… + {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, + {http.StatusPreconditionRequired, http.StatusTooManyRequests}, + {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, + {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, + }, + 5: { + {http.StatusInternalServerError, http.StatusLoopDetected}, + {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, + }, +} + +// SpanStatusFromHTTPStatusCode generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { + spanCode, valid := validateHTTPStatusCode(code) + if !valid { + return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) + } + return spanCode, "" +} + +// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +// Exclude 4xx for SERVER to set the appropriate status. +func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { + spanCode, valid := validateHTTPStatusCode(code) + if !valid { + return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) + } + category := code / 100 + if spanKind == trace.SpanKindServer && category == 4 { + return codes.Unset, "" + } + return spanCode, "" +} + +// validateHTTPStatusCode validates the HTTP status code and returns +// corresponding span status code. If the `code` is not a valid HTTP status +// code, returns span status Error and false. +func validateHTTPStatusCode(code int) (codes.Code, bool) { + category := code / 100 + ranges, ok := validRangesPerCategory[category] + if !ok { + return codes.Error, false + } + ok = false + for _, crange := range ranges { + ok = crange.contains(code) + if ok { + break + } + } + if !ok { + return codes.Error, false + } + if category > 0 && category < 4 { + return codes.Unset, true + } + return codes.Error, true +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go index 9b430fac0c64..945c95a18ad5 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go @@ -15,16 +15,12 @@ package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" import ( - "fmt" - "net" "net/http" - "strconv" - "strings" - - "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/semconv/internal" + "go.opentelemetry.io/otel/trace" ) // HTTP scheme attributes. 
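As a rough illustration of how the re-exported v1.7.0 helpers shown above might be used after this refactor, here is a minimal, hypothetical server-side sketch; the tracer name "example.com/server", the server name "my-server", and the route "/items" are placeholders for this example only and are not part of the change.

package main

import (
	"net/http"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
	"go.opentelemetry.io/otel/trace"
)

// handler annotates a server span with the semconv attribute helpers.
func handler(w http.ResponseWriter, r *http.Request) {
	_, span := otel.Tracer("example.com/server").Start(r.Context(), r.Method,
		trace.WithSpanKind(trace.SpanKindServer))
	defer span.End()

	// The helpers now delegate to the shared internal implementation,
	// but their signatures and behavior are unchanged for callers.
	span.SetAttributes(semconv.NetAttributesFromHTTPRequest("tcp", r)...)
	span.SetAttributes(semconv.HTTPServerAttributesFromHTTPRequest("my-server", "/items", r)...)

	w.WriteHeader(http.StatusOK)

	// For server spans, 4xx responses map to an Unset status rather than Error.
	code, msg := semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(http.StatusOK, trace.SpanKindServer)
	span.SetStatus(code, msg)
}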
@@ -33,163 +29,60 @@ var ( HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) +var sc = &internal.SemanticConventions{ + EnduserIDKey: EnduserIDKey, + HTTPClientIPKey: HTTPClientIPKey, + HTTPFlavorKey: HTTPFlavorKey, + HTTPHostKey: HTTPHostKey, + HTTPMethodKey: HTTPMethodKey, + HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, + HTTPRouteKey: HTTPRouteKey, + HTTPSchemeHTTP: HTTPSchemeHTTP, + HTTPSchemeHTTPS: HTTPSchemeHTTPS, + HTTPServerNameKey: HTTPServerNameKey, + HTTPStatusCodeKey: HTTPStatusCodeKey, + HTTPTargetKey: HTTPTargetKey, + HTTPURLKey: HTTPURLKey, + HTTPUserAgentKey: HTTPUserAgentKey, + NetHostIPKey: NetHostIPKey, + NetHostNameKey: NetHostNameKey, + NetHostPortKey: NetHostPortKey, + NetPeerIPKey: NetPeerIPKey, + NetPeerNameKey: NetPeerNameKey, + NetPeerPortKey: NetPeerPortKey, + NetTransportIP: NetTransportIP, + NetTransportOther: NetTransportOther, + NetTransportTCP: NetTransportTCP, + NetTransportUDP: NetTransportUDP, + NetTransportUnix: NetTransportUnix, +} + // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - switch network { - case "tcp", "tcp4", "tcp6": - attrs = append(attrs, NetTransportTCP) - case "udp", "udp4", "udp6": - attrs = append(attrs, NetTransportUDP) - case "ip", "ip4", "ip6": - attrs = append(attrs, NetTransportIP) - case "unix", "unixgram", "unixpacket": - attrs = append(attrs, NetTransportUnix) - default: - attrs = append(attrs, NetTransportOther) - } - - peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) - if peerIP != "" { - attrs = append(attrs, NetPeerIPKey.String(peerIP)) - } - if peerName != "" { - attrs = append(attrs, NetPeerNameKey.String(peerName)) - } - if peerPort != 0 { - attrs = append(attrs, NetPeerPortKey.Int(peerPort)) - } - - hostIP, hostName, hostPort := "", "", 0 - for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { - hostIP, hostName, hostPort = hostIPNamePort(someHost) - if hostIP != "" || hostName != "" || hostPort != 0 { - break - } - } - if hostIP != "" { - attrs = append(attrs, NetHostIPKey.String(hostIP)) - } - if hostName != "" { - attrs = append(attrs, NetHostNameKey.String(hostName)) - } - if hostPort != 0 { - attrs = append(attrs, NetHostPortKey.Int(hostPort)) - } - - return attrs -} - -// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. -// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized -// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the -// host portion will instead be returned in `name`. 
-func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { - var ( - hostPart, portPart string - parsedPort uint64 - err error - ) - if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { - hostPart, portPart = hostWithPort, "" - } - if parsedIP := net.ParseIP(hostPart); parsedIP != nil { - ip = parsedIP.String() - } else { - name = hostPart - } - if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { - port = int(parsedPort) - } - return + return sc.NetAttributesFromHTTPRequest(network, request) } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - if username, _, ok := request.BasicAuth(); ok { - return []attribute.KeyValue{EnduserIDKey.String(username)} - } - return nil + return sc.EndUserAttributesFromHTTPRequest(request) } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - if request.Method != "" { - attrs = append(attrs, HTTPMethodKey.String(request.Method)) - } else { - attrs = append(attrs, HTTPMethodKey.String(http.MethodGet)) - } - - // remove any username/password info that may be in the URL - // before adding it to the attributes - userinfo := request.URL.User - request.URL.User = nil - - attrs = append(attrs, HTTPURLKey.String(request.URL.String())) - - // restore any username/password info that was removed - request.URL.User = userinfo - - return append(attrs, httpCommonAttributesFromHTTPRequest(request)...) -} - -func httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if ua := request.UserAgent(); ua != "" { - attrs = append(attrs, HTTPUserAgentKey.String(ua)) - } - if request.ContentLength > 0 { - attrs = append(attrs, HTTPRequestContentLengthKey.Int64(request.ContentLength)) - } - - return append(attrs, httpBasicAttributesFromHTTPRequest(request)...) -} - -func httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality - attrs := []attribute.KeyValue{} - - if request.TLS != nil { - attrs = append(attrs, HTTPSchemeHTTPS) - } else { - attrs = append(attrs, HTTPSchemeHTTP) - } - - if request.Host != "" { - attrs = append(attrs, HTTPHostKey.String(request.Host)) - } - - flavor := "" - if request.ProtoMajor == 1 { - flavor = fmt.Sprintf("1.%d", request.ProtoMinor) - } else if request.ProtoMajor == 2 { - flavor = "2" - } - if flavor != "" { - attrs = append(attrs, HTTPFlavorKey.String(flavor)) - } - - return attrs + return sc.HTTPClientAttributesFromHTTPRequest(request) } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if serverName != "" { - attrs = append(attrs, HTTPServerNameKey.String(serverName)) - } - return append(attrs, httpBasicAttributesFromHTTPRequest(request)...) 
+ return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) } // HTTPServerAttributesFromHTTPRequest generates attributes of the @@ -197,116 +90,25 @@ func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http. // a span on the server side. Currently, only basic authentication is // supported. func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - HTTPMethodKey.String(request.Method), - HTTPTargetKey.String(request.RequestURI), - } - - if serverName != "" { - attrs = append(attrs, HTTPServerNameKey.String(serverName)) - } - if route != "" { - attrs = append(attrs, HTTPRouteKey.String(route)) - } - if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 { - if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 { - attrs = append(attrs, HTTPClientIPKey.String(addresses[0])) - } - } - - return append(attrs, httpCommonAttributesFromHTTPRequest(request)...) + return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - HTTPStatusCodeKey.Int(code), - } - return attrs -} - -type codeRange struct { - fromInclusive int - toInclusive int -} - -func (r codeRange) contains(code int) bool { - return r.fromInclusive <= code && code <= r.toInclusive -} - -var validRangesPerCategory = map[int][]codeRange{ - 1: { - {http.StatusContinue, http.StatusEarlyHints}, - }, - 2: { - {http.StatusOK, http.StatusAlreadyReported}, - {http.StatusIMUsed, http.StatusIMUsed}, - }, - 3: { - {http.StatusMultipleChoices, http.StatusUseProxy}, - {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, - }, - 4: { - {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… - {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, - {http.StatusPreconditionRequired, http.StatusTooManyRequests}, - {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, - {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, - }, - 5: { - {http.StatusInternalServerError, http.StatusLoopDetected}, - {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, - }, + return sc.HTTPAttributesFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - return spanCode, "" + return internal.SpanStatusFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message // as specified by the OpenTelemetry specification for a span. // Exclude 4xx for SERVER to set the appropriate status. 
func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - category := code / 100 - if spanKind == trace.SpanKindServer && category == 4 { - return codes.Unset, "" - } - return spanCode, "" -} - -// Validates the HTTP status code and returns corresponding span status code. -// If the `code` is not a valid HTTP status code, returns span status Error -// and false. -func validateHTTPStatusCode(code int) (codes.Code, bool) { - category := code / 100 - ranges, ok := validRangesPerCategory[category] - if !ok { - return codes.Error, false - } - ok = false - for _, crange := range ranges { - ok = crange.contains(code) - if ok { - break - } - } - if !ok { - return codes.Error, false - } - if category > 0 && category < 4 { - return codes.Unset, true - } - return codes.Error, true + return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go index ec8b463d98e2..64b6c46a58db 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go @@ -17,4 +17,4 @@ package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/v1.7.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.7.0" diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go index 28b4f5e4d821..caf7249de859 100644 --- a/vendor/go.opentelemetry.io/otel/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -31,9 +31,12 @@ func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { // If none is registered then an instance of NoopTracerProvider is returned. // // Use the trace provider to create a named tracer. E.g. -// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// // or -// tracer := otel.Tracer("example.com/foo") +// +// tracer := otel.Tracer("example.com/foo") func GetTracerProvider() trace.TracerProvider { return global.TracerProvider() } diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index bcc333e04e2c..f058cc781e00 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -124,7 +124,7 @@ func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { } // SpanStartOption applies an option to a SpanConfig. These options are applicable -// only when the span is created +// only when the span is created. type SpanStartOption interface { applySpanStart(SpanConfig) SpanConfig } diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index 391417718f5a..ab0346f9664a 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -17,7 +17,7 @@ Package trace provides an implementation of the tracing part of the OpenTelemetry API. 
To participate in distributed traces a Span needs to be created for the -operation being performed as part of a traced workflow. It its simplest form: +operation being performed as part of a traced workflow. In its simplest form: var tracer trace.Tracer diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index ad9a9fc5be64..73950f207789 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -85,5 +85,5 @@ func (noopSpan) AddEvent(string, ...EventOption) {} // SetName does nothing. func (noopSpan) SetName(string) {} -// TracerProvider returns a no-op TracerProvider +// TracerProvider returns a no-op TracerProvider. func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 0923ceb98d57..4aa94f79f46a 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -63,7 +63,7 @@ func (t TraceID) MarshalJSON() ([]byte, error) { return json.Marshal(t.String()) } -// String returns the hex string representation form of a TraceID +// String returns the hex string representation form of a TraceID. func (t TraceID) String() string { return hex.EncodeToString(t[:]) } @@ -86,7 +86,7 @@ func (s SpanID) MarshalJSON() ([]byte, error) { return json.Marshal(s.String()) } -// String returns the hex string representation form of a SpanID +// String returns the hex string representation form of a SpanID. func (s SpanID) String() string { return hex.EncodeToString(s[:]) } @@ -151,7 +151,7 @@ func decodeHex(h string, b []byte) error { return nil } -// TraceFlags contains flags that can be set on a SpanContext +// TraceFlags contains flags that can be set on a SpanContext. type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. // IsSampled returns if the sampling bit is set in the TraceFlags. @@ -160,7 +160,7 @@ func (tf TraceFlags) IsSampled() bool { } // WithSampled sets the sampling bit in a new copy of the TraceFlags. -func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { +func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. if sampled { return tf | FlagsSampled } @@ -174,7 +174,7 @@ func (tf TraceFlags) MarshalJSON() ([]byte, error) { return json.Marshal(tf.String()) } -// String returns the hex string representation form of TraceFlags +// String returns the hex string representation form of TraceFlags. func (tf TraceFlags) String() string { return hex.EncodeToString([]byte{byte(tf)}[:]) } @@ -364,8 +364,9 @@ type Span interface { SpanContext() SpanContext // SetStatus sets the status of the Span in the form of a code and a - // description, overriding previous values set. The description is only - // included in a status when the code is for an error. + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. SetStatus(code codes.Code, description string) // SetName sets the Span name. @@ -386,16 +387,16 @@ type Span interface { // // For example, a Link is used in the following situations: // -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. 
Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an in incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. type Link struct { // SpanContext of the linked Span. SpanContext SpanContext @@ -503,17 +504,48 @@ type Tracer interface { Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) } -// TracerProvider provides access to instrumentation Tracers. +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). // // Warning: methods may be added to this interface in minor releases. type TracerProvider interface { - // Tracer creates an implementation of the Tracer interface. - // The instrumentationName must be the name of the library providing - // instrumentation. This name may be the same as the instrumented code - // only if that code provides built-in instrumentation. If the - // instrumentationName is empty, then a implementation defined default - // name will be used instead. + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). 
If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. // - // This method must be concurrency safe. - Tracer(instrumentationName string, opts ...TracerOption) Tracer + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer } diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 86fc62c1d8f2..ca68a82e5f73 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -21,7 +21,7 @@ import ( "strings" ) -var ( +const ( maxListMembers = 32 listDelimiter = "," @@ -32,10 +32,6 @@ var ( withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` - keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) - valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) - memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) - errInvalidKey errorConst = "invalid tracestate key" errInvalidValue errorConst = "invalid tracestate value" errInvalidMember errorConst = "invalid tracestate list-member" @@ -43,6 +39,12 @@ var ( errDuplicate errorConst = "duplicate list-member in tracestate" ) +var ( + keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) + valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) + memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) +) + type member struct { Key string Value string @@ -68,7 +70,6 @@ func parseMember(m string) (member, error) { Key: matches[1], Value: matches[4], }, nil - } // String encodes member into a string compliant with the W3C Trace Context @@ -171,7 +172,8 @@ func (ts TraceState) Get(key string) string { // specification an error is returned with the original TraceState. // // If adding a new list-member means the TraceState would have more members -// than is allowed an error is returned instead with the original TraceState. +// then is allowed, the new list-member will be inserted and the right-most +// list-member will be dropped in the returned TraceState. func (ts TraceState) Insert(key, value string) (TraceState, error) { m, err := newMember(key, value) if err != nil { @@ -179,17 +181,10 @@ func (ts TraceState) Insert(key, value string) (TraceState, error) { } cTS := ts.Delete(key) - if cTS.Len()+1 > maxListMembers { - // TODO (MrAlias): When the second version of the Trace Context - // specification is published this needs to not return an error. - // Instead it should drop the "right-most" member and insert the new - // member at the front. 
- // - // https://github.com/w3c/trace-context/pull/448 - return ts, fmt.Errorf("failed to insert: %w", errMemberNumber) + if cTS.Len()+1 <= maxListMembers { + cTS.list = append(cTS.list, member{}) } - - cTS.list = append(cTS.list, member{}) + // When the number of members exceeds capacity, drop the "right-most". copy(cTS.list[1:], cTS.list) cTS.list[0] = m diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index a09bcbb5e8f2..bda8f7cbfae2 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.4.1" + return "1.12.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 3f06f2993467..66f456222152 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,7 +14,7 @@ module-sets: stable-v1: - version: v1.4.1 + version: v1.12.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing @@ -34,27 +34,23 @@ module-sets: - go.opentelemetry.io/otel/trace - go.opentelemetry.io/otel/sdk experimental-metrics: - version: v0.27.0 + version: v0.35.0 modules: + - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/otlp/otlpmetric - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/prometheus - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric - - go.opentelemetry.io/otel/internal/metric - go.opentelemetry.io/otel/metric - - go.opentelemetry.io/otel/sdk/export/metric - go.opentelemetry.io/otel/sdk/metric + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - go.opentelemetry.io/otel/example/view experimental-schema: - version: v0.0.2 + version: v0.0.3 modules: - go.opentelemetry.io/otel/schema - bridge: - version: v0.27.1 - modules: - - go.opentelemetry.io/otel/bridge/opencensus - - go.opentelemetry.io/otel/bridge/opencensus/test - - go.opentelemetry.io/otel/example/opencensus excluded-modules: - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md index 38f564e2b36a..5fe03f21bd3d 100644 --- a/vendor/go.uber.org/atomic/CHANGELOG.md +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -4,6 +4,23 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.10.0] - 2022-08-11 +### Added +- Add `atomic.Float32` type for atomic operations on `float32`. +- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`, + and `atomic.Value`. +- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any + type. This is present only for Go 1.18 or higher, and is a drop-in for + replacement for the standard library's `sync/atomic.Pointer` type. + +### Changed +- Deprecate `CAS` methods on all types in favor of corresponding + `CompareAndSwap` methods. + +Thanks to @eNV25 and @icpd for their contributions to this release. 
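A small sketch of the additions this changelog entry lists (the Float32 wrapper, the CompareAndSwap methods that supersede CAS, and the generic Pointer[T]); the values and the config type below are made up purely for illustration.

package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	// New atomic.Float32 wrapper.
	f := atomic.NewFloat32(1.5)
	f.Add(2.5)

	// CAS is deprecated in favour of CompareAndSwap on all types.
	n := atomic.NewInt64(10)
	swapped := n.CompareAndSwap(10, 11)

	// String gains Swap and CompareAndSwap.
	s := atomic.NewString("old")
	previous := s.Swap("new")

	// Generic Pointer[T] requires Go 1.18 or newer.
	type config struct{ Name string }
	p := atomic.NewPointer(&config{Name: "a"})
	p.Store(&config{Name: "b"})

	fmt.Println(f.Load(), swapped, previous, p.Load().Name) // 4 true old b
}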
+ +[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0 + ## [1.9.0] - 2021-07-15 ### Added - Add `Float64.Swap` to match int atomic operations. diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go index 209df7bbcd22..dfa2085f491d 100644 --- a/vendor/go.uber.org/atomic/bool.go +++ b/vendor/go.uber.org/atomic/bool.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -55,8 +55,15 @@ func (x *Bool) Store(val bool) { } // CAS is an atomic compare-and-swap for bool values. +// +// Deprecated: Use CompareAndSwap. func (x *Bool) CAS(old, new bool) (swapped bool) { - return x.v.CAS(boolToInt(old), boolToInt(new)) + return x.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for bool values. +func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) { + return x.v.CompareAndSwap(boolToInt(old), boolToInt(new)) } // Swap atomically stores the given bool and returns the old diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go index 207594f5e806..6f4157445cfb 100644 --- a/vendor/go.uber.org/atomic/duration.go +++ b/vendor/go.uber.org/atomic/duration.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -56,8 +56,15 @@ func (x *Duration) Store(val time.Duration) { } // CAS is an atomic compare-and-swap for time.Duration values. +// +// Deprecated: Use CompareAndSwap. func (x *Duration) CAS(old, new time.Duration) (swapped bool) { - return x.v.CAS(int64(old), int64(new)) + return x.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for time.Duration values. +func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) { + return x.v.CompareAndSwap(int64(old), int64(new)) } // Swap atomically stores the given time.Duration and returns the old diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go index 3be19c35ee75..27b23ea16282 100644 --- a/vendor/go.uber.org/atomic/error.go +++ b/vendor/go.uber.org/atomic/error.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -49,3 +49,14 @@ func (x *Error) Load() error { func (x *Error) Store(val error) { x.v.Store(packError(val)) } + +// CompareAndSwap is an atomic compare-and-swap for error values. +func (x *Error) CompareAndSwap(old, new error) (swapped bool) { + return x.v.CompareAndSwap(packError(old), packError(new)) +} + +// Swap atomically stores the given error and returns the old +// value. 
+func (x *Error) Swap(val error) (old error) { + return unpackError(x.v.Swap(packError(val))) +} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go index ffe0be21cb01..d31fb633bb63 100644 --- a/vendor/go.uber.org/atomic/error_ext.go +++ b/vendor/go.uber.org/atomic/error_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -23,7 +23,7 @@ package atomic // atomic.Value panics on nil inputs, or if the underlying type changes. // Stabilize by always storing a custom struct that we control. -//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go +//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go type packedError struct{ Value error } diff --git a/vendor/go.uber.org/atomic/float32.go b/vendor/go.uber.org/atomic/float32.go new file mode 100644 index 000000000000..5d535a6d2acf --- /dev/null +++ b/vendor/go.uber.org/atomic/float32.go @@ -0,0 +1,77 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float32 is an atomic type-safe wrapper for float32 values. +type Float32 struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroFloat32 float32 + +// NewFloat32 creates a new Float32. +func NewFloat32(val float32) *Float32 { + x := &Float32{} + if val != _zeroFloat32 { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped float32. +func (x *Float32) Load() float32 { + return math.Float32frombits(x.v.Load()) +} + +// Store atomically stores the passed float32. +func (x *Float32) Store(val float32) { + x.v.Store(math.Float32bits(val)) +} + +// Swap atomically stores the given float32 and returns the old +// value. +func (x *Float32) Swap(val float32) (old float32) { + return math.Float32frombits(x.v.Swap(math.Float32bits(val))) +} + +// MarshalJSON encodes the wrapped float32 into JSON. +func (x *Float32) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float32 from JSON. 
+func (x *Float32) UnmarshalJSON(b []byte) error { + var v float32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/float32_ext.go b/vendor/go.uber.org/atomic/float32_ext.go new file mode 100644 index 000000000000..b0cd8d9c820a --- /dev/null +++ b/vendor/go.uber.org/atomic/float32_ext.go @@ -0,0 +1,76 @@ +// Copyright (c) 2020-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "math" + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go + +// Add atomically adds to the wrapped float32 and returns the new value. +func (f *Float32) Add(delta float32) float32 { + for { + old := f.Load() + new := old + delta + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float32 and returns the new value. +func (f *Float32) Sub(delta float32) float32 { + return f.Add(-delta) +} + +// CAS is an atomic compare-and-swap for float32 values. +// +// Deprecated: Use CompareAndSwap +func (f *Float32) CAS(old, new float32) (swapped bool) { + return f.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for float32 values. +// +// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators +// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN. +// This avoids typical CompareAndSwap loops from blocking forever, e.g., +// +// for { +// old := atom.Load() +// new = f(old) +// if atom.CompareAndSwap(old, new) { +// break +// } +// } +// +// If CompareAndSwap did not match NaN to match, then the above would loop forever. +func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) { + return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new)) +} + +// String encodes the wrapped value as a string. +func (f *Float32) String() string { + // 'g' is the behavior for floats with %v. + return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32) +} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go index 8a1367184721..11d5189a5f29 100644 --- a/vendor/go.uber.org/atomic/float64.go +++ b/vendor/go.uber.org/atomic/float64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. 
+// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go index df36b0107f0b..48c52b0abf66 100644 --- a/vendor/go.uber.org/atomic/float64_ext.go +++ b/vendor/go.uber.org/atomic/float64_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -45,21 +45,28 @@ func (f *Float64) Sub(delta float64) float64 { // CAS is an atomic compare-and-swap for float64 values. // -// Note: CAS handles NaN incorrectly. NaN != NaN using Go's inbuilt operators -// but CAS allows a stored NaN to compare equal to a passed in NaN. -// This avoids typical CAS loops from blocking forever, e.g., +// Deprecated: Use CompareAndSwap +func (f *Float64) CAS(old, new float64) (swapped bool) { + return f.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for float64 values. // -// for { -// old := atom.Load() -// new = f(old) -// if atom.CAS(old, new) { -// break -// } -// } +// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators +// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN. +// This avoids typical CompareAndSwap loops from blocking forever, e.g., // -// If CAS did not match NaN to match, then the above would loop forever. -func (f *Float64) CAS(old, new float64) (swapped bool) { - return f.v.CAS(math.Float64bits(old), math.Float64bits(new)) +// for { +// old := atom.Load() +// new = f(old) +// if atom.CompareAndSwap(old, new) { +// break +// } +// } +// +// If CompareAndSwap did not match NaN to match, then the above would loop forever. +func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) { + return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new)) } // String encodes the wrapped value as a string. diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go index 640ea36a175f..b9a68f42ca84 100644 --- a/vendor/go.uber.org/atomic/int32.go +++ b/vendor/go.uber.org/atomic/int32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Int32) Dec() int32 { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Int32) CAS(old, new int32) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) { return atomic.CompareAndSwapInt32(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go index 9ab66b98091f..78d260976fcf 100644 --- a/vendor/go.uber.org/atomic/int64.go +++ b/vendor/go.uber.org/atomic/int64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Int64) Dec() int64 { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Int64) CAS(old, new int64) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) { return atomic.CompareAndSwapInt64(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go index a8201cb4a18e..54b74174abdf 100644 --- a/vendor/go.uber.org/atomic/nocmp.go +++ b/vendor/go.uber.org/atomic/nocmp.go @@ -23,13 +23,13 @@ package atomic // nocmp is an uncomparable struct. Embed this inside another struct to make // it uncomparable. // -// type Foo struct { -// nocmp -// // ... -// } +// type Foo struct { +// nocmp +// // ... +// } // // This DOES NOT: // -// - Disallow shallow copies of structs -// - Disallow comparison of pointers to uncomparable structs +// - Disallow shallow copies of structs +// - Disallow comparison of pointers to uncomparable structs type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/pointer_go118.go b/vendor/go.uber.org/atomic/pointer_go118.go new file mode 100644 index 000000000000..e0f47dba4686 --- /dev/null +++ b/vendor/go.uber.org/atomic/pointer_go118.go @@ -0,0 +1,60 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.18 && !go1.19 +// +build go1.18,!go1.19 + +package atomic + +import "unsafe" + +type Pointer[T any] struct { + _ nocmp // disallow non-atomic comparison + p UnsafePointer +} + +// NewPointer creates a new Pointer. +func NewPointer[T any](v *T) *Pointer[T] { + var p Pointer[T] + if v != nil { + p.p.Store(unsafe.Pointer(v)) + } + return &p +} + +// Load atomically loads the wrapped value. +func (p *Pointer[T]) Load() *T { + return (*T)(p.p.Load()) +} + +// Store atomically stores the passed value. +func (p *Pointer[T]) Store(val *T) { + p.p.Store(unsafe.Pointer(val)) +} + +// Swap atomically swaps the wrapped pointer and returns the old value. +func (p *Pointer[T]) Swap(val *T) (old *T) { + return (*T)(p.p.Swap(unsafe.Pointer(val))) +} + +// CompareAndSwap is an atomic compare-and-swap. 
+func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { + return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) +} diff --git a/vendor/go.uber.org/atomic/pointer_go119.go b/vendor/go.uber.org/atomic/pointer_go119.go new file mode 100644 index 000000000000..6726f17ad64f --- /dev/null +++ b/vendor/go.uber.org/atomic/pointer_go119.go @@ -0,0 +1,61 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.19 +// +build go1.19 + +package atomic + +import "sync/atomic" + +// Pointer is an atomic pointer of type *T. +type Pointer[T any] struct { + _ nocmp // disallow non-atomic comparison + p atomic.Pointer[T] +} + +// NewPointer creates a new Pointer. +func NewPointer[T any](v *T) *Pointer[T] { + var p Pointer[T] + if v != nil { + p.p.Store(v) + } + return &p +} + +// Load atomically loads the wrapped value. +func (p *Pointer[T]) Load() *T { + return p.p.Load() +} + +// Store atomically stores the passed value. +func (p *Pointer[T]) Store(val *T) { + p.p.Store(val) +} + +// Swap atomically swaps the wrapped pointer and returns the old value. +func (p *Pointer[T]) Swap(val *T) (old *T) { + return p.p.Swap(val) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { + return p.p.CompareAndSwap(old, new) +} diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go index 80df93d0949d..c4bea70f4ddf 100644 --- a/vendor/go.uber.org/atomic/string.go +++ b/vendor/go.uber.org/atomic/string.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -52,3 +52,14 @@ func (x *String) Load() string { func (x *String) Store(val string) { x.v.Store(val) } + +// CompareAndSwap is an atomic compare-and-swap for string values. +func (x *String) CompareAndSwap(old, new string) (swapped bool) { + return x.v.CompareAndSwap(old, new) +} + +// Swap atomically stores the given string and returns the old +// value. 
+func (x *String) Swap(val string) (old string) { + return x.v.Swap(val).(string) +} diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go index 83d92edafc71..1f63dfd5b978 100644 --- a/vendor/go.uber.org/atomic/string_ext.go +++ b/vendor/go.uber.org/atomic/string_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,9 +20,7 @@ package atomic -//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go -// Note: No Swap as String wraps Value, which wraps the stdlib sync/atomic.Value which -// only supports Swap as of go1.17: https://github.com/golang/go/issues/39351 +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -compareandswap -swap -file=string.go // String returns the wrapped value. func (s *String) String() string { diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go index 33460fc37eae..1660feb14268 100644 --- a/vendor/go.uber.org/atomic/time.go +++ b/vendor/go.uber.org/atomic/time.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go index 7859a9cc3b5b..d6f04a96dc38 100644 --- a/vendor/go.uber.org/atomic/uint32.go +++ b/vendor/go.uber.org/atomic/uint32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Uint32) Dec() uint32 { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Uint32) CAS(old, new uint32) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) { return atomic.CompareAndSwapUint32(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go index 2f2a7db6380f..2574bdd5ec4a 100644 --- a/vendor/go.uber.org/atomic/uint64.go +++ b/vendor/go.uber.org/atomic/uint64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Uint64) Dec() uint64 { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Uint64) CAS(old, new uint64) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. 
+func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) { return atomic.CompareAndSwapUint64(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go index ecf7a77273a1..81b275a7ad5d 100644 --- a/vendor/go.uber.org/atomic/uintptr.go +++ b/vendor/go.uber.org/atomic/uintptr.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Uintptr) Dec() uintptr { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Uintptr) CAS(old, new uintptr) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) { return atomic.CompareAndSwapUintptr(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go index 169f793dcf39..34868baf6a85 100644 --- a/vendor/go.uber.org/atomic/unsafe_pointer.go +++ b/vendor/go.uber.org/atomic/unsafe_pointer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021 Uber Technologies, Inc. +// Copyright (c) 2021-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -53,6 +53,13 @@ func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) { + return p.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) { return atomic.CompareAndSwapPointer(&p.v, old, new) } diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go index 671f3a382475..52caedb9a58f 100644 --- a/vendor/go.uber.org/atomic/value.go +++ b/vendor/go.uber.org/atomic/value.go @@ -25,7 +25,7 @@ import "sync/atomic" // Value shadows the type of the same name from sync/atomic // https://godoc.org/sync/atomic#Value type Value struct { - atomic.Value - _ nocmp // disallow non-atomic comparison + + atomic.Value } diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md index b0814e7c9bd5..d2c8aadaf0e4 100644 --- a/vendor/go.uber.org/multierr/CHANGELOG.md +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -1,6 +1,20 @@ Releases ======== +v1.9.0 (2022-12-12) +=================== + +- Add `AppendFunc` that allow passsing functions to similar to + `AppendInvoke`. + +- Bump up yaml.v3 dependency to 3.0.1. + +v1.8.0 (2022-02-28) +=================== + +- `Combine`: perform zero allocations when there are no errors. + + v1.7.0 (2021-05-06) =================== diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go index faa0a059464e..cdd91ae56d92 100644 --- a/vendor/go.uber.org/multierr/error.go +++ b/vendor/go.uber.org/multierr/error.go @@ -20,106 +20,109 @@ // Package multierr allows combining one or more errors together. // -// Overview +// # Overview // // Errors can be combined with the use of the Combine function. 
// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// conn.Close(), -// ) +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) // // If only two errors are being combined, the Append function may be used // instead. // -// err = multierr.Append(reader.Close(), writer.Close()) +// err = multierr.Append(reader.Close(), writer.Close()) // // The underlying list of errors for a returned error object may be retrieved // with the Errors function. // -// errors := multierr.Errors(err) -// if len(errors) > 0 { -// fmt.Println("The following errors occurred:", errors) -// } +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:", errors) +// } // -// Appending from a loop +// # Appending from a loop // // You sometimes need to append into an error from a loop. // -// var err error -// for _, item := range items { -// err = multierr.Append(err, process(item)) -// } +// var err error +// for _, item := range items { +// err = multierr.Append(err, process(item)) +// } // // Cases like this may require knowledge of whether an individual instance // failed. This usually requires introduction of a new variable. // -// var err error -// for _, item := range items { -// if perr := process(item); perr != nil { -// log.Warn("skipping item", item) -// err = multierr.Append(err, perr) -// } -// } +// var err error +// for _, item := range items { +// if perr := process(item); perr != nil { +// log.Warn("skipping item", item) +// err = multierr.Append(err, perr) +// } +// } // // multierr includes AppendInto to simplify cases like this. // -// var err error -// for _, item := range items { -// if multierr.AppendInto(&err, process(item)) { -// log.Warn("skipping item", item) -// } -// } +// var err error +// for _, item := range items { +// if multierr.AppendInto(&err, process(item)) { +// log.Warn("skipping item", item) +// } +// } // // This will append the error into the err variable, and return true if that // individual error was non-nil. // -// See AppendInto for more information. +// See [AppendInto] for more information. // -// Deferred Functions +// # Deferred Functions // // Go makes it possible to modify the return value of a function in a defer // block if the function was using named returns. This makes it possible to // record resource cleanup failures from deferred blocks. // -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer func() { -// err = multierr.Append(err, conn.Close()) -// }() -// // ... -// } +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } // // multierr provides the Invoker type and AppendInvoke function to make cases // like the above simpler and obviate the need for a closure. The following is // roughly equivalent to the example above. // -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer multierr.AppendInvoke(err, multierr.Close(conn)) -// // ... -// } +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(conn)) +// // ... +// } // -// See AppendInvoke and Invoker for more information. 
+// See [AppendInvoke] and [Invoker] for more information. // -// Advanced Usage +// NOTE: If you're modifying an error from inside a defer, you MUST use a named +// return value for that function. +// +// # Advanced Usage // // Errors returned by Combine and Append MAY implement the following // interface. // -// type errorGroup interface { -// // Returns a slice containing the underlying list of errors. -// // -// // This slice MUST NOT be modified by the caller. -// Errors() []error -// } +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } // // Note that if you need access to list of errors behind a multierr error, you // should prefer using the Errors function. That said, if you need cheap @@ -128,13 +131,13 @@ // because errors returned by Combine and Append are not guaranteed to // implement this interface. // -// var errors []error -// group, ok := err.(errorGroup) -// if ok { -// errors = group.Errors() -// } else { -// errors = []error{err} -// } +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } package multierr // import "go.uber.org/multierr" import ( @@ -185,8 +188,8 @@ type errorGroup interface { // Errors returns a slice containing zero or more errors that the supplied // error is composed of. If the error is nil, a nil slice is returned. // -// err := multierr.Append(r.Close(), w.Close()) -// errors := multierr.Errors(err) +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) // // If the error is not composed of other errors, the returned slice contains // just the error that was passed in. @@ -209,10 +212,7 @@ func Errors(err error) []error { return []error{err} } - errors := eg.Errors() - result := make([]error, len(errors)) - copy(result, errors) - return result + return append(([]error)(nil), eg.Errors()...) } // multiError is an error that holds one or more errors. @@ -372,6 +372,14 @@ func inspect(errors []error) (res inspectResult) { // fromSlice converts the given list of errors into a single error. func fromSlice(errors []error) error { + // Don't pay to inspect small slices. + switch len(errors) { + case 0: + return nil + case 1: + return errors[0] + } + res := inspect(errors) switch res.Count { case 0: @@ -381,8 +389,12 @@ func fromSlice(errors []error) error { return errors[res.FirstErrorIdx] case len(errors): if !res.ContainsMultiError { - // already flat - return &multiError{errors: errors} + // Error list is flat. Make a copy of it + // Otherwise "errors" escapes to the heap + // unconditionally for all other cases. + // This lets us optimize for the "no errors" case. + out := append(([]error)(nil), errors...) + return &multiError{errors: out} } } @@ -407,32 +419,32 @@ func fromSlice(errors []error) error { // If zero arguments were passed or if all items are nil, a nil error is // returned. // -// Combine(nil, nil) // == nil +// Combine(nil, nil) // == nil // // If only a single error was passed, it is returned as-is. // -// Combine(err) // == err +// Combine(err) // == err // // Combine skips over nil arguments so this function may be used to combine // together errors from operations that fail independently of each other. 
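As the rewritten Combine documentation notes, nil arguments are skipped and nested multierr errors are flattened. The sketch below exercises Combine and Errors with hypothetical errors to show that behaviour:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

func main() {
	// All-nil input collapses to nil; a single non-nil error is returned as-is.
	fmt.Println(multierr.Combine(nil, nil) == nil) // true

	errReader := errors.New("close reader")
	errWriter := errors.New("close writer")

	// Nested multierr errors are flattened into a single list.
	combined := multierr.Combine(
		multierr.Combine(errReader, errWriter),
		errors.New("close conn"),
	)
	for _, e := range multierr.Errors(combined) {
		fmt.Println("-", e)
	}
}
```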
// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// pipe.Close(), -// ) +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) // // If any of the passed errors is a multierr error, it will be flattened along // with the other errors. // -// multierr.Combine(multierr.Combine(err1, err2), err3) -// // is the same as -// multierr.Combine(err1, err2, err3) +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) // // The returned error formats into a readable multi-line error message if // formatted with %+v. // -// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) func Combine(errors ...error) error { return fromSlice(errors) } @@ -442,16 +454,19 @@ func Combine(errors ...error) error { // This function is a specialization of Combine for the common case where // there are only two errors. // -// err = multierr.Append(reader.Close(), writer.Close()) +// err = multierr.Append(reader.Close(), writer.Close()) // // The following pattern may also be used to record failure of deferred // operations without losing information about the original error. // -// func doSomething(..) (err error) { -// f := acquireResource() -// defer func() { -// err = multierr.Append(err, f.Close()) -// }() +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +// +// Note that the variable MUST be a named return to append an error to it from +// the defer statement. See also [AppendInvoke]. func Append(left error, right error) error { switch { case left == nil: @@ -481,37 +496,37 @@ func Append(left error, right error) error { // AppendInto appends an error into the destination of an error pointer and // returns whether the error being appended was non-nil. // -// var err error -// multierr.AppendInto(&err, r.Close()) -// multierr.AppendInto(&err, w.Close()) +// var err error +// multierr.AppendInto(&err, r.Close()) +// multierr.AppendInto(&err, w.Close()) // // The above is equivalent to, // -// err := multierr.Append(r.Close(), w.Close()) +// err := multierr.Append(r.Close(), w.Close()) // // As AppendInto reports whether the provided error was non-nil, it may be // used to build a multierr error in a loop more ergonomically. For example: // -// var err error -// for line := range lines { -// var item Item -// if multierr.AppendInto(&err, parse(line, &item)) { -// continue -// } -// items = append(items, item) -// } +// var err error +// for line := range lines { +// var item Item +// if multierr.AppendInto(&err, parse(line, &item)) { +// continue +// } +// items = append(items, item) +// } // // Compare this with a version that relies solely on Append: // -// var err error -// for line := range lines { -// var item Item -// if parseErr := parse(line, &item); parseErr != nil { -// err = multierr.Append(err, parseErr) -// continue -// } -// items = append(items, item) -// } +// var err error +// for line := range lines { +// var item Item +// if parseErr := parse(line, &item); parseErr != nil { +// err = multierr.Append(err, parseErr) +// continue +// } +// items = append(items, item) +// } func AppendInto(into *error, err error) (errored bool) { if into == nil { // We panic if 'into' is nil. This is not documented above @@ -532,7 +547,7 @@ func AppendInto(into *error, err error) (errored bool) { // AppendInvoke to append the result of calling the function into an error. 
// This allows you to conveniently defer capture of failing operations. // -// See also, Close and Invoke. +// See also, [Close] and [Invoke]. type Invoker interface { Invoke() error } @@ -543,19 +558,22 @@ type Invoker interface { // // For example, // -// func processReader(r io.Reader) (err error) { -// scanner := bufio.NewScanner(r) -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) -// for scanner.Scan() { -// // ... -// } -// // ... -// } +// func processReader(r io.Reader) (err error) { +// scanner := bufio.NewScanner(r) +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// for scanner.Scan() { +// // ... +// } +// // ... +// } // // In this example, the following line will construct the Invoker right away, // but defer the invocation of scanner.Err() until the function returns. // -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. type Invoke func() error // Invoke calls the supplied function and returns its result. @@ -566,19 +584,22 @@ func (i Invoke) Invoke() error { return i() } // // For example, // -// func processFile(path string) (err error) { -// f, err := os.Open(path) -// if err != nil { -// return err -// } -// defer multierr.AppendInvoke(&err, multierr.Close(f)) -// return processReader(f) -// } +// func processFile(path string) (err error) { +// f, err := os.Open(path) +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// return processReader(f) +// } // // In this example, multierr.Close will construct the Invoker right away, but // defer the invocation of f.Close until the function returns. // -// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. func Close(closer io.Closer) Invoker { return Invoke(closer.Close) } @@ -588,52 +609,73 @@ func Close(closer io.Closer) Invoker { // invocation of fallible operations until a function returns, and capture the // resulting errors. // -// func doSomething(...) (err error) { -// // ... -// f, err := openFile(..) -// if err != nil { -// return err -// } +// func doSomething(...) (err error) { +// // ... +// f, err := openFile(..) +// if err != nil { +// return err +// } // -// // multierr will call f.Close() when this function returns and -// // if the operation fails, its append its error into the -// // returned error. -// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// // multierr will call f.Close() when this function returns and +// // if the operation fails, its append its error into the +// // returned error. +// defer multierr.AppendInvoke(&err, multierr.Close(f)) // -// scanner := bufio.NewScanner(f) -// // Similarly, this scheduled scanner.Err to be called and -// // inspected when the function returns and append its error -// // into the returned error. -// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// scanner := bufio.NewScanner(f) +// // Similarly, this scheduled scanner.Err to be called and +// // inspected when the function returns and append its error +// // into the returned error. +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) // -// // ... -// } +// // ... 
+// } +// +// NOTE: If used with a defer, the error variable MUST be a named return. // // Without defer, AppendInvoke behaves exactly like AppendInto. // -// err := // ... -// multierr.AppendInvoke(&err, mutltierr.Invoke(foo)) +// err := // ... +// multierr.AppendInvoke(&err, mutltierr.Invoke(foo)) // -// // ...is roughly equivalent to... +// // ...is roughly equivalent to... // -// err := // ... -// multierr.AppendInto(&err, foo()) +// err := // ... +// multierr.AppendInto(&err, foo()) // // The advantage of the indirection introduced by Invoker is to make it easy // to defer the invocation of a function. Without this indirection, the // invoked function will be evaluated at the time of the defer block rather // than when the function returns. // -// // BAD: This is likely not what the caller intended. This will evaluate -// // foo() right away and append its result into the error when the -// // function returns. -// defer multierr.AppendInto(&err, foo()) +// // BAD: This is likely not what the caller intended. This will evaluate +// // foo() right away and append its result into the error when the +// // function returns. +// defer multierr.AppendInto(&err, foo()) // -// // GOOD: This will defer invocation of foo unutil the function returns. -// defer multierr.AppendInvoke(&err, multierr.Invoke(foo)) +// // GOOD: This will defer invocation of foo unutil the function returns. +// defer multierr.AppendInvoke(&err, multierr.Invoke(foo)) // // multierr provides a few Invoker implementations out of the box for -// convenience. See Invoker for more information. +// convenience. See [Invoker] for more information. func AppendInvoke(into *error, invoker Invoker) { AppendInto(into, invoker.Invoke()) } + +// AppendFunc is a shorthand for [AppendInvoke]. +// It allows using function or method value directly +// without having to wrap it into an [Invoker] interface. +// +// func doSomething(...) (err error) { +// w, err := startWorker(...) +// if err != nil { +// return err +// } +// +// // multierr will call w.Stop() when this function returns and +// // if the operation fails, it appends its error into the +// // returned error. +// defer multierr.AppendFunc(&err, w.Stop) +// } +func AppendFunc(into *error, fn func() error) { + AppendInvoke(into, Invoke(fn)) +} diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl index 3154a1e64cf2..92aa65d660b6 100644 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -96,14 +96,14 @@ Released under the [MIT License](LICENSE.txt). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are -pinned in zap's [glide.lock][] file. [↩](#anchor-versions) +pinned in the [benchmarks/go.mod][] file. 
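The updated package docs and the new AppendFunc both stress that an error appended from a defer must target a named return value. A hedged sketch of that pattern (the file-reading function and path are hypothetical, not from the patch):

```go
package main

import (
	"bufio"
	"fmt"
	"os"

	"go.uber.org/multierr"
)

// readLines uses a named return so that errors appended from the deferred
// calls actually reach the caller, as the docs above require.
func readLines(path string) (lines []string, err error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	// Close's error is folded into err when the function returns.
	defer multierr.AppendInvoke(&err, multierr.Close(f))

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		lines = append(lines, scanner.Text())
	}
	// AppendFunc (new in v1.9.0) takes a plain func() error directly.
	defer multierr.AppendFunc(&err, scanner.Err)

	return lines, nil
}

func main() {
	if _, err := readLines("/etc/hosts"); err != nil {
		fmt.Println("error:", err)
	}
}
```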
[↩](#anchor-versions) -[doc-img]: https://godoc.org/go.uber.org/zap?status.svg -[doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.com/uber-go/zap +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml [cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/zap [benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks -[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 794ee303e138..0db1f9f15fee 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -3,9 +3,110 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## 1.24.0 (30 Nov 2022) + +Enhancements: +* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the + current minimum enabled log level. +* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically. + +Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their +contributions to this release. + +[#1148]: https://github.coml/uber-go/zap/pull/1148 +[#1185]: https://github.coml/uber-go/zap/pull/1185 + +## 1.23.0 (24 Aug 2022) + +Enhancements: +* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a + `LevelEnabler` or `Core`. +* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects + that implement `String() string`. + +[#1147]: https://github.com/uber-go/zap/pull/1147 +[#1155]: https://github.com/uber-go/zap/pull/1155 + + +## 1.22.0 (8 Aug 2022) + +Enhancements: +* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log + arrays of objects. With these two constructors, you don't need to implement + `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement + `zapcore.ObjectMarshaler`. +* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing + `SugaredLogger` with the provided options applied. +* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level. + These functions provide a string joining behavior similar to `fmt.Println`. +* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the + logger for `Fatal`-level log entries. This defaults to exiting the program. +* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or + `NewDevelopment` to panic if the system was unable to build the logger. +* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for + a statement dynamically. + +Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun +for their contributions to this release. + +[#1071]: https://github.com/uber-go/zap/pull/1071 +[#1079]: https://github.com/uber-go/zap/pull/1079 +[#1080]: https://github.com/uber-go/zap/pull/1080 +[#1088]: https://github.com/uber-go/zap/pull/1088 +[#1108]: https://github.com/uber-go/zap/pull/1108 +[#1118]: https://github.com/uber-go/zap/pull/1118 + +## 1.21.0 (7 Feb 2022) + +Enhancements: +* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string. 
+* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a + string. + +Bugfixes: +* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset. + +Other changes: +* [#1052][]: Improve encoding performance when the `AddCaller` and + `AddStacktrace` options are used together. + +[#1047]: https://github.com/uber-go/zap/pull/1047 +[#1048]: https://github.com/uber-go/zap/pull/1048 +[#1052]: https://github.com/uber-go/zap/pull/1052 +[#1058]: https://github.com/uber-go/zap/pull/1058 + +Thanks to @aerosol and @Techassi for their contributions to this release. + +## 1.20.0 (4 Jan 2022) + +Enhancements: +* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline + characters between log statements. +* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON + encoding of reflected log fields. + +Bugfixes: +* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON. +* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject` + methods when the methods return. +* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero. + +Other changes: +* [#1028][]: Drop support for Go < 1.15. + +[#554]: https://github.com/uber-go/zap/pull/554 +[#989]: https://github.com/uber-go/zap/pull/989 +[#1011]: https://github.com/uber-go/zap/pull/1011 +[#1017]: https://github.com/uber-go/zap/pull/1017 +[#1028]: https://github.com/uber-go/zap/pull/1028 +[#1033]: https://github.com/uber-go/zap/pull/1033 +[#1039]: https://github.com/uber-go/zap/pull/1039 + +Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release. + ## 1.19.1 (8 Sep 2021) -### Fixed +Bugfixes: * [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon. * [#1003][]: JSON: Fix inaccurate precision when encoding float32. diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md index 5cd965687138..ea02f3cae2d6 100644 --- a/vendor/go.uber.org/zap/CONTRIBUTING.md +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -16,7 +16,7 @@ you to accept the CLA when you open your pull request. [Fork][fork], then clone the repository: -``` +```bash mkdir -p $GOPATH/src/go.uber.org cd $GOPATH/src/go.uber.org git clone git@github.com:your_github_username/zap.git @@ -27,21 +27,16 @@ git fetch upstream Make sure that the tests and the linters pass: -``` +```bash make test make lint ``` -If you're not using the minor version of Go specified in the Makefile's -`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is -fine, but it means that you'll only discover lint failures after you open your -pull request. - ## Making Changes Start by creating a new branch for your changes: -``` +```bash cd $GOPATH/src/go.uber.org/zap git checkout master git fetch upstream @@ -52,22 +47,22 @@ git checkout -b cool_new_feature Make your changes, then ensure that `make lint` and `make test` still pass. If you're satisfied with your changes, push them to your fork. -``` +```bash git push origin cool_new_feature ``` Then use the GitHub UI to open a pull request. -At this point, you're waiting on us to review your changes. We *try* to respond +At this point, you're waiting on us to review your changes. We _try_ to respond to issues and pull requests within a few business days, and we may suggest some improvements or alternatives. Once your changes are approved, one of the project maintainers will merge them. 
We're much more likely to approve your changes if you: -* Add tests for new functionality. -* Write a [good commit message][commit-message]. -* Maintain backward compatibility. +- Add tests for new functionality. +- Write a [good commit message][commit-message]. +- Maintain backward compatibility. [fork]: https://github.com/uber-go/zap/fork [open-issue]: https://github.com/uber-go/zap/issues/new diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index 1e64d6cffc13..a553a428c8f6 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -54,7 +54,7 @@ and make many small allocations. Put differently, using `encoding/json` and Zap takes a different approach. It includes a reflection-free, zero-allocation JSON encoder, and the base `Logger` strives to avoid serialization overhead and allocations wherever possible. By building the high-level `SugaredLogger` -on that foundation, zap lets users *choose* when they need to count every +on that foundation, zap lets users _choose_ when they need to count every allocation and when they'd prefer a more familiar, loosely typed API. As measured by its own [benchmarking suite][], not only is zap more performant @@ -64,40 +64,40 @@ id="anchor-versions">[1](#footnote-versions) Log a message and 10 fields: -| Package | Time | Time % to zap | Objects Allocated | -| :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 862 ns/op | +0% | 5 allocs/op -| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op -| zerolog | 4021 ns/op | +366% | 76 allocs/op -| go-kit | 4542 ns/op | +427% | 105 allocs/op -| apex/log | 26785 ns/op | +3007% | 115 allocs/op -| logrus | 29501 ns/op | +3322% | 125 allocs/op -| log15 | 29906 ns/op | +3369% | 122 allocs/op +| Package | Time | Time % to zap | Objects Allocated | +| :------------------ | :---------: | :-----------: | :---------------: | +| :zap: zap | 2900 ns/op | +0% | 5 allocs/op | +| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op | +| zerolog | 10639 ns/op | +267% | 32 allocs/op | +| go-kit | 14434 ns/op | +398% | 59 allocs/op | +| logrus | 17104 ns/op | +490% | 81 allocs/op | +| apex/log | 32424 ns/op | +1018% | 66 allocs/op | +| log15 | 33579 ns/op | +1058% | 76 allocs/op | Log a message with a logger that already has 10 fields of context: -| Package | Time | Time % to zap | Objects Allocated | -| :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 126 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op -| zerolog | 88 ns/op | -30% | 0 allocs/op -| go-kit | 5087 ns/op | +3937% | 103 allocs/op -| log15 | 18548 ns/op | +14621% | 73 allocs/op -| apex/log | 26012 ns/op | +20544% | 104 allocs/op -| logrus | 27236 ns/op | +21516% | 113 allocs/op +| Package | Time | Time % to zap | Objects Allocated | +| :------------------ | :---------: | :-----------: | :---------------: | +| :zap: zap | 373 ns/op | +0% | 0 allocs/op | +| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op | +| zerolog | 288 ns/op | -23% | 0 allocs/op | +| go-kit | 11785 ns/op | +3060% | 58 allocs/op | +| logrus | 19629 ns/op | +5162% | 70 allocs/op | +| log15 | 21866 ns/op | +5762% | 72 allocs/op | +| apex/log | 30890 ns/op | +8182% | 55 allocs/op | Log a static string, without any context or `printf`-style templating: -| Package | Time | Time % to zap | Objects Allocated | -| :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 118 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op -| 
zerolog | 93 ns/op | -21% | 0 allocs/op -| go-kit | 280 ns/op | +137% | 11 allocs/op -| standard library | 499 ns/op | +323% | 2 allocs/op -| apex/log | 1990 ns/op | +1586% | 10 allocs/op -| logrus | 3129 ns/op | +2552% | 24 allocs/op -| log15 | 3887 ns/op | +3194% | 23 allocs/op +| Package | Time | Time % to zap | Objects Allocated | +| :------------------ | :--------: | :-----------: | :---------------: | +| :zap: zap | 381 ns/op | +0% | 0 allocs/op | +| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op | +| zerolog | 369 ns/op | -3% | 0 allocs/op | +| standard library | 385 ns/op | +1% | 2 allocs/op | +| go-kit | 606 ns/op | +59% | 11 allocs/op | +| logrus | 1730 ns/op | +354% | 25 allocs/op | +| apex/log | 1998 ns/op | +424% | 7 allocs/op | +| log15 | 4546 ns/op | +1093% | 22 allocs/op | ## Development Status: Stable @@ -131,4 +131,3 @@ pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) [cov]: https://codecov.io/gh/uber-go/zap [benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks [benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod - diff --git a/vendor/go.uber.org/zap/array_go118.go b/vendor/go.uber.org/zap/array_go118.go new file mode 100644 index 000000000000..d0d2c49d698a --- /dev/null +++ b/vendor/go.uber.org/zap/array_go118.go @@ -0,0 +1,156 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.18 +// +build go1.18 + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" +) + +// Objects constructs a field with the given key, holding a list of the +// provided objects that can be marshaled by Zap. +// +// Note that these objects must implement zapcore.ObjectMarshaler directly. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the Request type, not its pointer (*Request). +// If it's on the pointer, use ObjectValues. +// +// Given an object that implements MarshalLogObject on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Author struct{ ... } +// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var authors []Author = ... +// logger.Info("loading article", zap.Objects("authors", authors)) +// +// Similarly, given a type that implements MarshalLogObject on its pointer +// receiver, you can log a slice of pointers to that object with Objects like +// so: +// +// type Request struct{ ... 
} +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +// +// If instead, you have a slice of values of such an object, use the +// ObjectValues constructor. +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field { + return Array(key, objects[T](values)) +} + +type objects[T zapcore.ObjectMarshaler] []T + +func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + if err := arr.AppendObject(o); err != nil { + return err + } + } + return nil +} + +// ObjectMarshalerPtr is a constraint that specifies that the given type +// implements zapcore.ObjectMarshaler on a pointer receiver. +type ObjectMarshalerPtr[T any] interface { + *T + zapcore.ObjectMarshaler +} + +// ObjectValues constructs a field with the given key, holding a list of the +// provided objects, where pointers to these objects can be marshaled by Zap. +// +// Note that pointers to these objects must implement zapcore.ObjectMarshaler. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the *Request type, not the value (Request). +// If it's on the value, use Objects. +// +// Given an object that implements MarshalLogObject on the pointer receiver, +// you can log a slice of those objects with ObjectValues like so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +// +// If instead, you have a slice of pointers of such an object, use the Objects +// field constructor. +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field { + return Array(key, objectValues[T, P](values)) +} + +type objectValues[T any, P ObjectMarshalerPtr[T]] []T + +func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range os { + // It is necessary for us to explicitly reference the "P" type. + // We cannot simply pass "&os[i]" to AppendObject because its type + // is "*T", which the type system does not consider as + // implementing ObjectMarshaler. + // Only the type "P" satisfies ObjectMarshaler, which we have + // to convert "*T" to explicitly. + var p P = &os[i] + if err := arr.AppendObject(p); err != nil { + return err + } + } + return nil +} + +// Stringers constructs a field with the given key, holding a list of the +// output provided by the value's String method +// +// Given an object that implements String on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Request struct{ ... } +// func (a Request) String() string +// +// var requests []Request = ... +// logger.Info("sending requests", zap.Stringers("requests", requests)) +// +// Note that these objects must implement fmt.Stringer directly. +// That is, if you're trying to marshal a []Request, the String method +// must be declared on the Request type, not its pointer (*Request). 
+func Stringers[T fmt.Stringer](key string, values []T) Field { + return Array(key, stringers[T](values)) +} + +type stringers[T fmt.Stringer] []T + +func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + arr.AppendString(o.String()) + } + return nil +} diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go index 55637fb0b4b1..ee6096766a85 100644 --- a/vendor/go.uber.org/zap/config.go +++ b/vendor/go.uber.org/zap/config.go @@ -21,7 +21,7 @@ package zap import ( - "fmt" + "errors" "sort" "time" @@ -182,7 +182,7 @@ func (cfg Config) Build(opts ...Option) (*Logger, error) { } if cfg.Level == (AtomicLevel{}) { - return nil, fmt.Errorf("missing Level") + return nil, errors.New("missing Level") } log := New( diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go index 8638dd1b9656..3c50d7b4d3ff 100644 --- a/vendor/go.uber.org/zap/doc.go +++ b/vendor/go.uber.org/zap/doc.go @@ -32,7 +32,7 @@ // they need to count every allocation and when they'd prefer a more familiar, // loosely typed API. // -// Choosing a Logger +// # Choosing a Logger // // In contexts where performance is nice, but not critical, use the // SugaredLogger. It's 4-10x faster than other structured logging packages and @@ -41,14 +41,15 @@ // variadic number of key-value pairs. (For more advanced use cases, they also // accept strongly typed fields - see the SugaredLogger.With documentation for // details.) -// sugar := zap.NewExample().Sugar() -// defer sugar.Sync() -// sugar.Infow("failed to fetch URL", -// "url", "http://example.com", -// "attempt", 3, -// "backoff", time.Second, -// ) -// sugar.Infof("failed to fetch URL: %s", "http://example.com") +// +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Infof("failed to fetch URL: %s", "http://example.com") // // By default, loggers are unbuffered. However, since zap's low-level APIs // allow buffering, calling Sync before letting your process exit is a good @@ -57,32 +58,35 @@ // In the rare contexts where every microsecond and every allocation matter, // use the Logger. It's even faster than the SugaredLogger and allocates far // less, but it only supports strongly-typed, structured logging. -// logger := zap.NewExample() -// defer logger.Sync() -// logger.Info("failed to fetch URL", -// zap.String("url", "http://example.com"), -// zap.Int("attempt", 3), -// zap.Duration("backoff", time.Second), -// ) +// +// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) // // Choosing between the Logger and SugaredLogger doesn't need to be an // application-wide decision: converting between the two is simple and // inexpensive. -// logger := zap.NewExample() -// defer logger.Sync() -// sugar := logger.Sugar() -// plain := sugar.Desugar() // -// Configuring Zap +// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// # Configuring Zap // // The simplest way to build a Logger is to use zap's opinionated presets: // NewExample, NewProduction, and NewDevelopment. 
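array_go118.go above introduces the generic Objects, ObjectValues, and Stringers constructors behind a go1.18 build tag. A rough usage sketch; the Author type is a hypothetical value-receiver ObjectMarshaler, and net.IP stands in for any fmt.Stringer:

```go
package main

import (
	"net"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Author is a hypothetical type whose MarshalLogObject is on the value
// receiver, so a []Author can be passed to zap.Objects. For types that
// marshal on a pointer receiver, zap.ObjectValues is the counterpart.
type Author struct {
	Name string
}

func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", a.Name)
	return nil
}

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	authors := []Author{{Name: "ada"}, {Name: "grace"}}
	ips := []net.IP{net.IPv4(127, 0, 0, 1)} // net.IP implements fmt.Stringer

	logger.Info("loading article",
		zap.Objects("authors", authors),
		zap.Stringers("ips", ips),
	)
}
```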
These presets build a logger // with a single function call: -// logger, err := zap.NewProduction() -// if err != nil { -// log.Fatalf("can't initialize zap logger: %v", err) -// } -// defer logger.Sync() +// +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() // // Presets are fine for small projects, but larger projects and organizations // naturally require a bit more customization. For most users, zap's Config @@ -94,7 +98,7 @@ // go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration // example for sample code. // -// Extending Zap +// # Extending Zap // // The zap package itself is a relatively thin wrapper around the interfaces // in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., @@ -106,7 +110,7 @@ // Similarly, package authors can use the high-performance Encoder and Core // implementations in the zapcore package to build their own loggers. // -// Frequently Asked Questions +// # Frequently Asked Questions // // An FAQ covering everything from installation errors to design decisions is // available at https://github.com/uber-go/zap/blob/master/FAQ.md. diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go index 08ed83354360..caa04ceefd81 100644 --- a/vendor/go.uber.org/zap/encoder.go +++ b/vendor/go.uber.org/zap/encoder.go @@ -63,7 +63,7 @@ func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapco func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { - return nil, fmt.Errorf("missing EncodeTime in EncoderConfig") + return nil, errors.New("missing EncodeTime in EncoderConfig") } _encoderMutex.RLock() diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go index c1ac0507cd9b..3cb46c9e0ac5 100644 --- a/vendor/go.uber.org/zap/global.go +++ b/vendor/go.uber.org/zap/global.go @@ -31,6 +31,7 @@ import ( ) const ( + _stdLogDefaultDepth = 1 _loggerWriterDepth = 2 _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + "https://github.com/uber-go/zap/issues/new and reference this error: %v" diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go index 1297c33b3285..632b6831a856 100644 --- a/vendor/go.uber.org/zap/http_handler.go +++ b/vendor/go.uber.org/zap/http_handler.go @@ -22,6 +22,7 @@ package zap import ( "encoding/json" + "errors" "fmt" "io" "net/http" @@ -32,22 +33,23 @@ import ( // ServeHTTP is a simple JSON endpoint that can report on or change the current // logging level. // -// GET +// # GET // // The GET request returns a JSON description of the current logging level like: -// {"level":"info"} // -// PUT +// {"level":"info"} +// +// # PUT // // The PUT request changes the logging level. It is perfectly safe to change the // logging level while a program is running. Two content types are supported: // -// Content-Type: application/x-www-form-urlencoded +// Content-Type: application/x-www-form-urlencoded // // With this content type, the level can be provided through the request body or // a query parameter. The log level is URL encoded like: // -// level=debug +// level=debug // // The request body takes precedence over the query parameter, if both are // specified. @@ -55,18 +57,17 @@ import ( // This content type is the default for a curl PUT request. 
Following are two // example curl requests that both set the logging level to debug. // -// curl -X PUT localhost:8080/log/level?level=debug -// curl -X PUT localhost:8080/log/level -d level=debug +// curl -X PUT localhost:8080/log/level?level=debug +// curl -X PUT localhost:8080/log/level -d level=debug // // For any other content type, the payload is expected to be JSON encoded and // look like: // -// {"level":"info"} +// {"level":"info"} // // An example curl request could look like this: // -// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' -// +// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { type errorResponse struct { Error string `json:"error"` @@ -108,7 +109,7 @@ func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error func decodePutURL(r *http.Request) (zapcore.Level, error) { lvl := r.FormValue("level") if lvl == "" { - return 0, fmt.Errorf("must specify logging level") + return 0, errors.New("must specify logging level") } var l zapcore.Level if err := l.UnmarshalText([]byte(lvl)); err != nil { @@ -125,7 +126,7 @@ func decodePutJSON(body io.Reader) (zapcore.Level, error) { return 0, fmt.Errorf("malformed request body: %v", err) } if pld.Level == nil { - return 0, fmt.Errorf("must specify logging level") + return 0, errors.New("must specify logging level") } return *pld.Level, nil diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go index dfc5b05feb77..f673f9947b85 100644 --- a/vendor/go.uber.org/zap/internal/exit/exit.go +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -24,24 +24,25 @@ package exit import "os" -var real = func() { os.Exit(1) } +var _exit = os.Exit -// Exit normally terminates the process by calling os.Exit(1). If the package -// is stubbed, it instead records a call in the testing spy. -func Exit() { - real() +// With terminates the process by calling os.Exit(code). If the package is +// stubbed, it instead records a call in the testing spy. +func With(code int) { + _exit(code) } // A StubbedExit is a testing fake for os.Exit. type StubbedExit struct { Exited bool - prev func() + Code int + prev func(code int) } // Stub substitutes a fake for the call to os.Exit(1). func Stub() *StubbedExit { - s := &StubbedExit{prev: real} - real = s.exit + s := &StubbedExit{prev: _exit} + _exit = s.exit return s } @@ -56,9 +57,10 @@ func WithStub(f func()) *StubbedExit { // Unstub restores the previous exit function. func (se *StubbedExit) Unstub() { - real = se.prev + _exit = se.prev } -func (se *StubbedExit) exit() { +func (se *StubbedExit) exit(code int) { se.Exited = true + se.Code = code } diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/zap/internal/level_enabler.go similarity index 70% rename from vendor/go.uber.org/zap/global_go112.go rename to vendor/go.uber.org/zap/internal/level_enabler.go index 6b5dbda80768..5f3e3f1b924a 100644 --- a/vendor/go.uber.org/zap/global_go112.go +++ b/vendor/go.uber.org/zap/internal/level_enabler.go @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Uber Technologies, Inc. +// Copyright (c) 2022 Uber Technologies, Inc. 
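The reworked ServeHTTP documentation above describes the GET/PUT level endpoint; the sketch below wires an AtomicLevel into both a core and an HTTP route (the path and port are arbitrary choices, not mandated by zap):

```go
package main

import (
	"net/http"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	level := zap.NewAtomicLevelAt(zapcore.InfoLevel)

	logger := zap.New(zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		level,
	))
	logger.Info("level endpoint ready")

	// AtomicLevel is itself an http.Handler: GET reports the level as
	// {"level":"info"}, and PUT changes it at runtime, e.g.
	//   curl -X PUT localhost:8080/log/level -d level=debug
	http.Handle("/log/level", level)
	_ = http.ListenAndServe(":8080", nil)
}
```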
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -18,9 +18,18 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -// See #682 for more information. -// +build go1.12 +package internal -package zap +import "go.uber.org/zap/zapcore" -const _stdLogDefaultDepth = 1 +// LeveledEnabler is an interface satisfied by LevelEnablers that are able to +// report their own level. +// +// This interface is defined to use more conveniently in tests and non-zapcore +// packages. +// This cannot be imported from zapcore because of the cyclic dependency. +type LeveledEnabler interface { + zapcore.LevelEnabler + + Level() zapcore.Level +} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go index 3567a9a1e6a3..db951e19a50f 100644 --- a/vendor/go.uber.org/zap/level.go +++ b/vendor/go.uber.org/zap/level.go @@ -22,6 +22,7 @@ package zap import ( "go.uber.org/atomic" + "go.uber.org/zap/internal" "go.uber.org/zap/zapcore" ) @@ -70,6 +71,8 @@ type AtomicLevel struct { l *atomic.Int32 } +var _ internal.LeveledEnabler = AtomicLevel{} + // NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging // enabled. func NewAtomicLevel() AtomicLevel { @@ -86,6 +89,23 @@ func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { return a } +// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. +func ParseAtomicLevel(text string) (AtomicLevel, error) { + a := NewAtomicLevel() + l, err := zapcore.ParseLevel(text) + if err != nil { + return a, err + } + + a.SetLevel(l) + return a, nil +} + // Enabled implements the zapcore.LevelEnabler interface, which allows the // AtomicLevel to be used in place of traditional static levels. func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index f116bd936fe2..cd44030d13f0 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -22,11 +22,11 @@ package zap import ( "fmt" - "io/ioutil" + "io" "os" - "runtime" "strings" + "go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/zapcore" ) @@ -42,7 +42,7 @@ type Logger struct { development bool addCaller bool - onFatal zapcore.CheckWriteAction // default is WriteThenFatal + onFatal zapcore.CheckWriteHook // default is WriteThenFatal name string errorOutput zapcore.WriteSyncer @@ -85,7 +85,7 @@ func New(core zapcore.Core, options ...Option) *Logger { func NewNop() *Logger { return &Logger{ core: zapcore.NewNopCore(), - errorOutput: zapcore.AddSync(ioutil.Discard), + errorOutput: zapcore.AddSync(io.Discard), addStack: zapcore.FatalLevel + 1, clock: zapcore.DefaultClock, } @@ -107,6 +107,19 @@ func NewDevelopment(options ...Option) (*Logger, error) { return NewDevelopmentConfig().Build(options...) } +// Must is a helper that wraps a call to a function returning (*Logger, error) +// and panics if the error is non-nil. It is intended for use in variable +// initialization such as: +// +// var logger = zap.Must(zap.NewProduction()) +func Must(logger *Logger, err error) *Logger { + if err != nil { + panic(err) + } + + return logger +} + // NewExample builds a Logger that's designed for use in zap's testable // examples. 
It writes DebugLevel and above logs to standard out as JSON, but // omits the timestamp and calling function to keep example output @@ -170,6 +183,13 @@ func (log *Logger) With(fields ...Field) *Logger { return l } +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (log *Logger) Level() zapcore.Level { + return zapcore.LevelOf(log.core) +} + // Check returns a CheckedEntry if logging a message at the specified level // is enabled. It's a completely optional optimization; in high-performance // applications, Check can help avoid allocating a slice to hold fields. @@ -177,6 +197,14 @@ func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return log.check(lvl, msg) } +// Log logs a message at the specified level. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) { + if ce := log.check(lvl, msg); ce != nil { + ce.Write(fields...) + } +} + // Debug logs a message at DebugLevel. The message includes any fields passed // at the log site, as well as any fields accumulated on the logger. func (log *Logger) Debug(msg string, fields ...Field) { @@ -259,8 +287,10 @@ func (log *Logger) clone() *Logger { } func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { - // check must always be called directly by a method in the Logger interface - // (e.g., Check, Info, Fatal). + // Logger.check must always be called directly by a method in the + // Logger interface (e.g., Check, Info, Fatal). + // This skips Logger.check and the Info/Fatal/Check/etc. method that + // called it. const callerSkipOffset = 2 // Check the level first to reduce the cost of disabled log calls. @@ -283,18 +313,27 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Set up any required terminal behavior. switch ent.Level { case zapcore.PanicLevel: - ce = ce.Should(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, zapcore.WriteThenPanic) case zapcore.FatalLevel: onFatal := log.onFatal - // Noop is the default value for CheckWriteAction, and it leads to - // continued execution after a Fatal which is unexpected. - if onFatal == zapcore.WriteThenNoop { + // nil or WriteThenNoop will lead to continued execution after + // a Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the + // log.Fatal. + if onFatal == nil || onFatal == zapcore.WriteThenNoop { onFatal = zapcore.WriteThenFatal } - ce = ce.Should(ent, onFatal) + ce = ce.After(ent, onFatal) case zapcore.DPanicLevel: if log.development { - ce = ce.Should(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, zapcore.WriteThenPanic) } } @@ -307,42 +346,55 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Thread the error output through to the CheckedEntry. ce.ErrorOutput = log.errorOutput - if log.addCaller { - frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset) - if !defined { + + addStack := log.addStack.Enabled(ce.Level) + if !log.addCaller && !addStack { + return ce + } + + // Adding the caller or stack trace requires capturing the callers of + // this function. We'll share information between these two. 
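Several of the additions in these hunks and in the changelog (Must, ParseAtomicLevel, Logger.Log, Logger.Level) compose naturally; a small sketch, with the level string chosen arbitrarily:

```go
package main

import (
	"fmt"

	"go.uber.org/zap"
)

// Must (1.22) panics if construction fails, which suits package-level
// initialization of a logger.
var logger = zap.Must(zap.NewProduction())

func main() {
	defer logger.Sync()

	// ParseAtomicLevel (1.21) parses a textual level; "warn" is arbitrary here.
	lvl, err := zap.ParseAtomicLevel("warn")
	if err != nil {
		logger.Fatal("invalid level", zap.Error(err))
	}

	// Logger.Log (1.22) chooses the level at the call site.
	logger.Log(lvl.Level(), "dynamically leveled message")

	// Logger.Level (1.24) reports the minimum enabled level of the core.
	fmt.Println("minimum enabled level:", logger.Level())
}
```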
+ stackDepth := stacktraceFirst + if addStack { + stackDepth = stacktraceFull + } + stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth) + defer stack.Free() + + if stack.Count() == 0 { + if log.addCaller { fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) log.errorOutput.Sync() } + return ce + } - ce.Entry.Caller = zapcore.EntryCaller{ - Defined: defined, + frame, more := stack.Next() + + if log.addCaller { + ce.Caller = zapcore.EntryCaller{ + Defined: frame.PC != 0, PC: frame.PC, File: frame.File, Line: frame.Line, Function: frame.Function, } } - if log.addStack.Enabled(ce.Entry.Level) { - ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String - } - return ce -} + if addStack { + buffer := bufferpool.Get() + defer buffer.Free() -// getCallerFrame gets caller frame. The argument skip is the number of stack -// frames to ascend, with 0 identifying the caller of getCallerFrame. The -// boolean ok is false if it was not possible to recover the information. -// -// Note: This implementation is similar to runtime.Caller, but it returns the whole frame. -func getCallerFrame(skip int) (frame runtime.Frame, ok bool) { - const skipOffset = 2 // skip getCallerFrame and Callers - - pc := make([]uintptr, 1) - numFrames := runtime.Callers(skip+skipOffset, pc) - if numFrames < 1 { - return + stackfmt := newStackFormatter(buffer) + + // We've already extracted the first frame, so format that + // separately and defer to stackfmt for the rest. + stackfmt.FormatFrame(frame) + if more { + stackfmt.FormatStack(stack) + } + ce.Stack = buffer.String() } - frame, _ = runtime.CallersFrames(pc).Next() - return frame, frame.PC != 0 + return ce } diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index e9e66161f510..c4f3bca3d202 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -133,9 +133,28 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { } // OnFatal sets the action to take on fatal logs. +// +// Deprecated: Use [WithFatalHook] instead. func OnFatal(action zapcore.CheckWriteAction) Option { + return WithFatalHook(action) +} + +// WithFatalHook sets a CheckWriteHook to run on fatal logs. +// Zap will call this hook after writing a log statement with a Fatal level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a fatal log message, but it will not exit the +// program. +// +// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit)) +// +// It is important that the provided CheckWriteHook stops the control flow at +// the current statement to meet expectations of callers of the logger. +// We recommend calling os.Exit or runtime.Goexit inside custom hooks at +// minimum. +func WithFatalHook(hook zapcore.CheckWriteHook) Option { return optionFunc(func(log *Logger) { - log.onFatal = action + log.onFatal = hook }) } diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go index df46fa87a70a..478c9a10ffc3 100644 --- a/vendor/go.uber.org/zap/sink.go +++ b/vendor/go.uber.org/zap/sink.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. 
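options.go above deprecates OnFatal in favour of WithFatalHook, whose doc comment suggests WriteThenGoexit for exiting only the current goroutine. A sketch of that behaviour (the worker goroutine and core construction are illustrative):

```go
package main

import (
	"os"
	"sync"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)

	// WriteThenGoexit stops only the calling goroutine after the entry is
	// written, instead of the default WriteThenFatal (os.Exit(1)).
	logger := zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		logger.Fatal("fatal in worker") // runtime.Goexit: deferred Done still runs
	}()
	wg.Wait()

	logger.Info("process survived the fatal entry")
}
```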
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -26,6 +26,7 @@ import ( "io" "net/url" "os" + "path/filepath" "strings" "sync" @@ -34,23 +35,7 @@ import ( const schemeFile = "file" -var ( - _sinkMutex sync.RWMutex - _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme -) - -func init() { - resetSinkRegistry() -} - -func resetSinkRegistry() { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() - - _sinkFactories = map[string]func(*url.URL) (Sink, error){ - schemeFile: newFileSink, - } -} +var _sinkRegistry = newSinkRegistry() // Sink defines the interface to write to and close logger destinations. type Sink interface { @@ -58,10 +43,6 @@ type Sink interface { io.Closer } -type nopCloserSink struct{ zapcore.WriteSyncer } - -func (nopCloserSink) Close() error { return nil } - type errSinkNotFound struct { scheme string } @@ -70,16 +51,29 @@ func (e *errSinkNotFound) Error() string { return fmt.Sprintf("no sink found for scheme %q", e.scheme) } -// RegisterSink registers a user-supplied factory for all sinks with a -// particular scheme. -// -// All schemes must be ASCII, valid under section 3.1 of RFC 3986 -// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already -// have a factory registered. Zap automatically registers a factory for the -// "file" scheme. -func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() +type nopCloserSink struct{ zapcore.WriteSyncer } + +func (nopCloserSink) Close() error { return nil } + +type sinkRegistry struct { + mu sync.Mutex + factories map[string]func(*url.URL) (Sink, error) // keyed by scheme + openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile +} + +func newSinkRegistry() *sinkRegistry { + sr := &sinkRegistry{ + factories: make(map[string]func(*url.URL) (Sink, error)), + openFile: os.OpenFile, + } + sr.RegisterSink(schemeFile, sr.newFileSinkFromURL) + return sr +} + +// RegisterScheme registers the given factory for the specific scheme. +func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + sr.mu.Lock() + defer sr.mu.Unlock() if scheme == "" { return errors.New("can't register a sink factory for empty string") @@ -88,14 +82,22 @@ func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { if err != nil { return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) } - if _, ok := _sinkFactories[normalized]; ok { + if _, ok := sr.factories[normalized]; ok { return fmt.Errorf("sink factory already registered for scheme %q", normalized) } - _sinkFactories[normalized] = factory + sr.factories[normalized] = factory return nil } -func newSink(rawURL string) (Sink, error) { +func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) { + // URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to + // the drive, and path is unset unless `c:/log.txt` is used. + // To avoid Windows-specific URL handling, we instead check IsAbs to open as a file. + // filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows. 
+ if filepath.IsAbs(rawURL) { + return sr.newFileSinkFromPath(rawURL) + } + u, err := url.Parse(rawURL) if err != nil { return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) } @@ -104,16 +106,27 @@ func newSink(rawURL string) (Sink, error) { u.Scheme = schemeFile } - _sinkMutex.RLock() - factory, ok := _sinkFactories[u.Scheme] - _sinkMutex.RUnlock() + sr.mu.Lock() + factory, ok := sr.factories[u.Scheme] + sr.mu.Unlock() if !ok { return nil, &errSinkNotFound{u.Scheme} } return factory(u) } -func newFileSink(u *url.URL) (Sink, error) { +// RegisterSink registers a user-supplied factory for all sinks with a +// particular scheme. +// +// All schemes must be ASCII, valid under section 3.1 of RFC 3986 +// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already +// have a factory registered. Zap automatically registers a factory for the +// "file" scheme. +func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + return _sinkRegistry.RegisterSink(scheme, factory) +} + +func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) { if u.User != nil { return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) } @@ -130,13 +143,18 @@ func newFileSink(u *url.URL) (Sink, error) { if hn := u.Hostname(); hn != "" && hn != "localhost" { return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) } - switch u.Path { + + return sr.newFileSinkFromPath(u.Path) +} + +func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) { + switch path { case "stdout": return nopCloserSink{os.Stdout}, nil case "stderr": return nopCloserSink{os.Stderr}, nil } - return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) + return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) } func normalizeScheme(s string) (string, error) { diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go index 0cf8c1ddffa1..817a3bde8b10 100644 --- a/vendor/go.uber.org/zap/stacktrace.go +++ b/vendor/go.uber.org/zap/stacktrace.go @@ -24,62 +24,153 @@ import ( "runtime" "sync" + "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" ) -var ( - _stacktracePool = sync.Pool{ - New: func() interface{} { - return newProgramCounters(64) - }, - } +var _stacktracePool = sync.Pool{ + New: func() interface{} { + return &stacktrace{ + storage: make([]uintptr, 64), + } + }, +} + +type stacktrace struct { + pcs []uintptr // program counters; always a subslice of storage + frames *runtime.Frames + + // The size of pcs varies depending on requirements: + // it will be one if only the first frame was requested, + // and otherwise it will reflect the depth of the call stack. + // + // storage decouples the slice we need (pcs) from the slice we pool. + // We will always allocate a reasonably large storage, but we'll use + // only as much of it as we need. + storage []uintptr +} + +// stacktraceDepth specifies how deep of a stack trace should be captured. +type stacktraceDepth int + +const ( + // stacktraceFirst captures only the first frame. + stacktraceFirst stacktraceDepth = iota + + // stacktraceFull captures the entire call stack, allocating more + // storage for it if needed.
+ stacktraceFull ) -func takeStacktrace(skip int) string { - buffer := bufferpool.Get() - defer buffer.Free() - programCounters := _stacktracePool.Get().(*programCounters) - defer _stacktracePool.Put(programCounters) - - var numFrames int - for { - // Skip the call to runtime.Callers and takeStacktrace so that the - // program counters start at the caller of takeStacktrace. - numFrames = runtime.Callers(skip+2, programCounters.pcs) - if numFrames < len(programCounters.pcs) { - break - } - // Don't put the too-short counter slice back into the pool; this lets - // the pool adjust if we consistently take deep stacktraces. - programCounters = newProgramCounters(len(programCounters.pcs) * 2) +// captureStacktrace captures a stack trace of the specified depth, skipping +// the provided number of frames. skip=0 identifies the caller of +// captureStacktrace. +// +// The caller must call Free on the returned stacktrace after using it. +func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { + stack := _stacktracePool.Get().(*stacktrace) + + switch depth { + case stacktraceFirst: + stack.pcs = stack.storage[:1] + case stacktraceFull: + stack.pcs = stack.storage } - i := 0 - frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) + // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers + // itself. +2 to skip captureStacktrace and runtime.Callers. + numFrames := runtime.Callers( + skip+2, + stack.pcs, + ) - // Note: On the last iteration, frames.Next() returns false, with a valid - // frame, but we ignore this frame. The last frame is a a runtime frame which - // adds noise, since it's only either runtime.main or runtime.goexit. - for frame, more := frames.Next(); more; frame, more = frames.Next() { - if i != 0 { - buffer.AppendByte('\n') + // runtime.Callers truncates the recorded stacktrace if there is no + // room in the provided slice. For the full stack trace, keep expanding + // storage until there are fewer frames than there is room. + if depth == stacktraceFull { + pcs := stack.pcs + for numFrames == len(pcs) { + pcs = make([]uintptr, len(pcs)*2) + numFrames = runtime.Callers(skip+2, pcs) } - i++ - buffer.AppendString(frame.Function) - buffer.AppendByte('\n') - buffer.AppendByte('\t') - buffer.AppendString(frame.File) - buffer.AppendByte(':') - buffer.AppendInt(int64(frame.Line)) + + // Discard old storage instead of returning it to the pool. + // This will adjust the pool size over time if stack traces are + // consistently very deep. + stack.storage = pcs + stack.pcs = pcs[:numFrames] + } else { + stack.pcs = stack.pcs[:numFrames] } + stack.frames = runtime.CallersFrames(stack.pcs) + return stack +} + +// Free releases resources associated with this stacktrace +// and returns it back to the pool. +func (st *stacktrace) Free() { + st.frames = nil + st.pcs = nil + _stacktracePool.Put(st) +} + +// Count reports the total number of frames in this stacktrace. +// Count DOES NOT change as Next is called. +func (st *stacktrace) Count() int { + return len(st.pcs) +} + +// Next returns the next frame in the stack trace, +// and a boolean indicating whether there are more after it. 
+func (st *stacktrace) Next() (_ runtime.Frame, more bool) { + return st.frames.Next() +} + +func takeStacktrace(skip int) string { + stack := captureStacktrace(skip+1, stacktraceFull) + defer stack.Free() + + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := newStackFormatter(buffer) + stackfmt.FormatStack(stack) return buffer.String() } -type programCounters struct { - pcs []uintptr +// stackFormatter formats a stack trace into a readable string representation. +type stackFormatter struct { + b *buffer.Buffer + nonEmpty bool // whehther we've written at least one frame already +} + +// newStackFormatter builds a new stackFormatter. +func newStackFormatter(b *buffer.Buffer) stackFormatter { + return stackFormatter{b: b} } -func newProgramCounters(size int) *programCounters { - return &programCounters{make([]uintptr, size)} +// FormatStack formats all remaining frames in the provided stacktrace -- minus +// the final runtime.main/runtime.goexit frame. +func (sf *stackFormatter) FormatStack(stack *stacktrace) { + // Note: On the last iteration, frames.Next() returns false, with a valid + // frame, but we ignore this frame. The last frame is a runtime frame which + // adds noise, since it's only either runtime.main or runtime.goexit. + for frame, more := stack.Next(); more; frame, more = stack.Next() { + sf.FormatFrame(frame) + } +} + +// FormatFrame formats the given frame. +func (sf *stackFormatter) FormatFrame(frame runtime.Frame) { + if sf.nonEmpty { + sf.b.AppendByte('\n') + } + sf.nonEmpty = true + sf.b.AppendString(frame.Function) + sf.b.AppendByte('\n') + sf.b.AppendByte('\t') + sf.b.AppendString(frame.File) + sf.b.AppendByte(':') + sf.b.AppendInt(int64(frame.Line)) } diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index 0b9651981a90..ac387b3e47d1 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -31,6 +31,7 @@ import ( const ( _oddNumberErrMsg = "Ignored key without a value." _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." + _multipleErrMsg = "Multiple errors without a key." ) // A SugaredLogger wraps the base Logger functionality in a slower, but less @@ -38,10 +39,19 @@ const ( // method. // // Unlike the Logger, the SugaredLogger doesn't insist on structured logging. -// For each log level, it exposes three methods: one for loosely-typed -// structured logging, one for println-style formatting, and one for -// printf-style formatting. For example, SugaredLoggers can produce InfoLevel -// output with Infow ("info with" structured context), Info, or Infof. +// For each log level, it exposes four methods: +// +// - methods named after the log level for log.Print-style logging +// - methods ending in "w" for loosely-typed structured logging +// - methods ending in "f" for log.Printf-style logging +// - methods ending in "ln" for log.Println-style logging +// +// For example, the methods for InfoLevel are: +// +// Info(...any) Print-style logging +// Infow(...any) Structured logging (read as "info with") +// Infof(string, ...any) Printf-style logging +// Infoln(...any) Println-style logging type SugaredLogger struct { base *Logger } @@ -61,27 +71,40 @@ func (s *SugaredLogger) Named(name string) *SugaredLogger { return &SugaredLogger{base: s.base.Named(name)} } +// WithOptions clones the current SugaredLogger, applies the supplied Options, +// and returns the result. It's safe to use concurrently. 
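+//
+// For example, a minimal sketch (sugar is assumed to be an existing
+// *SugaredLogger; AddCallerSkip is the existing zap Option):
+//
+//	sugar = sugar.WithOptions(zap.AddCallerSkip(1))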
+func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger { + base := s.base.clone() + for _, opt := range opts { + opt.apply(base) + } + return &SugaredLogger{base: base} +} + // With adds a variadic number of fields to the logging context. It accepts a // mix of strongly-typed Field objects and loosely-typed key-value pairs. When // processing pairs, the first element of the pair is used as the field key // and the second as the field value. // // For example, -// sugaredLogger.With( -// "hello", "world", -// "failure", errors.New("oh no"), -// Stack(), -// "count", 42, -// "user", User{Name: "alice"}, -// ) +// +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// // is the equivalent of -// unsugared.With( -// String("hello", "world"), -// String("failure", "oh no"), -// Stack(), -// Int("count", 42), -// Object("user", User{Name: "alice"}), -// ) +// +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) // // Note that the keys in key-value pairs should be strings. In development, // passing a non-string key panics. In production, the logger is more @@ -92,6 +115,13 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} } +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (s *SugaredLogger) Level() zapcore.Level { + return zapcore.LevelOf(s.base.core) +} + // Debug uses fmt.Sprint to construct and log a message. func (s *SugaredLogger) Debug(args ...interface{}) { s.log(DebugLevel, "", args, nil) @@ -168,7 +198,8 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { // pairs are treated as they are in With. // // When debug-level logging is disabled, this is much faster than -// s.With(keysAndValues).Debug(msg) +// +// s.With(keysAndValues).Debug(msg) func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { s.log(DebugLevel, msg, nil, keysAndValues) } @@ -210,11 +241,48 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { s.log(FatalLevel, msg, nil, keysAndValues) } +// Debugln uses fmt.Sprintln to construct and log a message. +func (s *SugaredLogger) Debugln(args ...interface{}) { + s.logln(DebugLevel, args, nil) +} + +// Infoln uses fmt.Sprintln to construct and log a message. +func (s *SugaredLogger) Infoln(args ...interface{}) { + s.logln(InfoLevel, args, nil) +} + +// Warnln uses fmt.Sprintln to construct and log a message. +func (s *SugaredLogger) Warnln(args ...interface{}) { + s.logln(WarnLevel, args, nil) +} + +// Errorln uses fmt.Sprintln to construct and log a message. +func (s *SugaredLogger) Errorln(args ...interface{}) { + s.logln(ErrorLevel, args, nil) +} + +// DPanicln uses fmt.Sprintln to construct and log a message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanicln(args ...interface{}) { + s.logln(DPanicLevel, args, nil) +} + +// Panicln uses fmt.Sprintln to construct and log a message, then panics. +func (s *SugaredLogger) Panicln(args ...interface{}) { + s.logln(PanicLevel, args, nil) +} + +// Fatalln uses fmt.Sprintln to construct and log a message, then calls os.Exit. 
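+//
+// As with the other ...ln methods, fmt.Sprintln inserts spaces between
+// operands, and the trailing newline is stripped before the message is
+// logged. For example, a minimal sketch (err is assumed to be an error value
+// already in scope):
+//
+//	sugar.Fatalln("failed to start server:", err)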
+func (s *SugaredLogger) Fatalln(args ...interface{}) { + s.logln(FatalLevel, args, nil) +} + // Sync flushes any buffered log entries. func (s *SugaredLogger) Sync() error { return s.base.Sync() } +// log message with Sprint, Sprintf, or neither. func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { // If logging at this level is completely disabled, skip the overhead of // string formatting. @@ -228,6 +296,18 @@ func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interf } } +// logln message with Sprintln +func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) { + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + msg := getMessageln(fmtArgs) + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + // getMessage format with Sprint, Sprintf, or neither. func getMessage(template string, fmtArgs []interface{}) string { if len(fmtArgs) == 0 { @@ -246,15 +326,24 @@ func getMessage(template string, fmtArgs []interface{}) string { return fmt.Sprint(fmtArgs...) } +// getMessageln format with Sprintln. +func getMessageln(fmtArgs []interface{}) string { + msg := fmt.Sprintln(fmtArgs...) + return msg[:len(msg)-1] +} + func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { if len(args) == 0 { return nil } - // Allocate enough space for the worst case; if users pass only structured - // fields, we shouldn't penalize them with extra allocations. - fields := make([]Field, 0, len(args)) - var invalid invalidPairs + var ( + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields = make([]Field, 0, len(args)) + invalid invalidPairs + seenError bool + ) for i := 0; i < len(args); { // This is a strongly-typed field. Consume it and move on. @@ -264,6 +353,18 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { continue } + // If it is an error, consume it and move on. + if err, ok := args[i].(error); ok { + if !seenError { + seenError = true + fields = append(fields, Error(err)) + } else { + s.base.Error(_multipleErrMsg, Error(err)) + } + i++ + continue + } + // Make sure this element isn't a dangling key. if i == len(args)-1 { s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go index 86a709ab0be2..f08728e1ec00 100644 --- a/vendor/go.uber.org/zap/writer.go +++ b/vendor/go.uber.org/zap/writer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -23,7 +23,6 @@ package zap import ( "fmt" "io" - "io/ioutil" "go.uber.org/zap/zapcore" @@ -69,9 +68,9 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { var openErr error for _, path := range paths { - sink, err := newSink(path) + sink, err := _sinkRegistry.newSink(path) if err != nil { - openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) + openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err)) continue } writers = append(writers, sink) @@ -79,7 +78,7 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { } if openErr != nil { close() - return writers, nil, openErr + return nil, nil, openErr } return writers, close, nil @@ -93,7 +92,7 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { // using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { if len(writers) == 0 { - return zapcore.AddSync(ioutil.Discard) + return zapcore.AddSync(io.Discard) } return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) } diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go index ef2f7d9637bc..a40e93b3ec8f 100644 --- a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -43,6 +43,37 @@ const ( // // BufferedWriteSyncer is safe for concurrent use. You don't need to use // zapcore.Lock for WriteSyncers with BufferedWriteSyncer. +// +// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log +// destination (*os.File is a valid WriteSyncer), wrap it with +// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the +// object. +// +// func main() { +// ws := ... // your log destination +// bws := &zapcore.BufferedWriteSyncer{WS: ws} +// defer bws.Stop() +// +// // ... +// core := zapcore.NewCore(enc, bws, lvl) +// logger := zap.New(core) +// +// // ... +// } +// +// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs, +// waiting at most 30 seconds between flushes. +// You can customize these parameters by setting the Size or FlushInterval +// fields. +// For example, the following buffers up to 512 kB of logs before flushing them +// to Stderr, with a maximum of one minute between each flush. +// +// ws := &BufferedWriteSyncer{ +// WS: os.Stderr, +// Size: 512 * 1024, // 512 kB +// FlushInterval: time.Minute, +// } +// defer ws.Stop() type BufferedWriteSyncer struct { // WS is the WriteSyncer around which BufferedWriteSyncer will buffer // writes. diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go index d2ea95b394bc..422fd82a6b0f 100644 --- a/vendor/go.uber.org/zap/zapcore/clock.go +++ b/vendor/go.uber.org/zap/zapcore/clock.go @@ -20,9 +20,7 @@ package zapcore -import ( - "time" -) +import "time" // DefaultClock is the default clock used by Zap in operations that require // time. This clock uses the system clock for all operations. 
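A brief usage sketch of how the reworked open/newSink path behaves after this change; the log path and encoder configuration below are illustrative assumptions. zap.Open routes each path through the sink registry, so "stdout", "stderr", plain file paths (including absolute Windows paths such as c:\log.txt), and registered URL schemes are all accepted:

	ws, cleanup, err := zap.Open("stderr", "/tmp/app.log")
	if err != nil {
		panic(err)
	}
	defer cleanup()

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws,
		zapcore.InfoLevel,
	)
	logger := zap.New(core)
	defer logger.Sync()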
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index 2307af404c5e..1aa5dc364673 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -125,11 +125,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, line.AppendString(ent.Stack) } - if c.LineEnding != "" { - line.AppendString(c.LineEnding) - } else { - line.AppendString(DefaultLineEnding) - } + line.AppendString(c.LineEnding) return line, nil } diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go index a1ef8b034bb4..9dfd64051f04 100644 --- a/vendor/go.uber.org/zap/zapcore/core.go +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -69,6 +69,15 @@ type ioCore struct { out WriteSyncer } +var ( + _ Core = (*ioCore)(nil) + _ leveledEnabler = (*ioCore)(nil) +) + +func (c *ioCore) Level() Level { + return LevelOf(c.LevelEnabler) +} + func (c *ioCore) With(fields []Field) Core { clone := c.clone() addFields(clone.enc, fields) diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go index 6601ca166c64..5769ff3e4e56 100644 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -22,6 +22,7 @@ package zapcore import ( "encoding/json" + "io" "time" "go.uber.org/zap/buffer" @@ -187,10 +188,13 @@ func (e *TimeEncoder) UnmarshalText(text []byte) error { // UnmarshalYAML unmarshals YAML to a TimeEncoder. // If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout. -// timeEncoder: -// layout: 06/01/02 03:04pm +// +// timeEncoder: +// layout: 06/01/02 03:04pm +// // If value is string, it uses UnmarshalText. -// timeEncoder: iso8601 +// +// timeEncoder: iso8601 func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error { var o struct { Layout string `json:"layout" yaml:"layout"` @@ -312,14 +316,15 @@ func (e *NameEncoder) UnmarshalText(text []byte) error { type EncoderConfig struct { // Set the keys used for each log entry. If any key is empty, that portion // of the entry is omitted. - MessageKey string `json:"messageKey" yaml:"messageKey"` - LevelKey string `json:"levelKey" yaml:"levelKey"` - TimeKey string `json:"timeKey" yaml:"timeKey"` - NameKey string `json:"nameKey" yaml:"nameKey"` - CallerKey string `json:"callerKey" yaml:"callerKey"` - FunctionKey string `json:"functionKey" yaml:"functionKey"` - StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` - LineEnding string `json:"lineEnding" yaml:"lineEnding"` + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + FunctionKey string `json:"functionKey" yaml:"functionKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` // Configure the primitive representations of common complex types. For // example, some users may want all time.Times serialized as floating-point // seconds since epoch, while others may prefer ISO8601 strings. @@ -330,6 +335,9 @@ type EncoderConfig struct { // Unlike the other primitive type encoders, EncodeName is optional. The // zero value falls back to FullNameEncoder. 
EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` + // Configure the encoder for interface{} type objects. + // If not provided, objects are encoded using json.Encoder + NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"` // Configures the field separator used by the console encoder. Defaults // to tab. ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"` diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 0885505b75bc..9d326e95ea25 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -27,10 +27,9 @@ import ( "sync" "time" + "go.uber.org/multierr" "go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/internal/exit" - - "go.uber.org/multierr" ) var ( @@ -152,6 +151,27 @@ type Entry struct { Stack string } +// CheckWriteHook is a custom action that may be executed after an entry is +// written. +// +// Register one on a CheckedEntry with the After method. +// +// if ce := logger.Check(...); ce != nil { +// ce = ce.After(hook) +// ce.Write(...) +// } +// +// You can configure the hook for Fatal log statements at the logger level with +// the zap.WithFatalHook option. +type CheckWriteHook interface { + // OnWrite is invoked with the CheckedEntry that was written and a list + // of fields added with that entry. + // + // The list of fields DOES NOT include fields that were already added + // to the logger with the With method. + OnWrite(*CheckedEntry, []Field) +} + // CheckWriteAction indicates what action to take after a log entry is // processed. Actions are ordered in increasing severity. type CheckWriteAction uint8 @@ -164,21 +184,36 @@ const ( WriteThenGoexit // WriteThenPanic causes a panic after Write. WriteThenPanic - // WriteThenFatal causes a fatal os.Exit after Write. + // WriteThenFatal causes an os.Exit(1) after Write. WriteThenFatal ) +// OnWrite implements the OnWrite method to keep CheckWriteAction compatible +// with the new CheckWriteHook interface which deprecates CheckWriteAction. +func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) { + switch a { + case WriteThenGoexit: + runtime.Goexit() + case WriteThenPanic: + panic(ce.Message) + case WriteThenFatal: + exit.With(1) + } +} + +var _ CheckWriteHook = CheckWriteAction(0) + // CheckedEntry is an Entry together with a collection of Cores that have // already agreed to log it. // -// CheckedEntry references should be created by calling AddCore or Should on a +// CheckedEntry references should be created by calling AddCore or After on a // nil *CheckedEntry. References are returned to a pool after Write, and MUST // NOT be retained after calling their Write method. 
type CheckedEntry struct { Entry ErrorOutput WriteSyncer dirty bool // best-effort detection of pool misuse - should CheckWriteAction + after CheckWriteHook cores []Core } @@ -186,7 +221,7 @@ func (ce *CheckedEntry) reset() { ce.Entry = Entry{} ce.ErrorOutput = nil ce.dirty = false - ce.should = WriteThenNoop + ce.after = nil for i := range ce.cores { // don't keep references to cores ce.cores[i] = nil @@ -224,17 +259,11 @@ func (ce *CheckedEntry) Write(fields ...Field) { ce.ErrorOutput.Sync() } - should, msg := ce.should, ce.Message - putCheckedEntry(ce) - - switch should { - case WriteThenPanic: - panic(msg) - case WriteThenFatal: - exit.Exit() - case WriteThenGoexit: - runtime.Goexit() + hook := ce.after + if hook != nil { + hook.OnWrite(ce, fields) } + putCheckedEntry(ce) } // AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be @@ -252,11 +281,20 @@ func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { // Should sets this CheckedEntry's CheckWriteAction, which controls whether a // Core will panic or fatal after writing this log entry. Like AddCore, it's // safe to call on nil CheckedEntry references. +// +// Deprecated: Use [CheckedEntry.After] instead. func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + return ce.After(ent, should) +} + +// After sets this CheckEntry's CheckWriteHook, which will be called after this +// log entry has been written. It's safe to call this on nil CheckedEntry +// references. +func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry { if ce == nil { ce = getCheckedEntry() ce.Entry = ent } - ce.should = should + ce.after = hook return ce } diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go index 74919b0ccb1b..06359907af41 100644 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -36,13 +36,13 @@ import ( // causer (from github.com/pkg/errors), a ${key}Causes field is added with an // array of objects containing the errors this error was comprised of. // -// { -// "error": err.Error(), -// "errorVerbose": fmt.Sprintf("%+v", err), -// "errorCauses": [ -// ... -// ], -// } +// { +// "error": err.Error(), +// "errorVerbose": fmt.Sprintf("%+v", err), +// "errorCauses": [ +// ... +// ], +// } func encodeError(key string, err error, enc ObjectEncoder) (retErr error) { // Try to capture panics (from nil references or otherwise) when calling // the Error() method diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go index 5db4afb302b3..198def9917ce 100644 --- a/vendor/go.uber.org/zap/zapcore/hook.go +++ b/vendor/go.uber.org/zap/zapcore/hook.go @@ -27,6 +27,11 @@ type hooked struct { funcs []func(Entry) error } +var ( + _ Core = (*hooked)(nil) + _ leveledEnabler = (*hooked)(nil) +) + // RegisterHooks wraps a Core and runs a collection of user-defined callback // hooks each time a message is logged. Execution of the callbacks is blocking. // @@ -40,6 +45,10 @@ func RegisterHooks(core Core, hooks ...func(Entry) error) Core { } } +func (h *hooked) Level() Level { + return LevelOf(h.Core) +} + func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { // Let the wrapped Core decide whether to log this message or not. 
This // also gives the downstream a chance to register itself directly with the diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go index 5a1749261ab2..7a11237ae976 100644 --- a/vendor/go.uber.org/zap/zapcore/increase_level.go +++ b/vendor/go.uber.org/zap/zapcore/increase_level.go @@ -27,6 +27,11 @@ type levelFilterCore struct { level LevelEnabler } +var ( + _ Core = (*levelFilterCore)(nil) + _ leveledEnabler = (*levelFilterCore)(nil) +) + // NewIncreaseLevelCore creates a core that can be used to increase the level of // an existing Core. It cannot be used to decrease the logging level, as it acts // as a filter before calling the underlying core. If level decreases the log level, @@ -45,6 +50,10 @@ func (c *levelFilterCore) Enabled(lvl Level) bool { return c.level.Enabled(lvl) } +func (c *levelFilterCore) Level() Level { + return LevelOf(c.level) +} + func (c *levelFilterCore) With(fields []Field) Core { return &levelFilterCore{c.core.With(fields), c.level} } diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index af220d9b4d79..3921c5cd333e 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -22,7 +22,6 @@ package zapcore import ( "encoding/base64" - "encoding/json" "math" "sync" "time" @@ -64,7 +63,7 @@ type jsonEncoder struct { // for encoding generic values by reflection reflectBuf *buffer.Buffer - reflectEnc *json.Encoder + reflectEnc ReflectedEncoder } // NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder @@ -72,7 +71,9 @@ type jsonEncoder struct { // // Note that the encoder doesn't deduplicate keys, so it's possible to produce // a message like -// {"foo":"bar","foo":"baz"} +// +// {"foo":"bar","foo":"baz"} +// // This is permitted by the JSON specification, but not encouraged. Many // libraries will ignore duplicate key-value pairs (typically keeping the last // pair) when unmarshaling, but users should attempt to avoid adding duplicate @@ -82,6 +83,17 @@ func NewJSONEncoder(cfg EncoderConfig) Encoder { } func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + if cfg.SkipLineEnding { + cfg.LineEnding = "" + } else if cfg.LineEnding == "" { + cfg.LineEnding = DefaultLineEnding + } + + // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default + if cfg.NewReflectedEncoder == nil { + cfg.NewReflectedEncoder = defaultReflectedEncoder + } + return &jsonEncoder{ EncoderConfig: &cfg, buf: bufferpool.Get(), @@ -118,6 +130,11 @@ func (enc *jsonEncoder) AddComplex128(key string, val complex128) { enc.AppendComplex128(val) } +func (enc *jsonEncoder) AddComplex64(key string, val complex64) { + enc.addKey(key) + enc.AppendComplex64(val) +} + func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { enc.addKey(key) enc.AppendDuration(val) @@ -141,10 +158,7 @@ func (enc *jsonEncoder) AddInt64(key string, val int64) { func (enc *jsonEncoder) resetReflectBuf() { if enc.reflectBuf == nil { enc.reflectBuf = bufferpool.Get() - enc.reflectEnc = json.NewEncoder(enc.reflectBuf) - - // For consistency with our custom JSON encoder. 
- enc.reflectEnc.SetEscapeHTML(false) + enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf) } else { enc.reflectBuf.Reset() } @@ -206,10 +220,16 @@ func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { } func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + // Close ONLY new openNamespaces that are created during + // AppendObject(). + old := enc.openNamespaces + enc.openNamespaces = 0 enc.addElementSeparator() enc.buf.AppendByte('{') err := obj.MarshalLogObject(enc) enc.buf.AppendByte('}') + enc.closeOpenNamespaces() + enc.openNamespaces = old return err } @@ -225,20 +245,23 @@ func (enc *jsonEncoder) AppendByteString(val []byte) { enc.buf.AppendByte('"') } -func (enc *jsonEncoder) AppendComplex128(val complex128) { +// appendComplex appends the encoded form of the provided complex128 value. +// precision specifies the encoding precision for the real and imaginary +// components of the complex number. +func (enc *jsonEncoder) appendComplex(val complex128, precision int) { enc.addElementSeparator() // Cast to a platform-independent, fixed-size type. r, i := float64(real(val)), float64(imag(val)) enc.buf.AppendByte('"') // Because we're always in a quoted string, we can use strconv without // special-casing NaN and +/-Inf. - enc.buf.AppendFloat(r, 64) + enc.buf.AppendFloat(r, precision) // If imaginary part is less than 0, minus (-) sign is added by default // by AppendFloat. if i >= 0 { enc.buf.AppendByte('+') } - enc.buf.AppendFloat(i, 64) + enc.buf.AppendFloat(i, precision) enc.buf.AppendByte('i') enc.buf.AppendByte('"') } @@ -301,28 +324,28 @@ func (enc *jsonEncoder) AppendUint64(val uint64) { enc.buf.AppendUint(val) } -func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } -func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } -func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } -func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } -func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, 
int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) } +func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) } +func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } func (enc *jsonEncoder) Clone() Encoder { clone := enc.clone() @@ -343,7 +366,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final := enc.clone() final.buf.AppendByte('{') - if final.LevelKey != "" { + if final.LevelKey != "" && final.EncodeLevel != nil { final.addKey(final.LevelKey) cur := final.buf.Len() final.EncodeLevel(ent.Level, final) @@ -404,11 +427,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AddString(final.StacktraceKey, ent.Stack) } final.buf.AppendByte('}') - if final.LineEnding != "" { - final.buf.AppendString(final.LineEnding) - } else { - final.buf.AppendString(DefaultLineEnding) - } + final.buf.AppendString(final.LineEnding) ret := final.buf putJSONEncoder(final) @@ -423,6 +442,7 @@ func (enc *jsonEncoder) closeOpenNamespaces() { for i := 0; i < enc.openNamespaces; i++ { enc.buf.AppendByte('}') } + enc.openNamespaces = 0 } func (enc *jsonEncoder) addKey(key string) { diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go index e575c9f432c2..e01a2413166d 100644 --- a/vendor/go.uber.org/zap/zapcore/level.go +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -53,8 +53,62 @@ const ( _minLevel = DebugLevel _maxLevel = FatalLevel + + // InvalidLevel is an invalid value for Level. + // + // Core implementations may panic if they see messages of this level. + InvalidLevel = _maxLevel + 1 ) +// ParseLevel parses a level based on the lower-case or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. 
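+//
+// For example, a minimal sketch (the LOG_LEVEL environment variable name is
+// an assumption, not part of zap):
+//
+//	lvl, err := zapcore.ParseLevel(os.Getenv("LOG_LEVEL"))
+//	if err != nil {
+//		lvl = zapcore.InfoLevel // fall back to a sensible default
+//	}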
+func ParseLevel(text string) (Level, error) { + var level Level + err := level.UnmarshalText([]byte(text)) + return level, err +} + +type leveledEnabler interface { + LevelEnabler + + Level() Level +} + +// LevelOf reports the minimum enabled log level for the given LevelEnabler +// from Zap's supported log levels, or [InvalidLevel] if none of them are +// enabled. +// +// A LevelEnabler may implement a 'Level() Level' method to override the +// behavior of this function. +// +// func (c *core) Level() Level { +// return c.currentLevel +// } +// +// It is recommended that [Core] implementations that wrap other cores use +// LevelOf to retrieve the level of the wrapped core. For example, +// +// func (c *coreWrapper) Level() Level { +// return zapcore.LevelOf(c.wrappedCore) +// } +func LevelOf(enab LevelEnabler) Level { + if lvler, ok := enab.(leveledEnabler); ok { + return lvler.Level() + } + + for lvl := _minLevel; lvl <= _maxLevel; lvl++ { + if enab.Enabled(lvl) { + return lvl + } + } + + return InvalidLevel +} + // String returns a lower-case ASCII representation of the log level. func (l Level) String() string { switch l { diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go similarity index 64% rename from vendor/go.uber.org/zap/global_prego112.go rename to vendor/go.uber.org/zap/zapcore/reflected_encoder.go index d3ab9af933ee..8746360eca6f 100644 --- a/vendor/go.uber.org/zap/global_prego112.go +++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Uber Technologies, Inc. +// Copyright (c) 2016 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -18,9 +18,24 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -// See #682 for more information. -// +build !go1.12 +package zapcore -package zap +import ( + "encoding/json" + "io" +) -const _stdLogDefaultDepth = 2 +// ReflectedEncoder serializes log fields that can't be serialized with Zap's +// JSON encoder. These have the ReflectType field type. +// Use EncoderConfig.NewReflectedEncoder to set this. +type ReflectedEncoder interface { + // Encode encodes and writes to the underlying data stream. + Encode(interface{}) error +} + +func defaultReflectedEncoder(w io.Writer) ReflectedEncoder { + enc := json.NewEncoder(w) + // For consistency with our custom JSON encoder. + enc.SetEscapeHTML(false) + return enc +} diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go index 31ed96e129fb..dc518055a417 100644 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -113,12 +113,12 @@ func nopSamplingHook(Entry, SamplingDecision) {} // This hook may be used to get visibility into the performance of the sampler. // For example, use it to track metrics of dropped versus sampled logs. 
// -// var dropped atomic.Int64 -// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { -// if dec&zapcore.LogDropped > 0 { -// dropped.Inc() -// } -// }) +// var dropped atomic.Int64 +// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { +// if dec&zapcore.LogDropped > 0 { +// dropped.Inc() +// } +// }) func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { return optionFunc(func(s *sampler) { s.hook = hook @@ -133,10 +133,21 @@ func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { // each tick. If more Entries with the same level and message are seen during // the same interval, every Mth message is logged and the rest are dropped. // +// For example, +// +// core = NewSamplerWithOptions(core, time.Second, 10, 5) +// +// This will log the first 10 log entries with the same level and message +// in a one second interval as-is. Following that, it will allow through +// every 5th log entry with the same level and message in that interval. +// +// If thereafter is zero, the Core will drop all log entries after the first N +// in that interval. +// // Sampler can be configured to report sampling decisions with the SamplerHook // option. // -// Keep in mind that zap's sampling implementation is optimized for speed over +// Keep in mind that Zap's sampling implementation is optimized for speed over // absolute precision; under load, each tick may be slightly over- or // under-sampled. func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { @@ -164,6 +175,11 @@ type sampler struct { hook func(Entry, SamplingDecision) } +var ( + _ Core = (*sampler)(nil) + _ leveledEnabler = (*sampler)(nil) +) + // NewSampler creates a Core that samples incoming entries, which // caps the CPU and I/O load of logging while attempting to preserve a // representative subset of your logs. @@ -181,6 +197,10 @@ func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { return NewSamplerWithOptions(core, tick, first, thereafter) } +func (s *sampler) Level() Level { + return LevelOf(s.Core) +} + func (s *sampler) With(fields []Field) Core { return &sampler{ Core: s.Core.With(fields), @@ -200,7 +220,7 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { if ent.Level >= _minLevel && ent.Level <= _maxLevel { counter := s.counts.get(ent.Level, ent.Message) n := counter.IncCheckReset(ent.Time, s.tick) - if n > s.first && (n-s.first)%s.thereafter != 0 { + if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) { s.hook(ent, LogDropped) return ce } diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go index 07a32eef9a45..9bb32f055764 100644 --- a/vendor/go.uber.org/zap/zapcore/tee.go +++ b/vendor/go.uber.org/zap/zapcore/tee.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -24,6 +24,11 @@ import "go.uber.org/multierr" type multiCore []Core +var ( + _ leveledEnabler = multiCore(nil) + _ Core = multiCore(nil) +) + // NewTee creates a Core that duplicates log entries into two or more // underlying Cores. 
// @@ -48,6 +53,16 @@ func (mc multiCore) With(fields []Field) Core { return clone } +func (mc multiCore) Level() Level { + minLvl := _maxLevel // mc is never empty + for i := range mc { + if lvl := LevelOf(mc[i]); lvl < minLvl { + minLvl = lvl + } + } + return minLvl +} + func (mc multiCore) Enabled(lvl Level) bool { for i := range mc { if mc[i].Enabled(lvl) { diff --git a/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go index 356e12741e09..71ca30b51135 100644 --- a/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go +++ b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go @@ -61,6 +61,7 @@ func (f optionFunc) apply(log *Logger) { // WithDebug configures a Logger to print at zap's DebugLevel instead of // InfoLevel. // It only affects the Printf, Println and Print methods, which are only used in the gRPC v1 grpclog.Logger API. +// // Deprecated: use grpclog.SetLoggerV2() for v2 API. func WithDebug() Option { return optionFunc(func(logger *Logger) { @@ -146,19 +147,22 @@ type Logger struct { } // Print implements grpclog.Logger. -// Deprecated: use Info(). +// +// Deprecated: use [Logger.Info]. func (l *Logger) Print(args ...interface{}) { l.print.Print(args...) } // Printf implements grpclog.Logger. -// Deprecated: use Infof(). +// +// Deprecated: use [Logger.Infof]. func (l *Logger) Printf(format string, args ...interface{}) { l.print.Printf(format, args...) } // Println implements grpclog.Logger. -// Deprecated: use Info(). +// +// Deprecated: use [Logger.Info]. func (l *Logger) Println(args ...interface{}) { l.print.Println(args...) } diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS deleted file mode 100644 index 2b00ddba0dfe..000000000000 --- a/vendor/golang.org/x/crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS deleted file mode 100644 index 1fbd3e976faf..000000000000 --- a/vendor/golang.org/x/crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go index b423feaea9f2..29f0a2de4514 100644 --- a/vendor/golang.org/x/crypto/argon2/argon2.go +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -11,8 +11,7 @@ // If you aren't sure which function you need, use Argon2id (IDKey) and // the parameter recommendations for your scenario. // -// -// Argon2i +// # Argon2i // // Argon2i (implemented by Key) is the side-channel resistant version of Argon2. // It uses data-independent memory access, which is preferred for password @@ -21,8 +20,7 @@ // parameters (taken from [2]) for non-interactive operations are time=3 and to // use the maximum available memory. // -// -// Argon2id +// # Argon2id // // Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining // Argon2i and Argon2d. It uses data-independent memory access for the first @@ -59,7 +57,7 @@ const ( // For example, you can get a derived key for e.g. 
AES-256 (which needs a // 32-byte key) by doing: // -// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) +// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) // // The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. // If using that amount of memory (32 MB) is not possible in some contexts then @@ -83,7 +81,7 @@ func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint3 // For example, you can get a derived key for e.g. AES-256 (which needs a // 32-byte key) by doing: // -// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) +// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) // // The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. // If using that amount of memory (64 MB) is not possible in some contexts then diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go index aeb73f81a14c..addf56b435a3 100644 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -50,7 +50,7 @@ func (ih InvalidHashPrefixError) Error() string { type InvalidCostError int func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), MinCost, MaxCost) } const ( diff --git a/vendor/golang.org/x/crypto/md4/md4block.go b/vendor/golang.org/x/crypto/md4/md4block.go index 3fed475f3f60..5ea1ba966ea4 100644 --- a/vendor/golang.org/x/crypto/md4/md4block.go +++ b/vendor/golang.org/x/crypto/md4/md4block.go @@ -8,9 +8,11 @@ package md4 -var shift1 = []uint{3, 7, 11, 19} -var shift2 = []uint{3, 5, 9, 13} -var shift3 = []uint{3, 9, 11, 15} +import "math/bits" + +var shift1 = []int{3, 7, 11, 19} +var shift2 = []int{3, 5, 9, 13} +var shift3 = []int{3, 9, 11, 15} var xIndex2 = []uint{0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15} var xIndex3 = []uint{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15} @@ -48,7 +50,7 @@ func _Block(dig *digest, p []byte) int { s := shift1[i%4] f := ((c ^ d) & b) ^ d a += f + X[x] - a = a<>(32-s) + a = bits.RotateLeft32(a, s) a, b, c, d = d, a, b, c } @@ -58,7 +60,7 @@ func _Block(dig *digest, p []byte) int { s := shift2[i%4] g := (b & c) | (b & d) | (c & d) a += g + X[x] + 0x5a827999 - a = a<>(32-s) + a = bits.RotateLeft32(a, s) a, b, c, d = d, a, b, c } @@ -68,7 +70,7 @@ func _Block(dig *digest, p []byte) int { s := shift3[i%4] h := b ^ c ^ d a += h + X[x] + 0x6ed9eba1 - a = a<>(32-s) + a = bits.RotateLeft32(a, s) a, b, c, d = d, a, b, c } diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go index 593f6530084f..904b57e01d7a 100644 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -32,7 +32,7 @@ import ( // can get a derived key for e.g. AES-256 (which needs a 32-byte key) by // doing: // -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) // // Remember to get a good random salt. At least 8 bytes is recommended by the // RFC. 
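Since the documentation above stresses using a good random salt, here is a small self-contained sketch of pbkdf2.Key with a freshly generated salt; the 16-byte salt length and SHA-256 hash are illustrative choices, not requirements of the package:

	package main

	import (
		"crypto/rand"
		"crypto/sha256"
		"fmt"

		"golang.org/x/crypto/pbkdf2"
	)

	func main() {
		// Generate a random 16-byte salt, as the package docs recommend.
		salt := make([]byte, 16)
		if _, err := rand.Read(salt); err != nil {
			panic(err)
		}
		// Derive a 32-byte key, e.g. for AES-256.
		dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha256.New)
		fmt.Printf("derived key: %x\n", dk)
	}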
diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go index 484ca51b7154..96f4a1a56eca 100644 --- a/vendor/golang.org/x/crypto/pkcs12/crypto.go +++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go @@ -117,7 +117,7 @@ func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) } ps := decrypted[len(decrypted)-psLen:] decrypted = decrypted[:len(decrypted)-psLen] - if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 { + if !bytes.Equal(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) { return nil, ErrDecryption } diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go index 7499e3fb69d2..05de9cc2cdcc 100644 --- a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go @@ -14,6 +14,7 @@ package rc2 import ( "crypto/cipher" "encoding/binary" + "math/bits" ) // The rc2 block size in bytes @@ -80,10 +81,6 @@ func expandKey(key []byte, t1 int) [64]uint16 { return k } -func rotl16(x uint16, b uint) uint16 { - return (x >> (16 - b)) | (x << b) -} - func (c *rc2Cipher) Encrypt(dst, src []byte) { r0 := binary.LittleEndian.Uint16(src[0:]) @@ -96,22 +93,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) { for j <= 16 { // mix r0 r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) + r0 = bits.RotateLeft16(r0, 1) j++ // mix r1 r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) + r1 = bits.RotateLeft16(r1, 2) j++ // mix r2 r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) + r2 = bits.RotateLeft16(r2, 3) j++ // mix r3 r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) + r3 = bits.RotateLeft16(r3, 5) j++ } @@ -124,22 +121,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) { for j <= 40 { // mix r0 r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) + r0 = bits.RotateLeft16(r0, 1) j++ // mix r1 r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) + r1 = bits.RotateLeft16(r1, 2) j++ // mix r2 r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) + r2 = bits.RotateLeft16(r2, 3) j++ // mix r3 r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) + r3 = bits.RotateLeft16(r3, 5) j++ } @@ -152,22 +149,22 @@ func (c *rc2Cipher) Encrypt(dst, src []byte) { for j <= 60 { // mix r0 r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) + r0 = bits.RotateLeft16(r0, 1) j++ // mix r1 r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) + r1 = bits.RotateLeft16(r1, 2) j++ // mix r2 r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) + r2 = bits.RotateLeft16(r2, 3) j++ // mix r3 r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) + r3 = bits.RotateLeft16(r3, 5) j++ } @@ -188,22 +185,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) { for j >= 44 { // unmix r3 - r3 = rotl16(r3, 16-5) + r3 = bits.RotateLeft16(r3, 16-5) r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) j-- // unmix r2 - r2 = rotl16(r2, 16-3) + r2 = bits.RotateLeft16(r2, 16-3) r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) j-- // unmix r1 - r1 = rotl16(r1, 16-2) + r1 = bits.RotateLeft16(r1, 16-2) r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) j-- // unmix r0 - r0 = rotl16(r0, 16-1) + r0 = bits.RotateLeft16(r0, 16-1) r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) j-- } @@ -215,22 +212,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) { for j >= 20 { // unmix r3 - r3 = rotl16(r3, 16-5) + r3 = bits.RotateLeft16(r3, 16-5) r3 = r3 
- c.k[j] - (r2 & r1) - ((^r2) & r0) j-- // unmix r2 - r2 = rotl16(r2, 16-3) + r2 = bits.RotateLeft16(r2, 16-3) r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) j-- // unmix r1 - r1 = rotl16(r1, 16-2) + r1 = bits.RotateLeft16(r1, 16-2) r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) j-- // unmix r0 - r0 = rotl16(r0, 16-1) + r0 = bits.RotateLeft16(r0, 16-1) r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) j-- @@ -243,22 +240,22 @@ func (c *rc2Cipher) Decrypt(dst, src []byte) { for j >= 0 { // unmix r3 - r3 = rotl16(r3, 16-5) + r3 = bits.RotateLeft16(r3, 16-5) r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) j-- // unmix r2 - r2 = rotl16(r2, 16-3) + r2 = bits.RotateLeft16(r2, 16-3) r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) j-- // unmix r1 - r1 = rotl16(r1, 16-2) + r1 = bits.RotateLeft16(r1, 16-2) r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) j-- // unmix r0 - r0 = rotl16(r0, 16-1) + r0 = bits.RotateLeft16(r0, 16-1) r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) j-- diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go index bbe4494c6c98..c971a99fa679 100644 --- a/vendor/golang.org/x/crypto/scrypt/scrypt.go +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -186,7 +186,7 @@ func smix(b []byte, r, N int, v, xy []uint32) { // For example, you can get a derived key for e.g. AES-256 (which needs a // 32-byte key) by doing: // -// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) +// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) // // The recommended parameters for interactive logins as of 2017 are N=32768, r=8 // and p=1. The parameters N, r, and p should be increased as memory latency and diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go index c2fef30afff3..decd8cf9bf74 100644 --- a/vendor/golang.org/x/crypto/sha3/doc.go +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -8,8 +8,7 @@ // Both types of hash function use the "sponge" construction and the Keccak // permutation. For a detailed specification see http://keccak.noekeon.org/ // -// -// Guidance +// # Guidance // // If you aren't sure what function you need, use SHAKE256 with at least 64 // bytes of output. The SHAKE instances are faster than the SHA3 instances; @@ -19,8 +18,7 @@ // secret key to the input, hash with SHAKE256 and read at least 32 bytes of // output. // -// -// Security strengths +// # Security strengths // // The SHA3-x (x equals 224, 256, 384, or 512) functions have a security // strength against preimage attacks of x bits. Since they only produce "x" @@ -31,8 +29,7 @@ // is used. Requesting more than 64 or 32 bytes of output, respectively, does // not increase the collision-resistance of the SHAKE functions. // -// -// The sponge construction +// # The sponge construction // // A sponge builds a pseudo-random function from a public pseudo-random // permutation, by applying the permutation to a state of "rate + capacity" @@ -50,8 +47,7 @@ // Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means // that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. // -// -// Recommendations +// # Recommendations // // The SHAKE functions are recommended for most new uses. They can produce // output of arbitrary length. 
SHAKE256, with an output length of at least diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go index 0f4ae8bacff5..e5faa375c04e 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf.go +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -7,6 +7,8 @@ package sha3 +import "math/bits" + // rc stores the round constants for use in the ι step. var rc = [24]uint64{ 0x0000000000000001, @@ -60,13 +62,13 @@ func keccakF1600(a *[25]uint64) { bc0 = a[0] ^ d0 t = a[6] ^ d1 - bc1 = t<<44 | t>>(64-44) + bc1 = bits.RotateLeft64(t, 44) t = a[12] ^ d2 - bc2 = t<<43 | t>>(64-43) + bc2 = bits.RotateLeft64(t, 43) t = a[18] ^ d3 - bc3 = t<<21 | t>>(64-21) + bc3 = bits.RotateLeft64(t, 21) t = a[24] ^ d4 - bc4 = t<<14 | t>>(64-14) + bc4 = bits.RotateLeft64(t, 14) a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] a[6] = bc1 ^ (bc3 &^ bc2) a[12] = bc2 ^ (bc4 &^ bc3) @@ -74,15 +76,15 @@ func keccakF1600(a *[25]uint64) { a[24] = bc4 ^ (bc1 &^ bc0) t = a[10] ^ d0 - bc2 = t<<3 | t>>(64-3) + bc2 = bits.RotateLeft64(t, 3) t = a[16] ^ d1 - bc3 = t<<45 | t>>(64-45) + bc3 = bits.RotateLeft64(t, 45) t = a[22] ^ d2 - bc4 = t<<61 | t>>(64-61) + bc4 = bits.RotateLeft64(t, 61) t = a[3] ^ d3 - bc0 = t<<28 | t>>(64-28) + bc0 = bits.RotateLeft64(t, 28) t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) + bc1 = bits.RotateLeft64(t, 20) a[10] = bc0 ^ (bc2 &^ bc1) a[16] = bc1 ^ (bc3 &^ bc2) a[22] = bc2 ^ (bc4 &^ bc3) @@ -90,15 +92,15 @@ func keccakF1600(a *[25]uint64) { a[9] = bc4 ^ (bc1 &^ bc0) t = a[20] ^ d0 - bc4 = t<<18 | t>>(64-18) + bc4 = bits.RotateLeft64(t, 18) t = a[1] ^ d1 - bc0 = t<<1 | t>>(64-1) + bc0 = bits.RotateLeft64(t, 1) t = a[7] ^ d2 - bc1 = t<<6 | t>>(64-6) + bc1 = bits.RotateLeft64(t, 6) t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) + bc2 = bits.RotateLeft64(t, 25) t = a[19] ^ d4 - bc3 = t<<8 | t>>(64-8) + bc3 = bits.RotateLeft64(t, 8) a[20] = bc0 ^ (bc2 &^ bc1) a[1] = bc1 ^ (bc3 &^ bc2) a[7] = bc2 ^ (bc4 &^ bc3) @@ -106,15 +108,15 @@ func keccakF1600(a *[25]uint64) { a[19] = bc4 ^ (bc1 &^ bc0) t = a[5] ^ d0 - bc1 = t<<36 | t>>(64-36) + bc1 = bits.RotateLeft64(t, 36) t = a[11] ^ d1 - bc2 = t<<10 | t>>(64-10) + bc2 = bits.RotateLeft64(t, 10) t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) + bc3 = bits.RotateLeft64(t, 15) t = a[23] ^ d3 - bc4 = t<<56 | t>>(64-56) + bc4 = bits.RotateLeft64(t, 56) t = a[4] ^ d4 - bc0 = t<<27 | t>>(64-27) + bc0 = bits.RotateLeft64(t, 27) a[5] = bc0 ^ (bc2 &^ bc1) a[11] = bc1 ^ (bc3 &^ bc2) a[17] = bc2 ^ (bc4 &^ bc3) @@ -122,15 +124,15 @@ func keccakF1600(a *[25]uint64) { a[4] = bc4 ^ (bc1 &^ bc0) t = a[15] ^ d0 - bc3 = t<<41 | t>>(64-41) + bc3 = bits.RotateLeft64(t, 41) t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) + bc4 = bits.RotateLeft64(t, 2) t = a[2] ^ d2 - bc0 = t<<62 | t>>(64-62) + bc0 = bits.RotateLeft64(t, 62) t = a[8] ^ d3 - bc1 = t<<55 | t>>(64-55) + bc1 = bits.RotateLeft64(t, 55) t = a[14] ^ d4 - bc2 = t<<39 | t>>(64-39) + bc2 = bits.RotateLeft64(t, 39) a[15] = bc0 ^ (bc2 &^ bc1) a[21] = bc1 ^ (bc3 &^ bc2) a[2] = bc2 ^ (bc4 &^ bc3) @@ -151,13 +153,13 @@ func keccakF1600(a *[25]uint64) { bc0 = a[0] ^ d0 t = a[16] ^ d1 - bc1 = t<<44 | t>>(64-44) + bc1 = bits.RotateLeft64(t, 44) t = a[7] ^ d2 - bc2 = t<<43 | t>>(64-43) + bc2 = bits.RotateLeft64(t, 43) t = a[23] ^ d3 - bc3 = t<<21 | t>>(64-21) + bc3 = bits.RotateLeft64(t, 21) t = a[14] ^ d4 - bc4 = t<<14 | t>>(64-14) + bc4 = bits.RotateLeft64(t, 14) a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] a[16] = bc1 ^ (bc3 &^ bc2) a[7] = bc2 ^ (bc4 &^ bc3) @@ -165,15 +167,15 @@ func keccakF1600(a *[25]uint64) { a[14] = bc4 ^ (bc1 &^ bc0) t 
= a[20] ^ d0 - bc2 = t<<3 | t>>(64-3) + bc2 = bits.RotateLeft64(t, 3) t = a[11] ^ d1 - bc3 = t<<45 | t>>(64-45) + bc3 = bits.RotateLeft64(t, 45) t = a[2] ^ d2 - bc4 = t<<61 | t>>(64-61) + bc4 = bits.RotateLeft64(t, 61) t = a[18] ^ d3 - bc0 = t<<28 | t>>(64-28) + bc0 = bits.RotateLeft64(t, 28) t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) + bc1 = bits.RotateLeft64(t, 20) a[20] = bc0 ^ (bc2 &^ bc1) a[11] = bc1 ^ (bc3 &^ bc2) a[2] = bc2 ^ (bc4 &^ bc3) @@ -181,15 +183,15 @@ func keccakF1600(a *[25]uint64) { a[9] = bc4 ^ (bc1 &^ bc0) t = a[15] ^ d0 - bc4 = t<<18 | t>>(64-18) + bc4 = bits.RotateLeft64(t, 18) t = a[6] ^ d1 - bc0 = t<<1 | t>>(64-1) + bc0 = bits.RotateLeft64(t, 1) t = a[22] ^ d2 - bc1 = t<<6 | t>>(64-6) + bc1 = bits.RotateLeft64(t, 6) t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) + bc2 = bits.RotateLeft64(t, 25) t = a[4] ^ d4 - bc3 = t<<8 | t>>(64-8) + bc3 = bits.RotateLeft64(t, 8) a[15] = bc0 ^ (bc2 &^ bc1) a[6] = bc1 ^ (bc3 &^ bc2) a[22] = bc2 ^ (bc4 &^ bc3) @@ -197,15 +199,15 @@ func keccakF1600(a *[25]uint64) { a[4] = bc4 ^ (bc1 &^ bc0) t = a[10] ^ d0 - bc1 = t<<36 | t>>(64-36) + bc1 = bits.RotateLeft64(t, 36) t = a[1] ^ d1 - bc2 = t<<10 | t>>(64-10) + bc2 = bits.RotateLeft64(t, 10) t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) + bc3 = bits.RotateLeft64(t, 15) t = a[8] ^ d3 - bc4 = t<<56 | t>>(64-56) + bc4 = bits.RotateLeft64(t, 56) t = a[24] ^ d4 - bc0 = t<<27 | t>>(64-27) + bc0 = bits.RotateLeft64(t, 27) a[10] = bc0 ^ (bc2 &^ bc1) a[1] = bc1 ^ (bc3 &^ bc2) a[17] = bc2 ^ (bc4 &^ bc3) @@ -213,15 +215,15 @@ func keccakF1600(a *[25]uint64) { a[24] = bc4 ^ (bc1 &^ bc0) t = a[5] ^ d0 - bc3 = t<<41 | t>>(64-41) + bc3 = bits.RotateLeft64(t, 41) t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) + bc4 = bits.RotateLeft64(t, 2) t = a[12] ^ d2 - bc0 = t<<62 | t>>(64-62) + bc0 = bits.RotateLeft64(t, 62) t = a[3] ^ d3 - bc1 = t<<55 | t>>(64-55) + bc1 = bits.RotateLeft64(t, 55) t = a[19] ^ d4 - bc2 = t<<39 | t>>(64-39) + bc2 = bits.RotateLeft64(t, 39) a[5] = bc0 ^ (bc2 &^ bc1) a[21] = bc1 ^ (bc3 &^ bc2) a[12] = bc2 ^ (bc4 &^ bc3) @@ -242,13 +244,13 @@ func keccakF1600(a *[25]uint64) { bc0 = a[0] ^ d0 t = a[11] ^ d1 - bc1 = t<<44 | t>>(64-44) + bc1 = bits.RotateLeft64(t, 44) t = a[22] ^ d2 - bc2 = t<<43 | t>>(64-43) + bc2 = bits.RotateLeft64(t, 43) t = a[8] ^ d3 - bc3 = t<<21 | t>>(64-21) + bc3 = bits.RotateLeft64(t, 21) t = a[19] ^ d4 - bc4 = t<<14 | t>>(64-14) + bc4 = bits.RotateLeft64(t, 14) a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] a[11] = bc1 ^ (bc3 &^ bc2) a[22] = bc2 ^ (bc4 &^ bc3) @@ -256,15 +258,15 @@ func keccakF1600(a *[25]uint64) { a[19] = bc4 ^ (bc1 &^ bc0) t = a[15] ^ d0 - bc2 = t<<3 | t>>(64-3) + bc2 = bits.RotateLeft64(t, 3) t = a[1] ^ d1 - bc3 = t<<45 | t>>(64-45) + bc3 = bits.RotateLeft64(t, 45) t = a[12] ^ d2 - bc4 = t<<61 | t>>(64-61) + bc4 = bits.RotateLeft64(t, 61) t = a[23] ^ d3 - bc0 = t<<28 | t>>(64-28) + bc0 = bits.RotateLeft64(t, 28) t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) + bc1 = bits.RotateLeft64(t, 20) a[15] = bc0 ^ (bc2 &^ bc1) a[1] = bc1 ^ (bc3 &^ bc2) a[12] = bc2 ^ (bc4 &^ bc3) @@ -272,15 +274,15 @@ func keccakF1600(a *[25]uint64) { a[9] = bc4 ^ (bc1 &^ bc0) t = a[5] ^ d0 - bc4 = t<<18 | t>>(64-18) + bc4 = bits.RotateLeft64(t, 18) t = a[16] ^ d1 - bc0 = t<<1 | t>>(64-1) + bc0 = bits.RotateLeft64(t, 1) t = a[2] ^ d2 - bc1 = t<<6 | t>>(64-6) + bc1 = bits.RotateLeft64(t, 6) t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) + bc2 = bits.RotateLeft64(t, 25) t = a[24] ^ d4 - bc3 = t<<8 | t>>(64-8) + bc3 = bits.RotateLeft64(t, 8) a[5] = bc0 ^ (bc2 &^ bc1) a[16] = bc1 ^ (bc3 &^ bc2) a[2] = bc2 ^ (bc4 
&^ bc3) @@ -288,15 +290,15 @@ func keccakF1600(a *[25]uint64) { a[24] = bc4 ^ (bc1 &^ bc0) t = a[20] ^ d0 - bc1 = t<<36 | t>>(64-36) + bc1 = bits.RotateLeft64(t, 36) t = a[6] ^ d1 - bc2 = t<<10 | t>>(64-10) + bc2 = bits.RotateLeft64(t, 10) t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) + bc3 = bits.RotateLeft64(t, 15) t = a[3] ^ d3 - bc4 = t<<56 | t>>(64-56) + bc4 = bits.RotateLeft64(t, 56) t = a[14] ^ d4 - bc0 = t<<27 | t>>(64-27) + bc0 = bits.RotateLeft64(t, 27) a[20] = bc0 ^ (bc2 &^ bc1) a[6] = bc1 ^ (bc3 &^ bc2) a[17] = bc2 ^ (bc4 &^ bc3) @@ -304,15 +306,15 @@ func keccakF1600(a *[25]uint64) { a[14] = bc4 ^ (bc1 &^ bc0) t = a[10] ^ d0 - bc3 = t<<41 | t>>(64-41) + bc3 = bits.RotateLeft64(t, 41) t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) + bc4 = bits.RotateLeft64(t, 2) t = a[7] ^ d2 - bc0 = t<<62 | t>>(64-62) + bc0 = bits.RotateLeft64(t, 62) t = a[18] ^ d3 - bc1 = t<<55 | t>>(64-55) + bc1 = bits.RotateLeft64(t, 55) t = a[4] ^ d4 - bc2 = t<<39 | t>>(64-39) + bc2 = bits.RotateLeft64(t, 39) a[10] = bc0 ^ (bc2 &^ bc1) a[21] = bc1 ^ (bc3 &^ bc2) a[7] = bc2 ^ (bc4 &^ bc3) @@ -333,13 +335,13 @@ func keccakF1600(a *[25]uint64) { bc0 = a[0] ^ d0 t = a[1] ^ d1 - bc1 = t<<44 | t>>(64-44) + bc1 = bits.RotateLeft64(t, 44) t = a[2] ^ d2 - bc2 = t<<43 | t>>(64-43) + bc2 = bits.RotateLeft64(t, 43) t = a[3] ^ d3 - bc3 = t<<21 | t>>(64-21) + bc3 = bits.RotateLeft64(t, 21) t = a[4] ^ d4 - bc4 = t<<14 | t>>(64-14) + bc4 = bits.RotateLeft64(t, 14) a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] a[1] = bc1 ^ (bc3 &^ bc2) a[2] = bc2 ^ (bc4 &^ bc3) @@ -347,15 +349,15 @@ func keccakF1600(a *[25]uint64) { a[4] = bc4 ^ (bc1 &^ bc0) t = a[5] ^ d0 - bc2 = t<<3 | t>>(64-3) + bc2 = bits.RotateLeft64(t, 3) t = a[6] ^ d1 - bc3 = t<<45 | t>>(64-45) + bc3 = bits.RotateLeft64(t, 45) t = a[7] ^ d2 - bc4 = t<<61 | t>>(64-61) + bc4 = bits.RotateLeft64(t, 61) t = a[8] ^ d3 - bc0 = t<<28 | t>>(64-28) + bc0 = bits.RotateLeft64(t, 28) t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) + bc1 = bits.RotateLeft64(t, 20) a[5] = bc0 ^ (bc2 &^ bc1) a[6] = bc1 ^ (bc3 &^ bc2) a[7] = bc2 ^ (bc4 &^ bc3) @@ -363,15 +365,15 @@ func keccakF1600(a *[25]uint64) { a[9] = bc4 ^ (bc1 &^ bc0) t = a[10] ^ d0 - bc4 = t<<18 | t>>(64-18) + bc4 = bits.RotateLeft64(t, 18) t = a[11] ^ d1 - bc0 = t<<1 | t>>(64-1) + bc0 = bits.RotateLeft64(t, 1) t = a[12] ^ d2 - bc1 = t<<6 | t>>(64-6) + bc1 = bits.RotateLeft64(t, 6) t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) + bc2 = bits.RotateLeft64(t, 25) t = a[14] ^ d4 - bc3 = t<<8 | t>>(64-8) + bc3 = bits.RotateLeft64(t, 8) a[10] = bc0 ^ (bc2 &^ bc1) a[11] = bc1 ^ (bc3 &^ bc2) a[12] = bc2 ^ (bc4 &^ bc3) @@ -379,15 +381,15 @@ func keccakF1600(a *[25]uint64) { a[14] = bc4 ^ (bc1 &^ bc0) t = a[15] ^ d0 - bc1 = t<<36 | t>>(64-36) + bc1 = bits.RotateLeft64(t, 36) t = a[16] ^ d1 - bc2 = t<<10 | t>>(64-10) + bc2 = bits.RotateLeft64(t, 10) t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) + bc3 = bits.RotateLeft64(t, 15) t = a[18] ^ d3 - bc4 = t<<56 | t>>(64-56) + bc4 = bits.RotateLeft64(t, 56) t = a[19] ^ d4 - bc0 = t<<27 | t>>(64-27) + bc0 = bits.RotateLeft64(t, 27) a[15] = bc0 ^ (bc2 &^ bc1) a[16] = bc1 ^ (bc3 &^ bc2) a[17] = bc2 ^ (bc4 &^ bc3) @@ -395,15 +397,15 @@ func keccakF1600(a *[25]uint64) { a[19] = bc4 ^ (bc1 &^ bc0) t = a[20] ^ d0 - bc3 = t<<41 | t>>(64-41) + bc3 = bits.RotateLeft64(t, 41) t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) + bc4 = bits.RotateLeft64(t, 2) t = a[22] ^ d2 - bc0 = t<<62 | t>>(64-62) + bc0 = bits.RotateLeft64(t, 62) t = a[23] ^ d3 - bc1 = t<<55 | t>>(64-55) + bc1 = bits.RotateLeft64(t, 55) t = a[24] ^ d4 - bc2 = t<<39 | t>>(64-39) + 
bc2 = bits.RotateLeft64(t, 39) a[20] = bc0 ^ (bc2 &^ bc1) a[21] = bc1 ^ (bc3 &^ bc2) a[22] = bc2 ^ (bc4 &^ bc3) diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go index ba269a073008..fa182beb40b5 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3.go +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -86,7 +86,7 @@ func (d *state) permute() { d.buf = d.storage.asBytes()[:0] keccakF1600(&d.a) case spongeSqueezing: - // If we're squeezing, we need to apply the permutatin before + // If we're squeezing, we need to apply the permutation before // copying more output. keccakF1600(&d.a) d.buf = d.storage.asBytes()[:d.rate] diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go index 4fcfc924ef64..63a3edb4cea7 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -34,11 +34,13 @@ const ( // kimd is a wrapper for the 'compute intermediate message digest' instruction. // src must be a multiple of the rate for the given function code. +// //go:noescape func kimd(function code, chain *[200]byte, src []byte) // klmd is a wrapper for the 'compute last message digest' instruction. // src padding is handled by the instruction. +// //go:noescape func klmd(function code, chain *[200]byte, dst, src []byte) diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index 7be398f80d33..a30a22bf20f1 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -32,7 +32,6 @@ type parsed struct { short string prerelease string build string - err string } // IsValid reports whether v is a valid semantic version string. @@ -172,12 +171,10 @@ func Sort(list []string) { func parse(v string) (p parsed, ok bool) { if v == "" || v[0] != 'v' { - p.err = "missing v prefix" return } p.major, v, ok = parseInt(v[1:]) if !ok { - p.err = "bad major version" return } if v == "" { @@ -187,13 +184,11 @@ func parse(v string) (p parsed, ok bool) { return } if v[0] != '.' { - p.err = "bad minor prefix" ok = false return } p.minor, v, ok = parseInt(v[1:]) if !ok { - p.err = "bad minor version" return } if v == "" { @@ -202,31 +197,26 @@ func parse(v string) (p parsed, ok bool) { return } if v[0] != '.' { - p.err = "bad patch prefix" ok = false return } p.patch, v, ok = parseInt(v[1:]) if !ok { - p.err = "bad patch version" return } if len(v) > 0 && v[0] == '-' { p.prerelease, v, ok = parsePrerelease(v) if !ok { - p.err = "bad prerelease" return } } if len(v) > 0 && v[0] == '+' { p.build, v, ok = parseBuild(v) if !ok { - p.err = "bad build" return } } if v != "" { - p.err = "junk on end" ok = false return } diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vendor/golang.org/x/net/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vendor/golang.org/x/net/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. 
diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go index ae62feb53413..04ec1c8ab522 100644 --- a/vendor/golang.org/x/net/bpf/doc.go +++ b/vendor/golang.org/x/net/bpf/doc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. /* - Package bpf implements marshaling and unmarshaling of programs for the Berkeley Packet Filter virtual machine, and provides a Go implementation of the virtual machine. @@ -21,7 +20,7 @@ access to kernel functions, and while conditional branches are allowed, they can only jump forwards, to guarantee that there are no infinite loops. -The virtual machine +# The virtual machine The BPF VM is an accumulator machine. Its main register, called register A, is an implicit source and destination in all arithmetic @@ -50,7 +49,7 @@ to extensions, which are essentially calls to kernel utility functions. Currently, the only extensions supported by this package are the Linux packet filter extensions. -Examples +# Examples This packet filter selects all ARP packets. @@ -77,6 +76,5 @@ This packet filter captures a random 1% sample of traffic. // Ignore. bpf.RetConstant{Val: 0}, }) - */ package bpf // import "golang.org/x/net/bpf" diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go index cf8947c33276..0aa307c0611d 100644 --- a/vendor/golang.org/x/net/bpf/vm_instructions.go +++ b/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -94,7 +94,7 @@ func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value u func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) { offset := int(ins.Off) - size := int(ins.Size) + size := ins.Size return loadCommon(in, offset, size) } @@ -121,7 +121,7 @@ func loadExtension(ins LoadExtension, in []byte) uint32 { func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { offset := int(ins.Off) + int(regX) - size := int(ins.Size) + size := ins.Size return loadCommon(in, offset, size) } diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index a3c021d3f88e..cf66309c4a8b 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -21,9 +21,9 @@ // explicitly to each function that needs it. The Context should be the first // parameter, typically named ctx: // -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } // // Do not pass a nil Context, even if a function permits it. Pass context.TODO // if you are unsure about which Context to use. diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go index 344bd1433450..2cb9c408f2e7 100644 --- a/vendor/golang.org/x/net/context/go17.go +++ b/vendor/golang.org/x/net/context/go17.go @@ -32,7 +32,7 @@ var DeadlineExceeded = context.DeadlineExceeded // call cancel as soon as the operations running in this Context complete. func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { ctx, f := context.WithCancel(parent) - return ctx, CancelFunc(f) + return ctx, f } // WithDeadline returns a copy of the parent context with the deadline adjusted @@ -46,7 +46,7 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { // call cancel as soon as the operations running in this Context complete. 
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { ctx, f := context.WithDeadline(parent, deadline) - return ctx, CancelFunc(f) + return ctx, f } // WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). @@ -54,11 +54,11 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete: // -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return WithDeadline(parent, time.Now().Add(timeout)) } diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go index 5270db5db7db..7b6b685114a9 100644 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -264,11 +264,11 @@ func (c *timerCtx) cancel(removeFromParent bool, err error) { // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete: // -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return WithDeadline(parent, time.Now().Add(timeout)) } diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index c79aa73f28bb..6e071e852432 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -173,13 +173,15 @@ func tokenEqual(t1, t2 string) bool { // isLWS reports whether b is linear white space, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// LWS = [CRLF] 1*( SP | HT ) +// +// LWS = [CRLF] 1*( SP | HT ) func isLWS(b byte) bool { return b == ' ' || b == '\t' } // isCTL reports whether b is a control byte, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// CTL = +// +// CTL = func isCTL(b byte) bool { const del = 0x7f // a CTL return b < ' ' || b == del @@ -189,12 +191,13 @@ func isCTL(b byte) bool { // HTTP/2 imposes the additional restriction that uppercase ASCII // letters are not allowed. // -// RFC 7230 says: -// header-field = field-name ":" OWS field-value OWS -// field-name = token -// token = 1*tchar -// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." 
/ -// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +// RFC 7230 says: +// +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false @@ -267,27 +270,28 @@ var validHostByte = [256]bool{ // ValidHeaderFieldValue reports whether v is a valid "field-value" according to // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : // -// message-header = field-name ":" [ field-value ] -// field-value = *( field-content | LWS ) -// field-content = +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = // // http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : // -// TEXT = -// LWS = [CRLF] 1*( SP | HT ) -// CTL = +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = // // RFC 7230 says: -// field-value = *( field-content / obs-fold ) -// obj-fold = N/A to http2, and deprecated -// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] -// field-vchar = VCHAR / obs-text -// obs-text = %x80-FF -// VCHAR = "any visible [USASCII] character" +// +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" // // http2 further says: "Similarly, HTTP/2 allows header field values // that are not valid. While most of the values that can be encoded diff --git a/vendor/golang.org/x/net/http/httpproxy/proxy.go b/vendor/golang.org/x/net/http/httpproxy/proxy.go index d2c8c87eab91..c3bd9a1eeb55 100644 --- a/vendor/golang.org/x/net/http/httpproxy/proxy.go +++ b/vendor/golang.org/x/net/http/httpproxy/proxy.go @@ -81,8 +81,7 @@ type config struct { // FromEnvironment returns a Config instance populated from the // environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the -// lowercase versions thereof). HTTPS_PROXY takes precedence over -// HTTP_PROXY for https requests. +// lowercase versions thereof). // // The environment values may be either a complete URL or a // "host[:port]", in which case the "http" scheme is assumed. An error @@ -267,6 +266,9 @@ func (c *config) init() { matchHost = true phost = "." 
+ phost } + if v, err := idnaASCII(phost); err == nil { + phost = v + } c.domainMatchers = append(c.domainMatchers, domainMatch{host: phost, port: pport, matchHost: matchHost}) } } diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index c936843eafa1..780968d6c19b 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -139,7 +139,6 @@ func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *d func (c *dialCall) dial(ctx context.Context, addr string) { const singleUse = false // shared conn c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse) - close(c.done) c.p.mu.Lock() delete(c.p.dialing, addr) @@ -147,6 +146,8 @@ func (c *dialCall) dial(ctx context.Context, addr string) { c.p.addConnLocked(addr, c.res) } c.p.mu.Unlock() + + close(c.done) } // addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go index 2663e5d287ee..f2067dabc59e 100644 --- a/vendor/golang.org/x/net/http2/errors.go +++ b/vendor/golang.org/x/net/http2/errors.go @@ -136,7 +136,7 @@ func (e headerFieldNameError) Error() string { type headerFieldValueError string func (e headerFieldValueError) Error() string { - return fmt.Sprintf("invalid header field value %q", string(e)) + return fmt.Sprintf("invalid header field value for %q", string(e)) } var ( diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go index b51f0e0cf1f5..750ac52f2a52 100644 --- a/vendor/golang.org/x/net/http2/flow.go +++ b/vendor/golang.org/x/net/http2/flow.go @@ -6,23 +6,91 @@ package http2 -// flow is the flow control window's size. -type flow struct { +// inflowMinRefresh is the minimum number of bytes we'll send for a +// flow control window update. +const inflowMinRefresh = 4 << 10 + +// inflow accounts for an inbound flow control window. +// It tracks both the latest window sent to the peer (used for enforcement) +// and the accumulated unsent window. +type inflow struct { + avail int32 + unsent int32 +} + +// set sets the initial window. +func (f *inflow) init(n int32) { + f.avail = n +} + +// add adds n bytes to the window, with a maximum window size of max, +// indicating that the peer can now send us more data. +// For example, the user read from a {Request,Response} body and consumed +// some of the buffered data, so the peer can now send more. +// It returns the number of bytes to send in a WINDOW_UPDATE frame to the peer. +// Window updates are accumulated and sent when the unsent capacity +// is at least inflowMinRefresh or will at least double the peer's available window. +func (f *inflow) add(n int) (connAdd int32) { + if n < 0 { + panic("negative update") + } + unsent := int64(f.unsent) + int64(n) + // "A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets." + // RFC 7540 Section 6.9.1. + const maxWindow = 1<<31 - 1 + if unsent+int64(f.avail) > maxWindow { + panic("flow control update exceeds maximum window size") + } + f.unsent = int32(unsent) + if f.unsent < inflowMinRefresh && f.unsent < f.avail { + // If there aren't at least inflowMinRefresh bytes of window to send, + // and this update won't at least double the window, buffer the update for later. + return 0 + } + f.avail += f.unsent + f.unsent = 0 + return int32(unsent) +} + +// take attempts to take n bytes from the peer's flow control window. 
+// It reports whether the window has available capacity. +func (f *inflow) take(n uint32) bool { + if n > uint32(f.avail) { + return false + } + f.avail -= int32(n) + return true +} + +// takeInflows attempts to take n bytes from two inflows, +// typically connection-level and stream-level flows. +// It reports whether both windows have available capacity. +func takeInflows(f1, f2 *inflow, n uint32) bool { + if n > uint32(f1.avail) || n > uint32(f2.avail) { + return false + } + f1.avail -= int32(n) + f2.avail -= int32(n) + return true +} + +// outflow is the outbound flow control window's size. +type outflow struct { _ incomparable // n is the number of DATA bytes we're allowed to send. - // A flow is kept both on a conn and a per-stream. + // An outflow is kept both on a conn and a per-stream. n int32 - // conn points to the shared connection-level flow that is - // shared by all streams on that conn. It is nil for the flow + // conn points to the shared connection-level outflow that is + // shared by all streams on that conn. It is nil for the outflow // that's on the conn directly. - conn *flow + conn *outflow } -func (f *flow) setConnFlow(cf *flow) { f.conn = cf } +func (f *outflow) setConnFlow(cf *outflow) { f.conn = cf } -func (f *flow) available() int32 { +func (f *outflow) available() int32 { n := f.n if f.conn != nil && f.conn.n < n { n = f.conn.n @@ -30,7 +98,7 @@ func (f *flow) available() int32 { return n } -func (f *flow) take(n int32) { +func (f *outflow) take(n int32) { if n > f.available() { panic("internal error: took too much") } @@ -42,7 +110,7 @@ func (f *flow) take(n int32) { // add adds n bytes (positive or negative) to the flow control window. // It returns false if the sum would exceed 2^31-1. -func (f *flow) add(n int32) bool { +func (f *outflow) add(n int32) bool { sum := f.n + n if (sum > n) == (f.n > 0) { f.n = sum diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 96a747905241..184ac45feb70 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -23,7 +23,7 @@ const frameHeaderLen = 9 var padZeros = make([]byte, 255) // zeros for padding // A FrameType is a registered frame type as defined in -// http://http2.github.io/http2-spec/#rfc.section.11.2 +// https://httpwg.org/specs/rfc7540.html#rfc.section.11.2 type FrameType uint8 const ( @@ -146,7 +146,7 @@ func typeFrameParser(t FrameType) frameParser { // A FrameHeader is the 9 byte header of all HTTP/2 frames. // -// See http://http2.github.io/http2-spec/#FrameHeader +// See https://httpwg.org/specs/rfc7540.html#FrameHeader type FrameHeader struct { valid bool // caller can access []byte fields in the Frame @@ -575,7 +575,7 @@ func (fr *Framer) checkFrameOrder(f Frame) error { // A DataFrame conveys arbitrary, variable-length sequences of octets // associated with a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.1 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.1 type DataFrame struct { FrameHeader data []byte @@ -698,7 +698,7 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by // endpoints communicate, such as preferences and constraints on peer // behavior. 
// -// See http://http2.github.io/http2-spec/#SETTINGS +// See https://httpwg.org/specs/rfc7540.html#SETTINGS type SettingsFrame struct { FrameHeader p []byte @@ -837,7 +837,7 @@ func (f *Framer) WriteSettingsAck() error { // A PingFrame is a mechanism for measuring a minimal round trip time // from the sender, as well as determining whether an idle connection // is still functional. -// See http://http2.github.io/http2-spec/#rfc.section.6.7 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.7 type PingFrame struct { FrameHeader Data [8]byte @@ -870,7 +870,7 @@ func (f *Framer) WritePing(ack bool, data [8]byte) error { } // A GoAwayFrame informs the remote peer to stop creating streams on this connection. -// See http://http2.github.io/http2-spec/#rfc.section.6.8 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.8 type GoAwayFrame struct { FrameHeader LastStreamID uint32 @@ -934,7 +934,7 @@ func parseUnknownFrame(_ *frameCache, fh FrameHeader, countError func(string), p } // A WindowUpdateFrame is used to implement flow control. -// See http://http2.github.io/http2-spec/#rfc.section.6.9 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.9 type WindowUpdateFrame struct { FrameHeader Increment uint32 // never read with high bit set @@ -1123,7 +1123,7 @@ func (f *Framer) WriteHeaders(p HeadersFrameParam) error { } // A PriorityFrame specifies the sender-advised priority of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.3 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.3 type PriorityFrame struct { FrameHeader PriorityParam @@ -1193,7 +1193,7 @@ func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { } // A RSTStreamFrame allows for abnormal termination of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.4 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.4 type RSTStreamFrame struct { FrameHeader ErrCode ErrCode @@ -1225,7 +1225,7 @@ func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { } // A ContinuationFrame is used to continue a sequence of header block fragments. -// See http://http2.github.io/http2-spec/#rfc.section.6.10 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.10 type ContinuationFrame struct { FrameHeader headerFragBuf []byte @@ -1266,7 +1266,7 @@ func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlock } // A PushPromiseFrame is used to initiate a server stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.6 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.6 type PushPromiseFrame struct { FrameHeader PromiseID uint32 @@ -1532,7 +1532,8 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) } if !httpguts.ValidHeaderFieldValue(hf.Value) { - invalid = headerFieldValueError(hf.Value) + // Don't include the value in the error, because it may be sensitive. + invalid = headerFieldValueError(hf.Name) } isPseudo := strings.HasPrefix(hf.Name, ":") if isPseudo { diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go new file mode 100644 index 000000000000..aca4b2b31acd --- /dev/null +++ b/vendor/golang.org/x/net/http2/go118.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.18 +// +build go1.18 + +package http2 + +import ( + "crypto/tls" + "net" +) + +func tlsUnderlyingConn(tc *tls.Conn) net.Conn { + return tc.NetConn() +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go index 9e12941da4c3..149b3dd20e45 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -27,7 +27,14 @@ func buildCommonHeaderMaps() { "accept-language", "accept-ranges", "age", + "access-control-allow-credentials", + "access-control-allow-headers", + "access-control-allow-methods", "access-control-allow-origin", + "access-control-expose-headers", + "access-control-max-age", + "access-control-request-headers", + "access-control-request-method", "allow", "authorization", "cache-control", @@ -53,6 +60,7 @@ func buildCommonHeaderMaps() { "link", "location", "max-forwards", + "origin", "proxy-authenticate", "proxy-authorization", "range", @@ -68,6 +76,8 @@ func buildCommonHeaderMaps() { "vary", "via", "www-authenticate", + "x-forwarded-for", + "x-forwarded-proto", } commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) @@ -85,3 +95,11 @@ func lowerHeader(v string) (lower string, ascii bool) { } return asciiToLower(v) } + +func canonicalHeader(v string) string { + buildCommonHeaderMapsOnce() + if s, ok := commonCanonHeader[v]; ok { + return s + } + return http.CanonicalHeaderKey(v) +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go index 97f17831fc55..46219da2b01b 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -116,6 +116,11 @@ func (e *Encoder) SetMaxDynamicTableSize(v uint32) { e.dynTab.setMaxSize(v) } +// MaxDynamicTableSize returns the current dynamic header table size. +func (e *Encoder) MaxDynamicTableSize() (v uint32) { + return e.dynTab.maxSize +} + // SetMaxDynamicTableSizeLimit changes the maximum value that can be // specified in SetMaxDynamicTableSize to v. By default, it is set to // 4096, which is the same size of the default dynamic header table @@ -191,7 +196,7 @@ func appendTableSize(dst []byte, v uint32) []byte { // bit prefix, to dst and returns the extended buffer. // // See -// http://http2.github.io/http2-spec/compression.html#integer.representation +// https://httpwg.org/specs/rfc7541.html#integer.representation func appendVarInt(dst []byte, n byte, i uint64) []byte { k := uint64((1 << n) - 1) if i < k { diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index 85f18a2b0a86..ebdfbee964ae 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -59,7 +59,7 @@ func (hf HeaderField) String() string { // Size returns the size of an entry per RFC 7541 section 4.1. func (hf HeaderField) Size() uint32 { - // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // https://httpwg.org/specs/rfc7541.html#rfc.section.4.1 // "The size of the dynamic table is the sum of the size of // its entries. 
The size of an entry is the sum of its name's // length in octets (as defined in Section 5.2), its value's @@ -158,7 +158,7 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { } type dynamicTable struct { - // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + // https://httpwg.org/specs/rfc7541.html#rfc.section.2.3.2 table headerFieldTable size uint32 // in bytes maxSize uint32 // current maxSize @@ -307,27 +307,27 @@ func (d *Decoder) parseHeaderFieldRepr() error { case b&128 != 0: // Indexed representation. // High bit set? - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.1 return d.parseFieldIndexed() case b&192 == 64: // 6.2.1 Literal Header Field with Incremental Indexing // 0b10xxxxxx: top two bits are 10 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.1 return d.parseFieldLiteral(6, indexedTrue) case b&240 == 0: // 6.2.2 Literal Header Field without Indexing // 0b0000xxxx: top four bits are 0000 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.2 return d.parseFieldLiteral(4, indexedFalse) case b&240 == 16: // 6.2.3 Literal Header Field never Indexed // 0b0001xxxx: top four bits are 0001 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.3 return d.parseFieldLiteral(4, indexedNever) case b&224 == 32: // 6.3 Dynamic Table Size Update // Top three bits are '001'. - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.3 return d.parseDynamicTableSizeUpdate() } @@ -420,7 +420,7 @@ var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} // readVarInt reads an unsigned variable length integer off the // beginning of p. n is the parameter as described in -// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. +// https://httpwg.org/specs/rfc7541.html#rfc.section.5.1. // // n must always be between 1 and 8. // diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go index fe0b84ccd467..20d083a716da 100644 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -169,25 +169,50 @@ func buildRootHuffmanNode() { // AppendHuffmanString appends s, as encoded in Huffman codes, to dst // and returns the extended buffer. func AppendHuffmanString(dst []byte, s string) []byte { - rembits := uint8(8) - + // This relies on the maximum huffman code length being 30 (See tables.go huffmanCodeLen array) + // So if a uint64 buffer has less than 32 valid bits can always accommodate another huffmanCode. 
+ var ( + x uint64 // buffer + n uint // number valid of bits present in x + ) for i := 0; i < len(s); i++ { - if rembits == 8 { - dst = append(dst, 0) + c := s[i] + n += uint(huffmanCodeLen[c]) + x <<= huffmanCodeLen[c] % 64 + x |= uint64(huffmanCodes[c]) + if n >= 32 { + n %= 32 // Normally would be -= 32 but %= 32 informs compiler 0 <= n <= 31 for upcoming shift + y := uint32(x >> n) // Compiler doesn't combine memory writes if y isn't uint32 + dst = append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y)) } - dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i]) } - - if rembits < 8 { - // special EOS symbol - code := uint32(0x3fffffff) - nbits := uint8(30) - - t := uint8(code >> (nbits - rembits)) - dst[len(dst)-1] |= t + // Add padding bits if necessary + if over := n % 8; over > 0 { + const ( + eosCode = 0x3fffffff + eosNBits = 30 + eosPadByte = eosCode >> (eosNBits - 8) + ) + pad := 8 - over + x = (x << pad) | (eosPadByte >> over) + n += pad // 8 now divides into n exactly } - - return dst + // n in (0, 8, 16, 24, 32) + switch n / 8 { + case 0: + return dst + case 1: + return append(dst, byte(x)) + case 2: + y := uint16(x) + return append(dst, byte(y>>8), byte(y)) + case 3: + y := uint16(x >> 8) + return append(dst, byte(y>>8), byte(y), byte(x)) + } + // case 4: + y := uint32(x) + return append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y)) } // HuffmanEncodeLength returns the number of bytes required to encode @@ -199,35 +224,3 @@ func HuffmanEncodeLength(s string) uint64 { } return (n + 7) / 8 } - -// appendByteToHuffmanCode appends Huffman code for c to dst and -// returns the extended buffer and the remaining bits in the last -// element. The appending is not byte aligned and the remaining bits -// in the last element of dst is given in rembits. -func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { - code := huffmanCodes[c] - nbits := huffmanCodeLen[c] - - for { - if rembits > nbits { - t := uint8(code << (rembits - nbits)) - dst[len(dst)-1] |= t - rembits -= nbits - break - } - - t := uint8(code >> (nbits - rembits)) - dst[len(dst)-1] |= t - - nbits -= rembits - rembits = 8 - - if nbits == 0 { - break - } - - dst = append(dst, 0) - } - - return dst, rembits -} diff --git a/vendor/golang.org/x/net/http2/hpack/static_table.go b/vendor/golang.org/x/net/http2/hpack/static_table.go new file mode 100644 index 000000000000..754a1eb919e9 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/static_table.go @@ -0,0 +1,188 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. 
+ +package hpack + +var staticTable = &headerFieldTable{ + evictCount: 0, + byName: map[string]uint64{ + ":authority": 1, + ":method": 3, + ":path": 5, + ":scheme": 7, + ":status": 14, + "accept-charset": 15, + "accept-encoding": 16, + "accept-language": 17, + "accept-ranges": 18, + "accept": 19, + "access-control-allow-origin": 20, + "age": 21, + "allow": 22, + "authorization": 23, + "cache-control": 24, + "content-disposition": 25, + "content-encoding": 26, + "content-language": 27, + "content-length": 28, + "content-location": 29, + "content-range": 30, + "content-type": 31, + "cookie": 32, + "date": 33, + "etag": 34, + "expect": 35, + "expires": 36, + "from": 37, + "host": 38, + "if-match": 39, + "if-modified-since": 40, + "if-none-match": 41, + "if-range": 42, + "if-unmodified-since": 43, + "last-modified": 44, + "link": 45, + "location": 46, + "max-forwards": 47, + "proxy-authenticate": 48, + "proxy-authorization": 49, + "range": 50, + "referer": 51, + "refresh": 52, + "retry-after": 53, + "server": 54, + "set-cookie": 55, + "strict-transport-security": 56, + "transfer-encoding": 57, + "user-agent": 58, + "vary": 59, + "via": 60, + "www-authenticate": 61, + }, + byNameValue: map[pairNameValue]uint64{ + {name: ":authority", value: ""}: 1, + {name: ":method", value: "GET"}: 2, + {name: ":method", value: "POST"}: 3, + {name: ":path", value: "/"}: 4, + {name: ":path", value: "/index.html"}: 5, + {name: ":scheme", value: "http"}: 6, + {name: ":scheme", value: "https"}: 7, + {name: ":status", value: "200"}: 8, + {name: ":status", value: "204"}: 9, + {name: ":status", value: "206"}: 10, + {name: ":status", value: "304"}: 11, + {name: ":status", value: "400"}: 12, + {name: ":status", value: "404"}: 13, + {name: ":status", value: "500"}: 14, + {name: "accept-charset", value: ""}: 15, + {name: "accept-encoding", value: "gzip, deflate"}: 16, + {name: "accept-language", value: ""}: 17, + {name: "accept-ranges", value: ""}: 18, + {name: "accept", value: ""}: 19, + {name: "access-control-allow-origin", value: ""}: 20, + {name: "age", value: ""}: 21, + {name: "allow", value: ""}: 22, + {name: "authorization", value: ""}: 23, + {name: "cache-control", value: ""}: 24, + {name: "content-disposition", value: ""}: 25, + {name: "content-encoding", value: ""}: 26, + {name: "content-language", value: ""}: 27, + {name: "content-length", value: ""}: 28, + {name: "content-location", value: ""}: 29, + {name: "content-range", value: ""}: 30, + {name: "content-type", value: ""}: 31, + {name: "cookie", value: ""}: 32, + {name: "date", value: ""}: 33, + {name: "etag", value: ""}: 34, + {name: "expect", value: ""}: 35, + {name: "expires", value: ""}: 36, + {name: "from", value: ""}: 37, + {name: "host", value: ""}: 38, + {name: "if-match", value: ""}: 39, + {name: "if-modified-since", value: ""}: 40, + {name: "if-none-match", value: ""}: 41, + {name: "if-range", value: ""}: 42, + {name: "if-unmodified-since", value: ""}: 43, + {name: "last-modified", value: ""}: 44, + {name: "link", value: ""}: 45, + {name: "location", value: ""}: 46, + {name: "max-forwards", value: ""}: 47, + {name: "proxy-authenticate", value: ""}: 48, + {name: "proxy-authorization", value: ""}: 49, + {name: "range", value: ""}: 50, + {name: "referer", value: ""}: 51, + {name: "refresh", value: ""}: 52, + {name: "retry-after", value: ""}: 53, + {name: "server", value: ""}: 54, + {name: "set-cookie", value: ""}: 55, + {name: "strict-transport-security", value: ""}: 56, + {name: "transfer-encoding", value: ""}: 57, + {name: "user-agent", value: 
""}: 58, + {name: "vary", value: ""}: 59, + {name: "via", value: ""}: 60, + {name: "www-authenticate", value: ""}: 61, + }, + ents: []HeaderField{ + {Name: ":authority", Value: "", Sensitive: false}, + {Name: ":method", Value: "GET", Sensitive: false}, + {Name: ":method", Value: "POST", Sensitive: false}, + {Name: ":path", Value: "/", Sensitive: false}, + {Name: ":path", Value: "/index.html", Sensitive: false}, + {Name: ":scheme", Value: "http", Sensitive: false}, + {Name: ":scheme", Value: "https", Sensitive: false}, + {Name: ":status", Value: "200", Sensitive: false}, + {Name: ":status", Value: "204", Sensitive: false}, + {Name: ":status", Value: "206", Sensitive: false}, + {Name: ":status", Value: "304", Sensitive: false}, + {Name: ":status", Value: "400", Sensitive: false}, + {Name: ":status", Value: "404", Sensitive: false}, + {Name: ":status", Value: "500", Sensitive: false}, + {Name: "accept-charset", Value: "", Sensitive: false}, + {Name: "accept-encoding", Value: "gzip, deflate", Sensitive: false}, + {Name: "accept-language", Value: "", Sensitive: false}, + {Name: "accept-ranges", Value: "", Sensitive: false}, + {Name: "accept", Value: "", Sensitive: false}, + {Name: "access-control-allow-origin", Value: "", Sensitive: false}, + {Name: "age", Value: "", Sensitive: false}, + {Name: "allow", Value: "", Sensitive: false}, + {Name: "authorization", Value: "", Sensitive: false}, + {Name: "cache-control", Value: "", Sensitive: false}, + {Name: "content-disposition", Value: "", Sensitive: false}, + {Name: "content-encoding", Value: "", Sensitive: false}, + {Name: "content-language", Value: "", Sensitive: false}, + {Name: "content-length", Value: "", Sensitive: false}, + {Name: "content-location", Value: "", Sensitive: false}, + {Name: "content-range", Value: "", Sensitive: false}, + {Name: "content-type", Value: "", Sensitive: false}, + {Name: "cookie", Value: "", Sensitive: false}, + {Name: "date", Value: "", Sensitive: false}, + {Name: "etag", Value: "", Sensitive: false}, + {Name: "expect", Value: "", Sensitive: false}, + {Name: "expires", Value: "", Sensitive: false}, + {Name: "from", Value: "", Sensitive: false}, + {Name: "host", Value: "", Sensitive: false}, + {Name: "if-match", Value: "", Sensitive: false}, + {Name: "if-modified-since", Value: "", Sensitive: false}, + {Name: "if-none-match", Value: "", Sensitive: false}, + {Name: "if-range", Value: "", Sensitive: false}, + {Name: "if-unmodified-since", Value: "", Sensitive: false}, + {Name: "last-modified", Value: "", Sensitive: false}, + {Name: "link", Value: "", Sensitive: false}, + {Name: "location", Value: "", Sensitive: false}, + {Name: "max-forwards", Value: "", Sensitive: false}, + {Name: "proxy-authenticate", Value: "", Sensitive: false}, + {Name: "proxy-authorization", Value: "", Sensitive: false}, + {Name: "range", Value: "", Sensitive: false}, + {Name: "referer", Value: "", Sensitive: false}, + {Name: "refresh", Value: "", Sensitive: false}, + {Name: "retry-after", Value: "", Sensitive: false}, + {Name: "server", Value: "", Sensitive: false}, + {Name: "set-cookie", Value: "", Sensitive: false}, + {Name: "strict-transport-security", Value: "", Sensitive: false}, + {Name: "transfer-encoding", Value: "", Sensitive: false}, + {Name: "user-agent", Value: "", Sensitive: false}, + {Name: "vary", Value: "", Sensitive: false}, + {Name: "via", Value: "", Sensitive: false}, + {Name: "www-authenticate", Value: "", Sensitive: false}, + }, +} diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go 
b/vendor/golang.org/x/net/http2/hpack/tables.go index a66cfbea69d9..8cbdf3f019cb 100644 --- a/vendor/golang.org/x/net/http2/hpack/tables.go +++ b/vendor/golang.org/x/net/http2/hpack/tables.go @@ -96,8 +96,7 @@ func (t *headerFieldTable) evictOldest(n int) { // meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic // table, the return value i actually refers to the entry t.ents[t.len()-i]. // -// All tables are assumed to be a dynamic tables except for the global -// staticTable pointer. +// All tables are assumed to be a dynamic tables except for the global staticTable. // // See Section 2.3.3. func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { @@ -125,81 +124,6 @@ func (t *headerFieldTable) idToIndex(id uint64) uint64 { return k + 1 } -// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = newStaticTable() -var staticTableEntries = [...]HeaderField{ - {Name: ":authority"}, - {Name: ":method", Value: "GET"}, - {Name: ":method", Value: "POST"}, - {Name: ":path", Value: "/"}, - {Name: ":path", Value: "/index.html"}, - {Name: ":scheme", Value: "http"}, - {Name: ":scheme", Value: "https"}, - {Name: ":status", Value: "200"}, - {Name: ":status", Value: "204"}, - {Name: ":status", Value: "206"}, - {Name: ":status", Value: "304"}, - {Name: ":status", Value: "400"}, - {Name: ":status", Value: "404"}, - {Name: ":status", Value: "500"}, - {Name: "accept-charset"}, - {Name: "accept-encoding", Value: "gzip, deflate"}, - {Name: "accept-language"}, - {Name: "accept-ranges"}, - {Name: "accept"}, - {Name: "access-control-allow-origin"}, - {Name: "age"}, - {Name: "allow"}, - {Name: "authorization"}, - {Name: "cache-control"}, - {Name: "content-disposition"}, - {Name: "content-encoding"}, - {Name: "content-language"}, - {Name: "content-length"}, - {Name: "content-location"}, - {Name: "content-range"}, - {Name: "content-type"}, - {Name: "cookie"}, - {Name: "date"}, - {Name: "etag"}, - {Name: "expect"}, - {Name: "expires"}, - {Name: "from"}, - {Name: "host"}, - {Name: "if-match"}, - {Name: "if-modified-since"}, - {Name: "if-none-match"}, - {Name: "if-range"}, - {Name: "if-unmodified-since"}, - {Name: "last-modified"}, - {Name: "link"}, - {Name: "location"}, - {Name: "max-forwards"}, - {Name: "proxy-authenticate"}, - {Name: "proxy-authorization"}, - {Name: "range"}, - {Name: "referer"}, - {Name: "refresh"}, - {Name: "retry-after"}, - {Name: "server"}, - {Name: "set-cookie"}, - {Name: "strict-transport-security"}, - {Name: "transfer-encoding"}, - {Name: "user-agent"}, - {Name: "vary"}, - {Name: "via"}, - {Name: "www-authenticate"}, -} - -func newStaticTable() *headerFieldTable { - t := &headerFieldTable{} - t.init() - for _, e := range staticTableEntries[:] { - t.addEntry(e) - } - return t -} - var huffmanCodes = [256]uint32{ 0x1ff8, 0x7fffd8, diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 5571ccfd2613..6f2df281872e 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -13,7 +13,6 @@ // See https://http2.github.io/ for more information on HTTP/2. // // See https://http2.golang.org/ for a test server running this code. 
-// package http2 // import "golang.org/x/net/http2" import ( @@ -56,14 +55,14 @@ const ( ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" // SETTINGS_MAX_FRAME_SIZE default - // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + // https://httpwg.org/specs/rfc7540.html#rfc.section.6.5.2 initialMaxFrameSize = 16384 // NextProtoTLS is the NPN/ALPN protocol negotiated during // HTTP/2's TLS setup. NextProtoTLS = "h2" - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues initialHeaderTableSize = 4096 initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size @@ -112,7 +111,7 @@ func (st streamState) String() string { // Setting is a setting parameter: which setting it is, and its value. type Setting struct { // ID is which setting is being set. - // See http://http2.github.io/http2-spec/#SettingValues + // See https://httpwg.org/specs/rfc7540.html#SettingFormat ID SettingID // Val is the value. @@ -144,7 +143,7 @@ func (s Setting) Valid() error { } // A SettingID is an HTTP/2 setting as defined in -// http://http2.github.io/http2-spec/#iana-settings +// https://httpwg.org/specs/rfc7540.html#iana-settings type SettingID uint16 const ( @@ -176,10 +175,11 @@ func (s SettingID) String() string { // name (key). See httpguts.ValidHeaderName for the base rules. // // Further, http2 says: -// "Just as in HTTP/1.x, header field names are strings of ASCII -// characters that are compared in a case-insensitive -// fashion. However, header field names MUST be converted to -// lowercase prior to their encoding in HTTP/2. " +// +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " func validWireHeaderFieldName(v string) bool { if len(v) == 0 { return false @@ -365,8 +365,8 @@ func (s *sorter) SortStrings(ss []string) { // validPseudoPath reports whether v is a valid :path pseudo-header // value. It must be either: // -// *) a non-empty string starting with '/' -// *) the string '*', for OPTIONS requests. +// - a non-empty string starting with '/' +// - the string '*', for OPTIONS requests. // // For now this is only used a quick check for deciding when to clean // up Opaque URLs before sending requests from the Transport. diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go new file mode 100644 index 000000000000..eab532c96bc0 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go118.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package http2 + +import ( + "crypto/tls" + "net" +) + +func tlsUnderlyingConn(tc *tls.Conn) net.Conn { + return nil +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index e644d9b2f34d..b624dc0a705e 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -98,6 +98,19 @@ type Server struct { // the HTTP/2 spec's recommendations. MaxConcurrentStreams uint32 + // MaxDecoderHeaderTableSize optionally specifies the http2 + // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It + // informs the remote endpoint of the maximum size of the header compression + // table used to decode header blocks, in octets. 
If zero, the default value + // of 4096 is used. + MaxDecoderHeaderTableSize uint32 + + // MaxEncoderHeaderTableSize optionally specifies an upper limit for the + // header compression table used for encoding request headers. Received + // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero, + // the default value of 4096 is used. + MaxEncoderHeaderTableSize uint32 + // MaxReadFrameSize optionally specifies the largest frame // this server is willing to read. A valid value is between // 16k and 16M, inclusive. If zero or otherwise invalid, a @@ -143,7 +156,7 @@ type Server struct { } func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection > initialWindowSize { + if s.MaxUploadBufferPerConnection >= initialWindowSize { return s.MaxUploadBufferPerConnection } return 1 << 20 @@ -170,6 +183,20 @@ func (s *Server) maxConcurrentStreams() uint32 { return defaultMaxStreams } +func (s *Server) maxDecoderHeaderTableSize() uint32 { + if v := s.MaxDecoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + +func (s *Server) maxEncoderHeaderTableSize() uint32 { + if v := s.MaxEncoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + // maxQueuedControlFrames is the maximum number of control frames like // SETTINGS, PING and RST_STREAM that will be queued for writing before // the connection is closed to prevent memory exhaustion attacks. @@ -315,6 +342,20 @@ type ServeConnOpts struct { // requests. If nil, BaseConfig.Handler is used. If BaseConfig // or BaseConfig.Handler is nil, http.DefaultServeMux is used. Handler http.Handler + + // UpgradeRequest is an initial request received on a connection + // undergoing an h2c upgrade. The request body must have been + // completely read from the connection before calling ServeConn, + // and the 101 Switching Protocols response written. + UpgradeRequest *http.Request + + // Settings is the decoded contents of the HTTP2-Settings header + // in an h2c upgrade request. + Settings []byte + + // SawClientPreface is set if the HTTP/2 connection preface + // has already been read from the connection. + SawClientPreface bool } func (o *ServeConnOpts) context() context.Context { @@ -380,9 +421,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { advMaxStreams: s.maxConcurrentStreams(), initialStreamSendWindowSize: initialWindowSize, maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, serveG: newGoroutineLock(), pushEnabled: true, + sawClientPreface: opts.SawClientPreface, } s.state.registerConn(sc) @@ -400,21 +441,22 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { if s.NewWriteScheduler != nil { sc.writeSched = s.NewWriteScheduler() } else { - sc.writeSched = NewRandomWriteScheduler() + sc.writeSched = NewPriorityWriteScheduler(nil) } // These start at the RFC-specified defaults. If there is a higher // configured value for inflow, that will be updated when we send a // WINDOW_UPDATE shortly after sending SETTINGS. 
sc.flow.add(initialWindowSize) - sc.inflow.add(initialWindowSize) + sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) fr := NewFramer(sc.bw, c) if s.CountError != nil { fr.countError = s.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() fr.SetMaxReadFrameSize(s.maxReadFrameSize()) sc.framer = fr @@ -465,9 +507,27 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { } } + if opts.Settings != nil { + fr := &SettingsFrame{ + FrameHeader: FrameHeader{valid: true}, + p: opts.Settings, + } + if err := fr.ForeachSetting(sc.processSetting); err != nil { + sc.rejectConn(ErrCodeProtocol, "invalid settings") + return + } + opts.Settings = nil + } + if hook := testHookGetServerConn; hook != nil { hook(sc) } + + if opts.UpgradeRequest != nil { + sc.upgradeRequest(opts.UpgradeRequest) + opts.UpgradeRequest = nil + } + sc.serve() } @@ -503,8 +563,8 @@ type serverConn struct { wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes bodyReadCh chan bodyReadMsg // from handlers -> serve serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop - flow flow // conn-wide (not stream-specific) outbound flow control - inflow flow // conn-wide inbound flow control + flow outflow // conn-wide (not stream-specific) outbound flow control + inflow inflow // conn-wide inbound flow control tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler @@ -512,6 +572,7 @@ type serverConn struct { // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() pushEnabled bool + sawClientPreface bool // preface has already been read, used in h2c upgrade sawFirstSettings bool // got the initial SETTINGS frame after the preface needToSendSettingsAck bool unackedSettings int // how many SETTINGS have we sent without ACKs? 
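The MaxDecoderHeaderTableSize and MaxEncoderHeaderTableSize fields added in this hunk are the server-side knobs for the HPACK SETTINGS_HEADER_TABLE_SIZE exchange: the decoder size is advertised to the peer, while the encoder size caps whatever the peer advertises back. A minimal sketch (not part of the patch) of how a caller might set them; the handler, address and certificate paths are placeholders, and http2.ConfigureServer is the existing x/net API:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	h2 := &http2.Server{
		MaxDecoderHeaderTableSize: 8 << 10, // advertised to the peer as SETTINGS_HEADER_TABLE_SIZE
		MaxEncoderHeaderTableSize: 8 << 10, // caps the table size the peer can make us encode with
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder cert paths
}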
@@ -525,9 +586,9 @@ type serverConn struct { streams map[uint32]*stream initialStreamSendWindowSize int32 maxFrameSize int32 - headerTableSize uint32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + canonHeaderKeysSize int // canonHeader keys size in bytes writingFrame bool // started writing a frame (on serve goroutine or separate) writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh needsFrameFlush bool // last frame write wasn't a flush @@ -580,15 +641,17 @@ type stream struct { cancelCtx func() // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow outflow // limits writing from Handler to client + inflow inflow // what the client is allowed to POST/etc to us state streamState resetQueued bool // RST_STREAM queued for write; set by sc.resetStream gotTrailerHeader bool // HEADER frame for trailers was seen wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline *time.Timer // nil if unused writeDeadline *time.Timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -704,6 +767,13 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { } } +// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size +// of the entries in the canonHeader cache. +// This should be larger than the size of unique, uncommon header keys likely to +// be sent by the peer, while not so high as to permit unreasonable memory usage +// if the peer sends an unbounded number of unique header keys. +const maxCachedCanonicalHeadersKeysSize = 2048 + func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() buildCommonHeaderMapsOnce() @@ -719,14 +789,10 @@ func (sc *serverConn) canonicalHeader(v string) string { sc.canonHeader = make(map[string]string) } cv = http.CanonicalHeaderKey(v) - // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of - // entries in the canonHeader cache. This should be larger than the number - // of unique, uncommon header keys likely to be sent by the peer, while not - // so high as to permit unreasonable memory usage if the peer sends an unbounded - // number of unique header keys. 
- const maxCachedCanonicalHeaders = 32 - if len(sc.canonHeader) < maxCachedCanonicalHeaders { + size := 100 + len(v)*2 // 100 bytes of map overhead + key + value + if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize { sc.canonHeader[v] = cv + sc.canonHeaderKeysSize += size } return cv } @@ -828,6 +894,7 @@ func (sc *serverConn) serve() { {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, }, }) @@ -914,6 +981,8 @@ func (sc *serverConn) serve() { } case *startPushRequest: sc.startPush(v) + case func(*serverConn): + v(sc) default: panic(fmt.Sprintf("unexpected type %T", v)) } @@ -974,6 +1043,9 @@ var errPrefaceTimeout = errors.New("timeout waiting for client preface") // returns errPrefaceTimeout on timeout, or an error if the greeting // is invalid. func (sc *serverConn) readPreface() error { + if sc.sawClientPreface { + return nil + } errc := make(chan error, 1) go func() { // Read the client preface @@ -1334,6 +1406,9 @@ func (sc *serverConn) startGracefulShutdownInternal() { func (sc *serverConn) goAway(code ErrCode) { sc.serveG.check() if sc.inGoAway { + if sc.goAwayCode == ErrCodeNo { + sc.goAwayCode = code + } return } sc.inGoAway = true @@ -1421,6 +1496,21 @@ func (sc *serverConn) processFrame(f Frame) error { sc.sawFirstSettings = true } + // Discard frames for streams initiated after the identified last + // stream sent in a GOAWAY, or all frames after sending an error. + // We still need to return connection-level flow control for DATA frames. + // RFC 9113 Section 6.8. + if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) { + + if f, ok := f.(*DataFrame); ok { + if !sc.inflow.take(f.Length) { + return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl)) + } + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + } + return nil + } + switch f := f.(type) { case *SettingsFrame: return sc.processSettings(f) @@ -1463,9 +1553,6 @@ func (sc *serverConn) processPing(f *PingFrame) error { // PROTOCOL_ERROR." 
return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol)) } - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) return nil } @@ -1527,6 +1614,9 @@ func (sc *serverConn) closeStream(st *stream, err error) { panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) } st.state = stateClosed + if st.readDeadline != nil { + st.readDeadline.Stop() + } if st.writeDeadline != nil { st.writeDeadline.Stop() } @@ -1552,6 +1642,14 @@ func (sc *serverConn) closeStream(st *stream, err error) { p.CloseWithError(err) } + if e, ok := err.(StreamError); ok { + if e.Cause != nil { + err = e.Cause + } else { + err = errStreamClosed + } + } + st.closeErr = err st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -1594,7 +1692,6 @@ func (sc *serverConn) processSetting(s Setting) error { } switch s.ID { case SettingHeaderTableSize: - sc.headerTableSize = s.Val sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) case SettingEnablePush: sc.pushEnabled = s.Val != 0 @@ -1648,16 +1745,6 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { func (sc *serverConn) processData(f *DataFrame) error { sc.serveG.check() id := f.Header().StreamID - if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || id > sc.maxClientStreamID) { - // Discard all DATA frames if the GOAWAY is due to an - // error, or: - // - // Section 6.8: After sending a GOAWAY frame, the sender - // can discard frames for streams initiated by the - // receiver with identifiers higher than the identified - // last stream. - return nil - } data := f.Data() state, st := sc.state(id) @@ -1688,14 +1775,9 @@ func (sc *serverConn) processData(f *DataFrame) error { // But still enforce their connection-level flow control, // and return any flow control bytes since we're not going // to consume them. - if sc.inflow.available() < int32(f.Length) { + if !sc.inflow.take(f.Length) { return sc.countError("data_flow", streamError(id, ErrCodeFlowControl)) } - // Deduct the flow control from inflow, since we're - // going to immediately add it back in - // sendWindowUpdate, which also schedules sending the - // frames. - sc.inflow.take(int32(f.Length)) sc.sendWindowUpdate(nil, int(f.Length)) // conn-level if st != nil && st.resetQueued { @@ -1710,6 +1792,11 @@ func (sc *serverConn) processData(f *DataFrame) error { // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + if !sc.inflow.take(f.Length) { + return sc.countError("data_flow", streamError(id, ErrCodeFlowControl)) + } + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the // value of a content-length header field does not equal the sum of the @@ -1718,10 +1805,9 @@ func (sc *serverConn) processData(f *DataFrame) error { } if f.Length > 0 { // Check whether the client has flow control quota. 
- if st.inflow.available() < int32(f.Length) { + if !takeInflows(&sc.inflow, &st.inflow, f.Length) { return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl)) } - st.inflow.take(int32(f.Length)) if len(data) > 0 { wrote, err := st.body.Write(data) @@ -1737,10 +1823,12 @@ func (sc *serverConn) processData(f *DataFrame) error { // Return any padded flow control now, since we won't // refund it later on body reads. - if pad := int32(f.Length) - int32(len(data)); pad > 0 { - sc.sendWindowUpdate32(nil, pad) - sc.sendWindowUpdate32(st, pad) - } + // Call sendWindowUpdate even if there is no padding, + // to return buffered flow control credit if the sent + // window has shrunk. + pad := int32(f.Length) - int32(len(data)) + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) } if f.StreamEnded() { st.endStream() @@ -1794,19 +1882,27 @@ func (st *stream) copyTrailersToHandlerRequest() { } } +// onReadTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's ReadTimeout has fired. +func (st *stream) onReadTimeout() { + // Wrap the ErrDeadlineExceeded to avoid callers depending on us + // returning the bare error. + st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded)) +} + // onWriteTimeout is run on its own goroutine (from time.AfterFunc) // when the stream's WriteTimeout has fired. func (st *stream) onWriteTimeout() { - st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) + st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{ + StreamID: st.id, + Code: ErrCodeInternal, + Cause: os.ErrDeadlineExceeded, + }}) } func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { sc.serveG.check() id := f.StreamID - if sc.inGoAway { - // Ignore. - return nil - } // http://tools.ietf.org/html/rfc7540#section-5.1.1 // Streams initiated by a client MUST use odd-numbered stream // identifiers. [...] An endpoint that receives an unexpected @@ -1909,12 +2005,35 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout != 0 { sc.conn.SetReadDeadline(time.Time{}) + if st.body != nil { + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + } } go sc.runHandler(rw, req, handler) return nil } +func (sc *serverConn) upgradeRequest(req *http.Request) { + sc.serveG.check() + id := uint32(1) + sc.maxClientStreamID = id + st := sc.newStream(id, 0, stateHalfClosedRemote) + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + rw := sc.newResponseWriter(st, req) + + // Disable any read deadline set by the net/http package + // prior to the upgrade. 
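The upgradeRequest helper above, together with the new ServeConnOpts fields (UpgradeRequest, Settings, SawClientPreface), is the server half of the h2c "Upgrade: h2c" path. A minimal sketch (not part of the patch) of the calling side, assuming the HTTP/1.1 handshake has already been performed elsewhere; in practice the golang.org/x/net/http2/h2c package does this plumbing:

package h2cexample

import (
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

// serveUpgraded hands an already-upgraded connection to the HTTP/2 server.
// conn, req, settings and handler are assumed to come from an h2c handshake:
// the request body fully read, the 101 response written, and the
// HTTP2-Settings header base64-decoded into settings.
func serveUpgraded(conn net.Conn, req *http.Request, settings []byte, handler http.Handler) {
	s := &http2.Server{}
	s.ServeConn(conn, &http2.ServeConnOpts{
		Handler:        handler,
		UpgradeRequest: req,      // the request that carried the upgrade
		Settings:       settings, // decoded HTTP2-Settings payload
		// SawClientPreface stays false: the client preface is still on the wire.
	})
}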
+ if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) +} + func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { sc := st.sc sc.serveG.check() @@ -1957,9 +2076,6 @@ func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error { } func (sc *serverConn) processPriority(f *PriorityFrame) error { - if sc.inGoAway { - return nil - } if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil { return err } @@ -1984,8 +2100,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.srv.initialStreamRecvWindowSize()) if sc.hs.WriteTimeout != 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -2033,12 +2148,6 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } - bodyOpen := !f.StreamEnded() - if rp.method == "HEAD" && bodyOpen { - // HEAD requests can't have bodies - return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol)) - } - rp.header = make(http.Header) for _, hf := range f.RegularFields() { rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) @@ -2051,6 +2160,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if err != nil { return nil, nil, err } + bodyOpen := !f.StreamEnded() if bodyOpen { if vv, ok := rp.header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { @@ -2145,6 +2255,11 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r } req = req.WithContext(st.ctx) + rw := sc.newResponseWriter(st, req) + return rw, req, nil +} + +func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *responseWriter { rws := responseWriterStatePool.Get().(*responseWriterState) bwSave := rws.bw *rws = responseWriterState{} // zero all the fields @@ -2153,10 +2268,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r rws.bw.Reset(chunkWriter{rws}) rws.stream = st rws.req = req - rws.body = body - - rw := &responseWriter{rws: rws} - return rw, req, nil + return &responseWriter{rws: rws} } // Run on its own goroutine. @@ -2164,6 +2276,9 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler didPanic := true defer func() { rw.rws.stream.cancelCtx() + if req.MultipartForm != nil { + req.MultipartForm.RemoveAll() + } if didPanic { e := recover() sc.writeFrameFromHandler(FrameWriteRequest{ @@ -2267,47 +2382,28 @@ func (sc *serverConn) noteBodyRead(st *stream, n int) { } // st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate(st *stream, n int) { - sc.serveG.check() - // "The legal range for the increment to the flow control - // window is 1 to 2^31-1 (2,147,483,647) octets." - // A Go Read call on 64-bit machines could in theory read - // a larger Read than this. Very unlikely, but we handle it here - // rather than elsewhere for now. 
- const maxUint31 = 1<<31 - 1 - for n >= maxUint31 { - sc.sendWindowUpdate32(st, maxUint31) - n -= maxUint31 - } - sc.sendWindowUpdate32(st, int32(n)) +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.sendWindowUpdate(st, int(n)) } // st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { sc.serveG.check() - if n == 0 { - return - } - if n < 0 { - panic("negative update") - } var streamID uint32 - if st != nil { + var send int32 + if st == nil { + send = sc.inflow.add(n) + } else { streamID = st.id + send = st.inflow.add(n) + } + if send == 0 { + return } sc.writeFrame(FrameWriteRequest{ - write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + write: writeWindowUpdate{streamID: streamID, n: uint32(send)}, stream: st, }) - var ok bool - if st == nil { - ok = sc.inflow.add(n) - } else { - ok = st.inflow.add(n) - } - if !ok { - panic("internal error; sent too many window updates without decrements?") - } } // requestBody is the Handler's Request.Body type. @@ -2316,17 +2412,18 @@ type requestBody struct { _ incomparable stream *stream conn *serverConn - closed bool // for use by Close only - sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body - needsContinue bool // need to send a 100-continue + closeOnce sync.Once // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue } func (b *requestBody) Close() error { - if b.pipe != nil && !b.closed { - b.pipe.BreakWithError(errClosedBody) - } - b.closed = true + b.closeOnce.Do(func() { + if b.pipe != nil { + b.pipe.BreakWithError(errClosedBody) + } + }) return nil } @@ -2370,7 +2467,6 @@ type responseWriterState struct { // immutable within a request: stream *stream req *http.Request - body *requestBody // to close at end of request, if DATA frames didn't conn *serverConn // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc @@ -2395,7 +2491,15 @@ type responseWriterState struct { type chunkWriter struct{ rws *responseWriterState } -func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } +func (cw chunkWriter) Write(p []byte) (n int, err error) { + n, err = cw.rws.writeChunk(p) + if err == errStreamClosed { + // If writing failed because the stream has been closed, + // return the reason it was closed. + err = cw.rws.stream.closeErr + } + return n, err +} func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 } @@ -2434,6 +2538,10 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { rws.writeHeader(200) } + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + isHeadResp := rws.req.Method == "HEAD" if !rws.sentHeader { rws.sentHeader = true @@ -2505,10 +2613,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { return 0, nil } - if rws.handlerDone { - rws.promoteUndeclaredTrailers() - } - // only send trailers if they have actually been defined by the // server handler. hasNonemptyTrailers := rws.hasNonemptyTrailers() @@ -2546,8 +2650,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { // prior to the headers being written. 
If the set of trailers is fixed // or known before the header is written, the normal Go trailers mechanism // is preferred: -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +// +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers const TrailerPrefix = "Trailer:" // promoteUndeclaredTrailers permits http.Handlers to set trailers @@ -2588,23 +2693,85 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { } } +func (w *responseWriter) SetReadDeadline(deadline time.Time) error { + st := w.rws.stream + if !deadline.IsZero() && deadline.Before(time.Now()) { + // If we're setting a deadline in the past, reset the stream immediately + // so writes after SetWriteDeadline returns will fail. + st.onReadTimeout() + return nil + } + w.rws.conn.sendServeMsg(func(sc *serverConn) { + if st.readDeadline != nil { + if !st.readDeadline.Stop() { + // Deadline already exceeded, or stream has been closed. + return + } + } + if deadline.IsZero() { + st.readDeadline = nil + } else if st.readDeadline == nil { + st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) + } else { + st.readDeadline.Reset(deadline.Sub(time.Now())) + } + }) + return nil +} + +func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { + st := w.rws.stream + if !deadline.IsZero() && deadline.Before(time.Now()) { + // If we're setting a deadline in the past, reset the stream immediately + // so writes after SetWriteDeadline returns will fail. + st.onWriteTimeout() + return nil + } + w.rws.conn.sendServeMsg(func(sc *serverConn) { + if st.writeDeadline != nil { + if !st.writeDeadline.Stop() { + // Deadline already exceeded, or stream has been closed. + return + } + } + if deadline.IsZero() { + st.writeDeadline = nil + } else if st.writeDeadline == nil { + st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) + } else { + st.writeDeadline.Reset(deadline.Sub(time.Now())) + } + }) + return nil +} + func (w *responseWriter) Flush() { + w.FlushError() +} + +func (w *responseWriter) FlushError() error { rws := w.rws if rws == nil { panic("Header called after Handler finished") } + var err error if rws.bw.Buffered() > 0 { - if err := rws.bw.Flush(); err != nil { - // Ignore the error. The frame writer already knows. - return - } + err = rws.bw.Flush() } else { // The bufio.Writer won't call chunkWriter.Write // (writeChunk with zero bytes, so we have to do it // ourselves to force the HTTP response header and/or // final DATA frame (with END_STREAM) to be sent. - rws.writeChunk(nil) + _, err = chunkWriter{rws}.Write(nil) + if err == nil { + select { + case <-rws.stream.cw: + err = rws.stream.closeErr + default: + } + } } + return err } func (w *responseWriter) CloseNotify() <-chan bool { @@ -2643,8 +2810,7 @@ func checkWriteHeaderCode(code int) { // Issue 22880: require valid WriteHeader status codes. // For now we only enforce that it's three digits. // In the future we might block things over 599 (600 and above aren't defined - // at http://httpwg.org/specs/rfc7231.html#status.codes) - // and we might block under 200 (once we have more mature 1xx support). + // at http://httpwg.org/specs/rfc7231.html#status.codes). // But for now any three digits. 
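The SetReadDeadline and SetWriteDeadline methods added above give handlers per-stream deadlines in the spirit of net.Conn deadlines; a fired read deadline surfaces from the request body as an error wrapping os.ErrDeadlineExceeded. A minimal sketch (not part of the patch) of using them from a handler served by this package; the interface assertion is an assumption for illustration, and on Go 1.20+ http.NewResponseController exposes the same calls:

package main

import (
	"io"
	"net/http"
	"time"
)

func slowHandler(w http.ResponseWriter, r *http.Request) {
	// The http2 responseWriter now implements per-stream deadlines; assert for them.
	type deadlines interface {
		SetReadDeadline(time.Time) error
		SetWriteDeadline(time.Time) error
	}
	if d, ok := w.(deadlines); ok {
		_ = d.SetReadDeadline(time.Now().Add(30 * time.Second))  // bound reads of r.Body
		_ = d.SetWriteDeadline(time.Now().Add(10 * time.Second)) // bound writing the response
	}
	if _, err := io.Copy(io.Discard, r.Body); err != nil {
		http.Error(w, "request body read failed or timed out", http.StatusRequestTimeout)
		return
	}
	io.WriteString(w, "done")
}

func main() {
	http.HandleFunc("/slow", slowHandler)
	_ = http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil) // placeholder TLS setup
}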
// // We used to send "HTTP/1.1 000 0" on the wire in responses but there's @@ -2665,13 +2831,41 @@ func (w *responseWriter) WriteHeader(code int) { } func (rws *responseWriterState) writeHeader(code int) { - if !rws.wroteHeader { - checkWriteHeaderCode(code) - rws.wroteHeader = true - rws.status = code - if len(rws.handlerHeader) > 0 { - rws.snapHeader = cloneHeader(rws.handlerHeader) + if rws.wroteHeader { + return + } + + checkWriteHeaderCode(code) + + // Handle informational headers + if code >= 100 && code <= 199 { + // Per RFC 8297 we must not clear the current header map + h := rws.handlerHeader + + _, cl := h["Content-Length"] + _, te := h["Transfer-Encoding"] + if cl || te { + h = h.Clone() + h.Del("Content-Length") + h.Del("Transfer-Encoding") + } + + if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: code, + h: h, + endStream: rws.handlerDone && !rws.hasTrailers(), + }) != nil { + rws.dirty = true } + + return + } + + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) } } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f135b0f75189..b43ec10cfed9 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -16,7 +16,7 @@ import ( "errors" "fmt" "io" - "io/ioutil" + "io/fs" "log" "math" mathrand "math/rand" @@ -47,10 +47,6 @@ const ( // we buffer per stream. transportDefaultStreamFlow = 4 << 20 - // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send - // a stream-level WINDOW_UPDATE for at a time. - transportDefaultStreamMinRefresh = 4 << 10 - defaultUserAgent = "Go-http-client/2.0" // initialMaxConcurrentStreams is a connections maxConcurrentStreams until @@ -68,13 +64,23 @@ const ( // A Transport internally caches connections to servers. It is safe // for concurrent use by multiple goroutines. type Transport struct { - // DialTLS specifies an optional dial function for creating - // TLS connections for requests. + // DialTLSContext specifies an optional dial function with context for + // creating TLS connections for requests. // - // If DialTLS is nil, tls.Dial is used. + // If DialTLSContext and DialTLS is nil, tls.Dial is used. // // If the returned net.Conn has a ConnectionState method like tls.Conn, // it will be used to set http.Response.TLS. + DialTLSContext func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) + + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLSContext and DialTLS is nil, tls.Dial is used. + // + // Deprecated: Use DialTLSContext instead, which allows the transport + // to cancel dials as soon as they are no longer needed. + // If both are set, DialTLSContext takes priority. DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) // TLSClientConfig specifies the TLS configuration to use with @@ -108,6 +114,28 @@ type Transport struct { // to mean no limit. MaxHeaderListSize uint32 + // MaxReadFrameSize is the http2 SETTINGS_MAX_FRAME_SIZE to send in the + // initial settings frame. It is the size in bytes of the largest frame + // payload that the sender is willing to receive. If 0, no setting is + // sent, and the value is provided by the peer, which should be 16384 + // according to the spec: + // https://datatracker.ietf.org/doc/html/rfc7540#section-6.5.2. 
+ // Values are bounded in the range 16k to 16M. + MaxReadFrameSize uint32 + + // MaxDecoderHeaderTableSize optionally specifies the http2 + // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It + // informs the remote endpoint of the maximum size of the header compression + // table used to decode header blocks, in octets. If zero, the default value + // of 4096 is used. + MaxDecoderHeaderTableSize uint32 + + // MaxEncoderHeaderTableSize optionally specifies an upper limit for the + // header compression table used for encoding request headers. Received + // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero, + // the default value of 4096 is used. + MaxEncoderHeaderTableSize uint32 + // StrictMaxConcurrentStreams controls whether the server's // SETTINGS_MAX_CONCURRENT_STREAMS should be respected // globally. If false, new TCP connections are created to the @@ -161,6 +189,19 @@ func (t *Transport) maxHeaderListSize() uint32 { return t.MaxHeaderListSize } +func (t *Transport) maxFrameReadSize() uint32 { + if t.MaxReadFrameSize == 0 { + return 0 // use the default provided by the peer + } + if t.MaxReadFrameSize < minMaxFrameSize { + return minMaxFrameSize + } + if t.MaxReadFrameSize > maxFrameSize { + return maxFrameSize + } + return t.MaxReadFrameSize +} + func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } @@ -249,7 +290,8 @@ func (t *Transport) initConnPool() { // HTTP/2 server. type ClientConn struct { t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls + tconn net.Conn // usually *tls.Conn, except specialized impls + tconnClosed bool tlsState *tls.ConnectionState // nil only for specialized impls reused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request @@ -264,8 +306,8 @@ type ClientConn struct { mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow flow // our conn-level flow control quota (cs.flow is per stream) - inflow flow // peer's conn-level flow control + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control doNotReuse bool // whether conn is marked to not be reused for any future requests closing bool closed bool @@ -282,10 +324,11 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. 
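The Transport fields added above are the client-side counterparts: DialTLSContext makes dials cancellable, MaxReadFrameSize sets the advertised SETTINGS_MAX_FRAME_SIZE, and the header-table-size fields mirror the server knobs. A minimal sketch (not part of the patch) of wiring them up; the dial helper built on crypto/tls.Dialer is an assumption for illustration, while the field names and the DialTLSContext signature are the ones documented in this hunk:

package main

import (
	"context"
	"crypto/tls"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func newClient() *http.Client {
	t := &http2.Transport{
		MaxReadFrameSize:          1 << 20, // advertise SETTINGS_MAX_FRAME_SIZE of 1 MiB
		MaxDecoderHeaderTableSize: 8 << 10, // advertise SETTINGS_HEADER_TABLE_SIZE
		DialTLSContext: func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) {
			d := &tls.Dialer{Config: cfg} // honors ctx cancellation, unlike the deprecated DialTLS
			return d.DialContext(ctx, network, addr)
		},
	}
	return &http.Client{Transport: t}
}

func main() {
	if resp, err := newClient().Get("https://example.com/"); err == nil {
		resp.Body.Close()
	}
}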
@@ -329,14 +372,14 @@ type clientStream struct { respHeaderRecv chan struct{} // closed when headers are received res *http.Response // set if respHeaderRecv is closed - flow flow // guarded by cc.mu - inflow flow // guarded by cc.mu - bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read - readErr error // sticky read error; owned by transportResponseBody.Read + flow outflow // guarded by cc.mu + inflow inflow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read reqBody io.ReadCloser - reqBodyContentLength int64 // -1 means unknown - reqBodyClosed bool // body has been closed; guarded by cc.mu + reqBodyContentLength int64 // -1 means unknown + reqBodyClosed chan struct{} // guarded by cc.mu; non-nil on Close, closed when done // owned by writeRequest: sentEndStream bool // sent an END_STREAM flag to the peer @@ -376,9 +419,8 @@ func (cs *clientStream) abortStreamLocked(err error) { cs.abortErr = err close(cs.abort) }) - if cs.reqBody != nil && !cs.reqBodyClosed { - cs.reqBody.Close() - cs.reqBodyClosed = true + if cs.reqBody != nil { + cs.closeReqBodyLocked() } // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { @@ -391,13 +433,24 @@ func (cs *clientStream) abortRequestBodyWrite() { cc := cs.cc cc.mu.Lock() defer cc.mu.Unlock() - if cs.reqBody != nil && !cs.reqBodyClosed { - cs.reqBody.Close() - cs.reqBodyClosed = true + if cs.reqBody != nil && cs.reqBodyClosed == nil { + cs.closeReqBodyLocked() cc.cond.Broadcast() } } +func (cs *clientStream) closeReqBodyLocked() { + if cs.reqBodyClosed != nil { + return + } + cs.reqBodyClosed = make(chan struct{}) + reqBodyClosed := cs.reqBodyClosed + go func() { + cs.reqBody.Close() + close(reqBodyClosed) + }() +} + type stickyErrWriter struct { conn net.Conn timeout time.Duration @@ -481,6 +534,15 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } +var retryBackoffHook func(time.Duration) *time.Timer + +func backoffNewTimer(d time.Duration) *time.Timer { + if retryBackoffHook != nil { + return retryBackoffHook(d) + } + return time.NewTimer(d) +} + // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -501,14 +563,19 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res if req, err = shouldRetryRequest(req, err); err == nil { // After the first retry, do exponential backoff with 10% jitter. 
if retry == 0 { + t.vlogf("RoundTrip retrying after failure: %v", err) continue } backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) + d := time.Second * time.Duration(backoff) + timer := backoffNewTimer(d) select { - case <-time.After(time.Second * time.Duration(backoff)): + case <-timer.C: + t.vlogf("RoundTrip retrying after failure: %v", err) continue case <-req.Context().Done(): + timer.Stop() err = req.Context().Err() } } @@ -591,7 +658,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - tconn, err := t.dialTLS(ctx)("tcp", addr, t.newTLSConfig(host)) + tconn, err := t.dialTLS(ctx, "tcp", addr, t.newTLSConfig(host)) if err != nil { return nil, err } @@ -612,24 +679,25 @@ func (t *Transport) newTLSConfig(host string) *tls.Config { return cfg } -func (t *Transport) dialTLS(ctx context.Context) func(string, string, *tls.Config) (net.Conn, error) { - if t.DialTLS != nil { - return t.DialTLS +func (t *Transport) dialTLS(ctx context.Context, network, addr string, tlsCfg *tls.Config) (net.Conn, error) { + if t.DialTLSContext != nil { + return t.DialTLSContext(ctx, network, addr, tlsCfg) + } else if t.DialTLS != nil { + return t.DialTLS(network, addr, tlsCfg) } - return func(network, addr string, cfg *tls.Config) (net.Conn, error) { - tlsCn, err := t.dialTLSWithContext(ctx, network, addr, cfg) - if err != nil { - return nil, err - } - state := tlsCn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return tlsCn, nil + + tlsCn, err := t.dialTLSWithContext(ctx, network, addr, tlsCfg) + if err != nil { + return nil, err + } + state := tlsCn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return tlsCn, nil } // disableKeepAlives reports whether connections should be closed as @@ -645,6 +713,20 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } +func (t *Transport) maxDecoderHeaderTableSize() uint32 { + if v := t.MaxDecoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + +func (t *Transport) maxEncoderHeaderTableSize() uint32 { + if v := t.MaxEncoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } @@ -685,15 +767,19 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) + if t.maxFrameReadSize() != 0 { + cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) + } if t.CountError != nil { cc.fr.countError = t.CountError } - cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + maxHeaderTableSize := t.maxDecoderHeaderTableSize() + cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() - // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on - // henc in response to SETTINGS frames? 
cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.peerMaxHeaderTableSize = initialHeaderTableSize if t.AllowHTTP { cc.nextStreamID = 3 @@ -708,14 +794,20 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro {ID: SettingEnablePush, Val: 0}, {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, } + if max := t.maxFrameReadSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + } if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } + if maxHeaderTableSize != initialHeaderTableSize { + initialSettings = append(initialSettings, Setting{ID: SettingHeaderTableSize, Val: maxHeaderTableSize}) + } cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.inflow.init(transportDefaultConnFlow + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -732,11 +824,13 @@ func (cc *ClientConn) healthCheck() { // trigger the healthCheck again if there is no frame received. ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() + cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) if err != nil { + cc.vlogf("http2: Transport health check failure: %v", err) cc.closeForLostPing() - cc.t.connPool().MarkDead(cc) - return + } else { + cc.vlogf("http2: Transport health check success") } } @@ -907,6 +1001,24 @@ func (cc *ClientConn) onIdleTimeout() { cc.closeIfIdle() } +func (cc *ClientConn) closeConn() { + t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) + defer t.Stop() + cc.tconn.Close() +} + +// A tls.Conn.Close can hang for a long time if the peer is unresponsive. +// Try to shut it down more aggressively. +func (cc *ClientConn) forceCloseConn() { + tc, ok := cc.tconn.(*tls.Conn) + if !ok { + return + } + if nc := tlsUnderlyingConn(tc); nc != nil { + nc.Close() + } +} + func (cc *ClientConn) closeIfIdle() { cc.mu.Lock() if len(cc.streams) > 0 || cc.streamsReserved > 0 { @@ -921,7 +1033,7 @@ func (cc *ClientConn) closeIfIdle() { if VerboseLogs { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) } - cc.tconn.Close() + cc.closeConn() } func (cc *ClientConn) isDoNotReuseAndIdle() bool { @@ -938,7 +1050,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { return err } // Wait for all in-flight streams to complete or connection to close - done := make(chan error, 1) + done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { cc.mu.Lock() @@ -946,7 +1058,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { for { if len(cc.streams) == 0 || cc.closed { cc.closed = true - done <- cc.tconn.Close() + close(done) break } if cancelled { @@ -957,8 +1069,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { }() shutdownEnterWaitStateHook() select { - case err := <-done: - return err + case <-done: + cc.closeConn() + return nil case <-ctx.Done(): cc.mu.Lock() // Free the goroutine above @@ -995,15 +1108,15 @@ func (cc *ClientConn) sendGoAway() error { // closes the client connection immediately. In-flight requests are interrupted. // err is sent to streams. 
-func (cc *ClientConn) closeForError(err error) error { +func (cc *ClientConn) closeForError(err error) { cc.mu.Lock() cc.closed = true for _, cs := range cc.streams { cs.abortStreamLocked(err) } - defer cc.cond.Broadcast() - defer cc.mu.Unlock() - return cc.tconn.Close() + cc.cond.Broadcast() + cc.mu.Unlock() + cc.closeConn() } // Close closes the client connection immediately. @@ -1011,16 +1124,17 @@ func (cc *ClientConn) closeForError(err error) error { // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. func (cc *ClientConn) Close() error { err := errors.New("http2: client connection force closed via ClientConn.Close") - return cc.closeForError(err) + cc.closeForError(err) + return nil } // closes the client connection immediately. In-flight requests are interrupted. -func (cc *ClientConn) closeForLostPing() error { +func (cc *ClientConn) closeForLostPing() { err := errors.New("http2: client connection lost") if f := cc.t.CountError; f != nil { f("conn_close_lost_ping") } - return cc.closeForError(err) + cc.closeForError(err) } // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not @@ -1030,7 +1144,7 @@ var errRequestCanceled = errors.New("net/http: request canceled") func commaSeparatedTrailers(req *http.Request) (string, error) { keys := make([]string, 0, len(req.Trailer)) for k := range req.Trailer { - k = http.CanonicalHeaderKey(k) + k = canonicalHeader(k) switch k { case "Transfer-Encoding", "Trailer", "Content-Length": return "", fmt.Errorf("invalid Trailer key %q", k) @@ -1398,11 +1512,19 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // and in multiple cases: server replies <=299 and >299 // while still writing request body cc.mu.Lock() + mustCloseBody := false + if cs.reqBody != nil && cs.reqBodyClosed == nil { + mustCloseBody = true + cs.reqBodyClosed = make(chan struct{}) + } bodyClosed := cs.reqBodyClosed - cs.reqBodyClosed = true cc.mu.Unlock() - if !bodyClosed && cs.reqBody != nil { + if mustCloseBody { cs.reqBody.Close() + close(bodyClosed) + } + if bodyClosed != nil { + <-bodyClosed } if err != nil && cs.sentEndStream { @@ -1559,7 +1681,7 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { var sawEOF bool for !sawEOF { - n, err := body.Read(buf[:len(buf)]) + n, err := body.Read(buf) if hasContentLen { remainLen -= int64(n) if remainLen == 0 && err == nil { @@ -1582,7 +1704,7 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { } if err != nil { cc.mu.Lock() - bodyClosed := cs.reqBodyClosed + bodyClosed := cs.reqBodyClosed != nil cc.mu.Unlock() switch { case bodyClosed: @@ -1677,7 +1799,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) if cc.closed { return 0, errClientConnClosed } - if cs.reqBodyClosed { + if cs.reqBodyClosed != nil { return 0, errStopReqBodyWrite } select { @@ -1748,7 +1870,8 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } for _, v := range vv { if !httpguts.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + // Don't include the value in the error, because it may be sensitive. + return nil, fmt.Errorf("invalid HTTP header value for header %q", k) } } } @@ -1861,7 +1984,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // Header list size is ok. Write the headers. 
enumerateHeaders(func(name, value string) { - name, ascii := asciiToLower(name) + name, ascii := lowerHeader(name) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -1914,7 +2037,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { } for k, vv := range trailer { - lowKey, ascii := asciiToLower(k) + lowKey, ascii := lowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -1946,8 +2069,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.add(transportDefaultStreamFlow) - cs.inflow.setConnFlow(&cc.inflow) + cs.inflow.init(transportDefaultStreamFlow) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -1972,13 +2094,13 @@ func (cc *ClientConn) forgetStreamID(id uint32) { // wake up RoundTrip if there is a pending request. cc.cond.Broadcast() - closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { if VerboseLogs { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2) } cc.closed = true - defer cc.tconn.Close() + defer cc.closeConn() } cc.mu.Unlock() @@ -2025,8 +2147,8 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - defer cc.tconn.Close() - defer cc.t.connPool().MarkDead(cc) + cc.t.connPool().MarkDead(cc) + defer cc.closeConn() defer close(cc.readerDone) if cc.idleTimer != nil { @@ -2048,6 +2170,7 @@ func (rl *clientConnReadLoop) cleanup() { err = io.ErrUnexpectedEOF } cc.closed = true + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2246,7 +2369,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra Status: status + " " + http.StatusText(statusCode), } for _, hf := range regularFields { - key := http.CanonicalHeaderKey(hf.Name) + key := canonicalHeader(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { @@ -2254,7 +2377,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Trailer = t } foreachHeaderElement(hf.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil + t[canonicalHeader(v)] = nil }) } else { vv := header[key] @@ -2359,7 +2482,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr trailer := make(http.Header) for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) + key := canonicalHeader(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer @@ -2405,21 +2528,10 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) { } cc.mu.Lock() - var connAdd, streamAdd int32 - // Check the conn-level first, before the stream-level. - if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { - connAdd = transportDefaultConnFlow - v - cc.inflow.add(connAdd) - } + connAdd := cc.inflow.add(n) + var streamAdd int32 if err == nil { // No need to refresh if the stream is over or failed. - // Consider any buffered body data (read from the conn but not - // consumed by the client) when computing flow control for this - // stream. 
- v := int(cs.inflow.available()) + cs.bufPipe.Len() - if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { - streamAdd = int32(transportDefaultStreamFlow - v) - cs.inflow.add(streamAdd) - } + streamAdd = cs.inflow.add(n) } cc.mu.Unlock() @@ -2447,17 +2559,15 @@ func (b transportResponseBody) Close() error { if unread > 0 { cc.mu.Lock() // Return connection-level flow control. - if unread > 0 { - cc.inflow.add(int32(unread)) - } + connAdd := cc.inflow.add(unread) cc.mu.Unlock() // TODO(dneil): Acquiring this mutex can block indefinitely. // Move flow control return to a goroutine? cc.wmu.Lock() // Return connection-level flow control. - if unread > 0 { - cc.fr.WriteWindowUpdate(0, uint32(unread)) + if connAdd > 0 { + cc.fr.WriteWindowUpdate(0, uint32(connAdd)) } cc.bw.Flush() cc.wmu.Unlock() @@ -2500,13 +2610,18 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { // But at least return their flow control: if f.Length > 0 { cc.mu.Lock() - cc.inflow.add(int32(f.Length)) + ok := cc.inflow.take(f.Length) + connAdd := cc.inflow.add(int(f.Length)) cc.mu.Unlock() - - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(f.Length)) - cc.bw.Flush() - cc.wmu.Unlock() + if !ok { + return ConnectionError(ErrCodeFlowControl) + } + if connAdd > 0 { + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(connAdd)) + cc.bw.Flush() + cc.wmu.Unlock() + } } return nil } @@ -2537,9 +2652,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { } // Check connection-level flow control. cc.mu.Lock() - if cs.inflow.available() >= int32(f.Length) { - cs.inflow.take(int32(f.Length)) - } else { + if !takeInflows(&cc.inflow, &cs.inflow, f.Length) { cc.mu.Unlock() return ConnectionError(ErrCodeFlowControl) } @@ -2561,19 +2674,20 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { } } - if refund > 0 { - cc.inflow.add(int32(refund)) - if !didReset { - cs.inflow.add(int32(refund)) - } + sendConn := cc.inflow.add(refund) + var sendStream int32 + if !didReset { + sendStream = cs.inflow.add(refund) } cc.mu.Unlock() - if refund > 0 { + if sendConn > 0 || sendStream > 0 { cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(refund)) - if !didReset { - cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + if sendConn > 0 { + cc.fr.WriteWindowUpdate(0, uint32(sendConn)) + } + if sendStream > 0 { + cc.fr.WriteWindowUpdate(cs.ID, uint32(sendStream)) } cc.bw.Flush() cc.wmu.Unlock() @@ -2641,7 +2755,6 @@ func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { if fn := cc.t.CountError; fn != nil { fn("recv_goaway_" + f.ErrCode.stringToken()) } - } cc.setGoAway(f) return nil @@ -2706,8 +2819,10 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc.cond.Broadcast() cc.initialWindowSize = s.Val + case SettingHeaderTableSize: + cc.henc.SetMaxDynamicTableSize(s.Val) + cc.peerMaxHeaderTableSize = s.Val default: - // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. cc.vlogf("Unhandled Setting: %v", s) } return nil @@ -2881,7 +2996,12 @@ func (t *Transport) logf(format string, args ...interface{}) { log.Printf(format, args...) 
} -var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) +var noBody io.ReadCloser = noBodyReader{} + +type noBodyReader struct{} + +func (noBodyReader) Close() error { return nil } +func (noBodyReader) Read([]byte) (int, error) { return 0, io.EOF } type missingBody struct{} @@ -2926,7 +3046,11 @@ func (gz *gzipReader) Read(p []byte) (n int, err error) { } func (gz *gzipReader) Close() error { - return gz.body.Close() + if err := gz.body.Close(); err != nil { + return err + } + gz.zerr = fs.ErrClosed + return nil } type errorReader struct{ err error } @@ -2990,7 +3114,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Now().Sub(cc.lastActive) + ci.IdleTime = time.Since(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 2618b2c11d22..0a242c669e2c 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -383,16 +383,15 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { var n *priorityNode - if id := wr.StreamID(); id == 0 { + if wr.isControl() { n = &ws.root } else { + id := wr.StreamID() n = ws.nodes[id] if n == nil { // id is an idle or closed stream. wr should not be a HEADERS or - // DATA frame. However, wr can be a RST_STREAM. In this case, we - // push wr onto the root, rather than creating a new priorityNode, - // since RST_STREAM is tiny and the stream's priority is unknown - // anyway. See issue #17919. + // DATA frame. In other case, we push wr onto the root, rather + // than creating a new priorityNode. if wr.DataSize() > 0 { panic("add DATA on non-open stream") } diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go index 7a8cf889b5bc..9c070a44b377 100644 --- a/vendor/golang.org/x/net/idna/trieval.go +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -17,23 +17,23 @@ package idna // // The per-rune values have the following format: // -// if mapped { -// if inlinedXOR { -// 15..13 inline XOR marker -// 12..11 unused -// 10..3 inline XOR mask -// } else { -// 15..3 index into xor or mapping table -// } -// } else { -// 15..14 unused -// 13 mayNeedNorm -// 12..11 attributes -// 10..8 joining type -// 7..3 category type -// } -// 2 use xor pattern -// 1..0 mapped category +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category // // See the definitions below for a more detailed description of the various // bits. 
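The idna/trieval.go comment above describes the packed per-rune value layout used by the idna lookup tables. A minimal sketch (not part of the patch) that unpacks a value following only that description; the variable names and the mapped/unmapped test are assumptions for illustration, since the real masks are unexported inside the package:

package main

import "fmt"

func decode(v uint16) {
	mappedCat := v & 0x3 // bits 1..0: mapped category
	useXOR := v&0x4 != 0 // bit 2: use xor pattern
	if mappedCat != 0 {  // treat a nonzero category as "mapped" for this sketch
		fmt.Printf("mapped: payload or table index=%#x, useXOR=%v\n", v>>3, useXOR)
		return
	}
	fmt.Printf("unmapped: category=%d joining=%d attributes=%d mayNeedNorm=%v\n",
		(v>>3)&0x1f, (v>>8)&0x7, (v>>11)&0x3, v&(1<<13) != 0)
}

func main() { decode(0x1a2c) }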
diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go index 0bfcf7afc6bf..41883c530c80 100644 --- a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -172,7 +172,23 @@ type mmsgTmpsPool struct { } func (p *mmsgTmpsPool) Get() *mmsgTmps { - return p.p.Get().(*mmsgTmps) + m := p.p.Get().(*mmsgTmps) + // Clear fields up to the len (not the cap) of the slice, + // assuming that the previous caller only used that many elements. + for i := range m.packer.sockaddrs { + m.packer.sockaddrs[i] = 0 + } + m.packer.sockaddrs = m.packer.sockaddrs[:0] + for i := range m.packer.vs { + m.packer.vs[i] = iovec{} + } + m.packer.vs = m.packer.vs[:0] + for i := range m.packer.hs { + m.packer.hs[i].Len = 0 + m.packer.hs[i].Hdr = msghdr{} + } + m.packer.hs = m.packer.hs[:0] + return m } func (p *mmsgTmpsPool) Put(tmps *mmsgTmps) { diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go index c3c7cc4c83ad..5a38798cc0cd 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_linux.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go @@ -17,9 +17,6 @@ func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { if sa != nil { h.Name = (*byte)(unsafe.Pointer(&sa[0])) h.Namelen = uint32(len(sa)) - } else { - h.Name = nil - h.Namelen = 0 } } diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go index ba53f564bb46..f7d0b0d2b853 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -8,22 +8,21 @@ package socket import ( + "net" "os" ) func (c *Conn) recvMsg(m *Message, flags int) error { m.raceWrite() - var h msghdr - vs := make([]iovec, len(m.Buffers)) - var sa []byte - if c.network != "tcp" { - sa = make([]byte, sizeofSockaddrInet6) - } - h.pack(vs, m.Buffers, m.OOB, sa) - var operr error - var n int + var ( + operr error + n int + oobn int + recvflags int + from net.Addr + ) fn := func(s uintptr) bool { - n, operr = recvmsg(s, &h, flags) + n, oobn, recvflags, from, operr = recvmsg(s, m.Buffers, m.OOB, flags, c.network) return ioComplete(flags, operr) } if err := c.c.Read(fn); err != nil { @@ -32,34 +31,21 @@ func (c *Conn) recvMsg(m *Message, flags int) error { if operr != nil { return os.NewSyscallError("recvmsg", operr) } - if c.network != "tcp" { - var err error - m.Addr, err = parseInetAddr(sa[:], c.network) - if err != nil { - return err - } - } + m.Addr = from m.N = n - m.NN = h.controllen() - m.Flags = h.flags() + m.NN = oobn + m.Flags = recvflags return nil } func (c *Conn) sendMsg(m *Message, flags int) error { m.raceRead() - var h msghdr - vs := make([]iovec, len(m.Buffers)) - var sa []byte - if m.Addr != nil { - var a [sizeofSockaddrInet6]byte - n := marshalInetAddr(m.Addr, a[:]) - sa = a[:n] - } - h.pack(vs, m.Buffers, m.OOB, sa) - var operr error - var n int + var ( + operr error + n int + ) fn := func(s uintptr) bool { - n, operr = sendmsg(s, &h, flags) + n, operr = sendmsg(s, m.Buffers, m.OOB, m.Addr, flags) return ioComplete(flags, operr) } if err := c.c.Write(fn); err != nil { diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go index 381e45e167f9..7cfb349c0cd5 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_stub.go +++ 
b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -36,11 +36,11 @@ func setsockopt(s uintptr, level, name int, b []byte) error { return errNotImplemented } -func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { - return 0, errNotImplemented +func recvmsg(s uintptr, buffers [][]byte, oob []byte, flags int, network string) (n, oobn int, recvflags int, from net.Addr, err error) { + return 0, 0, 0, nil, errNotImplemented } -func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { +func sendmsg(s uintptr, buffers [][]byte, oob []byte, to net.Addr, flags int) (int, error) { return 0, errNotImplemented } diff --git a/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/golang.org/x/net/internal/socket/sys_unix.go index d203e2984cac..de823932b9a7 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_unix.go +++ b/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -8,8 +8,10 @@ package socket import ( - "syscall" + "net" "unsafe" + + "golang.org/x/sys/unix" ) //go:linkname syscall_getsockopt syscall.getsockopt @@ -18,12 +20,6 @@ func syscall_getsockopt(s, level, name int, val unsafe.Pointer, vallen *uint32) //go:linkname syscall_setsockopt syscall.setsockopt func syscall_setsockopt(s, level, name int, val unsafe.Pointer, vallen uintptr) error -//go:linkname syscall_recvmsg syscall.recvmsg -func syscall_recvmsg(s int, msg *syscall.Msghdr, flags int) (int, error) - -//go:linkname syscall_sendmsg syscall.sendmsg -func syscall_sendmsg(s int, msg *syscall.Msghdr, flags int) (int, error) - func getsockopt(s uintptr, level, name int, b []byte) (int, error) { l := uint32(len(b)) err := syscall_getsockopt(int(s), level, name, unsafe.Pointer(&b[0]), &l) @@ -34,10 +30,93 @@ func setsockopt(s uintptr, level, name int, b []byte) error { return syscall_setsockopt(int(s), level, name, unsafe.Pointer(&b[0]), uintptr(len(b))) } -func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { - return syscall_recvmsg(int(s), (*syscall.Msghdr)(unsafe.Pointer(h)), flags) +func recvmsg(s uintptr, buffers [][]byte, oob []byte, flags int, network string) (n, oobn int, recvflags int, from net.Addr, err error) { + var unixFrom unix.Sockaddr + n, oobn, recvflags, unixFrom, err = unix.RecvmsgBuffers(int(s), buffers, oob, flags) + if unixFrom != nil { + from = sockaddrToAddr(unixFrom, network) + } + return } -func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { - return syscall_sendmsg(int(s), (*syscall.Msghdr)(unsafe.Pointer(h)), flags) +func sendmsg(s uintptr, buffers [][]byte, oob []byte, to net.Addr, flags int) (int, error) { + var unixTo unix.Sockaddr + if to != nil { + unixTo = addrToSockaddr(to) + } + return unix.SendmsgBuffers(int(s), buffers, oob, unixTo, flags) +} + +// addrToSockaddr converts a net.Addr to a unix.Sockaddr. +func addrToSockaddr(a net.Addr) unix.Sockaddr { + var ( + ip net.IP + port int + zone string + ) + switch a := a.(type) { + case *net.TCPAddr: + ip = a.IP + port = a.Port + zone = a.Zone + case *net.UDPAddr: + ip = a.IP + port = a.Port + zone = a.Zone + case *net.IPAddr: + ip = a.IP + zone = a.Zone + default: + return nil + } + + if ip4 := ip.To4(); ip4 != nil { + sa := unix.SockaddrInet4{Port: port} + copy(sa.Addr[:], ip4) + return &sa + } + + if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { + sa := unix.SockaddrInet6{Port: port} + copy(sa.Addr[:], ip6) + if zone != "" { + sa.ZoneId = uint32(zoneCache.index(zone)) + } + return &sa + } + + return nil +} + +// sockaddrToAddr converts a unix.Sockaddr to a net.Addr. 
+func sockaddrToAddr(sa unix.Sockaddr, network string) net.Addr { + var ( + ip net.IP + port int + zone string + ) + switch sa := sa.(type) { + case *unix.SockaddrInet4: + ip = make(net.IP, net.IPv4len) + copy(ip, sa.Addr[:]) + port = sa.Port + case *unix.SockaddrInet6: + ip = make(net.IP, net.IPv6len) + copy(ip, sa.Addr[:]) + port = sa.Port + if sa.ZoneId > 0 { + zone = zoneCache.name(int(sa.ZoneId)) + } + default: + return nil + } + + switch network { + case "tcp", "tcp4", "tcp6": + return &net.TCPAddr{IP: ip, Port: port, Zone: zone} + case "udp", "udp4", "udp6": + return &net.UDPAddr{IP: ip, Port: port, Zone: zone} + default: + return &net.IPAddr{IP: ip, Zone: zone} + } } diff --git a/vendor/golang.org/x/net/internal/socket/sys_windows.go b/vendor/golang.org/x/net/internal/socket/sys_windows.go index 2de0d68c619a..b738b89ddd0a 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_windows.go +++ b/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -5,6 +5,7 @@ package socket import ( + "net" "syscall" "unsafe" @@ -37,11 +38,11 @@ func setsockopt(s uintptr, level, name int, b []byte) error { return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b))) } -func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { - return 0, errNotImplemented +func recvmsg(s uintptr, buffers [][]byte, oob []byte, flags int, network string) (n, oobn int, recvflags int, from net.Addr, err error) { + return 0, 0, 0, nil, errNotImplemented } -func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { +func sendmsg(s uintptr, buffers [][]byte, oob []byte, to net.Addr, flags int) (int, error) { return 0, errNotImplemented } diff --git a/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go b/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go index 1e38b9223281..eaa896cb5700 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go +++ b/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go @@ -5,6 +5,7 @@ package socket import ( + "net" "syscall" "unsafe" ) @@ -27,12 +28,39 @@ func setsockopt(s uintptr, level, name int, b []byte) error { return errnoErr(errno) } -func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { - n, _, errno := syscall_syscall(syscall.SYS___RECVMSG_A, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) - return int(n), errnoErr(errno) +func recvmsg(s uintptr, buffers [][]byte, oob []byte, flags int, network string) (n, oobn int, recvflags int, from net.Addr, err error) { + var h msghdr + vs := make([]iovec, len(buffers)) + var sa []byte + if network != "tcp" { + sa = make([]byte, sizeofSockaddrInet6) + } + h.pack(vs, buffers, oob, sa) + sn, _, errno := syscall_syscall(syscall.SYS___RECVMSG_A, s, uintptr(unsafe.Pointer(&h)), uintptr(flags)) + n = int(sn) + oobn = h.controllen() + recvflags = h.flags() + err = errnoErr(errno) + if network != "tcp" { + var err2 error + from, err2 = parseInetAddr(sa, network) + if err2 != nil && err == nil { + err = err2 + } + } + return } -func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { - n, _, errno := syscall_syscall(syscall.SYS___SENDMSG_A, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) +func sendmsg(s uintptr, buffers [][]byte, oob []byte, to net.Addr, flags int) (int, error) { + var h msghdr + vs := make([]iovec, len(buffers)) + var sa []byte + if to != nil { + var a [sizeofSockaddrInet6]byte + n := marshalInetAddr(to, a[:]) + sa = a[:n] + } + h.pack(vs, buffers, oob, sa) + n, _, errno := syscall_syscall(syscall.SYS___SENDMSG_A, s, 
uintptr(unsafe.Pointer(&h)), uintptr(flags)) return int(n), errnoErr(errno) } diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go deleted file mode 100644 index 5acf6db6ea56..000000000000 --- a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs defs_darwin.go - -package socket - -type iovec struct { - Base *byte - Len uint32 -} - -type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen int32 - Control *byte - Controllen uint32 - Flags int32 -} - -type cmsghdr struct { - Len uint32 - Level int32 - Type int32 -} - -const ( - sizeofIovec = 0x8 - sizeofMsghdr = 0x1c -) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_riscv64.go similarity index 79% rename from vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go rename to vendor/golang.org/x/net/internal/socket/zsys_freebsd_riscv64.go index 5acf6db6ea56..965c0b28b510 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_riscv64.go @@ -1,11 +1,11 @@ // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs defs_darwin.go +// cgo -godefs defs_freebsd.go package socket type iovec struct { Base *byte - Len uint32 + Len uint64 } type msghdr struct { @@ -25,6 +25,6 @@ type cmsghdr struct { } const ( - sizeofIovec = 0x8 - sizeofMsghdr = 0x1c + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 ) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc.go index 59b71da57163..4c19269bee07 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc.go @@ -4,32 +4,32 @@ package socket type iovec struct { - Base *byte - Len uint32 + Base *byte + Len uint32 } type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 } type mmsghdr struct { - Hdr msghdr - Len uint32 + Hdr msghdr + Len uint32 } type cmsghdr struct { - Len uint32 - Level int32 - Type int32 + Len uint32 + Level int32 + Type int32 } const ( - sizeofIovec = 0x8 - sizeofMsghdr = 0x1c + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c ) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go new file mode 100644 index 000000000000..cebde7634f34 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go @@ -0,0 +1,30 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go new file mode 100644 index 000000000000..cebde7634f34 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go @@ -0,0 +1,30 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 +) diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go index 245834979938..6fbdc52b969b 100644 --- a/vendor/golang.org/x/net/ipv4/doc.go +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -16,8 +16,7 @@ // 3376. // Source-specific multicast is defined in RFC 4607. // -// -// Unicasting +// # Unicasting // // The options for unicasting are available for net.TCPConn, // net.UDPConn and net.IPConn which are created as network connections @@ -51,8 +50,7 @@ // }(c) // } // -// -// Multicasting +// # Multicasting // // The options for multicasting are available for net.UDPConn and // net.IPConn which are created as network connections that use the @@ -141,8 +139,7 @@ // } // } // -// -// More multicasting +// # More multicasting // // An application that uses PacketConn or RawConn may join multiple // multicast groups. For example, a UDP listener with port 1024 might @@ -200,8 +197,7 @@ // // error handling // } // -// -// Source-specific multicasting +// # Source-specific multicasting // // An application that uses PacketConn or RawConn on IGMPv3 supported // platform is able to join source-specific multicast groups. diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_riscv64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_riscv64.go new file mode 100644 index 000000000000..0feb9a7536db --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_riscv64.go @@ -0,0 +1,52 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]uint8 + X__ss_align int64 + X__ss_pad2 [112]uint8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go index e0be9d50d70d..2148b814ff2a 100644 --- a/vendor/golang.org/x/net/ipv6/doc.go +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -17,8 +17,7 @@ // On Darwin, this package requires OS X Mavericks version 10.9 or // above, or equivalent. // -// -// Unicasting +// # Unicasting // // The options for unicasting are available for net.TCPConn, // net.UDPConn and net.IPConn which are created as network connections @@ -52,8 +51,7 @@ // }(c) // } // -// -// Multicasting +// # Multicasting // // The options for multicasting are available for net.UDPConn and // net.IPConn which are created as network connections that use the @@ -140,8 +138,7 @@ // } // } // -// -// More multicasting +// # More multicasting // // An application that uses PacketConn may join multiple multicast // groups. For example, a UDP listener with port 1024 might join two @@ -199,8 +196,7 @@ // // error handling // } // -// -// Source-specific multicasting +// # Source-specific multicasting // // An application that uses PacketConn on MLDv2 supported platform is // able to join source-specific multicast groups. diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_riscv64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_riscv64.go new file mode 100644 index 000000000000..5b39eb8dfd29 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_riscv64.go @@ -0,0 +1,64 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]uint8 + X__ss_align int64 + X__ss_pad2 [112]uint8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/publicsuffix/data/children b/vendor/golang.org/x/net/publicsuffix/data/children new file mode 100644 index 0000000000000000000000000000000000000000..1038c561ade4683e91b37e8298fc5ead7c306779 GIT binary patch literal 2876 zcmWO8`CAib0svrUzRb+`nSk6zK|{VtNTRiJd7yyG8JU16mRliLD^_b=t;%||>na8l zP!yF%fdB@t+Ui#Avu#}!)CeZrS5%ZMplz*Iv9&5~*B{>h;dOCwadCeq;GIS9q`Z^& z4zVS!huE^`0kP%QL!#hTKe0dV5pkd}k|?!C6Nl&oqRgryj?$?_d0`G=rZt2)emhZX z-9en7jfADpL|Ci`i8}faai*}0IAc9YoTX2R&&HotpS7M5e;NNJao+kBaiQ><_=2^8 zxJch1F2>u5ONGtEC2I%qt+kW*&U&Bt!TN}}690_2YJE;zx4sqEGeBIQz$5DSQiP46 z346kOMDyNYqMeyTbTF|*XM&RGx}8MyGpQt*u$=@DVW1RXU~nZTDBVa8s31KJv7}dH zBIym6lHSS`(z|gP>8ng7eGQqUP?<$eHK@s{jhpc_xP=T*Zp8tHe~|%=yGSwoHz`r> z)<_JcSPBnfsj`ez7!GR`jVH+&@Dv%`cn*ia+c-qoh(iobI27K&p-MXrH8hiA?+&y|}@M@D2V1e1j9<8#Y&blbeWd+C1_t-LV zFPDvbjVnn9e-(BZ^RUCF!FO$1e2@DG-!tapd$u+BKKC)cZ(N7__@9t{+^1xpZ3BK_ z+^BiTZN?961>`V)8y?}C@Ca9iM~sK@DE|l^HJ0O1+cErze;hwDR^UgrD*Tvl#*evb z^0Bc7|IF3mpN(JPpKV{`C;ao|6Yc_jV*C$&V*3XF!oP@r;V$8){LA<$_h0g<@jLPv z|9kS8?F#uTca40(uP4WhjpT3q7V>v~7x}x*LB6)#C*N?7@EhZgCI~j&0$~Cx8>VVw!|d%~wxAQtHFbe- z!9%dvKG>A@uAl4OuxMFt@*X#=tTqgl#u|G&m!hma509ElUken0(Qe4A9O41^* zym>KL(aeFg;#82@KJFwSYKQPHo9H~8wqhMdMH`rIA02L+E*-E#6u$9T1*vgX6*vgj8Y?a#< zwkmlmTUAoPR<-;SnBBGkbMki9T(X0$F4@V}xb0$VN_Mj~Ero1t@?N&Kq=>C;*#|7i zMsTvE6r3(O#&d6}m3X+vNIX(vB_0RjBpz+?JkPcSo_8C^Qyoait;Ej8= z@c!=nmEv{&O$k=uMdO=r+-qkyl^6me1$6X8Kq1|gj8iuh`!2qi@qvt zD`oL5pw9FhpuXujMSXw7MqTasiE8$JOSPqkQ9bF4sRu_hsnJQBsh=j5Q_m-z);~|b zNsS%7MUC~gP`~%KQhyx1PmT8uQGfQ1QGchuqUqj0X(q#uM#8D|1fhf$2<3r-j3C<0 z5ll}ME}$otN6_w$DB80;hV~LB(q0Y~?JZnNPjaNtLSZgFIU|qubLeURjP$^7w=>?-ckWN6n~%?+Tm9zH?b#7@ zXLcOjdpwDD_^k?bWakAsj;rarej55OKV9Ho*?$E7b^JBslKn>JQb8~-eI!HV02%2| z$$&qUfeL|)m*d9pDm-MoK2I5)<0Yf}Cd-%{KN(XoRR;a1$zVk!2=9l1)T!@NY+X-;H1`;(b2(Nd->H-+gk zFOzlkFK4<%sZ4k73Z~oq0n^=|ChN&fXL`(;OizCn(<{oB_2%X<3z;Ev5i^{-j~O->Ll;qr{M`oRE(3&|2mo>-j|YhX z3QnwM$KsKCQt%ZKoA40!@ zPoRImdMK*?6sq!e!VGaR%uwgSj6pTb5^G_WdNo{GlMmJ6&2qJRH`I#vKz)q~>IaX& zj|Pvz{2DX-c<>}#J9r+h6JLbu)R*7}@nyJS@Fv`(zAfKW(++pmbjWuOzJZ^M-@-4% zT<`gC~Y|LJQsx>of=8Da~Pc23Nu}8Vfv&>)<(j8lJ}&;Q65|@XF9N z_&+=buWDYxtF^D;b^Hci*Sf%ZmVoucJlMc8u;B!R4Z{=Q4VDjYX$7!}^?^-V3AAaY zuw{5AY#&}A?_d*PhgJzYhL=ExmV=HHYUmi&z`Lv#KG5pmL+xj9h%JCaS_2%>7Qr{{ zVK}aRj7au5;yIy$(s?N;i;seG`Xbsn2|=A7nqm5d_}q!P)U)ktAE zfu`$Dq8XAiXom9~de3qm&D4E^f+Uwwkn=hUw%kA=Ix7m5G@($Z9fj#y(QHXOn(gdD zb1c1RuI?V1CwYM8IR{X2h9V^|P^9xEiqgG83nj17LgzSI zWceE{){`h&N}=cxh+^vaC|2)=V(UCmoZb_~kNBVjsUK2G{ZXPc043_>XsKR-mexh0 zl#wX3LK=h8q$wy(pMlawGEurT8)fQqP?lbgvPbgKO8t7YYUDGd)^9_ax;;oI-G_Ag 
[remaining binary patch data for publicsuffix/data/children omitted] literal 0 HcmV?d00001 diff --git a/vendor/golang.org/x/net/publicsuffix/data/nodes b/vendor/golang.org/x/net/publicsuffix/data/nodes new file mode 100644 index 0000000000000000000000000000000000000000..34751cd5b9d1b14c3a0977e4fa4b04db058b241d GIT binary patch literal 48280 [binary patch data omitted]
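The new publicsuffix/data/children and publicsuffix/data/nodes blobs (condensed above) together with the publicsuffix/data/text file added just below appear to be the packed public suffix tables that golang.org/x/net/publicsuffix now ships as data files. As a point of reference only (this sketch is not part of the vendored diff and assumes the package's documented API), typical consumers of those tables look like this:

// Minimal sketch, assuming the standard golang.org/x/net/publicsuffix API;
// not part of the patch itself.
package main

import (
	"fmt"
	"net/http"
	"net/http/cookiejar"

	"golang.org/x/net/publicsuffix"
)

func main() {
	// PublicSuffix and EffectiveTLDPlusOne consult the package's
	// compiled-in public suffix list.
	suffix, icann := publicsuffix.PublicSuffix("example.co.uk")
	fmt.Println(suffix, icann) // co.uk true

	etld1, err := publicsuffix.EffectiveTLDPlusOne("www.example.co.uk")
	if err != nil {
		panic(err)
	}
	fmt.Println(etld1) // example.co.uk

	// The same list can back a cookie jar so cookies stay scoped below
	// public suffixes instead of being shared across unrelated domains.
	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
	if err != nil {
		panic(err)
	}
	_ = &http.Client{Jar: jar}
}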
z7&Yn+%@ROXZw%IZf>Rd*>jX`20Dn-{-8we*i&noaj< zJTamnK<5XMEz)_^c+Aro8~3_EaytEtb8pBoMVtK9GgfG4WB>;IK>uEAXiZx`>8AsY robrQ|owa>aoRhN_X(Mf!48lrA?l?kCzkiF4Q8iUAs#=ol_WAz{i*386 literal 0 HcmV?d00001 diff --git a/vendor/golang.org/x/net/publicsuffix/data/text b/vendor/golang.org/x/net/publicsuffix/data/text new file mode 100644 index 000000000000..124dcd61f402 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/data/text @@ -0,0 +1 @@ +billustrationionjukudoyamakeupowiathletajimageandsoundandvision-riopretobishimagentositecnologiabiocelotenkawabipanasonicatfoodnetworkinggroupperbirdartcenterprisecloudaccesscamdvrcampaniabirkenesoddtangenovarahkkeravjuegoshikikiraraholtalenishikatakazakindependent-revieweirbirthplaceu-1bitbucketrzynishikatsuragirlyuzawabitternidiscoverybjarkoybjerkreimdbaltimore-og-romsdalp1bjugnishikawazukamishihoronobeautydalwaysdatabaseballangenkainanaejrietisalatinabenogatabitorderblackfridaybloombergbauernishimerabloxcms3-website-us-west-2blushakotanishinomiyashironocparachutingjovikarateu-2bmoattachmentsalangenishinoomotegovtattoolforgerockartuzybmsalon-1bmwellbeingzoneu-3bnrwesteuropenairbusantiquesaltdalomzaporizhzhedmarkaratsuginamikatagamilanotairesistanceu-4bondigitaloceanspacesaludishangrilanciabonnishinoshimatsusakahoginankokubunjindianapolis-a-bloggerbookonlinewjerseyboomlahppiacenzachpomorskienishiokoppegardiskussionsbereichattanooganordkapparaglidinglassassinationalheritageu-north-1boschaefflerdalondonetskarelianceu-south-1bostik-serveronagasukevje-og-hornnesalvadordalibabalatinord-aurdalipaywhirlondrinaplesknsalzburgleezextraspace-to-rentalstomakomaibarabostonakijinsekikogentappssejnyaarparalleluxembourglitcheltenham-radio-opensocialorenskogliwicebotanicalgardeno-staginglobodoes-itcouldbeworldisrechtranakamurataiwanairforcechireadthedocsxeroxfinitybotanicgardenishitosashimizunaminamiawajikindianmarketinglogowestfalenishiwakindielddanuorrindigenamsskoganeindustriabotanyanagawallonieruchomoscienceandindustrynissandiegoddabouncemerckmsdnipropetrovskjervoyageorgeorgiabounty-fullensakerrypropertiesamegawaboutiquebecommerce-shopselectaxihuanissayokkaichintaifun-dnsaliasamnangerboutireservditchyouriparasiteboyfriendoftheinternetflixjavaldaostathellevangerbozen-sudtirolottokorozawabozen-suedtirolouvreisenissedalovepoparisor-fronisshingucciprianiigataipeidsvollovesickariyakumodumeloyalistoragebplaceducatorprojectcmembersampalermomahaccapooguybrandywinevalleybrasiliadboxosascoli-picenorddalpusercontentcp4bresciaokinawashirosatobamagazineuesamsclubartowestus2brindisibenikitagataikikuchikumagayagawalmartgorybristoloseyouriparliamentjeldsundivtasvuodnakaniikawatanagurabritishcolumbialowiezaganiyodogawabroadcastlebtimnetzlgloomy-routerbroadwaybroke-itvedestrandivttasvuotnakanojohanamakindlefrakkestadiybrokerbrothermesaverdeatnulmemergencyachtsamsungloppennebrowsersafetymarketsandnessjoenl-ams-1brumunddalublindesnesandoybrunelastxn--0trq7p7nnbrusselsandvikcoromantovalle-daostavangerbruxellesanfranciscofreakunekobayashikaoirmemorialucaniabryanskodjedugit-pagespeedmobilizeroticagliaricoharuovatlassian-dev-builderscbglugsjcbnpparibashkiriabrynewmexicoacharterbuzzwfarmerseinebwhalingmbhartiffany-2bzhitomirbzzcodyn-vpndnsantacruzsantafedjeffersoncoffeedbackdropocznordlandrudupontariobranconavstackasaokamikoaniikappudownloadurbanamexhibitioncogretakamatsukawacollectioncolognewyorkshirebungoonordre-landurhamburgrimstadynamisches-dnsantamariakecolonialwilliamsburgripeeweeklylotterycoloradoplateaudnedalncolumbusheycommunexus-3community-prochowicecomobaravendbambleborkapsicilyonagoyauthg
ear-stagingivestbyglandroverhallair-traffic-controlleyombomloabaths-heilbronnoysunddnslivegarsheiheijibigawaustraliaustinnfshostrolekamisatokaizukameyamatotakadaustevollivornowtv-infolldalolipopmcdircompanychipstmncomparemarkerryhotelsantoandrepbodynaliasnesoddenmarkhangelskjakdnepropetrovskiervaapsteigenflfannefrankfurtjxn--12cfi8ixb8lutskashibatakashimarshallstatebankashiharacomsecaaskimitsubatamibuildingriwatarailwaycondoshichinohealth-carereformemsettlersanukindustriesteamfamberlevagangaviikanonjinfinitigotembaixadaconferenceconstructionconsuladogadollsaobernardomniweatherchanneluxuryconsultanthropologyconsultingroks-thisayamanobeokakegawacontactkmaxxn--12co0c3b4evalled-aostamayukinsuregruhostingrondarcontagematsubaravennaharimalborkashiwaracontemporaryarteducationalchikugodonnakaiwamizawashtenawsmppl-wawdev-myqnapcloudcontrolledogawarabikomaezakirunoopschlesischesaogoncartoonartdecologiacontractorskenconventureshinodearthickashiwazakiyosatokamachilloutsystemscloudsitecookingchannelsdvrdnsdojogaszkolancashirecifedexetercoolblogdnsfor-better-thanawassamukawatarikuzentakatairavpagecooperativano-frankivskygearapparochernigovernmentksatxn--1ck2e1bananarepublic-inquiryggeebinatsukigatajimidsundevelopmentatarantours3-external-1copenhagencyclopedichiropracticatholicaxiashorokanaiecoproductionsaotomeinforumzcorporationcorsicahcesuoloanswatch-and-clockercorvettenrissagaeroclubmedecincinnativeamericanantiquest-le-patron-k3sapporomuracosenzamamidorittoeigersundynathomebuiltwithdarkasserverrankoshigayaltakasugaintelligencecosidnshome-webservercellikescandypoppdaluzerncostumedicallynxn--1ctwolominamatargets-itlon-2couchpotatofriesardegnarutomobegetmyiparsardiniacouncilvivanovoldacouponsarlcozoracq-acranbrookuwanalyticsarpsborgrongausdalcrankyowariasahikawatchandclockasukabeauxartsandcraftsarufutsunomiyawakasaikaitabashijonawatecrdyndns-at-homedepotaruinterhostsolutionsasayamatta-varjjatmpartinternationalfirearmsaseboknowsitallcreditcardyndns-at-workshoppingrossetouchigasakitahiroshimansionsaskatchewancreditunioncremonashgabadaddjaguarqcxn--1lqs03ncrewhmessinarashinomutashinaintuitoyosatoyokawacricketnedalcrimeast-kazakhstanangercrotonecrownipartsassarinuyamashinazawacrsaudacruisesauheradyndns-blogsitextilegnicapetownnews-stagingroundhandlingroznycuisinellancasterculturalcentertainmentoyotapartysvardocuneocupcakecuritibabymilk3curvallee-d-aosteinkjerusalempresashibetsurugashimaringatlantajirinvestmentsavannahgacutegirlfriendyndns-freeboxoslocalzonecymrulvikasumigaurawa-mazowszexnetlifyinzairtrafficplexus-1cyonabarumesswithdnsaveincloudyndns-homednsaves-the-whalessandria-trani-barletta-andriatranibarlettaandriacyouthruherecipescaracaltanissettaishinomakilovecollegefantasyleaguernseyfembetsukumiyamazonawsglobalacceleratorahimeshimabaridagawatchesciencecentersciencehistoryfermockasuyamegurownproviderferraraferraris-a-catererferrerotikagoshimalopolskanlandyndns-picsaxofetsundyndns-remotewdyndns-ipasadenaroyfgujoinvilleitungsenfhvalerfidontexistmein-iservschulegallocalhostrodawarafieldyndns-serverdalfigueresindevicenzaolkuszczytnoipirangalsaceofilateliafilegear-augustowhoswholdingsmall-webthingscientistordalfilegear-debianfilegear-gbizfilegear-iefilegear-jpmorganfilegear-sg-1filminamiechizenfinalfinancefineartscrapper-sitefinlandyndns-weblikes-piedmonticellocus-4finnoyfirebaseappaviancarrdyndns-wikinkobearalvahkijoetsuldalvdalaskanittedallasalleasecuritytacticschoenbrunnfirenetoystre-slidrettozawafirenzefirestonefirewebpaascrappingulenfirmdaleikangerfishingoldpoint2thisamitsukefitjarvodkafjordyndns-workan
gerfitnessettlementozsdellogliastradingunmanxn--1qqw23afjalerfldrvalleeaosteflekkefjordyndns1flesberguovdageaidnunjargaflickragerogerscrysecretrosnubar0flierneflirfloginlinefloppythonanywhereggio-calabriafloraflorencefloridatsunangojomedicinakamagayahabackplaneapplinzis-a-celticsfanfloripadoval-daostavalleyfloristanohatakahamalselvendrellflorokunohealthcareerscwienflowerservehalflifeinsurancefltrani-andria-barletta-trani-andriaflynnhosting-clusterfnchiryukyuragifuchungbukharanzanfndynnschokokekschokoladenfnwkaszubytemarkatowicefoolfor-ourfor-somedio-campidano-mediocampidanomediofor-theaterforexrothachijolsterforgotdnservehttpbin-butterforli-cesena-forlicesenaforlillesandefjordynservebbscholarshipschoolbusinessebyforsaleirfjordynuniversityforsandasuolodingenfortalfortefortmissoulangevagrigentomologyeonggiehtavuoatnagahamaroygardencowayfortworthachinoheavyfosneservehumourfotraniandriabarlettatraniandriafoxfordecampobassociatest-iserveblogsytemp-dnserveirchitachinakagawashingtondchernivtsiciliafozfr-par-1fr-par-2franamizuhobby-sitefrancaiseharafranziskanerimalvikatsushikabedzin-addrammenuorochesterfredrikstadtvserveminecraftranoyfreeddnsfreebox-oservemp3freedesktopfizerfreemasonryfreemyiphosteurovisionfreesitefreetlservep2pgfoggiafreiburgushikamifuranorfolkebibleksvikatsuyamarugame-hostyhostingxn--2m4a15efrenchkisshikirkeneservepicservequakefreseniuscultureggio-emilia-romagnakasatsunairguardiannakadomarinebraskaunicommbankaufentigerfribourgfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfroganservesarcasmatartanddesignfrognfrolandynv6from-akrehamnfrom-alfrom-arfrom-azurewebsiteshikagamiishibukawakepnoorfrom-capitalonewportransipharmacienservicesevastopolefrom-coalfrom-ctranslatedynvpnpluscountryestateofdelawareclaimschoolsztynsettsupportoyotomiyazakis-a-candidatefrom-dchitosetodayfrom-dediboxafrom-flandersevenassisienarvikautokeinoticeablewismillerfrom-gaulardalfrom-hichisochikuzenfrom-iafrom-idyroyrvikingruenoharafrom-ilfrom-in-berlindasewiiheyaizuwakamatsubushikusakadogawafrom-ksharpharmacyshawaiijimarcheapartmentshellaspeziafrom-kyfrom-lanshimokawafrom-mamurogawatsonfrom-mdfrom-medizinhistorischeshimokitayamattelekommunikationfrom-mifunefrom-mnfrom-modalenfrom-mshimonitayanagit-reposts-and-telecommunicationshimonosekikawafrom-mtnfrom-nchofunatoriginstantcloudfrontdoorfrom-ndfrom-nefrom-nhktistoryfrom-njshimosuwalkis-a-chefarsundyndns-mailfrom-nminamifuranofrom-nvalleedaostefrom-nynysagamiharafrom-ohdattorelayfrom-oketogolffanshimotsukefrom-orfrom-padualstackazoologicalfrom-pratogurafrom-ris-a-conservativegashimotsumayfirstockholmestrandfrom-schmidtre-gauldalfrom-sdscloudfrom-tnfrom-txn--2scrj9chonanbunkyonanaoshimakanegasakikugawaltervistailscaleforcefrom-utsiracusaikirovogradoyfrom-vald-aostarostwodzislawildlifestylefrom-vtransportefrom-wafrom-wiardwebview-assetshinichinanfrom-wvanylvenneslaskerrylogisticshinjournalismartlabelingfrom-wyfrosinonefrostalowa-wolawafroyal-commissionfruskydivingfujiiderafujikawaguchikonefujiminokamoenairkitapps-auction-rancherkasydneyfujinomiyadattowebhoptogakushimotoganefujiokayamandalfujisatoshonairlinedre-eikerfujisawafujishiroishidakabiratoridedyn-berlincolnfujitsuruokazakiryuohkurafujiyoshidavvenjargap-east-1fukayabeardubaiduckdnsncfdfukuchiyamadavvesiidappnodebalancertmgrazimutheworkpccwilliamhillfukudomigawafukuis-a-cpalacefukumitsubishigakisarazure-mobileirvikazteleportlligatransurlfukuokakamigaharafukuro
ishikarikaturindalfukusakishiwadazaifudaigokaseljordfukuyamagatakaharunusualpersonfunabashiriuchinadafunagatakahashimamakisofukushimangonnakatombetsumy-gatewayfunahashikamiamakusatsumasendaisenergyfundaciofunkfeuerfuoiskujukuriyamangyshlakasamatsudoomdnstracefuosskoczowinbar1furubirafurudonostiaafurukawajimaniwakuratefusodegaurafussaintlouis-a-anarchistoireggiocalabriafutabayamaguchinomihachimanagementrapaniizafutboldlygoingnowhere-for-morenakatsugawafuttsurutaharafuturecmshinjukumamotoyamashikefuturehostingfuturemailingfvghamurakamigoris-a-designerhandcraftedhandsonyhangglidinghangoutwentehannanmokuizumodenaklodzkochikuseihidorahannorthwesternmutualhanyuzenhapmircloudletshintokushimahappounzenharvestcelebrationhasamap-northeast-3hasaminami-alpshintomikasaharahashbangryhasudahasura-apphiladelphiaareadmyblogspotrdhasvikfh-muensterhatogayahoooshikamaishimofusartshinyoshitomiokamisunagawahatoyamazakitakatakanabeatshiojirishirifujiedahatsukaichikaiseiyoichimkentrendhostinghattfjelldalhayashimamotobusellfylkesbiblackbaudcdn-edgestackhero-networkisboringhazuminobushistoryhelplfinancialhelsinkitakyushuaiahembygdsforbundhemneshioyanaizuerichardlimanowarudahemsedalhepforgeblockshirahamatonbetsurgeonshalloffameiwamasoyheroyhetemlbfanhgtvaohigashiagatsumagoianiahigashichichibuskerudhigashihiroshimanehigashiizumozakitamigrationhigashikagawahigashikagurasoedahigashikawakitaaikitamotosunndalhigashikurumeeresinstaginghigashimatsushimarburghigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycleshirakokonoehigashinarusells-for-lesshiranukamitondabayashiogamagoriziahigashinehigashiomitamanortonsberghigashiosakasayamanakakogawahigashishirakawamatakanezawahigashisumiyoshikawaminamiaikitanakagusukumodernhigashitsunosegawahigashiurausukitashiobarahigashiyamatokoriyamanashifteditorxn--30rr7yhigashiyodogawahigashiyoshinogaris-a-doctorhippyhiraizumisatohnoshoohirakatashinagawahiranairportland-4-salernogiessennanjobojis-a-financialadvisor-aurdalhirarahiratsukaerusrcfastlylbanzaicloudappspotagerhirayaitakaokalmykiahistorichouseshiraois-a-geekhakassiahitachiomiyagildeskaliszhitachiotagonohejis-a-greenhitraeumtgeradegreehjartdalhjelmelandholeckodairaholidayholyhomegoodshiraokamitsuehomeiphilatelyhomelinkyard-cloudjiffyresdalhomelinuxn--32vp30hachiojiyahikobierzycehomeofficehomesecuritymacaparecidahomesecuritypchoseikarugamvikarlsoyhomesenseeringhomesklepphilipsynology-diskstationhomeunixn--3bst00minamiiserniahondahongooglecodebergentinghonjyoitakarazukaluganskharkivaporcloudhornindalhorsells-for-ustkanmakiwielunnerhortendofinternet-dnshiratakahagitapphoenixn--3ds443ghospitalhoteleshishikuis-a-guruhotelwithflightshisognehotmailhoyangerhoylandetakasagophonefosshisuifuettertdasnetzhumanitieshitaramahungryhurdalhurumajis-a-hard-workershizukuishimogosenhyllestadhyogoris-a-hunterhyugawarahyundaiwafuneis-into-carsiiitesilkharkovaresearchaeologicalvinklein-the-bandairtelebitbridgestoneenebakkeshibechambagricultureadymadealstahaugesunderseaportsinfolionetworkdalaheadjudygarlandis-into-cartoonsimple-urlis-into-gamesserlillyis-leetrentin-suedtirolis-lostre-toteneis-a-lawyeris-not-certifiedis-savedis-slickhersonis-uberleetrentino-a-adigeis-very-badajozis-a-liberalis-very-evillageis-very-goodyearis-very-niceis-very-sweetpepperugiais-with-thebandovre-eikerisleofmanaustdaljellybeanjenv-arubahccavuotnagaragusabaerobaticketsirdaljeonnamerikawauejetztrentino-aadigejevnakershusdecorativeartslupskhmelnytskyivarggatrentino-alto-adigejewelryjewishartgalleryjfkhplaystation-cloudyclusterjgorajlljls-sto1jls-sto2jls-sto3jmphotographysiojnj
aworznospamproxyjoyentrentino-altoadigejoyokaichibajddarchitecturealtorlandjpnjprslzjurkotohiradomainstitutekotourakouhokutamamurakounosupabasembokukizunokunimilitarykouyamarylhurstjordalshalsenkouzushimasfjordenkozagawakozakis-a-llamarnardalkozowindowskrakowinnersnoasakatakkokamiminersokndalkpnkppspbarcelonagawakkanaibetsubamericanfamilyds3-fips-us-gov-west-1krasnikahokutokashikis-a-musiciankrasnodarkredstonekrelliankristiansandcatsolarssonkristiansundkrodsheradkrokstadelvalle-aostatic-accessolognekryminamiizukaminokawanishiaizubangekumanotteroykumatorinovecoregontrailroadkumejimashikis-a-nascarfankumenantokonamegatakatoris-a-nursells-itrentin-sud-tirolkunisakis-a-painteractivelvetrentin-sudtirolkunitachiaraindropilotsolundbecknx-serversellsyourhomeftphxn--3e0b707ekunitomigusukuleuvenetokigawakunneppuboliviajessheimpertrixcdn77-secureggioemiliaromagnamsosnowiechristiansburgminakamichiharakunstsammlungkunstunddesignkuokgroupimientaketomisatoolsomakurehabmerkurgankurobeeldengeluidkurogimimatakatsukis-a-patsfankuroisoftwarezzoologykuromatsunais-a-personaltrainerkuronkurotakikawasakis-a-photographerokussldkushirogawakustanais-a-playershiftcryptonomichigangwonkusupersalezajskomakiyosemitekutchanelkutnowruzhgorodeokuzumakis-a-republicanonoichinomiyakekvafjordkvalsundkvamscompute-1kvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspdnsomnatalkzmisakis-a-soxfanmisasaguris-a-studentalmisawamisconfusedmishimasudamissilemisugitokuyamatsumaebashikshacknetrentino-sued-tirolmitakeharamitourismilemitoyoakemiuramiyazurecontainerdpolicemiyotamatsukuris-a-teacherkassyno-dshowamjondalenmonstermontrealestatefarmequipmentrentino-suedtirolmonza-brianzapposor-odalmonza-e-della-brianzaptokyotangotpantheonsitemonzabrianzaramonzaebrianzamonzaedellabrianzamoonscalebookinghostedpictetrentinoa-adigemordoviamoriyamatsumotofukemoriyoshiminamiashigaramormonmouthachirogatakamoriokakudamatsuemoroyamatsunomortgagemoscowiosor-varangermoseushimodatemosjoenmoskenesorfoldmossorocabalena-devicesorreisahayakawakamiichikawamisatottoris-a-techietis-a-landscaperspectakasakitchenmosvikomatsushimarylandmoteginowaniihamatamakinoharamoviemovimientolgamozilla-iotrentinoaadigemtranbytomaritimekeepingmuginozawaonsensiositemuikaminoyamaxunispacemukoebenhavnmulhouseoullensvanguardmunakatanemuncienciamuosattemupinbarclaycards3-sa-east-1murmanskomforbar2murotorcraftrentinoalto-adigemusashinoharamuseetrentinoaltoadigemuseumverenigingmusicargodaddyn-o-saurlandesortlandmutsuzawamy-wanggoupilemyactivedirectorymyamazeplaymyasustor-elvdalmycdmycloudnsoruminamimakis-a-rockstarachowicemydattolocalcertificationmyddnsgeekgalaxymydissentrentinos-tirolmydobissmarterthanyoumydrobofageologymydsoundcastronomy-vigorlicemyeffectrentinostirolmyfastly-terrariuminamiminowamyfirewalledreplittlestargardmyforuminamioguni5myfritzmyftpaccessouthcarolinaturalhistorymuseumcentermyhome-servermyjinomykolaivencloud66mymailermymediapchristmasakillucernemyokohamamatsudamypepinkommunalforbundmypetsouthwest1-uslivinghistorymyphotoshibalashovhadanorth-kazakhstanmypicturestaurantrentinosud-tirolmypsxn--3pxu8kommunemysecuritycamerakermyshopblocksowamyshopifymyspreadshopwarendalenugmythic-beastspectruminamisanrikubetsuppliesoomytis-a-bookkeepermaritimodspeedpartnermytuleap-partnersphinxn--41amyvnchromediatechnologymywirepaircraftingvollohmusashimurayamashikokuchuoplantationplantspjelkavikomorotsukagawaplatformsharis-a-therapistoiaplatter-appinokofuefukihaboromskogplatterpioneerplazaplcube-serversicherungplumbingoplurinacionalpodhalepodlasiellaktyubinskiptveterinairealmpmnpodzone
pohlpoivronpokerpokrovskomvuxn--3hcrj9choyodobashichikashukujitawaraumalatvuopmicrosoftbankarmoypoliticarrierpolitiendapolkowicepoltavalle-d-aostaticspydebergpomorzeszowitdkongsbergponpesaro-urbino-pesarourbinopesaromasvuotnarusawapordenonepornporsangerporsangugeporsgrunnanyokoshibahikariwanumatakinouepoznanpraxis-a-bruinsfanprdpreservationpresidioprgmrprimetelemarkongsvingerprincipeprivatizehealthinsuranceprofesionalprogressivestfoldpromombetsupplypropertyprotectionprotonetrentinosued-tirolprudentialpruszkowithgoogleapiszprvcyberprzeworskogpulawypunyufuelveruminamiuonumassa-carrara-massacarraramassabuyshousesopotrentino-sud-tirolpupugliapussycateringebuzentsujiiepvhadselfiphdfcbankazunoticiashinkamigototalpvtrentinosuedtirolpwchungnamdalseidsbergmodellingmxn--11b4c3dray-dnsupdaterpzqhaebaruericssongdalenviknakayamaoris-a-cubicle-slavellinodeobjectshinshinotsurfashionstorebaselburguidefinimamateramochizukimobetsumidatlantichirurgiens-dentistes-en-franceqldqotoyohashimotoshimatsuzakis-an-accountantshowtimelbourneqponiatowadaqslgbtrentinsud-tirolqualifioappippueblockbusternopilawaquickconnectrentinsudtirolquicksytesrhtrentinsued-tirolquipelementsrltunestuff-4-saletunkonsulatrobeebyteappigboatsmolaquilanxessmushcdn77-sslingturystykaniepcetuscanytushuissier-justicetuvalleaostaverntuxfamilytwmailvestvagoyvevelstadvibo-valentiavibovalentiavideovillastufftoread-booksnestorfjordvinnicasadelamonedagestangevinnytsiavipsinaappiwatevirginiavirtual-uservecounterstrikevirtualcloudvirtualservervirtualuserveexchangevirtuelvisakuhokksundviterbolognagasakikonaikawagoevivianvivolkenkundenvixn--42c2d9avlaanderennesoyvladikavkazimierz-dolnyvladimirvlogintoyonezawavminanovologdanskonyveloftrentino-stirolvolvolkswagentstuttgartrentinsuedtirolvolyngdalvoorlopervossevangenvotevotingvotoyonovps-hostrowiecircustomer-ocimmobilienwixsitewloclawekoobindalwmcloudwmflabsurnadalwoodsidelmenhorstabackyardsurreyworse-thandawowithyoutuberspacekitagawawpdevcloudwpenginepoweredwphostedmailwpmucdnpixolinodeusercontentrentinosudtirolwpmudevcdnaccessokanagawawritesthisblogoipizzawroclawiwatsukiyonoshiroomgwtcirclerkstagewtfastvps-serverisignwuozuwzmiuwajimaxn--4gbriminingxn--4it168dxn--4it797kooris-a-libertarianxn--4pvxs4allxn--54b7fta0ccivilaviationredumbrellajollamericanexpressexyxn--55qw42gxn--55qx5dxn--5dbhl8dxn--5js045dxn--5rtp49civilisationrenderxn--5rtq34koperviklabudhabikinokawachinaganoharamcocottempurlxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264civilizationthewifiatmallorcafederation-webspacexn--80aaa0cvacationsusonoxn--80adxhksuzakananiimiharuxn--80ao21axn--80aqecdr1axn--80asehdbarclays3-us-east-2xn--80aswgxn--80aukraanghkembuchikujobservableusercontentrevisohughestripperxn--8dbq2axn--8ltr62koryokamikawanehonbetsuwanouchijiwadeliveryxn--8pvr4uxn--8y0a063axn--90a1affinitylotterybnikeisenbahnxn--90a3academiamicable-modemoneyxn--90aeroportalabamagasakishimabaraffleentry-snowplowiczeladzxn--90aishobarakawaharaoxn--90amckinseyxn--90azhytomyrxn--9dbhblg6dietritonxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byandexcloudxn--asky-iraxn--aurskog-hland-jnbarefootballooningjerstadgcapebretonamicrolightingjesdalombardiadembroideryonagunicloudiherokuappanamasteiermarkaracoldwarszawauthgearappspacehosted-by-previderxn--avery-yuasakuragawaxn--b-5gaxn--b4w605ferdxn--balsan-sdtirol-nsbsuzukanazawaxn--bck1b9a5dre4civilwarmiasadoesntexisteingeekarpaczest-a-la-maisondre-landrayddns5yxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn-
-bievt-0qa2xn--bjarky-fyaotsurgeryxn--bjddar-ptargithubpreviewsaitohmannore-og-uvdalxn--blt-elabourxn--bmlo-graingerxn--bod-2naturalsciencesnaturellesuzukis-an-actorxn--bozen-sdtirol-2obanazawaxn--brnny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigation-acornxn--brum-voagatroandinosaureportrentoyonakagyokutoyakomaganexn--btsfjord-9zaxn--bulsan-sdtirol-nsbaremetalpha-myqnapcloud9guacuiababia-goracleaningitpagexlimoldell-ogliastraderxn--c1avgxn--c2br7gxn--c3s14mincomcastreserve-onlinexn--cck2b3bargainstances3-us-gov-west-1xn--cckwcxetdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--cg4bkis-an-actresshwindmillxn--ciqpnxn--clchc0ea0b2g2a9gcdxn--comunicaes-v6a2oxn--correios-e-telecomunicaes-ghc29axn--czr694barreaudiblebesbydgoszczecinemagnethnologyoriikaragandauthordalandroiddnss3-ap-southeast-2ix4432-balsan-suedtirolimiteddnskinggfakefurniturecreationavuotnaritakoelnayorovigotsukisosakitahatakahatakaishimoichinosekigaharaurskog-holandingitlaborxn--czrs0trogstadxn--czru2dxn--czrw28barrel-of-knowledgeappgafanquanpachicappacificurussiautomotivelandds3-ca-central-16-balsan-sudtirollagdenesnaaseinet-freaks3-ap-southeast-123websiteleaf-south-123webseiteckidsmynasushiobarackmazerbaijan-mayen-rootaribeiraogakibichuobiramusementdllpages3-ap-south-123sitewebhareidfjordvagsoyerhcloudd-dnsiskinkyolasiteastcoastaldefenceastus2038xn--d1acj3barrell-of-knowledgecomputerhistoryofscience-fictionfabricafjs3-us-west-1xn--d1alfaromeoxn--d1atromsakegawaxn--d5qv7z876clanbibaidarmeniaxn--davvenjrga-y4axn--djrs72d6uyxn--djty4kosaigawaxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry-iraxn--e1a4cldmailukowhitesnow-dnsangohtawaramotoineppubtlsanjotelulubin-brbambinagisobetsuitagajoburgjerdrumcprequalifymein-vigorgebetsukuibmdeveloperauniteroizumizakinderoyomitanobninskanzakiyokawaraustrheimatunduhrennebulsan-suedtirololitapunk123kotisivultrobjectselinogradimo-siemenscaledekaascolipiceno-ipifony-1337xn--eckvdtc9dxn--efvn9svalbardunloppaderbornxn--efvy88hagakhanamigawaxn--ehqz56nxn--elqq16hagebostadxn--eveni-0qa01gaxn--f6qx53axn--fct429kosakaerodromegallupaasdaburxn--fhbeiarnxn--finny-yuaxn--fiq228c5hsvchurchaseljeepsondriodejaneirockyotobetsuliguriaxn--fiq64barsycenterprisesakievennodesadistcgrouplidlugolekagaminord-frontierxn--fiqs8sveioxn--fiqz9svelvikoninjambylxn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesena-fcbssvizzeraxn--forlcesena-c8axn--fpcrj9c3dxn--frde-grandrapidsvn-repostorjcloud-ver-jpchowderxn--frna-woaraisaijosoyroroswedenxn--frya-hraxn--fzc2c9e2cleverappsannanxn--fzys8d69uvgmailxn--g2xx48clicketcloudcontrolapparmatsuuraxn--gckr3f0fauskedsmokorsetagayaseralingenoamishirasatogliattipschulserverxn--gecrj9clickrisinglesannohekinannestadraydnsanokaruizawaxn--ggaviika-8ya47haibarakitakamiizumisanofidelitysfjordxn--gildeskl-g0axn--givuotna-8yasakaiminatoyookaneyamazoexn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-an-anarchistoricalsocietysnesigdalxn--gmqw5axn--gnstigbestellen-zvbrplsbxn--45br5cylxn--gnstigliefern-wobihirosakikamijimatsushigexn--h-2failxn--h1aeghair-surveillancexn--h1ahnxn--h1alizxn--h2breg3eveneswidnicasacampinagrandebungotakadaemongolianxn--h2brj9c8clinichippubetsuikilatironporterxn--h3cuzk1digickoseis-a-linux-usershoujis-a-knightpointtohoboleslawieconomiastalbanshizuokamogawaxn--hbmer-xqaxn--hcesuolo-7ya35barsyonlinewhampshirealtychyattorneyagawakuyabukihokumakogeniwaizumiotsurugimbalsfjordeportexaskoyabeagleboardetroitskypecorivneatonoshoes3-eu-west-3utilitiesquare7xn--hebda8basicserversaillesjabbottateshinanomachildrensgardenhlfanhsbc66xn--hery-iraxn
--hgebostad-g3axn--hkkinen-5waxn--hmmrfeasta-s4accident-prevention-aptibleangaviikadenaamesjevuemielnoboribetsuckswidnikkolobrzegersundxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyasugithubusercontentromsojamisonxn--io0a7is-an-artistgstagexn--j1adpkomonotogawaxn--j1aefbsbxn--1lqs71dyndns-office-on-the-webhostingrpassagensavonarviikamiokameokamakurazakiwakunigamihamadaxn--j1ael8basilicataniautoscanadaeguambulancentralus-2xn--j1amhakatanorthflankddiamondshinshiroxn--j6w193gxn--jlq480n2rgxn--jlq61u9w7basketballfinanzgorzeleccodespotenzakopanewspaperxn--jlster-byasuokannamihokkaidopaaskvollxn--jrpeland-54axn--jvr189miniserversusakis-a-socialistg-builderxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--45brj9cistrondheimperiaxn--koluokta-7ya57hakodatexn--kprw13dxn--kpry57dxn--kput3is-an-engineeringxn--krager-gyatominamibosogndalxn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jdevcloudfunctionsimplesitexn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyatsukanoyakagexn--kvnangen-k0axn--l-1fairwindswiebodzin-dslattuminamiyamashirokawanabeepilepsykkylvenicexn--l1accentureklamborghinikolaeventswinoujscienceandhistoryxn--laheadju-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52batochigifts3-us-west-2xn--lesund-huaxn--lgbbat1ad8jdfaststackschulplattformetacentrumeteorappassenger-associationxn--lgrd-poacctrusteexn--lhppi-xqaxn--linds-pramericanartrvestnestudioxn--lns-qlavagiskexn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liacliniquedapliexn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddeswisstpetersburgxn--mgb9awbfbx-ostrowwlkpmguitarschwarzgwangjuifminamidaitomanchesterxn--mgba3a3ejtrycloudflarevistaplestudynamic-dnsrvaroyxn--mgba3a4f16axn--mgba3a4fra1-deloittevaksdalxn--mgba7c0bbn0axn--mgbaakc7dvfstdlibestadxn--mgbaam7a8hakonexn--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00batsfjordiscordsays3-website-ap-northeast-1xn--mgbai9azgqp6jejuniperxn--mgbayh7gpalmaseratis-an-entertainerxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbpl2fhskosherbrookegawaxn--mgbqly7c0a67fbclintonkotsukubankarumaifarmsteadrobaknoluoktachikawakayamadridvallee-aosteroyxn--mgbqly7cvafr-1xn--mgbt3dhdxn--mgbtf8flapymntrysiljanxn--mgbtx2bauhauspostman-echocolatemasekd1xn--mgbx4cd0abbvieeexn--mix082fbxoschweizxn--mix891fedorainfraclouderaxn--mjndalen-64axn--mk0axin-vpnclothingdustdatadetectjmaxxxn--12c1fe0bradescotlandrrxn--mk1bu44cn-northwest-1xn--mkru45is-bykleclerchoshibuyachiyodancexn--mlatvuopmi-s4axn--mli-tlavangenxn--mlselv-iuaxn--moreke-juaxn--mori-qsakurais-certifiedxn--mosjen-eyawaraxn--mot-tlazioxn--mre-og-romsdal-qqbuseranishiaritakurashikis-foundationxn--msy-ula0hakubaghdadultravelchannelxn--mtta-vrjjat-k7aflakstadaokagakicks-assnasaarlandxn--muost-0qaxn--mxtq1minisitexn--ngbc5azdxn--ngbe9e0axn--ngbrxn--45q11citadelhicampinashikiminohostfoldnavyxn--nit225koshimizumakiyosunnydayxn--nmesjevuemie-tcbalestrandabergamoarekeymachineustarnbergxn--nnx388axn--nodessakyotanabelaudiopsysynology-dstreamlitappittsburghofficialxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeserveftplanetariuminamitanexn--nvuotna-hwaxn--nyqy26axn--o1achernihivgubsxn--o3cw4hakuis-a-democratravelersinsurancexn--o3cyx2axn--od0algxn--od0aq3belementorayoshiokanumazuryukuhashimojibxos3-website-ap-southeast-1xn--ogbpf8flatangerxn--oppegrd-ixaxn
--ostery-fyawatahamaxn--osyro-wuaxn--otu796dxn--p1acfedorapeoplegoismailillehammerfeste-ipatriaxn--p1ais-gonexn--pgbs0dhlx3xn--porsgu-sta26fedoraprojectoyotsukaidoxn--pssu33lxn--pssy2uxn--q7ce6axn--q9jyb4cngreaterxn--qcka1pmcpenzaporizhzhiaxn--qqqt11minnesotaketakayamassivegridxn--qxa6axn--qxamsterdamnserverbaniaxn--rady-iraxn--rdal-poaxn--rde-ulaxn--rdy-0nabaris-into-animeetrentin-sued-tirolxn--rennesy-v1axn--rhkkervju-01afeiraquarelleasingujaratoyouraxn--rholt-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5naturbruksgymnxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31hakusanagochihayaakasakawaiishopitsitexn--rovu88bellevuelosangeles3-website-ap-southeast-2xn--rros-granvindafjordxn--rskog-uuaxn--rst-0naturhistorischesxn--rsta-framercanvasxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithaldenxn--s9brj9cnpyatigorskolecznagatorodoyxn--sandnessjen-ogbellunord-odalombardyn53xn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--ses554gxn--sgne-graphoxn--4dbgdty6citichernovtsyncloudrangedaluccarbonia-iglesias-carboniaiglesiascarboniaxn--skierv-utazasxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5natuurwetenschappenginexn--slt-elabcieszynh-servebeero-stageiseiroumuenchencoreapigeelvinckoshunantankmpspawnextdirectrentino-s-tirolxn--smla-hraxn--smna-gratangentlentapisa-geekosugexn--snase-nraxn--sndre-land-0cbeneventochiokinoshimaintenancebinordreisa-hockeynutazurestaticappspaceusercontentateyamaveroykenglandeltaitogitsumitakagiizeasypanelblagrarchaeologyeongbuk0emmafann-arboretumbriamallamaceiobbcg123homepagefrontappchizip61123minsidaarborteaches-yogasawaracingroks-theatree123hjemmesidealerimo-i-rana4u2-localhistorybolzano-altoadigeometre-experts-comptables3-ap-northeast-123miwebcambridgehirn4t3l3p0rtarumizusawabogadobeaemcloud-fr123paginaweberkeleyokosukanrabruzzombieidskoguchikushinonsenasakuchinotsuchiurakawafaicloudineat-url-o-g-i-naval-d-aosta-valleyokote164-b-datacentermezproxyzgoraetnabudejjudaicadaquest-mon-blogueurodirumaceratabuseating-organicbcn-north-123saitamakawabartheshopencraftrainingdyniajuedischesapeakebayernavigationavoi234lima-cityeats3-ap-northeast-20001wwwedeployokozeastasiamunemurorangecloudplatform0xn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbentleyurihonjournalistjohnikonanporovnobserverxn--srfold-byaxn--srreisa-q1axn--srum-gratis-a-bulls-fanxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbeppublishproxyusuharavocatanzarowegroweiboltashkentatamotorsitestingivingjemnes3-eu-central-1kappleadpages-12hpalmspringsakerxn--stre-toten-zcbeskidyn-ip24xn--t60b56axn--tckweddingxn--tiq49xqyjelasticbeanstalkhmelnitskiyamarumorimachidaxn--tjme-hraxn--tn0agrocerydxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tirol-rzbestbuyshoparenagareyamaizurugbyenvironmentalconservationflashdrivefsnillfjordiscordsezjampaleoceanographics3-website-eu-west-1xn--trentin-sdtirol-7vbetainaboxfuseekloges3-website-sa-east-1xn--trentino-sd-tirol-c3bhzcasertainaioirasebastopologyeongnamegawafflecellclstagemologicaliforniavoues3-eu-west-1xn--trentino-sdtirol-szbielawalbrzycharitypedreamhostersvp4xn--trentinosd-tirol-rzbiellaakesvuemieleccebizenakanotoddeninoheguriitatebayashiibahcavuotnagaivuotnagaokakyotambabybluebitelevisioncilla-speziaxarnetbank8s3-eu-west-2xn--trentinosdtirol-7vbieszczadygeyachimataijiiyamanouchikuhokuryugasakitaurayasudaxn--trentinsd-tirol-6vbievat-band-campaignieznombrendlyngengerdalces3-website-us-east-1xn--trentinsdtirol-nsbifukagawalesundiscountypeformelhusgardeninomiyakonojorpelan
discourses3-website-us-west-1xn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvestre-slidrexn--uc0ay4axn--uist22halsakakinokiaxn--uisz3gxn--unjrga-rtarnobrzegyptianxn--unup4yxn--uuwu58axn--vads-jraxn--valle-aoste-ebbtularvikonskowolayangroupiemontexn--valle-d-aoste-ehboehringerikexn--valleaoste-e7axn--valledaoste-ebbvadsoccerxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctb-hostingxn--vermgensberatung-pwbigvalledaostaobaomoriguchiharag-cloud-championshiphoplixboxenirasakincheonishiazaindependent-commissionishigouvicasinordeste-idclkarasjohkamikitayamatsurindependent-inquest-a-la-masionishiharaxn--vestvgy-ixa6oxn--vg-yiabkhaziaxn--vgan-qoaxn--vgsy-qoa0jelenia-goraxn--vgu402cnsantabarbaraxn--vhquvestre-totennishiawakuraxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861biharstadotsubetsugaruhrxn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1cntjomeldaluroyxn--wgbl6axn--xhq521bihorologyusuisservegame-serverxn--xkc2al3hye2axn--xkc2dl3a5ee0hammarfeastafricaravantaaxn--y9a3aquariumintereitrentino-sudtirolxn--yer-znaumburgxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--4dbrk0cexn--ystre-slidre-ujbikedaejeonbukarasjokarasuyamarriottatsunoceanographiquehimejindependent-inquiryuufcfanishiizunazukindependent-panelomoliseminemrxn--zbx025dxn--zf0ao64axn--zf0avxlxn--zfr164bilbaogashimadachicagoboavistanbulsan-sudtirolbia-tempio-olbiatempioolbialystokkeliwebredirectme-south-1xnbayxz \ No newline at end of file diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go index 200617ea8647..d56e9e762445 100644 --- a/vendor/golang.org/x/net/publicsuffix/list.go +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -33,9 +33,10 @@ // the last two are not (but share the same eTLD+1: "google.com"). // // All of these domains have the same eTLD+1: -// - "www.books.amazon.co.uk" -// - "books.amazon.co.uk" -// - "amazon.co.uk" +// - "www.books.amazon.co.uk" +// - "books.amazon.co.uk" +// - "amazon.co.uk" +// // Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk". // // There is no closed form algorithm to calculate the eTLD of a domain. @@ -100,10 +101,10 @@ loop: break } - u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) + u := uint32(nodes.get(f) >> (nodesBitsTextOffset + nodesBitsTextLength)) icannNode = u&(1<>= nodesBitsICANN - u = children[u&(1<>= childrenBitsLo hi = u & (1<>= nodesBitsTextLength offset := x & (1<= Go 1.11), and Google App Engine flexible environment, it fetches -// credentials from the metadata server. +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on +// how to generate the JSON configuration file for on-prem/non-Google cloud +// platforms. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses +// the appengine.AccessToken function. +// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// (>= Go 1.11), and Google App Engine flexible environment, it fetches +// credentials from the metadata server. 
func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) { // Make defensive copy of the slices in params. params = params.deepCopy() @@ -176,7 +179,7 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params if config != nil { return &Credentials{ ProjectID: "", - TokenSource: authhandler.TokenSource(ctx, config, params.State, params.AuthHandler), + TokenSource: authhandler.TokenSourceWithPKCE(ctx, config, params.State, params.AuthHandler, params.PKCE), JSON: jsonData, }, nil } @@ -190,6 +193,7 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params if err != nil { return nil, err } + ts = newErrWrappingTokenSource(ts) return &DefaultCredentials{ ProjectID: f.ProjectID, TokenSource: ts, diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index 8e6a57ce96b3..b3e7bc85ce50 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -15,14 +15,14 @@ // For more information on using workload identity federation, refer to // https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. // -// OAuth2 Configs +// # OAuth2 Configs // // Two functions in this package return golang.org/x/oauth2.Config values from Google credential // data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, // the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or // create an http.Client. // -// Workload Identity Federation +// # Workload Identity Federation // // Using workload identity federation, your application can access Google Cloud // resources from Amazon Web Services (AWS), Microsoft Azure or any identity @@ -36,13 +36,14 @@ // Follow the detailed instructions on how to configure Workload Identity Federation // in various platforms: // -// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws -// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure -// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc +// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws +// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure +// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc // -// For OIDC providers, the library can retrieve OIDC tokens either from a -// local file location (file-sourced credentials) or from a local server -// (URL-sourced credentials). +// For OIDC and SAML providers, the library can retrieve tokens in three ways: +// from a local file location (file-sourced credentials), from a server +// (URL-sourced credentials), or from a local executable (executable-sourced +// credentials). // For file-sourced credentials, a background process needs to be continuously // refreshing the file location with a new OIDC token prior to expiration. // For tokens with one hour lifetimes, the token needs to be updated in the file @@ -50,9 +51,13 @@ // For URL-sourced credentials, a local server needs to host a GET endpoint to // return the OIDC token. The response can be in plain text or JSON. // Additional required request headers can also be specified. +// For executable-sourced credentials, an application needs to be available to +// output the OIDC token and other information in a JSON format. 
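The default.go and doc.go hunks above cover how the vendored golang.org/x/oauth2/google package resolves Application Default Credentials and the newly documented file-, URL-, and executable-sourced workload identity federation flows. For orientation only, the following is a minimal sketch (not part of this patch) of how application code typically consumes that package; the cloud-platform scope and the printed fields are illustrative assumptions, not something this diff prescribes.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	// FindDefaultCredentials walks the lookup order described in the doc
	// comment above: GOOGLE_APPLICATION_CREDENTIALS, the gcloud ADC file,
	// App Engine, and finally the metadata server.
	creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatalf("could not find default credentials: %v", err)
	}

	tok, err := creds.TokenSource.Token()
	if err != nil {
		log.Fatalf("could not fetch token: %v", err)
	}
	fmt.Println("project:", creds.ProjectID, "token expires:", tok.Expiry)
}

Note that with the error.go file added below, token retrieval failures from JSON-based credential files are wrapped in the exported AuthenticationError type, so callers can use errors.As together with Temporary() to decide whether a retry is reasonable.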
+// For more information on how these work (and how to implement +// executable-sourced credentials), please check out: +// https://cloud.google.com/iam/docs/using-workload-identity-federation#oidc // -// -// Credentials +// # Credentials // // The Credentials type represents Google credentials, including Application Default // Credentials. diff --git a/vendor/golang.org/x/oauth2/google/error.go b/vendor/golang.org/x/oauth2/google/error.go new file mode 100644 index 000000000000..d84dd0047313 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/error.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "errors" + + "golang.org/x/oauth2" +) + +// AuthenticationError indicates there was an error in the authentication flow. +// +// Use (*AuthenticationError).Temporary to check if the error can be retried. +type AuthenticationError struct { + err *oauth2.RetrieveError +} + +func newAuthenticationError(err error) error { + re := &oauth2.RetrieveError{} + if !errors.As(err, &re) { + return err + } + return &AuthenticationError{ + err: re, + } +} + +// Temporary indicates that the network error has one of the following status codes and may be retried: 500, 503, 408, or 429. +func (e *AuthenticationError) Temporary() bool { + if e.err.Response == nil { + return false + } + sc := e.err.Response.StatusCode + return sc == 500 || sc == 503 || sc == 408 || sc == 429 +} + +func (e *AuthenticationError) Error() string { + return e.err.Error() +} + +func (e *AuthenticationError) Unwrap() error { + return e.err +} + +type errWrappingTokenSource struct { + src oauth2.TokenSource +} + +func newErrWrappingTokenSource(ts oauth2.TokenSource) oauth2.TokenSource { + return &errWrappingTokenSource{src: ts} +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. 
+func (s *errWrappingTokenSource) Token() (*oauth2.Token, error) { + t, err := s.src.Token() + if err != nil { + return nil, newAuthenticationError(err) + } + return t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index ccc23ee0aef6..8df0c493ee46 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -122,6 +122,7 @@ type credentialsFile struct { TokenURLExternal string `json:"token_url"` TokenInfoURL string `json:"token_info_url"` ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + ServiceAccountImpersonation serviceAccountImpersonationInfo `json:"service_account_impersonation"` Delegates []string `json:"delegates"` CredentialSource externalaccount.CredentialSource `json:"credential_source"` QuotaProjectID string `json:"quota_project_id"` @@ -131,6 +132,10 @@ type credentialsFile struct { SourceCredentials *credentialsFile `json:"source_credentials"` } +type serviceAccountImpersonationInfo struct { + TokenLifetimeSeconds int `json:"token_lifetime_seconds"` +} + func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config { cfg := &jwt.Config{ Email: f.ClientEmail, @@ -139,6 +144,7 @@ func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config Scopes: scopes, TokenURL: f.TokenURL, Subject: subject, // This is the user email to impersonate + Audience: f.Audience, } if cfg.TokenURL == "" { cfg.TokenURL = JWTTokenURL @@ -177,12 +183,13 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar TokenURL: f.TokenURLExternal, TokenInfoURL: f.TokenInfoURL, ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, - ClientSecret: f.ClientSecret, - ClientID: f.ClientID, - CredentialSource: f.CredentialSource, - QuotaProjectID: f.QuotaProjectID, - Scopes: params.Scopes, - WorkforcePoolUserProject: f.WorkforcePoolUserProject, + ServiceAccountImpersonationLifetimeSeconds: f.ServiceAccountImpersonation.TokenLifetimeSeconds, + ClientSecret: f.ClientSecret, + ClientID: f.ClientID, + CredentialSource: f.CredentialSource, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + WorkforcePoolUserProject: f.WorkforcePoolUserProject, } return cfg.TokenSource(ctx) case impersonatedServiceAccount: diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index a5a5423c659a..2bf3202b290f 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -52,9 +52,23 @@ const ( // The AWS authorization header name for the security session token if available. awsSecurityTokenHeader = "x-amz-security-token" + // The name of the header containing the session token for metadata endpoint calls + awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token" + + awsIMDSv2SessionTtlHeader = "X-aws-ec2-metadata-token-ttl-seconds" + + awsIMDSv2SessionTtl = "300" + // The AWS authorization header name for the auto-generated date. awsDateHeader = "x-amz-date" + // Supported AWS configuration environment variables. 
+ awsAccessKeyId = "AWS_ACCESS_KEY_ID" + awsDefaultRegion = "AWS_DEFAULT_REGION" + awsRegion = "AWS_REGION" + awsSecretAccessKey = "AWS_SECRET_ACCESS_KEY" + awsSessionToken = "AWS_SESSION_TOKEN" + awsTimeFormatLong = "20060102T150405Z" awsTimeFormatShort = "20060102" ) @@ -241,6 +255,7 @@ type awsCredentialSource struct { RegionURL string RegionalCredVerificationURL string CredVerificationURL string + IMDSv2SessionTokenURL string TargetResource string requestSigner *awsRequestSigner region string @@ -259,6 +274,49 @@ type awsRequest struct { Headers []awsRequestHeader `json:"headers"` } +func (cs awsCredentialSource) validateMetadataServers() error { + if err := cs.validateMetadataServer(cs.RegionURL, "region_url"); err != nil { + return err + } + if err := cs.validateMetadataServer(cs.CredVerificationURL, "url"); err != nil { + return err + } + return cs.validateMetadataServer(cs.IMDSv2SessionTokenURL, "imdsv2_session_token_url") +} + +var validHostnames []string = []string{"169.254.169.254", "fd00:ec2::254"} + +func (cs awsCredentialSource) isValidMetadataServer(metadataUrl string) bool { + if metadataUrl == "" { + // Zero value means use default, which is valid. + return true + } + + u, err := url.Parse(metadataUrl) + if err != nil { + // Unparseable URL means invalid + return false + } + + for _, validHostname := range validHostnames { + if u.Hostname() == validHostname { + // If it's one of the valid hostnames, everything is good + return true + } + } + + // hostname not found in our allowlist, so not valid + return false +} + +func (cs awsCredentialSource) validateMetadataServer(metadataUrl, urlName string) error { + if !cs.isValidMetadataServer(metadataUrl) { + return fmt.Errorf("oauth2/google: invalid hostname %s for %s", metadataUrl, urlName) + } + + return nil +} + func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { if cs.client == nil { cs.client = oauth2.NewClient(cs.ctx, nil) @@ -266,14 +324,41 @@ func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, erro return cs.client.Do(req.WithContext(cs.ctx)) } +func canRetrieveRegionFromEnvironment() bool { + // The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is + // required. + return getenv(awsRegion) != "" || getenv(awsDefaultRegion) != "" +} + +func canRetrieveSecurityCredentialFromEnvironment() bool { + // Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. 
+ return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != "" +} + +func shouldUseMetadataServer() bool { + return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() +} + func (cs awsCredentialSource) subjectToken() (string, error) { if cs.requestSigner == nil { - awsSecurityCredentials, err := cs.getSecurityCredentials() + headers := make(map[string]string) + if shouldUseMetadataServer() { + awsSessionToken, err := cs.getAWSSessionToken() + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } + } + + awsSecurityCredentials, err := cs.getSecurityCredentials(headers) if err != nil { return "", err } - if cs.region, err = cs.getRegion(); err != nil { + if cs.region, err = cs.getRegion(headers); err != nil { return "", err } @@ -340,12 +425,42 @@ func (cs awsCredentialSource) subjectToken() (string, error) { return url.QueryEscape(string(result)), nil } -func (cs *awsCredentialSource) getRegion() (string, error) { - if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" { - return envAwsRegion, nil +func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { + if cs.IMDSv2SessionTokenURL == "" { + return "", nil + } + + req, err := http.NewRequest("PUT", cs.IMDSv2SessionTokenURL, nil) + if err != nil { + return "", err + } + + req.Header.Add(awsIMDSv2SessionTtlHeader, awsIMDSv2SessionTtl) + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google: unable to retrieve AWS session token - %s", string(respBody)) } - if envAwsRegion := getenv("AWS_DEFAULT_REGION"); envAwsRegion != "" { - return envAwsRegion, nil + + return string(respBody), nil +} + +func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { + return envAwsRegion, nil + } + return getenv("AWS_DEFAULT_REGION"), nil } if cs.RegionURL == "" { @@ -357,6 +472,10 @@ func (cs *awsCredentialSource) getRegion() (string, error) { return "", err } + for name, value := range headers { + req.Header.Add(name, value) + } + resp, err := cs.doRequest(req) if err != nil { return "", err @@ -381,23 +500,21 @@ func (cs *awsCredentialSource) getRegion() (string, error) { return string(respBody[:respBodyEnd]), nil } -func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCredentials, err error) { - if accessKeyID := getenv("AWS_ACCESS_KEY_ID"); accessKeyID != "" { - if secretAccessKey := getenv("AWS_SECRET_ACCESS_KEY"); secretAccessKey != "" { - return awsSecurityCredentials{ - AccessKeyID: accessKeyID, - SecretAccessKey: secretAccessKey, - SecurityToken: getenv("AWS_SESSION_TOKEN"), - }, nil - } +func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) { + if canRetrieveSecurityCredentialFromEnvironment() { + return awsSecurityCredentials{ + AccessKeyID: getenv(awsAccessKeyId), + SecretAccessKey: getenv(awsSecretAccessKey), + SecurityToken: getenv(awsSessionToken), + }, nil } - roleName, err := cs.getMetadataRoleName() + roleName, err := cs.getMetadataRoleName(headers) if err != nil { return } - credentials, err := cs.getMetadataSecurityCredentials(roleName) + credentials, err := 
cs.getMetadataSecurityCredentials(roleName, headers) if err != nil { return } @@ -413,7 +530,7 @@ func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCrede return credentials, nil } -func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) (awsSecurityCredentials, error) { +func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (awsSecurityCredentials, error) { var result awsSecurityCredentials req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.CredVerificationURL, roleName), nil) @@ -422,6 +539,10 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) ( } req.Header.Add("Content-Type", "application/json") + for name, value := range headers { + req.Header.Add(name, value) + } + resp, err := cs.doRequest(req) if err != nil { return result, err @@ -441,7 +562,7 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) ( return result, err } -func (cs *awsCredentialSource) getMetadataRoleName() (string, error) { +func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (string, error) { if cs.CredVerificationURL == "" { return "", errors.New("oauth2/google: unable to determine the AWS metadata server security credentials endpoint") } @@ -451,6 +572,10 @@ func (cs *awsCredentialSource) getMetadataRoleName() (string, error) { return "", err } + for name, value := range headers { + req.Header.Add(name, value) + } + resp, err := cs.doRequest(req) if err != nil { return "", err diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index bc3ce53172ae..3eab8df7ced7 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -39,6 +39,9 @@ type Config struct { // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only // required for workload identity pools when APIs to be accessed have not integrated with UberMint. ServiceAccountImpersonationURL string + // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation + // token will be valid for. + ServiceAccountImpersonationLifetimeSeconds int // ClientSecret is currently only required if token_info endpoint also // needs to be called with the generated GCP access token. When provided, STS will be // called with additional basic authentication using client_id as username and client_secret as password. 
@@ -71,12 +74,14 @@ var ( regexp.MustCompile(`(?i)^sts\.googleapis\.com$`), regexp.MustCompile(`(?i)^sts\.[^\.\s\/\\]+\.googleapis\.com$`), regexp.MustCompile(`(?i)^[^\.\s\/\\]+-sts\.googleapis\.com$`), + regexp.MustCompile(`(?i)^sts-[^\.\s\/\\]+\.p\.googleapis\.com$`), } validImpersonateURLPatterns = []*regexp.Regexp{ regexp.MustCompile(`^[^\.\s\/\\]+\.iamcredentials\.googleapis\.com$`), regexp.MustCompile(`^iamcredentials\.googleapis\.com$`), regexp.MustCompile(`^iamcredentials\.[^\.\s\/\\]+\.googleapis\.com$`), regexp.MustCompile(`^[^\.\s\/\\]+-iamcredentials\.googleapis\.com$`), + regexp.MustCompile(`^iamcredentials-[^\.\s\/\\]+\.p\.googleapis\.com$`), } validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) ) @@ -141,10 +146,11 @@ func (c *Config) tokenSource(ctx context.Context, tokenURLValidPats []*regexp.Re scopes := c.Scopes ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} imp := ImpersonateTokenSource{ - Ctx: ctx, - URL: c.ServiceAccountImpersonationURL, - Scopes: scopes, - Ts: oauth2.ReuseTokenSource(nil, ts), + Ctx: ctx, + URL: c.ServiceAccountImpersonationURL, + Scopes: scopes, + Ts: oauth2.ReuseTokenSource(nil, ts), + TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds, } return oauth2.ReuseTokenSource(nil, imp), nil } @@ -163,7 +169,7 @@ type format struct { } // CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. -// Either the File or the URL field should be filled, depending on the kind of credential in question. +// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question. // The EnvironmentID should start with AWS if being used for an AWS credential. type CredentialSource struct { File string `json:"file"` @@ -171,33 +177,54 @@ type CredentialSource struct { URL string `json:"url"` Headers map[string]string `json:"headers"` + Executable *ExecutableConfig `json:"executable"` + EnvironmentID string `json:"environment_id"` RegionURL string `json:"region_url"` RegionalCredVerificationURL string `json:"regional_cred_verification_url"` CredVerificationURL string `json:"cred_verification_url"` + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` Format format `json:"format"` } -// parse determines the type of CredentialSource needed +type ExecutableConfig struct { + Command string `json:"command"` + TimeoutMillis *int `json:"timeout_millis"` + OutputFile string `json:"output_file"` +} + +// parse determines the type of CredentialSource needed. 
func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { if awsVersion != 1 { return nil, fmt.Errorf("oauth2/google: aws version '%d' is not supported in the current build", awsVersion) } - return awsCredentialSource{ + + awsCredSource := awsCredentialSource{ EnvironmentID: c.CredentialSource.EnvironmentID, RegionURL: c.CredentialSource.RegionURL, RegionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, CredVerificationURL: c.CredentialSource.URL, TargetResource: c.Audience, ctx: ctx, - }, nil + } + if c.CredentialSource.IMDSv2SessionTokenURL != "" { + awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL + } + + if err := awsCredSource.validateMetadataServers(); err != nil { + return nil, err + } + + return awsCredSource, nil } } else if c.CredentialSource.File != "" { return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil } else if c.CredentialSource.URL != "" { return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil + } else if c.CredentialSource.Executable != nil { + return CreateExecutableCredential(ctx, c.CredentialSource.Executable, c) } return nil, fmt.Errorf("oauth2/google: unable to parse credential source") } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go new file mode 100644 index 000000000000..579bcce5f28b --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go @@ -0,0 +1,309 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
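The parse change above wires in a new executable-sourced credential type, implemented by the executablecredsource.go file that follows. For orientation only, here is a minimal sketch (not part of this patch) of an external command satisfying the protocol that file expects: the command is named in the credential_source "executable" block (command, optional timeout_millis and output_file, per ExecutableConfig above), it only runs when GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES=1 is set, and it must print a JSON object matching the executableResponse fields parsed further down. The concrete token value and expiry below are illustrative assumptions.

package main

import (
	"encoding/json"
	"os"
	"time"
)

// response mirrors the fields consumed by parseSubjectTokenFromSource in
// executablecredsource.go below.
type response struct {
	Version        int    `json:"version"`
	Success        bool   `json:"success"`
	TokenType      string `json:"token_type"`
	ExpirationTime int64  `json:"expiration_time"`
	IDToken        string `json:"id_token"`
}

func main() {
	// In a real deployment the token would come from the external identity
	// provider; this value is a placeholder.
	out := response{
		Version:        1, // must not exceed executableSupportedMaxVersion
		Success:        true,
		TokenType:      "urn:ietf:params:oauth:token-type:jwt",
		ExpirationTime: time.Now().Add(time.Hour).Unix(),
		IDToken:        "placeholder-subject-token",
	}
	// The credential source reads this JSON from stdout (or from output_file).
	json.NewEncoder(os.Stdout).Encode(out)
}

The library passes the audience, token type, and (when applicable) the impersonated service account email and output file to the command through GOOGLE_EXTERNAL_ACCOUNT_* environment variables, as set up in executableEnvironment further down.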
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" + "time" +) + +var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken") + +const ( + executableSupportedMaxVersion = 1 + defaultTimeout = 30 * time.Second + timeoutMinimum = 5 * time.Second + timeoutMaximum = 120 * time.Second + executableSource = "response" + outputFileSource = "output file" +) + +type nonCacheableError struct { + message string +} + +func (nce nonCacheableError) Error() string { + return nce.message +} + +func missingFieldError(source, field string) error { + return fmt.Errorf("oauth2/google: %v missing `%q` field", source, field) +} + +func jsonParsingError(source, data string) error { + return fmt.Errorf("oauth2/google: unable to parse %v\nResponse: %v", source, data) +} + +func malformedFailureError() error { + return nonCacheableError{"oauth2/google: response must include `error` and `message` fields when unsuccessful"} +} + +func userDefinedError(code, message string) error { + return nonCacheableError{fmt.Sprintf("oauth2/google: response contains unsuccessful response: (%v) %v", code, message)} +} + +func unsupportedVersionError(source string, version int) error { + return fmt.Errorf("oauth2/google: %v contains unsupported version: %v", source, version) +} + +func tokenExpiredError() error { + return nonCacheableError{"oauth2/google: the token returned by the executable is expired"} +} + +func tokenTypeError(source string) error { + return fmt.Errorf("oauth2/google: %v contains unsupported token type", source) +} + +func exitCodeError(exitCode int) error { + return fmt.Errorf("oauth2/google: executable command failed with exit code %v", exitCode) +} + +func executableError(err error) error { + return fmt.Errorf("oauth2/google: executable command failed: %v", err) +} + +func executablesDisallowedError() error { + return errors.New("oauth2/google: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") +} + +func timeoutRangeError() error { + return errors.New("oauth2/google: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") +} + +func commandMissingError() error { + return errors.New("oauth2/google: missing `command` field — executable command must be provided") +} + +type environment interface { + existingEnv() []string + getenv(string) string + run(ctx context.Context, command string, env []string) ([]byte, error) + now() time.Time +} + +type runtimeEnvironment struct{} + +func (r runtimeEnvironment) existingEnv() []string { + return os.Environ() +} + +func (r runtimeEnvironment) getenv(key string) string { + return os.Getenv(key) +} + +func (r runtimeEnvironment) now() time.Time { + return time.Now().UTC() +} + +func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { + splitCommand := strings.Fields(command) + cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...) 
+ cmd.Env = env + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return nil, context.DeadlineExceeded + } + + if exitError, ok := err.(*exec.ExitError); ok { + return nil, exitCodeError(exitError.ExitCode()) + } + + return nil, executableError(err) + } + + bytesStdout := bytes.TrimSpace(stdout.Bytes()) + if len(bytesStdout) > 0 { + return bytesStdout, nil + } + return bytes.TrimSpace(stderr.Bytes()), nil +} + +type executableCredentialSource struct { + Command string + Timeout time.Duration + OutputFile string + ctx context.Context + config *Config + env environment +} + +// CreateExecutableCredential creates an executableCredentialSource given an ExecutableConfig. +// It also performs defaulting and type conversions. +func CreateExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { + if ec.Command == "" { + return executableCredentialSource{}, commandMissingError() + } + + result := executableCredentialSource{} + result.Command = ec.Command + if ec.TimeoutMillis == nil { + result.Timeout = defaultTimeout + } else { + result.Timeout = time.Duration(*ec.TimeoutMillis) * time.Millisecond + if result.Timeout < timeoutMinimum || result.Timeout > timeoutMaximum { + return executableCredentialSource{}, timeoutRangeError() + } + } + result.OutputFile = ec.OutputFile + result.ctx = ctx + result.config = config + result.env = runtimeEnvironment{} + return result, nil +} + +type executableResponse struct { + Version int `json:"version,omitempty"` + Success *bool `json:"success,omitempty"` + TokenType string `json:"token_type,omitempty"` + ExpirationTime int64 `json:"expiration_time,omitempty"` + IdToken string `json:"id_token,omitempty"` + SamlResponse string `json:"saml_response,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { + var result executableResponse + if err := json.Unmarshal(response, &result); err != nil { + return "", jsonParsingError(source, string(response)) + } + + if result.Version == 0 { + return "", missingFieldError(source, "version") + } + + if result.Success == nil { + return "", missingFieldError(source, "success") + } + + if !*result.Success { + if result.Code == "" || result.Message == "" { + return "", malformedFailureError() + } + return "", userDefinedError(result.Code, result.Message) + } + + if result.Version > executableSupportedMaxVersion || result.Version < 0 { + return "", unsupportedVersionError(source, result.Version) + } + + if result.ExpirationTime == 0 && cs.OutputFile != "" { + return "", missingFieldError(source, "expiration_time") + } + + if result.TokenType == "" { + return "", missingFieldError(source, "token_type") + } + + if result.ExpirationTime != 0 && result.ExpirationTime < now { + return "", tokenExpiredError() + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:jwt" || result.TokenType == "urn:ietf:params:oauth:token-type:id_token" { + if result.IdToken == "" { + return "", missingFieldError(source, "id_token") + } + return result.IdToken, nil + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:saml2" { + if result.SamlResponse == "" { + return "", missingFieldError(source, "saml_response") + } + return result.SamlResponse, nil + } + + return "", tokenTypeError(source) 
+} + +func (cs executableCredentialSource) subjectToken() (string, error) { + if token, err := cs.getTokenFromOutputFile(); token != "" || err != nil { + return token, err + } + + return cs.getTokenFromExecutableCommand() +} + +func (cs executableCredentialSource) getTokenFromOutputFile() (token string, err error) { + if cs.OutputFile == "" { + // This ExecutableCredentialSource doesn't use an OutputFile. + return "", nil + } + + file, err := os.Open(cs.OutputFile) + if err != nil { + // No OutputFile found. Hasn't been created yet, so skip it. + return "", nil + } + defer file.Close() + + data, err := ioutil.ReadAll(io.LimitReader(file, 1<<20)) + if err != nil || len(data) == 0 { + // Cachefile exists, but no data found. Get new credential. + return "", nil + } + + token, err = cs.parseSubjectTokenFromSource(data, outputFileSource, cs.env.now().Unix()) + if err != nil { + if _, ok := err.(nonCacheableError); ok { + // If the cached token is expired we need a new token, + // and if the cache contains a failure, we need to try again. + return "", nil + } + + // There was an error in the cached token, and the developer should be aware of it. + return "", err + } + // Token parsing succeeded. Use found token. + return token, nil +} + +func (cs executableCredentialSource) executableEnvironment() []string { + result := cs.env.existingEnv() + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", cs.config.Audience)) + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", cs.config.SubjectTokenType)) + result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") + if cs.config.ServiceAccountImpersonationURL != "" { + matches := serviceAccountImpersonationRE.FindStringSubmatch(cs.config.ServiceAccountImpersonationURL) + if matches != nil { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) + } + } + if cs.OutputFile != "" { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", cs.OutputFile)) + } + return result +} + +func (cs executableCredentialSource) getTokenFromExecutableCommand() (string, error) { + // For security reasons, we need our consumers to set this environment variable to allow executables to be run. + if cs.env.getenv("GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES") != "1" { + return "", executablesDisallowedError() + } + + ctx, cancel := context.WithDeadline(cs.ctx, cs.env.now().Add(cs.Timeout)) + defer cancel() + + output, err := cs.env.run(ctx, cs.Command, cs.executableEnvironment()) + if err != nil { + return "", err + } + return cs.parseSubjectTokenFromSource(output, executableSource, cs.env.now().Unix()) +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go index 8251fc85e005..54c8f209f3b7 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go @@ -48,12 +48,19 @@ type ImpersonateTokenSource struct { // Each service account must be granted roles/iam.serviceAccountTokenCreator // on the next service account in the chain. Optional. Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. + TokenLifetimeSeconds int } // Token performs the exchange to get a temporary service account token to allow access to GCP. 
func (its ImpersonateTokenSource) Token() (*oauth2.Token, error) { + lifetimeString := "3600s" + if its.TokenLifetimeSeconds != 0 { + lifetimeString = fmt.Sprintf("%ds", its.TokenLifetimeSeconds) + } reqBody := generateAccessTokenReq{ - Lifetime: "3600s", + Lifetime: lifetimeString, Scope: its.Scopes, Delegates: its.Delegates, } diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go index 67d97b990469..e89e6ae17bca 100644 --- a/vendor/golang.org/x/oauth2/google/jwt.go +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -66,7 +66,8 @@ func newJWTSource(jsonKey []byte, audience string, scopes []string) (oauth2.Toke if err != nil { return nil, err } - return oauth2.ReuseTokenSource(tok, ts), nil + rts := newErrWrappingTokenSource(oauth2.ReuseTokenSource(tok, ts)) + return rts, nil } type jwtAccessTokenSource struct { diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go index 683d2d271a30..95015648b43f 100644 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -178,5 +178,5 @@ func Verify(token string, key *rsa.PublicKey) error { h := sha256.New() h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) } diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vendor/golang.org/x/sync/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vendor/golang.org/x/sync/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 9857fe53d3c9..cbee7a4e230d 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -8,22 +8,35 @@ package errgroup import ( "context" + "fmt" "sync" ) +type token struct{} + // A Group is a collection of goroutines working on subtasks that are part of // the same overall task. // -// A zero Group is valid and does not cancel on error. +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. type Group struct { cancel func() wg sync.WaitGroup + sem chan token + errOnce sync.Once err error } +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + // WithContext returns a new Group and an associated Context derived from ctx. // // The derived Context is canceled the first time a function passed to Go @@ -45,14 +58,48 @@ func (g *Group) Wait() error { } // Go calls the given function in a new goroutine. +// It blocks until the new goroutine can be added without the number of +// active goroutines in the group exceeding the configured limit. // -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. 
+// The first call to return a non-nil error cancels the group's context, if the +// group was created by calling WithContext. The error will be returned by Wait. func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. + default: + return false + } + } + g.wg.Add(1) go func() { - defer g.wg.Done() + defer g.done() if err := f(); err != nil { g.errOnce.Do(func() { @@ -63,4 +110,23 @@ func (g *Group) Go(f func() error) { }) } }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. +func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if len(g.sem) != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + } + g.sem = make(chan token, n) } diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vendor/golang.org/x/sys/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vendor/golang.org/x/sys/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go index dcbb14ef35a4..271055be0b1e 100644 --- a/vendor/golang.org/x/sys/cpu/byteorder.go +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -46,6 +46,7 @@ func hostByteOrder() byteOrder { case "386", "amd64", "amd64p32", "alpha", "arm", "arm64", + "loong64", "mipsle", "mips64le", "mips64p32le", "nios2", "ppc64le", diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index b56886f26163..83f112c4c808 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -106,8 +106,8 @@ var ARM64 struct { // ARM contains the supported CPU features of the current ARM (32-bit) platform. // All feature flags are false if: -// 1. the current platform is not arm, or -// 2. the current operating system is not Linux. +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. 
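A minimal usage sketch (not part of this patch) for the errgroup changes in the hunk above, which add SetLimit and TryGo; the limit of 3 and the worker body are illustrative assumptions.

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group
	// At most 3 of the submitted functions run concurrently; further calls
	// to Go block until a slot frees up.
	g.SetLimit(3)

	for i := 0; i < 10; i++ {
		i := i // capture the loop variable for the goroutine
		g.Go(func() error {
			fmt.Println("processing item", i)
			return nil
		})
	}

	// TryGo only starts the goroutine if a slot is free right now.
	if !g.TryGo(func() error { return nil }) {
		fmt.Println("group is at its limit, work not started")
	}

	// Wait returns the first non-nil error from any of the functions.
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}

Calls to Go block once the limit is reached, while TryGo reports immediately whether a slot was available.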
var ARM struct { _ CacheLinePad HasSWP bool // SWP instruction support diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index 87dd5e30215b..f3eb993bf24b 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -6,7 +6,10 @@ package cpu import "runtime" -const cacheLineSize = 64 +// cacheLineSize is used to prevent false sharing of cache lines. +// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. +// It doesn't cost much and is much more future-proof. +const cacheLineSize = 128 func initOptions() { options = []option{ @@ -41,13 +44,10 @@ func archInit() { switch runtime.GOOS { case "freebsd": readARM64Registers() - case "linux", "netbsd": + case "linux", "netbsd", "openbsd": doinit() default: - // Most platforms don't seem to allow reading these registers. - // - // OpenBSD: - // See https://golang.org/issue/31746 + // Many platforms don't seem to allow reading these registers. setMinimalFeatures() } } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c index e363c7d13197..a4605e6d12e8 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -7,6 +7,7 @@ #include #include +#include // Need to wrap __get_cpuid_count because it's declared as static. int @@ -17,27 +18,21 @@ gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); } +#pragma GCC diagnostic ignored "-Wunknown-pragmas" +#pragma GCC push_options +#pragma GCC target("xsave") +#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function) + // xgetbv reads the contents of an XCR (Extended Control Register) // specified in the ECX register into registers EDX:EAX. // Currently, the only supported value for XCR is 0. -// -// TODO: Replace with a better alternative: -// -// #include -// -// #pragma GCC target("xsave") -// -// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { -// unsigned long long x = _xgetbv(0); -// *eax = x & 0xffffffff; -// *edx = (x >> 32) & 0xffffffff; -// } -// -// Note that _xgetbv is defined starting with GCC 8. void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { - __asm(" xorl %%ecx, %%ecx\n" - " xgetbv" - : "=a"(*eax), "=d"(*edx)); + uint64_t v = _xgetbv(0); + *eax = v & 0xffffffff; + *edx = v >> 32; } + +#pragma clang attribute pop +#pragma GCC pop_options diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index 79a38a0b9bcc..a968b80fa6ab 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -4,6 +4,11 @@ package cpu +import ( + "strings" + "syscall" +) + // HWCAP/HWCAP2 bits. These are exposed by Linux. const ( hwcap_FP = 1 << 0 @@ -32,10 +37,45 @@ const ( hwcap_ASIMDFHM = 1 << 23 ) +// linuxKernelCanEmulateCPUID reports whether we're running +// on Linux 4.11+. Ideally we'd like to ask the question about +// whether the current kernel contains +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 +// but the version number will have to do. 
+func linuxKernelCanEmulateCPUID() bool { + var un syscall.Utsname + syscall.Uname(&un) + var sb strings.Builder + for _, b := range un.Release[:] { + if b == 0 { + break + } + sb.WriteByte(byte(b)) + } + major, minor, _, ok := parseRelease(sb.String()) + return ok && (major > 4 || major == 4 && minor >= 11) +} + func doinit() { if err := readHWCAP(); err != nil { - // failed to read /proc/self/auxv, try reading registers directly - readARM64Registers() + // We failed to read /proc/self/auxv. This can happen if the binary has + // been given extra capabilities(7) with /bin/setcap. + // + // When this happens, we have two options. If the Linux kernel is new + // enough (4.11+), we can read the arm64 registers directly which'll + // trap into the kernel and then return back to userspace. + // + // But on older kernels, such as Linux 4.4.180 as used on many Synology + // devices, calling readARM64Registers (specifically getisar0) will + // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo + // instead. + // + // See golang/go#57336. + if linuxKernelCanEmulateCPUID() { + readARM64Registers() + } else { + readLinuxProcCPUInfo() + } return } diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go new file mode 100644 index 000000000000..0f57b05bdbe5 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -0,0 +1,13 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 +// +build loong64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go new file mode 100644 index 000000000000..85b64d5ccb73 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go @@ -0,0 +1,65 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + // From OpenBSD's sys/sysctl.h. + _CTL_MACHDEP = 7 + + // From OpenBSD's machine/cpu.h. + _CPU_ID_AA64ISAR0 = 2 + _CPU_ID_AA64ISAR1 = 3 +) + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 + +func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +func sysctlUint64(mib []uint32) (uint64, bool) { + var out uint64 + nout := unsafe.Sizeof(out) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { + return 0, false + } + return out, true +} + +func doinit() { + setMinimalFeatures() + + // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. 
+ isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) + if !ok { + return + } + isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) + if !ok { + return + } + parseARM64SystemRegisters(isar0, isar1, 0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s new file mode 100644 index 000000000000..054ba05d607b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go index f8c484f589f5..f3cde129b634 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !linux && !netbsd && arm64 -// +build !linux,!netbsd,arm64 +//go:build !linux && !netbsd && !openbsd && arm64 +// +build !linux,!netbsd,!openbsd,arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go new file mode 100644 index 000000000000..060d46b6eacc --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -0,0 +1,15 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !linux && (ppc64 || ppc64le) +// +build !aix +// +build !linux +// +build ppc64 ppc64le + +package cpu + +func archInit() { + PPC64.IsPOWER8 = true + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go new file mode 100644 index 000000000000..dd10eb79feef --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && riscv64 +// +build !linux,riscv64 + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go new file mode 100644 index 000000000000..762b63d6882c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/parse.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "strconv" + +// parseRelease parses a dot-separated version number. It follows the semver +// syntax, but allows the minor and patch versions to be elided. +// +// This is a copy of the Go runtime's parseRelease from +// https://golang.org/cl/209597. +func parseRelease(rel string) (major, minor, patch int, ok bool) { + // Strip anything after a dash or plus. + for i := 0; i < len(rel); i++ { + if rel[i] == '-' || rel[i] == '+' { + rel = rel[:i] + break + } + } + + next := func() (int, bool) { + for i := 0; i < len(rel); i++ { + if rel[i] == '.' 
{ + ver, err := strconv.Atoi(rel[:i]) + rel = rel[i+1:] + return ver, err == nil + } + } + ver, err := strconv.Atoi(rel) + rel = "" + return ver, err == nil + } + if major, ok = next(); !ok || rel == "" { + return + } + if minor, ok = next(); !ok || rel == "" { + return + } + patch, ok = next() + return +} diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go new file mode 100644 index 000000000000..d87bd6b3eb05 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go @@ -0,0 +1,54 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && arm64 +// +build linux,arm64 + +package cpu + +import ( + "errors" + "io" + "os" + "strings" +) + +func readLinuxProcCPUInfo() error { + f, err := os.Open("/proc/cpuinfo") + if err != nil { + return err + } + defer f.Close() + + var buf [1 << 10]byte // enough for first CPU + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + in := string(buf[:n]) + const features = "\nFeatures : " + i := strings.Index(in, features) + if i == -1 { + return errors.New("no CPU features found") + } + in = in[i+len(features):] + if i := strings.Index(in, "\n"); i != -1 { + in = in[:i] + } + m := map[string]*bool{} + + initOptions() // need it early here; it's harmless to call twice + for _, o := range options { + m[o.Name] = o.Feature + } + // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". + m["evtstrm"] = &ARM64.HasEVTSTRM + + for _, f := range strings.Fields(in) { + if p, ok := m[f]; ok { + *p = true + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go index a864f24d7589..96134157a10d 100644 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -5,7 +5,7 @@ // Recreate a getsystemcfg syscall handler instead of // using the one provided by x/sys/unix to avoid having // the dependency between them. (See golang.org/issue/32102) -// Morever, this file will be used during the building of +// Moreover, this file will be used during the building of // gccgo's libgo and thus must not used a CGo method. //go:build aix && gccgo diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go index 78192498db01..b981cfbb4ae3 100644 --- a/vendor/golang.org/x/sys/execabs/execabs.go +++ b/vendor/golang.org/x/sys/execabs/execabs.go @@ -53,7 +53,7 @@ func relError(file, path string) error { // LookPath instead returns an error. func LookPath(file string) (string, error) { path, err := exec.LookPath(file) - if err != nil { + if err != nil && !isGo119ErrDot(err) { return "", err } if filepath.Base(file) == file && !filepath.IsAbs(path) { diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go new file mode 100644 index 000000000000..6ab5f50894e2 --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
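For context on the kernel-version gate added above: linuxKernelCanEmulateCPUID feeds the uname release string into parseRelease, whose elision rules (minor and patch may be omitted, anything after '-' or '+' is dropped) are easiest to see table-driven. The test below is illustrative only and not part of this patch; it assumes it sits next to parse.go inside package cpu, and the test name and sample release strings are hypothetical.

package cpu

import "testing"

// Sketch of the parseRelease behaviour introduced in parse.go above.
func TestParseReleaseSketch(t *testing.T) {
	cases := []struct {
		in                  string
		major, minor, patch int
		ok                  bool
	}{
		{"4.11", 4, 11, 0, true},              // patch elided
		{"4.4.180-synology", 4, 4, 180, true}, // suffix after '-' dropped
		{"5", 5, 0, 0, true},                  // minor and patch elided
		{"not-a-version", 0, 0, 0, false},     // not parseable
	}
	for _, c := range cases {
		maj, min, pat, ok := parseRelease(c.in)
		if maj != c.major || min != c.minor || pat != c.patch || ok != c.ok {
			t.Errorf("parseRelease(%q) = %d,%d,%d,%v; want %d,%d,%d,%v",
				c.in, maj, min, pat, ok, c.major, c.minor, c.patch, c.ok)
		}
	}
}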
+ +//go:build !go1.19 +// +build !go1.19 + +package execabs + +func isGo119ErrDot(err error) bool { + return false +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go new file mode 100644 index 000000000000..46c5b525e7b8 --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package execabs + +import ( + "errors" + "os/exec" +) + +func isGo119ErrDot(err error) bool { + return errors.Is(err, exec.ErrDot) +} diff --git a/vendor/golang.org/x/sys/plan9/mkerrors.sh b/vendor/golang.org/x/sys/plan9/mkerrors.sh index 85309c4a5ba7..526d04ab68cd 100644 --- a/vendor/golang.org/x/sys/plan9/mkerrors.sh +++ b/vendor/golang.org/x/sys/plan9/mkerrors.sh @@ -126,7 +126,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | + grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT' | sort ) @@ -136,7 +136,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | + grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go index 602473cba374..67e5b0115c1f 100644 --- a/vendor/golang.org/x/sys/plan9/syscall.go +++ b/vendor/golang.org/x/sys/plan9/syscall.go @@ -29,8 +29,6 @@ import ( "bytes" "strings" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) // ByteSliceFromString returns a NUL-terminated slice of bytes @@ -82,13 +80,7 @@ func BytePtrToString(p *byte) string { ptr = unsafe.Pointer(uintptr(ptr) + 1) } - var s []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - h.Data = unsafe.Pointer(p) - h.Len = n - h.Cap = n - - return string(s) + return string(unsafe.Slice(p, n)) } // Single-word zero for use when we need a valid pointer to 0 bytes. @@ -113,5 +105,6 @@ func (tv *Timeval) Nano() int64 { // use is a no-op, but the compiler cannot see that it is. // Calling use(p) ensures that p is kept live until that point. 
+// //go:noescape func use(p unsafe.Pointer) diff --git a/vendor/golang.org/x/sys/plan9/syscall_plan9.go b/vendor/golang.org/x/sys/plan9/syscall_plan9.go index 723b1f4002aa..d079d8116e9f 100644 --- a/vendor/golang.org/x/sys/plan9/syscall_plan9.go +++ b/vendor/golang.org/x/sys/plan9/syscall_plan9.go @@ -115,6 +115,7 @@ func Write(fd int, p []byte) (n int, err error) { var ioSync int64 //sys fd2path(fd int, buf []byte) (err error) + func Fd2path(fd int) (path string, err error) { var buf [512]byte @@ -126,6 +127,7 @@ func Fd2path(fd int) (path string, err error) { } //sys pipe(p *[2]int32) (err error) + func Pipe(p []int) (err error) { if len(p) != 2 { return syscall.ErrorString("bad arg in system call") @@ -180,6 +182,7 @@ func (w Waitmsg) ExitStatus() int { } //sys await(s []byte) (n int, err error) + func Await(w *Waitmsg) (err error) { var buf [512]byte var f [5][]byte @@ -301,42 +304,49 @@ func Getgroups() (gids []int, err error) { } //sys open(path string, mode int) (fd int, err error) + func Open(path string, mode int) (fd int, err error) { fixwd() return open(path, mode) } //sys create(path string, mode int, perm uint32) (fd int, err error) + func Create(path string, mode int, perm uint32) (fd int, err error) { fixwd() return create(path, mode, perm) } //sys remove(path string) (err error) + func Remove(path string) error { fixwd() return remove(path) } //sys stat(path string, edir []byte) (n int, err error) + func Stat(path string, edir []byte) (n int, err error) { fixwd() return stat(path, edir) } //sys bind(name string, old string, flag int) (err error) + func Bind(name string, old string, flag int) (err error) { fixwd() return bind(name, old, flag) } //sys mount(fd int, afd int, old string, flag int, aname string) (err error) + func Mount(fd int, afd int, old string, flag int, aname string) (err error) { fixwd() return mount(fd, afd, old, flag, aname) } //sys wstat(path string, edir []byte) (err error) + func Wstat(path string, edir []byte) (err error) { fixwd() return wstat(path, edir) diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s new file mode 100644 index 000000000000..e5b9a84899ac --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s @@ -0,0 +1,31 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd +// +build gc + +#include "textflag.h" + +// +// System call support for ppc64, BSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s new file mode 100644 index 000000000000..d560019ea29e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s @@ -0,0 +1,29 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd +// +build gc + +#include "textflag.h" + +// System call support for RISCV64 BSD + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s new file mode 100644 index 000000000000..565357288a81 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -0,0 +1,54 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && loong64 && gc +// +build linux +// +build loong64 +// +build gc + +#include "textflag.h" + + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + JAL runtime·entersyscall(SB) + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV R0, R7 + MOVV R0, R8 + MOVV R0, R9 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) + MOVV R0, r2+40(FP) // r2 is not used. Always set to 0 + JAL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV R0, R7 + MOVV R0, R8 + MOVV R0, R9 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) + MOVV R0, r2+40(FP) // r2 is not used. Always set to 0 + RET diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index e74e5eaa3bfe..2499f977b070 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index 4362f47e2c00..b0f2bc4ae3b2 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
// -//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh +//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh +// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go deleted file mode 100644 index 761db66efece..000000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. - -package unix - -const ( - DLT_HHDLC = 0x79 - IFF_SMART = 0x20 - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_IPXIP = 0xf9 - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - 
IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_FAITH = 0x16 - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 - SIOCADDRT = 0x8030720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8030720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go deleted file mode 100644 index 070f44b65104..000000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. 
- -package unix - -const ( - DLT_HHDLC = 0x79 - IFF_SMART = 0x20 - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_IPXIP = 0xf9 - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 
0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_FAITH = 0x16 - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 - SIOCADDRT = 0x8040720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8040720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go deleted file mode 100644 index 856dca325438..000000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package unix - -const ( - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP 
= 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - - // missing constants on FreeBSD-11.1-RELEASE, copied from old values in ztypes_freebsd_arm.go - IFF_SMART = 0x20 - IFT_FAITH = 0xf2 - IFT_IPXIP = 0xf9 - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IP_FAITH = 0x16 - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - SIOCADDRT = 0x8030720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8030720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go deleted file mode 100644 index 946dcf3fc7ec..000000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. - -package unix - -const ( - DLT_HHDLC = 0x79 - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index 0dee23222ca8..b06f52d748f6 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build gccgo && !aix -// +build gccgo,!aix +//go:build gccgo && !aix && !hurd +// +build gccgo,!aix,!hurd package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index 2cb1fefac640..c4fce0e70036 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build gccgo -// +build !aix +// +build gccgo,!hurd +// +build !aix,!hurd #include #include diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 934af313c323..15721a5104e4 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -8,7 +8,6 @@ package unix import ( - "bytes" "unsafe" ) @@ -45,13 +44,7 @@ func NewIfreq(name string) (*Ifreq, error) { // Name returns the interface name associated with the Ifreq. func (ifr *Ifreq) Name() string { - // BytePtrToString requires a NULL terminator or the program may crash. 
If - // one is not present, just return the empty string. - if !bytes.Contains(ifr.raw.Ifrn[:], []byte{0x00}) { - return "" - } - - return BytePtrToString(&ifr.raw.Ifrn[0]) + return ByteSliceToString(ifr.raw.Ifrn[:]) } // According to netdevice(7), only AF_INET addresses are returned for numerous diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index 6c7ad052e6b3..1c51b0ec2bcd 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd || solaris +// +build aix darwin dragonfly freebsd hurd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index 1dadead21e6d..0d12c0851adf 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -4,9 +4,7 @@ package unix -import ( - "unsafe" -) +import "unsafe" // IoctlRetInt performs an ioctl operation specified by req on a device // associated with opened file descriptor fd, and returns a non-negative @@ -194,3 +192,42 @@ func ioctlIfreqData(fd int, req uint, value *ifreqData) error { // identical so pass *IfreqData directly. return ioctlPtr(fd, req, unsafe.Pointer(value)) } + +// IoctlKCMClone attaches a new file descriptor to a multiplexor by cloning an +// existing KCM socket, returning a structure containing the file descriptor of +// the new socket. +func IoctlKCMClone(fd int) (*KCMClone, error) { + var info KCMClone + if err := ioctlPtr(fd, SIOCKCMCLONE, unsafe.Pointer(&info)); err != nil { + return nil, err + } + + return &info, nil +} + +// IoctlKCMAttach attaches a TCP socket and associated BPF program file +// descriptor to a multiplexor. +func IoctlKCMAttach(fd int, info KCMAttach) error { + return ioctlPtr(fd, SIOCKCMATTACH, unsafe.Pointer(&info)) +} + +// IoctlKCMUnattach unattaches a TCP socket file descriptor from a multiplexor. +func IoctlKCMUnattach(fd int, info KCMUnattach) error { + return ioctlPtr(fd, SIOCKCMUNATTACH, unsafe.Pointer(&info)) +} + +// IoctlLoopGetStatus64 gets the status of the loop device associated with the +// file descriptor fd using the LOOP_GET_STATUS64 operation. +func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) { + var value LoopInfo64 + if err := ioctlPtr(fd, LOOP_GET_STATUS64, unsafe.Pointer(&value)); err != nil { + return nil, err + } + return &value, nil +} + +// IoctlLoopSetStatus64 sets the status of the loop device associated with the +// file descriptor fd using the LOOP_SET_STATUS64 operation. 
+func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error { + return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value)) +} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index ee73623489b0..8e3947c3686c 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -73,12 +73,12 @@ aix_ppc64) darwin_amd64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm_darwin.go" + mkasm="go run mkasm.go" ;; darwin_arm64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm_darwin.go" + mkasm="go run mkasm.go" ;; dragonfly_amd64) mkerrors="$mkerrors -m64" @@ -89,25 +89,30 @@ dragonfly_amd64) freebsd_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_amd64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_arm) mkerrors="$mkerrors" mksyscall="go run mksyscall.go -l32 -arm" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; freebsd_arm64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +freebsd_riscv64) + mkerrors="$mkerrors -m64" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; netbsd_386) @@ -137,42 +142,60 @@ netbsd_arm64) mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_386) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m32" - mksyscall="go run mksyscall.go -l32 -openbsd" + mksyscall="go run mksyscall.go -l32 -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_amd64) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd" + mksyscall="go run mksyscall.go -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_arm) + mkasm="go run mkasm.go" mkerrors="$mkerrors" - mksyscall="go run mksyscall.go -l32 -openbsd -arm" + mksyscall="go run mksyscall.go -l32 -openbsd -arm -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare 
syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; openbsd_arm64) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd" + mksyscall="go run mksyscall.go -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; openbsd_mips64) + mkasm="go run mkasm.go" + mkerrors="$mkerrors -m64" + mksyscall="go run mksyscall.go -openbsd -libc" + mksysctl="go run mksysctl_openbsd.go" + # Let the type of C char be signed for making the bare syscall + # API consistent across platforms. + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +openbsd_ppc64) + mkasm="go run mkasm.go" + mkerrors="$mkerrors -m64" + mksyscall="go run mksyscall.go -openbsd -libc" + mksysctl="go run mksysctl_openbsd.go" + # Let the type of C char be signed for making the bare syscall + # API consistent across platforms. + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +openbsd_riscv64) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd" + mksyscall="go run mksyscall.go -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" @@ -209,11 +232,6 @@ esac if [ "$GOOSARCH" == "aix_ppc64" ]; then # aix/ppc64 script generates files instead of writing to stdin. 
echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ; - elif [ "$GOOS" == "darwin" ]; then - # 1.12 and later, syscalls via libSystem - echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; - # 1.13 and later, syscalls via libSystem (including syscallPtr) - echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go"; elif [ "$GOOS" == "illumos" ]; then # illumos code generation requires a --illumos switch echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go"; @@ -227,5 +245,5 @@ esac if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi - if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi + if [ -n "$mkasm" ]; then echo "$mkasm $GOOS $GOARCH"; fi ) | $run diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index e92ddea00bee..7456d9ddde16 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -128,6 +128,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -202,9 +203,11 @@ struct ltchars { #include #include #include +#include #include #include #include +#include #include #include #include @@ -214,6 +217,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -231,6 +235,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -292,6 +297,10 @@ struct ltchars { #define SOL_NETLINK 270 #endif +#ifndef SOL_SMC +#define SOL_SMC 286 +#endif + #ifdef SOL_BLUETOOTH // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h // but it is already in bluetooth_linux.go @@ -503,6 +512,7 @@ ccflags="$@" $2 ~ /^O?XTABS$/ || $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || + $2 ~ /^KCM/ || $2 ~ /^LANDLOCK_/ || $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || @@ -525,7 +535,7 @@ ccflags="$@" $2 ~ /^(MS|MNT|MOUNT|UMOUNT)_/ || $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || - $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ || + $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|PIOD|TFD)_/ || $2 ~ /^KEXEC_/ || $2 ~ /^LINUX_REBOOT_CMD_/ || $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || @@ -549,6 +559,7 @@ ccflags="$@" $2 ~ /^CLONE_[A-Z_]+/ || $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && $2 ~ /^(BPF|DLT)_/ || + $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || $2 ~ /^CAN_/ || $2 ~ /^CAP_/ || @@ -571,7 +582,6 @@ ccflags="$@" $2 ~ /^SEEK_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || - $2 !~ /^AUDIT_RECORD_MAGIC/ && $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || @@ -600,6 +610,7 @@ ccflags="$@" $2 ~ /^ITIMER_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || + $2 ~ /^P_/ || $2 ~/^PPPIOC/ || $2 ~ /^FAN_|FANOTIFY_/ || $2 == "HID_MAX_DESCRIPTOR_SIZE" || @@ -609,6 +620,7 @@ ccflags="$@" $2 ~ /^OTP/ || $2 ~ /^MEM/ || $2 ~ /^WG/ || + $2 ~ /^FIB_RULE_/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} @@ -630,7 +642,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 
~ /^SIG[A-Z0-9]+$/ { print $2 }' | - egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | + grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | sort ) @@ -640,7 +652,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | + grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 453a942c5db3..3865943f6e27 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -52,6 +52,20 @@ func ParseSocketControlMessage(b []byte) ([]SocketControlMessage, error) { return msgs, nil } +// ParseOneSocketControlMessage parses a single socket control message from b, returning the message header, +// message data (a slice of b), and the remainder of b after that single message. +// When there are no remaining messages, len(remainder) == 0. +func ParseOneSocketControlMessage(b []byte) (hdr Cmsghdr, data []byte, remainder []byte, err error) { + h, dbuf, err := socketControlMessageHeaderAndData(b) + if err != nil { + return Cmsghdr{}, nil, nil, err + } + if i := cmsgAlignOf(int(h.Len)); i < len(b) { + remainder = b[i:] + } + return *h, dbuf, remainder, nil +} + func socketControlMessageHeaderAndData(b []byte) (*Cmsghdr, []byte, error) { h := (*Cmsghdr)(unsafe.Pointer(&b[0])) if h.Len < SizeofCmsghdr || uint64(h.Len) > uint64(len(b)) { diff --git a/vendor/golang.org/x/sys/unix/str.go b/vendor/golang.org/x/sys/unix/str.go deleted file mode 100644 index 8ba89ed8694f..000000000000 --- a/vendor/golang.org/x/sys/unix/str.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris - -package unix - -func itoa(val int) string { // do it here rather than with fmt to avoid dependency - if val < 0 { - return "-" + uitoa(uint(-val)) - } - return uitoa(uint(val)) -} - -func uitoa(val uint) string { - var buf [32]byte // big enough for int64 - i := len(buf) - 1 - for val >= 10 { - buf[i] = byte(val%10 + '0') - i-- - val /= 10 - } - buf[i] = byte(val + '0') - return string(buf[i:]) -} diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 649fa87405d1..63e8c838317f 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -29,8 +29,6 @@ import ( "bytes" "strings" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) // ByteSliceFromString returns a NUL-terminated slice of bytes @@ -82,13 +80,7 @@ func BytePtrToString(p *byte) string { ptr = unsafe.Pointer(uintptr(ptr) + 1) } - var s []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - h.Data = unsafe.Pointer(p) - h.Len = n - h.Cap = n - - return string(s) + return string(unsafe.Slice(p, n)) } // Single-word zero for use when we need a valid pointer to 0 bytes. 
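The BytePtrToString rewrite just above (and its plan9 twin earlier in this patch) drops the golang.org/x/sys/internal/unsafeheader shim in favour of unsafe.Slice, available since Go 1.17. A minimal standalone sketch of that pattern follows; it is illustrative only and not part of this patch.

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	raw := []byte{'h', 'e', 'l', 'l', 'o', 0}
	p := &raw[0]

	// Find the NUL terminator, as BytePtrToString does before slicing.
	n := 0
	for ptr := unsafe.Pointer(p); *(*byte)(ptr) != 0; ptr = unsafe.Pointer(uintptr(ptr) + 1) {
		n++
	}

	// unsafe.Slice builds a []byte view over p, replacing the old unsafeheader trick.
	fmt.Println(string(unsafe.Slice(p, n))) // prints "hello"
}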
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 4f55c8d99960..2db1b51e99f0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -37,6 +37,7 @@ func Creat(path string, mode uint32) (fd int, err error) { } //sys utimes(path string, times *[2]Timeval) (err error) + func Utimes(path string, tv []Timeval) error { if len(tv) != 2 { return EINVAL @@ -45,6 +46,7 @@ func Utimes(path string, tv []Timeval) error { } //sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) + func UtimesNano(path string, ts []Timespec) error { if len(ts) != 2 { return EINVAL @@ -215,20 +217,63 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } -func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { - // Recvmsg not implemented on AIX - sa := new(SockaddrUnix) - return -1, -1, -1, sa, ENOSYS -} - -func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { - _, err = SendmsgN(fd, p, oob, to, flags) +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(rsa)) + msg.Namelen = uint32(SizeofSockaddrAny) + var dummy byte + if len(oob) > 0 { + // receive at least one normal byte + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } + if n, err = recvmsg(fd, &msg, flags); n == -1 { + return + } + oobn = int(msg.Controllen) + recvflags = int(msg.Flags) return } -func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { - // SendmsgN not implemented on AIX - return -1, ENOSYS +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = uint32(salen) + var dummy byte + var empty bool + if len(oob) > 0 { + // send at least one normal byte + empty = emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } + if n, err = sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && empty { + n = 0 + } + return n, nil } func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { @@ -306,11 +351,13 @@ func direntNamlen(buf []byte) (uint64, bool) { } //sys getdirent(fd int, buf []byte) (n int, err error) + func Getdents(fd int, buf []byte) (n int, err error) { return getdirent(fd, buf) } //sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) + func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { var status _C_int var r Pid_t @@ -378,6 +425,7 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range + func Fsync(fd int) error { return fsyncRange(fd, O_SYNC, 0, 0) } @@ -458,8 +506,8 @@ func Fsync(fd int) error { //sys Listen(s int, n int) (err error) //sys lstat(path string, stat *Stat_t) (err error) 
//sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = pread64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = pwrite64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = pread64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = pwrite64 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) //sysnb Setregid(rgid int, egid int) (err error) @@ -542,6 +590,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { //sys Getsystemcfg(label int) (n uint64) //sys umount(target string) (err error) + func Unmount(target string, flags int) (err error) { if flags != 0 { // AIX doesn't have any flags for umount. diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 0ce45232611f..eda42671f195 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -325,80 +325,62 @@ func GetsockoptString(fd, level, opt int) (string, error) { //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) -func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr - var rsa RawSockaddrAny - msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } var dummy byte if len(oob) > 0 { // receive at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Control = (*byte)(unsafe.Pointer(&oob[0])) msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); err != nil { return } oobn = int(msg.Controllen) recvflags = int(msg.Flags) - // source address is only specified if the socket is unconnected - if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(fd, &rsa) - } return } //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) -func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { - _, err = SendmsgN(fd, p, oob, to, flags) - return -} - -func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { - var ptr unsafe.Pointer - var salen _Socklen - if to != nil { - ptr, salen, err = to.sockaddr() - if err != nil { - return 0, err - } - } +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(ptr)) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } var dummy byte + var empty bool if len(oob) > 0 { // send at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + empty = emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Control = (*byte)(unsafe.Pointer(&oob[0])) 
msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil @@ -571,12 +553,7 @@ func UtimesNano(path string, ts []Timespec) error { if len(ts) != 2 { return EINVAL } - // Darwin setattrlist can set nanosecond timestamps - err := setattrlistTimes(path, ts, 0) - if err != ENOSYS { - return err - } - err = utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) + err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) if err != ENOSYS { return err } @@ -596,10 +573,6 @@ func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { if len(ts) != 2 { return EINVAL } - err := setattrlistTimes(path, ts, flags) - if err != ENOSYS { - return err - } return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go deleted file mode 100644 index b0098607c706..000000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin && go1.12 && !go1.13 -// +build darwin,go1.12,!go1.13 - -package unix - -import ( - "unsafe" -) - -const _SYS_GETDIRENTRIES64 = 344 - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - // To implement this using libSystem we'd need syscall_syscallPtr for - // fdopendir. However, syscallPtr was only added in Go 1.13, so we fall - // back to raw syscalls for this func on Go 1.12. - var p unsafe.Pointer - if len(buf) > 0 { - p = unsafe.Pointer(&buf[0]) - } else { - p = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - return n, errnoErr(e1) - } - return n, nil -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go deleted file mode 100644 index 1596426b1e2e..000000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin && go1.13 -// +build darwin,go1.13 - -package unix - -import ( - "unsafe" - - "golang.org/x/sys/internal/unsafeheader" -) - -//sys closedir(dir uintptr) (err error) -//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) - -func fdopendir(fd int) (dir uintptr, err error) { - r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0) - dir = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_fdopendir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - // Simulate Getdirentries using fdopendir/readdir_r/closedir. - // We store the number of entries to skip in the seek - // offset of fd. See issue #31368. 
- // It's not the full required semantics, but should handle the case - // of calling Getdirentries or ReadDirent repeatedly. - // It won't handle assigning the results of lseek to *basep, or handle - // the directory being edited underfoot. - skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) - if err != nil { - return 0, err - } - - // We need to duplicate the incoming file descriptor - // because the caller expects to retain control of it, but - // fdopendir expects to take control of its argument. - // Just Dup'ing the file descriptor is not enough, as the - // result shares underlying state. Use Openat to make a really - // new file descriptor referring to the same directory. - fd2, err := Openat(fd, ".", O_RDONLY, 0) - if err != nil { - return 0, err - } - d, err := fdopendir(fd2) - if err != nil { - Close(fd2) - return 0, err - } - defer closedir(d) - - var cnt int64 - for { - var entry Dirent - var entryp *Dirent - e := readdir_r(d, &entry, &entryp) - if e != 0 { - return n, errnoErr(e) - } - if entryp == nil { - break - } - if skip > 0 { - skip-- - cnt++ - continue - } - - reclen := int(entry.Reclen) - if reclen > len(buf) { - // Not enough room. Return for now. - // The counter will let us know where we should start up again. - // Note: this strategy for suspending in the middle and - // restarting is O(n^2) in the length of the directory. Oh well. - break - } - - // Copy entry into return buffer. - var s []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - hdr.Data = unsafe.Pointer(&entry) - hdr.Cap = reclen - hdr.Len = reclen - copy(buf, s) - - buf = buf[reclen:] - n += reclen - cnt++ - } - // Set the seek offset of the input fd to record - // how many files we've already returned. - _, err = Seek(fd, cnt, 0 /* SEEK_SET */) - if err != nil { - return n, err - } - - return n, nil -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 0eaab91314c6..1f63382182f3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -19,6 +19,96 @@ import ( "unsafe" ) +//sys closedir(dir uintptr) (err error) +//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) + +func fdopendir(fd int) (dir uintptr, err error) { + r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0) + dir = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fdopendir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + // Simulate Getdirentries using fdopendir/readdir_r/closedir. + // We store the number of entries to skip in the seek + // offset of fd. See issue #31368. + // It's not the full required semantics, but should handle the case + // of calling Getdirentries or ReadDirent repeatedly. + // It won't handle assigning the results of lseek to *basep, or handle + // the directory being edited underfoot. + skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + return 0, err + } + + // We need to duplicate the incoming file descriptor + // because the caller expects to retain control of it, but + // fdopendir expects to take control of its argument. + // Just Dup'ing the file descriptor is not enough, as the + // result shares underlying state. Use Openat to make a really + // new file descriptor referring to the same directory. 
+ fd2, err := Openat(fd, ".", O_RDONLY, 0) + if err != nil { + return 0, err + } + d, err := fdopendir(fd2) + if err != nil { + Close(fd2) + return 0, err + } + defer closedir(d) + + var cnt int64 + for { + var entry Dirent + var entryp *Dirent + e := readdir_r(d, &entry, &entryp) + if e != 0 { + return n, errnoErr(e) + } + if entryp == nil { + break + } + if skip > 0 { + skip-- + cnt++ + continue + } + + reclen := int(entry.Reclen) + if reclen > len(buf) { + // Not enough room. Return for now. + // The counter will let us know where we should start up again. + // Note: this strategy for suspending in the middle and + // restarting is O(n^2) in the length of the directory. Oh well. + break + } + + // Copy entry into return buffer. + s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen) + copy(buf, s) + + buf = buf[reclen:] + n += reclen + cnt++ + } + // Set the seek offset of the input fd to record + // how many files we've already returned. + _, err = Seek(fd, cnt, 0 /* SEEK_SET */) + if err != nil { + return n, err + } + + return n, nil +} + // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 @@ -141,16 +231,6 @@ func direntNamlen(buf []byte) (uint64, bool) { func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } -type attrList struct { - bitmapCount uint16 - _ uint16 - CommonAttr uint32 - VolAttr uint32 - DirAttr uint32 - FileAttr uint32 - Forkattr uint32 -} - //sysnb pipe(p *[2]int32) (err error) func Pipe(p []int) (err error) { @@ -282,36 +362,7 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) { return flistxattr(fd, xattrPointer(dest), len(dest), 0) } -func setattrlistTimes(path string, times []Timespec, flags int) error { - _p0, err := BytePtrFromString(path) - if err != nil { - return err - } - - var attrList attrList - attrList.bitmapCount = ATTR_BIT_MAP_COUNT - attrList.CommonAttr = ATTR_CMN_MODTIME | ATTR_CMN_ACCTIME - - // order is mtime, atime: the opposite of Chtimes - attributes := [2]Timespec{times[1], times[0]} - options := 0 - if flags&AT_SYMLINK_NOFOLLOW != 0 { - options |= FSOPT_NOFOLLOW - } - return setattrlist( - _p0, - unsafe.Pointer(&attrList), - unsafe.Pointer(&attributes), - unsafe.Sizeof(attributes), - options) -} - -//sys setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) error { - // Darwin doesn't support SYS_UTIMENSAT - return ENOSYS -} +//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) /* * Wrapped @@ -432,6 +483,13 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { return x, err } +func GetsockoptTCPConnectionInfo(fd, level, opt int) (*TCPConnectionInfo, error) { + var value TCPConnectionInfo + vallen := _Socklen(SizeofTCPConnectionInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + func SysctlKinfoProc(name string, args ...int) (*KinfoProc, error) { mib, err := sysctlmib(name, args...) 
if err != nil { @@ -543,11 +601,12 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) @@ -611,7 +670,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { // Nfssvc // Getfh // Quotactl -// Mount // Csops // Waitid // Add_profil diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 2e37c3167f39..a41111a794e2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -125,12 +125,14 @@ func Pipe2(p []int, flags int) (err error) { } //sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error) -func Pread(fd int, p []byte, offset int64) (n int, err error) { + +func pread(fd int, p []byte, offset int64) (n int, err error) { return extpread(fd, p, 0, offset) } //sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { return extpwrite(fd, p, 0, offset) } @@ -169,11 +171,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -func setattrlistTimes(path string, times []Timespec, flags int) error { - // used on Darwin for UtimesNano - return ENOSYS -} - //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -258,6 +255,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 2f650ae665cc..d50b9dc250b7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -17,25 +17,12 @@ import ( "unsafe" ) -const ( - SYS_FSTAT_FREEBSD12 = 551 // { int fstat(int fd, _Out_ struct stat *sb); } - SYS_FSTATAT_FREEBSD12 = 552 // { int fstatat(int fd, _In_z_ char *path, \ - SYS_GETDIRENTRIES_FREEBSD12 = 554 // { ssize_t getdirentries(int fd, \ - SYS_STATFS_FREEBSD12 = 555 // { int statfs(_In_z_ char *path, \ - SYS_FSTATFS_FREEBSD12 = 556 // { int fstatfs(int fd, \ - SYS_GETFSSTAT_FREEBSD12 = 557 // { int getfsstat( \ - SYS_MKNODAT_FREEBSD12 = 
559 // { int mknodat(int fd, _In_z_ char *path, \ -) - // See https://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/versions.html. var ( osreldateOnce sync.Once osreldate uint32 ) -// INO64_FIRST from /usr/src/lib/libc/sys/compat-ino64.h -const _ino64First = 1200031 - func supportsABI(ver uint32) bool { osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) return osreldate >= ver @@ -159,46 +146,21 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { var ( - _p0 unsafe.Pointer - bufsize uintptr - oldBuf []statfs_freebsd11_t - needsConvert bool + _p0 unsafe.Pointer + bufsize uintptr ) - if len(buf) > 0 { - if supportsABI(_ino64First) { - _p0 = unsafe.Pointer(&buf[0]) - bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) - } else { - n := len(buf) - oldBuf = make([]statfs_freebsd11_t, n) - _p0 = unsafe.Pointer(&oldBuf[0]) - bufsize = unsafe.Sizeof(statfs_freebsd11_t{}) * uintptr(n) - needsConvert = true - } - } - var sysno uintptr = SYS_GETFSSTAT - if supportsABI(_ino64First) { - sysno = SYS_GETFSSTAT_FREEBSD12 + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - r0, _, e1 := Syscall(sysno, uintptr(_p0), bufsize, uintptr(flags)) + r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) n = int(r0) if e1 != 0 { err = e1 } - if e1 == 0 && needsConvert { - for i := range oldBuf { - buf[i].convertFrom(&oldBuf[i]) - } - } return } -func setattrlistTimes(path string, times []Timespec, flags int) error { - // used on Darwin for UtimesNano - return ENOSYS -} - //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -250,87 +212,11 @@ func Uname(uname *Utsname) error { } func Stat(path string, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(AT_FDCWD, path, st, 0) - } - err = stat(path, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil + return Fstatat(AT_FDCWD, path, st, 0) } func Lstat(path string, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(AT_FDCWD, path, st, AT_SYMLINK_NOFOLLOW) - } - err = lstat(path, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Fstat(fd int, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstat_freebsd12(fd, st) - } - err = fstat(fd, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Fstatat(fd int, path string, st *Stat_t, flags int) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(fd, path, st, flags) - } - err = fstatat(fd, path, &oldStat, flags) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Statfs(path string, st *Statfs_t) (err error) { - var oldStatfs statfs_freebsd11_t - if supportsABI(_ino64First) { - return statfs_freebsd12(path, st) - } - err = statfs(path, &oldStatfs) - if err != nil { - return err - } - - st.convertFrom(&oldStatfs) - return nil -} - -func Fstatfs(fd int, st *Statfs_t) (err error) { - var oldStatfs statfs_freebsd11_t - if supportsABI(_ino64First) { - return fstatfs_freebsd12(fd, st) - } - err = fstatfs(fd, &oldStatfs) - if err != nil { - return err - } - 
- st.convertFrom(&oldStatfs) - return nil + return Fstatat(AT_FDCWD, path, st, AT_SYMLINK_NOFOLLOW) } func Getdents(fd int, buf []byte) (n int, err error) { @@ -338,162 +224,25 @@ func Getdents(fd int, buf []byte) (n int, err error) { } func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - if supportsABI(_ino64First) { - if basep == nil || unsafe.Sizeof(*basep) == 8 { - return getdirentries_freebsd12(fd, buf, (*uint64)(unsafe.Pointer(basep))) - } - // The freebsd12 syscall needs a 64-bit base. On 32-bit machines - // we can't just use the basep passed in. See #32498. - var base uint64 = uint64(*basep) - n, err = getdirentries_freebsd12(fd, buf, &base) - *basep = uintptr(base) - if base>>32 != 0 { - // We can't stuff the base back into a uintptr, so any - // future calls would be suspect. Generate an error. - // EIO is allowed by getdirentries. - err = EIO - } - return - } - - // The old syscall entries are smaller than the new. Use 1/4 of the original - // buffer size rounded up to DIRBLKSIZ (see /usr/src/lib/libc/sys/getdirentries.c). - oldBufLen := roundup(len(buf)/4, _dirblksiz) - oldBuf := make([]byte, oldBufLen) - n, err = getdirentries(fd, oldBuf, basep) - if err == nil && n > 0 { - n = convertFromDirents11(buf, oldBuf[:n]) + if basep == nil || unsafe.Sizeof(*basep) == 8 { + return getdirentries(fd, buf, (*uint64)(unsafe.Pointer(basep))) + } + // The syscall needs a 64-bit base. On 32-bit machines + // we can't just use the basep passed in. See #32498. + var base uint64 = uint64(*basep) + n, err = getdirentries(fd, buf, &base) + *basep = uintptr(base) + if base>>32 != 0 { + // We can't stuff the base back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO is allowed by getdirentries. + err = EIO } return } func Mknod(path string, mode uint32, dev uint64) (err error) { - var oldDev int - if supportsABI(_ino64First) { - return mknodat_freebsd12(AT_FDCWD, path, mode, dev) - } - oldDev = int(dev) - return mknod(path, mode, oldDev) -} - -func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { - var oldDev int - if supportsABI(_ino64First) { - return mknodat_freebsd12(fd, path, mode, dev) - } - oldDev = int(dev) - return mknodat(fd, path, mode, oldDev) -} - -// round x to the nearest multiple of y, larger or equal to x. -// -// from /usr/include/sys/param.h Macros for counting and rounding. 
-// #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) -func roundup(x, y int) int { - return ((x + y - 1) / y) * y -} - -func (s *Stat_t) convertFrom(old *stat_freebsd11_t) { - *s = Stat_t{ - Dev: uint64(old.Dev), - Ino: uint64(old.Ino), - Nlink: uint64(old.Nlink), - Mode: old.Mode, - Uid: old.Uid, - Gid: old.Gid, - Rdev: uint64(old.Rdev), - Atim: old.Atim, - Mtim: old.Mtim, - Ctim: old.Ctim, - Btim: old.Btim, - Size: old.Size, - Blocks: old.Blocks, - Blksize: old.Blksize, - Flags: old.Flags, - Gen: uint64(old.Gen), - } -} - -func (s *Statfs_t) convertFrom(old *statfs_freebsd11_t) { - *s = Statfs_t{ - Version: _statfsVersion, - Type: old.Type, - Flags: old.Flags, - Bsize: old.Bsize, - Iosize: old.Iosize, - Blocks: old.Blocks, - Bfree: old.Bfree, - Bavail: old.Bavail, - Files: old.Files, - Ffree: old.Ffree, - Syncwrites: old.Syncwrites, - Asyncwrites: old.Asyncwrites, - Syncreads: old.Syncreads, - Asyncreads: old.Asyncreads, - // Spare - Namemax: old.Namemax, - Owner: old.Owner, - Fsid: old.Fsid, - // Charspare - // Fstypename - // Mntfromname - // Mntonname - } - - sl := old.Fstypename[:] - n := clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Fstypename[:], old.Fstypename[:n]) - - sl = old.Mntfromname[:] - n = clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Mntfromname[:], old.Mntfromname[:n]) - - sl = old.Mntonname[:] - n = clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Mntonname[:], old.Mntonname[:n]) -} - -func convertFromDirents11(buf []byte, old []byte) int { - const ( - fixedSize = int(unsafe.Offsetof(Dirent{}.Name)) - oldFixedSize = int(unsafe.Offsetof(dirent_freebsd11{}.Name)) - ) - - dstPos := 0 - srcPos := 0 - for dstPos+fixedSize < len(buf) && srcPos+oldFixedSize < len(old) { - var dstDirent Dirent - var srcDirent dirent_freebsd11 - - // If multiple direntries are written, sometimes when we reach the final one, - // we may have cap of old less than size of dirent_freebsd11. 
- copy((*[unsafe.Sizeof(srcDirent)]byte)(unsafe.Pointer(&srcDirent))[:], old[srcPos:]) - - reclen := roundup(fixedSize+int(srcDirent.Namlen)+1, 8) - if dstPos+reclen > len(buf) { - break - } - - dstDirent.Fileno = uint64(srcDirent.Fileno) - dstDirent.Off = 0 - dstDirent.Reclen = uint16(reclen) - dstDirent.Type = srcDirent.Type - dstDirent.Pad0 = 0 - dstDirent.Namlen = uint16(srcDirent.Namlen) - dstDirent.Pad1 = 0 - - copy(dstDirent.Name[:], srcDirent.Name[:srcDirent.Namlen]) - copy(buf[dstPos:], (*[unsafe.Sizeof(dstDirent)]byte)(unsafe.Pointer(&dstDirent))[:]) - padding := buf[dstPos+fixedSize+int(dstDirent.Namlen) : dstPos+reclen] - for i := range padding { - padding[i] = 0 - } - - dstPos += int(dstDirent.Reclen) - srcPos += int(srcDirent.Reclen) - } - - return dstPos + return Mknodat(AT_FDCWD, path, mode, dev) } func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { @@ -506,31 +255,31 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ptrace(request int, pid int, addr uintptr, data int) (err error) func PtraceAttach(pid int) (err error) { - return ptrace(PTRACE_ATTACH, pid, 0, 0) + return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceCont(pid int, signal int) (err error) { - return ptrace(PTRACE_CONT, pid, 1, signal) + return ptrace(PT_CONTINUE, pid, 1, signal) } func PtraceDetach(pid int) (err error) { - return ptrace(PTRACE_DETACH, pid, 1, 0) + return ptrace(PT_DETACH, pid, 1, 0) } func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { - return ptrace(PTRACE_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) + return ptrace(PT_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) } func PtraceGetRegs(pid int, regsout *Reg) (err error) { - return ptrace(PTRACE_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) + return ptrace(PT_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) } func PtraceLwpEvents(pid int, enable int) (err error) { - return ptrace(PTRACE_LWPEVENTS, pid, 0, enable) + return ptrace(PT_LWP_EVENTS, pid, 0, enable) } func PtraceLwpInfo(pid int, info uintptr) (err error) { - return ptrace(PTRACE_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) + return ptrace(PT_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) } func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { @@ -550,11 +299,11 @@ func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceSetRegs(pid int, regs *Reg) (err error) { - return ptrace(PTRACE_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) + return ptrace(PT_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) } func PtraceSingleStep(pid int) (err error) { - return ptrace(PTRACE_SINGLESTEP, pid, 1, 0) + return ptrace(PT_STEP, pid, 1, 0) } /* @@ -570,6 +319,7 @@ func PtraceSingleStep(pid int) (err error) { //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) @@ -596,16 +346,12 @@ func PtraceSingleStep(pid int) (err error) { //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) -//sys fstat(fd int, stat *stat_freebsd11_t) (err error) -//sys fstat_freebsd12(fd int, stat *Stat_t) (err error) -//sys fstatat(fd int, path 
string, stat *stat_freebsd11_t, flags int) (err error) -//sys fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) -//sys fstatfs(fd int, stat *statfs_freebsd11_t) (err error) -//sys fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) -//sys getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) -//sys getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) +//sys getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) @@ -627,19 +373,16 @@ func PtraceSingleStep(pid int) (err error) { //sys Link(path string, link string) (err error) //sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) -//sys lstat(path string, stat *stat_freebsd11_t) (err error) //sys Mkdir(path string, mode uint32) (err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) -//sys mknod(path string, mode uint32, dev int) (err error) -//sys mknodat(fd int, path string, mode uint32, dev int) (err error) -//sys mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) +//sys Mknodat(fd int, path string, mode uint32, dev uint64) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) @@ -663,9 +406,7 @@ func PtraceSingleStep(pid int) (err error) { //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) -//sys stat(path string, stat *stat_freebsd11_t) (err error) -//sys statfs(path string, stat *statfs_freebsd11_t) (err error) -//sys statfs_freebsd12(path string, stat *Statfs_t) (err error) +//sys Statfs(path string, stat *Statfs_t) (err error) //sys Symlink(path string, link string) (err error) //sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 342fc32b1686..b11ede89a960 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -57,11 +57,11 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) + return ptrace(PT_GETFSBASE, pid, 
uintptr(unsafe.Pointer(fsbase)), 0) } func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)} + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index a32d5aa4aed4..9ed8eec6c287 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -57,11 +57,11 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) + return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) } func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 1e36d39abe01..f8ac98247905 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)} + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index a09a1537bd6f..8e932036ec37 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} - err = 
ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go new file mode 100644 index 000000000000..cbe12227896b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -0,0 +1,63 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go new file mode 100644 index 000000000000..4ffb64808d75 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -0,0 +1,22 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build hurd +// +build hurd + +package unix + +/* +#include +int ioctl(int, unsigned long int, uintptr_t); +*/ +import "C" + +func ioctl(fd int, req uint, arg uintptr) (err error) { + r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) + if r0 == -1 && er != nil { + err = er + } + return +} diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go new file mode 100644 index 000000000000..7cf54a3e4f10 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go @@ -0,0 +1,29 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build 386 && hurd +// +build 386,hurd + +package unix + +const ( + TIOCGETA = 0x62251713 +) + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 8d5f294c4250..87db5a6a8ccc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -10,8 +10,6 @@ package unix import ( - "fmt" - "runtime" "unsafe" ) @@ -20,10 +18,9 @@ func bytes2iovec(bs [][]byte) []Iovec { for i, b := range bs { iovecs[i].SetLen(len(b)) if len(b) > 0 { - // somehow Iovec.Base on illumos is (*int8), not (*byte) - iovecs[i].Base = (*int8)(unsafe.Pointer(&b[0])) + iovecs[i].Base = &b[0] } else { - iovecs[i].Base = (*int8)(unsafe.Pointer(&_zero)) + iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero)) } } return iovecs @@ -80,107 +77,3 @@ func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { } return } - -//sys putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) - -func Putmsg(fd int, cl []byte, data []byte, flags int) (err error) { - var clp, datap *strbuf - if len(cl) > 0 { - clp = &strbuf{ - Len: int32(len(cl)), - Buf: (*int8)(unsafe.Pointer(&cl[0])), - } - } - if len(data) > 0 { - datap = &strbuf{ - Len: int32(len(data)), - Buf: (*int8)(unsafe.Pointer(&data[0])), - } - } - return putmsg(fd, clp, datap, flags) -} - -//sys getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) - -func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags int, err error) { - var clp, datap *strbuf - if len(cl) > 0 { - clp = &strbuf{ - Maxlen: int32(len(cl)), - Buf: (*int8)(unsafe.Pointer(&cl[0])), - } - } - if len(data) > 0 { - datap = &strbuf{ - Maxlen: int32(len(data)), - Buf: (*int8)(unsafe.Pointer(&data[0])), - } - } - - if err = getmsg(fd, clp, datap, &flags); err != nil { - return nil, nil, 0, err - } - - if len(cl) > 0 { - retCl = cl[:clp.Len] - } - if len(data) > 0 { - retData = data[:datap.Len] - } - return retCl, retData, flags, nil -} - -func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) { - return ioctlRet(fd, req, uintptr(arg)) -} - -func IoctlSetString(fd int, req uint, val string) error { - bs := make([]byte, len(val)+1) - copy(bs[:len(bs)-1], val) - err := ioctl(fd, req, uintptr(unsafe.Pointer(&bs[0]))) - runtime.KeepAlive(&bs[0]) - return err -} - -// Lifreq Helpers - -func (l *Lifreq) SetName(name string) error { - if len(name) >= len(l.Name) { - return fmt.Errorf("name cannot be more than %d characters", len(l.Name)-1) - } - for i := range name { - l.Name[i] = int8(name[i]) - } - return nil -} - -func (l *Lifreq) SetLifruInt(d int) { - *(*int)(unsafe.Pointer(&l.Lifru[0])) = d -} - -func (l *Lifreq) GetLifruInt() int { - return *(*int)(unsafe.Pointer(&l.Lifru[0])) -} - -func (l *Lifreq) SetLifruUint(d uint) { - *(*uint)(unsafe.Pointer(&l.Lifru[0])) = d -} - -func (l *Lifreq) GetLifruUint() uint { - return *(*uint)(unsafe.Pointer(&l.Lifru[0])) -} - -func IoctlLifreq(fd int, req uint, l *Lifreq) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(l))) -} - -// Strioctl Helpers - -func (s *Strioctl) SetInt(i int) { - s.Len = int32(unsafe.Sizeof(i)) - s.Dp = (*int8)(unsafe.Pointer(&i)) -} - -func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) { - return ioctlRet(fd, req, 
uintptr(unsafe.Pointer(s))) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 5f28f8fdeda1..d839962e6633 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,6 +13,7 @@ package unix import ( "encoding/binary" + "strconv" "syscall" "time" "unsafe" @@ -233,7 +234,7 @@ func Futimesat(dirfd int, path string, tv []Timeval) error { func Futimes(fd int, tv []Timeval) (err error) { // Believe it or not, this is the best we can do on Linux // (and is what glibc does). - return Utimes("/proc/self/fd/"+itoa(fd), tv) + return Utimes("/proc/self/fd/"+strconv.Itoa(fd), tv) } const ImplementsGetwd = true @@ -366,6 +367,8 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, return } +//sys Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) + func Mkfifo(path string, mode uint32) error { return Mknod(path, mode|S_IFIFO, 0) } @@ -510,24 +513,24 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { // // Server example: // -// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) -// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ -// Channel: 1, -// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 -// }) -// _ = Listen(fd, 1) -// nfd, sa, _ := Accept(fd) -// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) -// Read(nfd, buf) +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 +// }) +// _ = Listen(fd, 1) +// nfd, sa, _ := Accept(fd) +// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) +// Read(nfd, buf) // // Client example: // -// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) -// _ = Connect(fd, &SockaddrRFCOMM{ -// Channel: 1, -// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 -// }) -// Write(fd, []byte(`hello`)) +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = Connect(fd, &SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 +// }) +// Write(fd, []byte(`hello`)) type SockaddrRFCOMM struct { // Addr represents a bluetooth address, byte ordering is little-endian. Addr [6]uint8 @@ -554,12 +557,12 @@ func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) { // The SockaddrCAN struct must be bound to the socket file descriptor // using Bind before the CAN socket can be used. // -// // Read one raw CAN frame -// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) -// addr := &SockaddrCAN{Ifindex: index} -// Bind(fd, addr) -// frame := make([]byte, 16) -// Read(fd, frame) +// // Read one raw CAN frame +// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) +// addr := &SockaddrCAN{Ifindex: index} +// Bind(fd, addr) +// frame := make([]byte, 16) +// Read(fd, frame) // // The full SocketCAN documentation can be found in the linux kernel // archives at: https://www.kernel.org/doc/Documentation/networking/can.txt @@ -630,13 +633,13 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { // Here is an example of using an AF_ALG socket with SHA1 hashing. // The initial socket setup process is as follows: // -// // Open a socket to perform SHA1 hashing. 
-// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0) -// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"} -// unix.Bind(fd, addr) -// // Note: unix.Accept does not work at this time; must invoke accept() -// // manually using unix.Syscall. -// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0) +// // Open a socket to perform SHA1 hashing. +// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0) +// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"} +// unix.Bind(fd, addr) +// // Note: unix.Accept does not work at this time; must invoke accept() +// // manually using unix.Syscall. +// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0) // // Once a file descriptor has been returned from Accept, it may be used to // perform SHA1 hashing. The descriptor is not safe for concurrent use, but @@ -645,39 +648,39 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { // When hashing a small byte slice or string, a single Write and Read may // be used: // -// // Assume hashfd is already configured using the setup process. -// hash := os.NewFile(hashfd, "sha1") -// // Hash an input string and read the results. Each Write discards -// // previous hash state. Read always reads the current state. -// b := make([]byte, 20) -// for i := 0; i < 2; i++ { -// io.WriteString(hash, "Hello, world.") -// hash.Read(b) -// fmt.Println(hex.EncodeToString(b)) -// } -// // Output: -// // 2ae01472317d1935a84797ec1983ae243fc6aa28 -// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// // Assume hashfd is already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash an input string and read the results. Each Write discards +// // previous hash state. Read always reads the current state. +// b := make([]byte, 20) +// for i := 0; i < 2; i++ { +// io.WriteString(hash, "Hello, world.") +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// } +// // Output: +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 // // For hashing larger byte slices, or byte streams such as those read from // a file or socket, use Sendto with MSG_MORE to instruct the kernel to update // the hash digest instead of creating a new one for a given chunk and finalizing it. // -// // Assume hashfd and addr are already configured using the setup process. -// hash := os.NewFile(hashfd, "sha1") -// // Hash the contents of a file. -// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz") -// b := make([]byte, 4096) -// for { -// n, err := f.Read(b) -// if err == io.EOF { -// break -// } -// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr) -// } -// hash.Read(b) -// fmt.Println(hex.EncodeToString(b)) -// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5 +// // Assume hashfd and addr are already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash the contents of a file. +// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz") +// b := make([]byte, 4096) +// for { +// n, err := f.Read(b) +// if err == io.EOF { +// break +// } +// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr) +// } +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5 // // For more information, see: http://www.chronox.de/crypto-API/crypto/userspace-if.html. 
type SockaddrALG struct { @@ -1497,19 +1500,13 @@ func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error //sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL //sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL -func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr - var rsa RawSockaddrAny - msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = &p[0] - iov.SetLen(len(p)) - } var dummy byte if len(oob) > 0 { - if len(p) == 0 { + if emptyIovecs(iov) { var sockType int sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) if err != nil { @@ -1517,53 +1514,36 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from } // receive at least one normal byte if sockType != SOCK_DGRAM { - iov.Base = &dummy - iov.SetLen(1) + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } } msg.Control = &oob[0] msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); err != nil { return } oobn = int(msg.Controllen) recvflags = int(msg.Flags) - // source address is only specified if the socket is unconnected - if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(fd, &rsa) - } - return -} - -func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { - _, err = SendmsgN(fd, p, oob, to, flags) return } -func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { - var ptr unsafe.Pointer - var salen _Socklen - if to != nil { - var err error - ptr, salen, err = to.sockaddr() - if err != nil { - return 0, err - } - } +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(ptr) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = &p[0] - iov.SetLen(len(p)) - } var dummy byte + var empty bool if len(oob) > 0 { - if len(p) == 0 { + empty = emptyIovecs(iov) + if empty { var sockType int sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) if err != nil { @@ -1571,19 +1551,23 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) } // send at least one normal byte if sockType != SOCK_DGRAM { - iov.Base = &dummy - iov.SetLen(1) + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } } msg.Control = &oob[0] msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil @@ -1846,6 +1830,9 @@ func Dup2(oldfd, newfd int) error { //sys Fremovexattr(fd int, attr string) (err error) //sys Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) //sys Fsync(fd int) (err error) +//sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) +//sys Fsopen(fsName string, flags int) (fd int, err error) +//sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) //sys Getdents(fd int, buf []byte) (n int, 
err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) @@ -1876,7 +1863,9 @@ func Getpgrp() (pid int) { //sys MemfdCreate(name string, flags int) (fd int, err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys MoveMount(fromDirfd int, fromPathName string, toDirfd int, toPathName string, flags int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys OpenTree(dfd int, fileName string, flags uint) (r int, err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT //sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 @@ -1904,17 +1893,28 @@ func PrctlRetInt(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uint return int(ret), nil } -// issue 1435. -// On linux Setuid and Setgid only affects the current thread, not the process. -// This does not match what most callers expect so we must return an error -// here rather than letting the caller think that the call succeeded. - func Setuid(uid int) (err error) { - return EOPNOTSUPP + return syscall.Setuid(uid) +} + +func Setgid(gid int) (err error) { + return syscall.Setgid(gid) +} + +func Setreuid(ruid, euid int) (err error) { + return syscall.Setreuid(ruid, euid) +} + +func Setregid(rgid, egid int) (err error) { + return syscall.Setregid(rgid, egid) } -func Setgid(uid int) (err error) { - return EOPNOTSUPP +func Setresuid(ruid, euid, suid int) (err error) { + return syscall.Setresuid(ruid, euid, suid) +} + +func Setresgid(rgid, egid, sgid int) (err error) { + return syscall.Setresgid(rgid, egid, sgid) } // SetfsgidRetGid sets fsgid for current thread and returns previous fsgid set. @@ -1973,36 +1973,46 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PREADV2 //sys pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PWRITEV2 -func bytes2iovec(bs [][]byte) []Iovec { - iovecs := make([]Iovec, len(bs)) - for i, b := range bs { - iovecs[i].SetLen(len(b)) +// minIovec is the size of the small initial allocation used by +// Readv, Writev, etc. +// +// This small allocation gets stack allocated, which lets the +// common use case of len(iovs) <= minIovs avoid more expensive +// heap allocations. +const minIovec = 8 + +// appendBytes converts bs to Iovecs and appends them to vecs. +func appendBytes(vecs []Iovec, bs [][]byte) []Iovec { + for _, b := range bs { + var v Iovec + v.SetLen(len(b)) if len(b) > 0 { - iovecs[i].Base = &b[0] + v.Base = &b[0] } else { - iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero)) + v.Base = (*byte)(unsafe.Pointer(&_zero)) } + vecs = append(vecs, v) } - return iovecs + return vecs } -// offs2lohi splits offs into its lower and upper unsigned long. On 64-bit -// systems, hi will always be 0. On 32-bit systems, offs will be split in half. -// preadv/pwritev chose this calling convention so they don't need to add a -// padding-register for alignment on ARM. +// offs2lohi splits offs into its low and high order bits. 
func offs2lohi(offs int64) (lo, hi uintptr) { - return uintptr(offs), uintptr(uint64(offs) >> SizeofLong) + const longBits = SizeofLong * 8 + return uintptr(offs), uintptr(uint64(offs) >> longBits) } func Readv(fd int, iovs [][]byte) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) n, err = readv(fd, iovecs) readvRacedetect(iovecs, n, err) return n, err } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) lo, hi := offs2lohi(offset) n, err = preadv(fd, iovecs, lo, hi) readvRacedetect(iovecs, n, err) @@ -2010,7 +2020,8 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Preadv2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) lo, hi := offs2lohi(offset) n, err = preadv2(fd, iovecs, lo, hi, flags) readvRacedetect(iovecs, n, err) @@ -2037,7 +2048,8 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { } func Writev(fd int, iovs [][]byte) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } @@ -2047,7 +2059,8 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { } func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } @@ -2058,7 +2071,8 @@ func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Pwritev2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } @@ -2201,7 +2215,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { gid = Getgid() } - if uint32(gid) == st.Gid || isGroupMember(gid) { + if uint32(gid) == st.Gid || isGroupMember(int(st.Gid)) { fmode = (st.Mode >> 3) & 7 } else { fmode = st.Mode & 7 @@ -2253,7 +2267,7 @@ func (fh *FileHandle) Bytes() []byte { if n == 0 { return nil } - return (*[1 << 30]byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&fh.fileHandle.Type)) + 4))[:n:n] + return unsafe.Slice((*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&fh.fileHandle.Type))+4)), n) } // NameToHandleAt wraps the name_to_handle_at system call; it obtains @@ -2316,6 +2330,7 @@ type RemoteIovec struct { //sys PidfdOpen(pid int, flags int) (fd int, err error) = SYS_PIDFD_OPEN //sys PidfdGetfd(pidfd int, targetfd int, flags int) (fd int, err error) = SYS_PIDFD_GETFD +//sys PidfdSendSignal(pidfd int, sig Signal, info *Siginfo, flags int) (err error) = SYS_PIDFD_SEND_SIGNAL //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) //sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) @@ -2368,6 +2383,16 @@ func Setitimer(which ItimerWhich, it Itimerval) (Itimerval, error) { return prev, nil } +//sysnb rtSigprocmask(how int, set *Sigset_t, oldset *Sigset_t, sigsetsize uintptr) (err error) = SYS_RT_SIGPROCMASK + +func PthreadSigmask(how int, set, oldset *Sigset_t) error { + if oldset != nil { + // Explicitly clear in case Sigset_t is larger than _C__NSIG. 
+ *oldset = Sigset_t{} + } + return rtSigprocmask(how, set, oldset, _C__NSIG/8) +} + /* * Unimplemented */ @@ -2426,7 +2451,6 @@ func Setitimer(which ItimerWhich, it Itimerval) (Itimerval, error) { // RestartSyscall // RtSigaction // RtSigpending -// RtSigprocmask // RtSigqueueinfo // RtSigreturn // RtSigsuspend @@ -2464,5 +2488,4 @@ func Setitimer(which ItimerWhich, it Itimerval) (Itimerval, error) { // Vfork // Vhangup // Vserver -// Waitid // _Sysctl diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index d44b8ad5339d..ff5b5899d6db 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -35,16 +35,12 @@ func setTimeval(sec, usec int64) Timeval { //sys Iopl(level int) (err error) //sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32 //sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32 -//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 -//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 -//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index bd21d93bf84a..9b2703532989 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -28,9 +28,10 @@ func Lstat(path string, stat *Stat_t) (err error) { return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) } +//sys MemfdSecret(flags int) (fd int, err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK @@ -45,11 +46,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb 
Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 343c91f6b31d..856ad1d635cf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -62,10 +62,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32 //sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32 -//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 -//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 -//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 @@ -96,8 +92,8 @@ func Utime(path string, buf *Utimbuf) error { //sys utimes(path string, times *[2]Timeval) (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 //sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 8c562868480f..6422704bc52a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -22,8 +22,9 @@ import "unsafe" //sysnb getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys MemfdSecret(flags int) (fd int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK @@ -38,11 +39,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n 
int64, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go new file mode 100644 index 000000000000..59dab510e97c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -0,0 +1,222 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 && linux +// +build loong64,linux + +package unix + +import "unsafe" + +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getuid() (uid int) +//sys Listen(s int, n int) (err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + var ts *Timespec + if timeout != nil { + ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + } + return Pselect(nfd, r, w, e, ts, nil) +} + +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) + +func timespecFromStatxTimestamp(x StatxTimestamp) Timespec { + return Timespec{ + Sec: x.Sec, + Nsec: int64(x.Nsec), + } +} + +func Fstatat(fd int, path string, stat *Stat_t, flags int) error { + var r Statx_t + // Do it the glibc way, add AT_NO_AUTOMOUNT. + if err := Statx(fd, path, AT_NO_AUTOMOUNT|flags, STATX_BASIC_STATS, &r); err != nil { + return err + } + + stat.Dev = Mkdev(r.Dev_major, r.Dev_minor) + stat.Ino = r.Ino + stat.Mode = uint32(r.Mode) + stat.Nlink = r.Nlink + stat.Uid = r.Uid + stat.Gid = r.Gid + stat.Rdev = Mkdev(r.Rdev_major, r.Rdev_minor) + // hope we don't get to process files so large to overflow these size + // fields... 
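	// (Editor's note, not upstream text: Statx_t reports Size and Blocks as
	// uint64 and Blksize as uint32, while Stat_t stores them as int64/int32,
	// so the conversions below can in principle overflow for pathological
	// values. A hedged, hypothetical checked variant would look roughly like:
	//
	//	if r.Size > math.MaxInt64 {
	//		return EOVERFLOW
	//	}
	//	stat.Size = int64(r.Size)
	// )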
+ stat.Size = int64(r.Size) + stat.Blksize = int32(r.Blksize) + stat.Blocks = int64(r.Blocks) + stat.Atim = timespecFromStatxTimestamp(r.Atime) + stat.Mtim = timespecFromStatxTimestamp(r.Mtime) + stat.Ctim = timespecFromStatxTimestamp(r.Ctime) + + return nil +} + +func Fstat(fd int, stat *Stat_t) (err error) { + return Fstatat(fd, "", stat, AT_EMPTY_PATH) +} + +func Stat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, 0) +} + +func Lchown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW) +} + +func Lstat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) +} + +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + return ENOSYS +} + +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +//sysnb Gettimeofday(tv *Timeval) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + err = Prlimit(0, resource, nil, rlim) + return +} + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + err = Prlimit(0, resource, rlim, nil) + return +} + +func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(dirfd, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func Time(t *Time_t) (Time_t, error) { + var tv Timeval + err := Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +func Utime(path string, buf *Utimbuf) error { + tv := []Timeval{ + {Sec: buf.Actime}, + {Sec: buf.Modtime}, + } + return Utimes(path, tv) +} + +func utimes(path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + 
NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func (r *PtraceRegs) PC() uint64 { return r.Era } + +func (r *PtraceRegs) SetPC(era uint64) { r.Era = era } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + +func Pause() error { + _, err := ppoll(nil, 0, nil, nil) + return err +} + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0) +} + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. + cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index f0b138002c16..bfef09a39eb0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -21,8 +21,8 @@ package unix //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK @@ -37,11 +37,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index e6163c30fe94..ab302509663e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -25,17 +25,13 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sysnb Getuid() (uid int) //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) 
(err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index 4740e80a8ec9..eac1cf1acc86 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -27,17 +27,13 @@ import ( //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 78bc9166ef6f..4df56616b8f1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -26,19 +26,15 @@ package unix //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd 
int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 3d6c4eb06810..5f4243dea2c3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -22,8 +22,9 @@ import "unsafe" //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys MemfdSecret(flags int) (fd int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { @@ -37,11 +38,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 89ce84a41680..d0a7d4066851 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -26,19 +26,15 @@ import ( //sys Lchown(path string, uid int, gid int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys sendfile(outfd int, infd int, offset 
*int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 35bdb098c5ba..f5c793be26d4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -23,19 +23,15 @@ package unix //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 696fed496f68..35a3ad758f59 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -110,6 +110,20 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } +func SysctlUvmexp(name string) (*Uvmexp, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + n := uintptr(SizeofUvmexp) + var u Uvmexp + if err := sysctl(mib, (*byte)(unsafe.Pointer(&u)), &n, nil, 0); err != nil { + return nil, err + } + return &u, nil +} + func Pipe(p []int) (err error) { return Pipe2(p, 0) } @@ -163,11 +177,6 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return -1, ENOSYS } -func setattrlistTimes(path string, times []Timespec, flags int) error { - // used on Darwin for UtimesNano - return ENOSYS -} - //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -250,6 +259,7 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys Chmod(path string, mode 
uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) @@ -313,8 +323,8 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 11b1d419da91..9b67b908e5f9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -81,6 +81,7 @@ func Pipe(p []int) (err error) { } //sysnb pipe2(p *[2]_C_int, flags int) (err error) + func Pipe2(p []int, flags int) error { if len(p) != 2 { return EINVAL @@ -95,6 +96,7 @@ func Pipe2(p []int, flags int) error { } //sys Getdents(fd int, buf []byte) (n int, err error) + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { n, err = Getdents(fd, buf) if err != nil || basep == nil { @@ -149,11 +151,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -func setattrlistTimes(path string, times []Timespec, flags int) error { - // used on Darwin for UtimesNano - return ENOSYS -} - //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -223,6 +220,7 @@ func Uname(uname *Utsname) error { //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) @@ -274,8 +272,8 @@ func Uname(uname *Utsname) error { //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go new file mode 100644 index 000000000000..04aa43f41b25 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -0,0 +1,27 @@ +// Copyright 2022 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build openbsd +// +build openbsd + +package unix + +import _ "unsafe" + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall syscall.syscall +//go:linkname syscall_syscall6 syscall.syscall6 +//go:linkname syscall_syscall10 syscall.syscall10 +//go:linkname syscall_rawSyscall syscall.rawSyscall +//go:linkname syscall_rawSyscall6 syscall.rawSyscall6 + +func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) { + return syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, 0) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go index 30f285343ee4..1378489f8d7f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go @@ -26,6 +26,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go new file mode 100644 index 000000000000..c2796139c013 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of openbsd/ppc64 the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go new file mode 100644 index 000000000000..23199a7ff624 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of openbsd/riscv64 the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 5c813921e855..07ac56109a05 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -451,77 +451,59 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_recvmsg -func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr - var rsa RawSockaddrAny - msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = (*int8)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } - var dummy int8 + var dummy byte if len(oob) > 0 { // receive at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Accrightslen = int32(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); n == -1 { return } oobn = int(msg.Accrightslen) - // source address is only specified if the socket is unconnected - if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(fd, &rsa) - } - return -} - -func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { - _, err = SendmsgN(fd, p, oob, to, flags) return } //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_sendmsg -func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { - var ptr unsafe.Pointer - var salen _Socklen - if to != nil { - ptr, salen, err = to.sockaddr() - if err != nil { - return 0, err - } - } +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(ptr)) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = (*int8)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } - var dummy int8 + var dummy byte + var empty bool if len(oob) > 0 { // send at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + empty = emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Accrightslen = int32(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + 
if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil @@ -608,6 +590,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Creat(path string, mode uint32) (fd int, err error) //sys Dup(fd int) (nfd int, err error) @@ -636,6 +619,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Getpriority(which int, who int) (n int, err error) //sysnb Getrlimit(which int, lim *Rlimit) (err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Getuid() (uid int) //sys Kill(pid int, signum syscall.Signal) (err error) @@ -661,8 +645,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Rename(from string, to string) (err error) @@ -755,8 +739,20 @@ type fileObjCookie struct { type EventPort struct { port int mu sync.Mutex - fds map[uintptr]interface{} + fds map[uintptr]*fileObjCookie paths map[string]*fileObjCookie + // The user cookie presents an interesting challenge from a memory management perspective. + // There are two paths by which we can discover that it is no longer in use: + // 1. The user calls port_dissociate before any events fire + // 2. An event fires and we return it to the user + // The tricky situation is if the event has fired in the kernel but + // the user hasn't requested/received it yet. + // If the user wants to port_dissociate before the event has been processed, + // we should handle things gracefully. To do so, we need to keep an extra + // reference to the cookie around until the event is processed + // thus the otherwise seemingly extraneous "cookies" map + // The key of this map is a pointer to the corresponding fCookie + cookies map[*fileObjCookie]struct{} } // PortEvent is an abstraction of the port_event C struct. @@ -780,9 +776,10 @@ func NewEventPort() (*EventPort, error) { return nil, err } e := &EventPort{ - port: port, - fds: make(map[uintptr]interface{}), - paths: make(map[string]*fileObjCookie), + port: port, + fds: make(map[uintptr]*fileObjCookie), + paths: make(map[string]*fileObjCookie), + cookies: make(map[*fileObjCookie]struct{}), } return e, nil } @@ -797,9 +794,14 @@ func NewEventPort() (*EventPort, error) { func (e *EventPort) Close() error { e.mu.Lock() defer e.mu.Unlock() + err := Close(e.port) + if err != nil { + return err + } e.fds = nil e.paths = nil - return Close(e.port) + e.cookies = nil + return nil } // PathIsWatched checks to see if path is associated with this EventPort. 
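// Editor's aside between hunks, not part of the upstream patch: the cookies
// map introduced above keeps every *fileObjCookie handed to port_associate
// reachable from Go for as long as the kernel may still deliver an event
// that references it, so the pointer echoed back by the port is never freed
// underneath the caller. A minimal, hypothetical sketch of that pinning
// pattern follows; every name in it is invented for illustration.
package main

import "sync"

type cookie struct{ payload interface{} }

type pinner struct {
	mu   sync.Mutex
	live map[*cookie]struct{} // extra reference while the kernel may still return the pointer
}

func (p *pinner) pin(c *cookie) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.live == nil {
		p.live = make(map[*cookie]struct{})
	}
	p.live[c] = struct{}{}
}

func (p *pinner) unpin(c *cookie) {
	p.mu.Lock()
	defer p.mu.Unlock()
	delete(p.live, c)
}

func main() {
	var p pinner
	c := &cookie{payload: "user data"}
	p.pin(c)   // pin before handing the pointer across the syscall boundary
	p.unpin(c) // unpin once the event has been delivered back to Go
}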
@@ -826,16 +828,16 @@ func (e *EventPort) AssociatePath(path string, stat os.FileInfo, events int, coo if _, found := e.paths[path]; found { return fmt.Errorf("%v is already associated with this Event Port", path) } - fobj, err := createFileObj(path, stat) + fCookie, err := createFileObjCookie(path, stat, cookie) if err != nil { return err } - fCookie := &fileObjCookie{fobj, cookie} - _, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fobj)), events, (*byte)(unsafe.Pointer(&fCookie.cookie))) + _, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fCookie.fobj)), events, (*byte)(unsafe.Pointer(fCookie))) if err != nil { return err } e.paths[path] = fCookie + e.cookies[fCookie] = struct{}{} return nil } @@ -848,11 +850,19 @@ func (e *EventPort) DissociatePath(path string) error { return fmt.Errorf("%v is not associated with this Event Port", path) } _, err := port_dissociate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(f.fobj))) - if err != nil { + // If the path is no longer associated with this event port (ENOENT) + // we should delete it from our map. We can still return ENOENT to the caller. + // But we need to save the cookie + if err != nil && err != ENOENT { return err } + if err == nil { + // dissociate was successful, safe to delete the cookie + fCookie := e.paths[path] + delete(e.cookies, fCookie) + } delete(e.paths, path) - return nil + return err } // AssociateFd wraps calls to port_associate(3c) on file descriptors. @@ -862,12 +872,16 @@ func (e *EventPort) AssociateFd(fd uintptr, events int, cookie interface{}) erro if _, found := e.fds[fd]; found { return fmt.Errorf("%v is already associated with this Event Port", fd) } - pcookie := &cookie - _, err := port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(pcookie))) + fCookie, err := createFileObjCookie("", nil, cookie) + if err != nil { + return err + } + _, err = port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(fCookie))) if err != nil { return err } - e.fds[fd] = pcookie + e.fds[fd] = fCookie + e.cookies[fCookie] = struct{}{} return nil } @@ -880,28 +894,37 @@ func (e *EventPort) DissociateFd(fd uintptr) error { return fmt.Errorf("%v is not associated with this Event Port", fd) } _, err := port_dissociate(e.port, PORT_SOURCE_FD, fd) - if err != nil { + if err != nil && err != ENOENT { return err } + if err == nil { + // dissociate was successful, safe to delete the cookie + fCookie := e.fds[fd] + delete(e.cookies, fCookie) + } delete(e.fds, fd) - return nil + return err } -func createFileObj(name string, stat os.FileInfo) (*fileObj, error) { - fobj := new(fileObj) - bs, err := ByteSliceFromString(name) - if err != nil { - return nil, err - } - fobj.Name = (*int8)(unsafe.Pointer(&bs[0])) - s := stat.Sys().(*syscall.Stat_t) - fobj.Atim.Sec = s.Atim.Sec - fobj.Atim.Nsec = s.Atim.Nsec - fobj.Mtim.Sec = s.Mtim.Sec - fobj.Mtim.Nsec = s.Mtim.Nsec - fobj.Ctim.Sec = s.Ctim.Sec - fobj.Ctim.Nsec = s.Ctim.Nsec - return fobj, nil +func createFileObjCookie(name string, stat os.FileInfo, cookie interface{}) (*fileObjCookie, error) { + fCookie := new(fileObjCookie) + fCookie.cookie = cookie + if name != "" && stat != nil { + fCookie.fobj = new(fileObj) + bs, err := ByteSliceFromString(name) + if err != nil { + return nil, err + } + fCookie.fobj.Name = (*int8)(unsafe.Pointer(&bs[0])) + s := stat.Sys().(*syscall.Stat_t) + fCookie.fobj.Atim.Sec = s.Atim.Sec + fCookie.fobj.Atim.Nsec = s.Atim.Nsec + fCookie.fobj.Mtim.Sec = s.Mtim.Sec + 
fCookie.fobj.Mtim.Nsec = s.Mtim.Nsec + fCookie.fobj.Ctim.Sec = s.Ctim.Sec + fCookie.fobj.Ctim.Nsec = s.Ctim.Nsec + } + return fCookie, nil } // GetOne wraps port_get(3c) and returns a single PortEvent. @@ -912,24 +935,52 @@ func (e *EventPort) GetOne(t *Timespec) (*PortEvent, error) { return nil, err } p := new(PortEvent) - p.Events = pe.Events - p.Source = pe.Source e.mu.Lock() defer e.mu.Unlock() - switch pe.Source { + err = e.peIntToExt(pe, p) + if err != nil { + return nil, err + } + return p, nil +} + +// peIntToExt converts a cgo portEvent struct into the friendlier PortEvent +// NOTE: Always call this function while holding the e.mu mutex +func (e *EventPort) peIntToExt(peInt *portEvent, peExt *PortEvent) error { + if e.cookies == nil { + return fmt.Errorf("this EventPort is already closed") + } + peExt.Events = peInt.Events + peExt.Source = peInt.Source + fCookie := (*fileObjCookie)(unsafe.Pointer(peInt.User)) + _, found := e.cookies[fCookie] + + if !found { + panic("unexpected event port address; may be due to kernel bug; see https://go.dev/issue/54254") + } + peExt.Cookie = fCookie.cookie + delete(e.cookies, fCookie) + + switch peInt.Source { case PORT_SOURCE_FD: - p.Fd = uintptr(pe.Object) - cookie := (*interface{})(unsafe.Pointer(pe.User)) - p.Cookie = *cookie - delete(e.fds, p.Fd) + peExt.Fd = uintptr(peInt.Object) + // Only remove the fds entry if it exists and this cookie matches + if fobj, ok := e.fds[peExt.Fd]; ok { + if fobj == fCookie { + delete(e.fds, peExt.Fd) + } + } case PORT_SOURCE_FILE: - p.fobj = (*fileObj)(unsafe.Pointer(uintptr(pe.Object))) - p.Path = BytePtrToString((*byte)(unsafe.Pointer(p.fobj.Name))) - cookie := (*interface{})(unsafe.Pointer(pe.User)) - p.Cookie = *cookie - delete(e.paths, p.Path) + peExt.fobj = fCookie.fobj + peExt.Path = BytePtrToString((*byte)(unsafe.Pointer(peExt.fobj.Name))) + // Only remove the paths entry if it exists and this cookie matches + if fobj, ok := e.paths[peExt.Path]; ok { + if fobj == fCookie { + delete(e.paths, peExt.Path) + } + } } - return p, nil + return nil } // Pending wraps port_getn(3c) and returns how many events are pending. @@ -953,7 +1004,7 @@ func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error) got := uint32(min) max := uint32(len(s)) var err error - ps := make([]portEvent, max, max) + ps := make([]portEvent, max) _, err = port_getn(e.port, &ps[0], max, &got, timeout) // got will be trustworthy with ETIME, but not any other error. if err != nil && err != ETIME { @@ -961,22 +1012,122 @@ func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error) } e.mu.Lock() defer e.mu.Unlock() + valid := 0 for i := 0; i < int(got); i++ { - s[i].Events = ps[i].Events - s[i].Source = ps[i].Source - switch ps[i].Source { - case PORT_SOURCE_FD: - s[i].Fd = uintptr(ps[i].Object) - cookie := (*interface{})(unsafe.Pointer(ps[i].User)) - s[i].Cookie = *cookie - delete(e.fds, s[i].Fd) - case PORT_SOURCE_FILE: - s[i].fobj = (*fileObj)(unsafe.Pointer(uintptr(ps[i].Object))) - s[i].Path = BytePtrToString((*byte)(unsafe.Pointer(s[i].fobj.Name))) - cookie := (*interface{})(unsafe.Pointer(ps[i].User)) - s[i].Cookie = *cookie - delete(e.paths, s[i].Path) + err2 := e.peIntToExt(&ps[i], &s[i]) + if err2 != nil { + if valid == 0 && err == nil { + // If err2 is the only error and there are no valid events + // to return, return it to the caller. 
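				// (Editor's note, not upstream text: err may already be
				// ETIME from port_getn at this point; in that case it is
				// preserved and err2 is not propagated.)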
+ err = err2 + } + break + } + valid = i + 1 + } + return valid, err +} + +//sys putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) + +func Putmsg(fd int, cl []byte, data []byte, flags int) (err error) { + var clp, datap *strbuf + if len(cl) > 0 { + clp = &strbuf{ + Len: int32(len(cl)), + Buf: (*int8)(unsafe.Pointer(&cl[0])), + } + } + if len(data) > 0 { + datap = &strbuf{ + Len: int32(len(data)), + Buf: (*int8)(unsafe.Pointer(&data[0])), + } + } + return putmsg(fd, clp, datap, flags) +} + +//sys getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) + +func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags int, err error) { + var clp, datap *strbuf + if len(cl) > 0 { + clp = &strbuf{ + Maxlen: int32(len(cl)), + Buf: (*int8)(unsafe.Pointer(&cl[0])), + } + } + if len(data) > 0 { + datap = &strbuf{ + Maxlen: int32(len(data)), + Buf: (*int8)(unsafe.Pointer(&data[0])), } } - return int(got), err + + if err = getmsg(fd, clp, datap, &flags); err != nil { + return nil, nil, 0, err + } + + if len(cl) > 0 { + retCl = cl[:clp.Len] + } + if len(data) > 0 { + retData = data[:datap.Len] + } + return retCl, retData, flags, nil +} + +func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) { + return ioctlRet(fd, req, uintptr(arg)) +} + +func IoctlSetString(fd int, req uint, val string) error { + bs := make([]byte, len(val)+1) + copy(bs[:len(bs)-1], val) + err := ioctl(fd, req, uintptr(unsafe.Pointer(&bs[0]))) + runtime.KeepAlive(&bs[0]) + return err +} + +// Lifreq Helpers + +func (l *Lifreq) SetName(name string) error { + if len(name) >= len(l.Name) { + return fmt.Errorf("name cannot be more than %d characters", len(l.Name)-1) + } + for i := range name { + l.Name[i] = int8(name[i]) + } + return nil +} + +func (l *Lifreq) SetLifruInt(d int) { + *(*int)(unsafe.Pointer(&l.Lifru[0])) = d +} + +func (l *Lifreq) GetLifruInt() int { + return *(*int)(unsafe.Pointer(&l.Lifru[0])) +} + +func (l *Lifreq) SetLifruUint(d uint) { + *(*uint)(unsafe.Pointer(&l.Lifru[0])) = d +} + +func (l *Lifreq) GetLifruUint() uint { + return *(*uint)(unsafe.Pointer(&l.Lifru[0])) +} + +func IoctlLifreq(fd int, req uint, l *Lifreq) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(l))) +} + +// Strioctl Helpers + +func (s *Strioctl) SetInt(i int) { + s.Len = int32(unsafe.Sizeof(i)) + s.Dp = (*int8)(unsafe.Pointer(&i)) +} + +func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) { + return ioctlRet(fd, req, uintptr(unsafe.Pointer(s))) } diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index cf296a2433a9..a386f8897df3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -13,8 +13,6 @@ import ( "sync" "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) var ( @@ -117,11 +115,7 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d } // Use unsafe to convert addr into a []byte. - var b []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b)) - hdr.Data = unsafe.Pointer(addr) - hdr.Cap = length - hdr.Len = length + b := unsafe.Slice((*byte)(unsafe.Pointer(addr)), length) // Register mapping in m and return it. 
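	// (Editor's note, not upstream text: the unsafe.Slice conversion just
	// above is the Go 1.17+ replacement for hand-building a slice header via
	// the former golang.org/x/sys/internal/unsafeheader helper; it returns a
	// []byte whose length and capacity are both `length`, backed directly by
	// the mapped region, e.g. data := unsafe.Slice(ptr, n).)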
p := &b[cap(b)-1] @@ -177,6 +171,30 @@ func Write(fd int, p []byte) (n int, err error) { return } +func Pread(fd int, p []byte, offset int64) (n int, err error) { + n, err = pread(fd, p, offset) + if raceenabled { + if n > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), n) + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } + } + return +} + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = pwrite(fd, p, offset) + if raceenabled && n > 0 { + raceReadRange(unsafe.Pointer(&p[0]), n) + } + return +} + // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. var SocketDisableIPv6 bool @@ -313,16 +331,142 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { return } +// Recvmsg receives a message from a socket using the recvmsg system call. The +// received non-control data will be written to p, and any "out of band" +// control data will be written to oob. The flags are passed to recvmsg. +// +// The results are: +// - n is the number of non-control data bytes read into p +// - oobn is the number of control data bytes read into oob; this may be interpreted using [ParseSocketControlMessage] +// - recvflags is flags returned by recvmsg +// - from is the address of the sender +// +// If the underlying socket type is not SOCK_DGRAM, a received message +// containing oob data and a single '\0' of non-control data is treated as if +// the message contained only control data, i.e. n will be zero on return. +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var iov [1]Iovec + if len(p) > 0 { + iov[0].Base = &p[0] + iov[0].SetLen(len(p)) + } + var rsa RawSockaddrAny + n, oobn, recvflags, err = recvmsgRaw(fd, iov[:], oob, flags, &rsa) + // source address is only specified if the socket is unconnected + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(fd, &rsa) + } + return +} + +// RecvmsgBuffers receives a message from a socket using the recvmsg system +// call. This function is equivalent to Recvmsg, but non-control data read is +// scattered into the buffers slices. +func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + iov := make([]Iovec, len(buffers)) + for i := range buffers { + if len(buffers[i]) > 0 { + iov[i].Base = &buffers[i][0] + iov[i].SetLen(len(buffers[i])) + } else { + iov[i].Base = (*byte)(unsafe.Pointer(&_zero)) + } + } + var rsa RawSockaddrAny + n, oobn, recvflags, err = recvmsgRaw(fd, iov, oob, flags, &rsa) + if err == nil && rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(fd, &rsa) + } + return +} + +// Sendmsg sends a message on a socket to an address using the sendmsg system +// call. This function is equivalent to SendmsgN, but does not return the +// number of bytes actually sent. +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +// SendmsgN sends a message on a socket to an address using the sendmsg system +// call. p contains the non-control data to send, and oob contains the "out of +// band" control data. The flags are passed to sendmsg. The number of +// non-control bytes actually written to the socket is returned. +// +// Some socket types do not support sending control data without accompanying +// non-control data. 
If p is empty, and oob contains control data, and the +// underlying socket type is not SOCK_DGRAM, p will be treated as containing a +// single '\0' and the return value will indicate zero bytes sent. +// +// The Go function Recvmsg, if called with an empty p and a non-empty oob, +// will read and ignore this additional '\0'. If the message is received by +// code that does not use Recvmsg, or that does not use Go at all, that code +// will need to be written to expect and ignore the additional '\0'. +// +// If you need to send non-empty oob with p actually empty, and if the +// underlying socket type supports it, you can do so via a raw system call as +// follows: +// +// msg := &unix.Msghdr{ +// Control: &oob[0], +// } +// msg.SetControllen(len(oob)) +// n, _, errno := unix.Syscall(unix.SYS_SENDMSG, uintptr(fd), uintptr(unsafe.Pointer(msg)), flags) +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var iov [1]Iovec + if len(p) > 0 { + iov[0].Base = &p[0] + iov[0].SetLen(len(p)) + } + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + return sendmsgN(fd, iov[:], oob, ptr, salen, flags) +} + +// SendmsgBuffers sends a message on a socket to an address using the sendmsg +// system call. This function is equivalent to SendmsgN, but the non-control +// data is gathered from buffers. +func SendmsgBuffers(fd int, buffers [][]byte, oob []byte, to Sockaddr, flags int) (n int, err error) { + iov := make([]Iovec, len(buffers)) + for i := range buffers { + if len(buffers[i]) > 0 { + iov[i].Base = &buffers[i][0] + iov[i].SetLen(len(buffers[i])) + } else { + iov[i].Base = (*byte)(unsafe.Pointer(&_zero)) + } + } + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + return sendmsgN(fd, iov, oob, ptr, salen, flags) +} + func Send(s int, buf []byte, flags int) (err error) { return sendto(s, buf, flags, nil, 0) } func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) { - ptr, n, err := to.sockaddr() - if err != nil { - return err + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return err + } } - return sendto(fd, p, flags, ptr, n) + return sendto(fd, p, flags, ptr, salen) } func SetsockoptByte(fd, level, opt int, value byte) (err error) { @@ -433,3 +577,13 @@ func Lutimes(path string, tv []Timeval) error { } return UtimesNanoAt(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW) } + +// emptyIovec reports whether there are no bytes in the slice of Iovec. +func emptyIovecs(iov []Iovec) bool { + for i := range iov { + if iov[i].Len > 0 { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index 5898e9a52b75..b6919ca580e7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -2,11 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
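// Editor's note, not upstream text: the constraint rewrite below uses
// //go:build expression grouping to scope the ppc64/ppc64le exclusion to
// linux only, roughly
//
//	//go:build (darwin || ... || (linux && !ppc64 && !ppc64le) || ...) && gc
//
// whereas the old form excluded this file on every ppc64 and ppc64le target
// among the listed systems.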
-//go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && gc && !ppc64le && !ppc64 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc +// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris // +build gc -// +build !ppc64le -// +build !ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index f8616f454ec6..68b2f3e1cd0a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -9,8 +9,10 @@ package unix import ( "bytes" + "fmt" "runtime" "sort" + "strings" "sync" "syscall" "unsafe" @@ -55,7 +57,13 @@ func (d *Dirent) NameString() string { if d == nil { return "" } - return string(d.Name[:d.Namlen]) + s := string(d.Name[:]) + idx := strings.IndexByte(s, 0) + if idx == -1 { + return s + } else { + return s[:idx] + } } func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { @@ -1230,6 +1238,14 @@ func Readdir(dir uintptr) (*Dirent, error) { return &ent, err } +func readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { + r0, _, e1 := syscall_syscall(SYS___READDIR_R_A, dirp, uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + if int64(r0) == -1 { + err = errnoErr(Errno(e1)) + } + return +} + func Closedir(dir uintptr) error { _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) if e != 0 { @@ -1821,3 +1837,158 @@ func Unmount(name string, mtm int) (err error) { } return err } + +func fdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + // w_ctrl() + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, + []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + // __e2a_l() + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, + []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) + return string(buffer[:zb]), nil + } + // __errno() + errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, + []uintptr{})))) + // __errno2() + errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, + []uintptr{})) + // strerror_r() + ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, + []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) + } else { + return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) + } +} + +func direntLeToDirentUnix(dirent *direntLE, dir uintptr, path string) (Dirent, error) { + var d Dirent + + d.Ino = uint64(dirent.Ino) + offset, err := Telldir(dir) + if err != nil { + return d, err + } + + d.Off = int64(offset) + s := string(bytes.Split(dirent.Name[:], []byte{0})[0]) + copy(d.Name[:], s) + + d.Reclen = uint16(24 + len(d.NameString())) + var st Stat_t + path = path + "/" + s + err = Lstat(path, &st) + if err != nil { + return d, err + } + + d.Type = uint8(st.Mode >> 24) + return d, err +} + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + // Simulation of Getdirentries port from the Darwin implementation. 
+ // COMMENTS FROM DARWIN: + // It's not the full required semantics, but should handle the case + // of calling Getdirentries or ReadDirent repeatedly. + // It won't handle assigning the results of lseek to *basep, or handle + // the directory being edited underfoot. + + skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + return 0, err + } + + // Get path from fd to avoid unavailable call (fdopendir) + path, err := fdToPath(fd) + if err != nil { + return 0, err + } + d, err := Opendir(path) + if err != nil { + return 0, err + } + defer Closedir(d) + + var cnt int64 + for { + var entryLE direntLE + var entrypLE *direntLE + e := readdir_r(d, &entryLE, &entrypLE) + if e != nil { + return n, e + } + if entrypLE == nil { + break + } + if skip > 0 { + skip-- + cnt++ + continue + } + + // Dirent on zos has a different structure + entry, e := direntLeToDirentUnix(&entryLE, d, path) + if e != nil { + return n, e + } + + reclen := int(entry.Reclen) + if reclen > len(buf) { + // Not enough room. Return for now. + // The counter will let us know where we should start up again. + // Note: this strategy for suspending in the middle and + // restarting is O(n^2) in the length of the directory. Oh well. + break + } + + // Copy entry into return buffer. + s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen) + copy(buf, s) + + buf = buf[reclen:] + n += reclen + cnt++ + } + // Set the seek offset of the input fd to record + // how many files we've already returned. + _, err = Seek(fd, cnt, 0 /* SEEK_SET */) + if err != nil { + return n, err + } + + return n, nil +} + +func ReadDirent(fd int, buf []byte) (n int, err error) { + var base = (*uintptr)(unsafe.Pointer(new(uint64))) + return Getdirentries(fd, buf, base) +} + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false + } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true +} diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 0bb4c8de557b..5bb41d17bc47 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -7,11 +7,7 @@ package unix -import ( - "unsafe" - - "golang.org/x/sys/internal/unsafeheader" -) +import "unsafe" // SysvShmAttach attaches the Sysv shared memory segment associated with the // shared memory identifier id. @@ -34,12 +30,7 @@ func SysvShmAttach(id int, addr uintptr, flag int) ([]byte, error) { } // Use unsafe to convert addr into a []byte. 
- // TODO: convert to unsafe.Slice once we can assume Go 1.17 - var b []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b)) - hdr.Data = unsafe.Pointer(addr) - hdr.Cap = int(info.Segsz) - hdr.Len = int(info.Segsz) + b := unsafe.Slice((*byte)(unsafe.Pointer(addr)), int(info.Segsz)) return b, nil } diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index 25df1e37801f..663b3779de2d 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -160,13 +160,12 @@ func Lremovexattr(link string, attr string) (err error) { } func Listxattr(file string, dest []byte) (sz int, err error) { - d := initxattrdest(dest, 0) destsiz := len(dest) // FreeBSD won't allow you to list xattrs from multiple namespaces - s := 0 + s, pos := 0, 0 for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) + stmp, e := ListxattrNS(file, nsid, dest[pos:]) /* Errors accessing system attrs are ignored so that * we can implement the Linux-like behavior of omitting errors that @@ -175,66 +174,102 @@ func Listxattr(file string, dest []byte) (sz int, err error) { * Linux will still error if we ask for user attributes on a file that * we don't have read permissions on, so don't ignore those errors */ - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { + if e != nil { + if e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } return s, e } s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 + pos = s + if pos > destsiz { + pos = destsiz } - d = initxattrdest(dest, s) } return s, nil } -func Flistxattr(fd int, dest []byte) (sz int, err error) { +func ListxattrNS(file string, nsid int, dest []byte) (sz int, err error) { d := initxattrdest(dest, 0) destsiz := len(dest) - s := 0 + s, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) + if e != nil { + return 0, err + } + + return s, nil +} + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + destsiz := len(dest) + + s, pos := 0, 0 for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz) - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { + stmp, e := FlistxattrNS(fd, nsid, dest[pos:]) + + if e != nil { + if e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } return s, e } s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 + pos = s + if pos > destsiz { + pos = destsiz } - d = initxattrdest(dest, s) } return s, nil } -func Llistxattr(link string, dest []byte) (sz int, err error) { +func FlistxattrNS(fd int, nsid int, dest []byte) (sz int, err error) { d := initxattrdest(dest, 0) destsiz := len(dest) - s := 0 + s, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz) + if e != nil { + return 0, err + } + + return s, nil +} + +func Llistxattr(link string, dest []byte) (sz int, err error) { + destsiz := len(dest) + + s, pos := 0, 0 for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { + stmp, e := LlistxattrNS(link, nsid, dest[pos:]) + + if e != nil { + if e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } return s, e } s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 + pos = s + if pos > 
destsiz { + pos = destsiz } - d = initxattrdest(dest, s) + } + + return s, nil +} + +func LlistxattrNS(link string, nsid int, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsiz := len(dest) + + s, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) + if e != nil { + return 0, err } return s, nil diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 440900112cd4..f8c2c5138748 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80084267 BIOCSETFNR = 0x80084282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8008427b BIOCSETZBUF = 0x800c4281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1179,6 +1185,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1189,6 +1197,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1196,6 +1208,60 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETFSBASE = 0x47 + PT_GETGSBASE = 0x49 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETXMMREGS = 0x40 + PT_GETXSTATE = 0x45 + PT_GETXSTATE_INFO = 0x44 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETFSBASE = 0x48 + PT_SETGSBASE = 0x4a + PT_SETREGS = 0x22 + PT_SETSTEP 
= 0x11 + PT_SETXMMREGS = 0x41 + PT_SETXSTATE = 0x46 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1320,10 +1386,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0086924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1414,6 +1482,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1472,22 +1541,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1496,12 +1583,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1541,6 +1634,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1554,7 +1648,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1694,12 +1787,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1842,7 +1936,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too 
large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1904,6 +1998,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 64520d31226b..96310c3be1b0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80104267 BIOCSETFNR = 0x80104282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8010427b BIOCSETZBUF = 0x80184281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1180,6 +1186,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1190,6 +1198,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1197,6 +1209,58 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETFSBASE = 0x47 + PT_GETGSBASE = 0x49 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETXSTATE = 0x45 + PT_GETXSTATE_INFO = 0x44 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 
0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETFSBASE = 0x48 + PT_SETGSBASE = 0x4a + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SETXSTATE = 0x46 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1321,10 +1385,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0106924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1415,6 +1481,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1473,22 +1540,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1497,12 +1582,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1542,6 +1633,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1555,7 +1647,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1693,12 +1784,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = 
syscall.Errno(0x1f) @@ -1841,7 +1933,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1903,6 +1995,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 99e9a0e06e95..777b69defa04 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80084267 BIOCSETFNR = 0x80084282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8008427b BIOCSETZBUF = 0x800c4281 BIOCSHDRCMPLT = 0x80044275 @@ -362,7 +363,7 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0x18 CTL_NET = 0x4 - DIOCGATTR = 0xc144648e + DIOCGATTR = 0xc148648e DIOCGDELETE = 0x80106488 DIOCGFLUSH = 0x20006487 DIOCGFRONTSTUFF = 0x40086486 @@ -377,7 +378,7 @@ const ( DIOCGSTRIPESIZE = 0x4008648b DIOCSKERNELDUMP = 0x804c6490 DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 - DIOCZONECMD = 0xc06c648f + DIOCZONECMD = 0xc078648f DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 @@ -407,7 +408,9 @@ const ( DLT_C_HDLC_WITH_DIR = 0xcd DLT_DBUS = 0xe7 DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 DLT_DVB_CI = 0xeb DLT_ECONET = 0x73 DLT_EN10MB = 0x1 @@ -417,6 +420,7 @@ const ( DLT_ERF = 0xc5 DLT_ERF_ETH = 0xaf DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 DLT_FC_2 = 0xe0 DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa @@ -444,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -484,9 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x109 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -502,7 +508,9 @@ const ( DLT_NFC_LLCP = 0xf5 DLT_NFLOG = 0xef DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x79 @@ -526,15 +534,18 @@ const ( DLT_RTAC_SERIAL = 0xfa DLT_SCCP = 0x8e DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c DLT_SITA = 0xc4 DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xd DLT_STANAG_5066_D_PDU = 0xed DLT_SUNATM = 0x7b DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d DLT_TZSP = 0x80 DLT_USB = 0xba DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a DLT_USB_FREEBSD = 0xba DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc @@ -554,6 +565,7 @@ const ( DLT_USER7 = 0x9a DLT_USER8 = 0x9b DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f DLT_WATTSTOPPER_DLM = 0x107 DLT_WIHART = 0xdf DLT_WIRESHARK_UPPER_PDU = 0xfc @@ -578,6 +590,7 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd EVFILT_FS = -0x9 EVFILT_LIO = -0xa EVFILT_PROC = -0x5 @@ -585,11 +598,12 @@ const ( EVFILT_READ = -0x1 EVFILT_SENDFILE = -0xc 
EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc + EVFILT_SYSCOUNT = 0xd EVFILT_TIMER = -0x7 EVFILT_USER = -0xb EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 @@ -606,6 +620,7 @@ const ( EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff EXTATTR_NAMESPACE_EMPTY = 0x0 EXTATTR_NAMESPACE_SYSTEM = 0x2 EXTATTR_NAMESPACE_USER = 0x1 @@ -647,6 +662,7 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 IFF_ALLMULTI = 0x200 IFF_ALTPHYS = 0x4000 IFF_BROADCAST = 0x2 @@ -663,6 +679,7 @@ const ( IFF_MONITOR = 0x40000 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PPROMISC = 0x20000 @@ -719,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -799,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -837,6 +854,7 @@ const ( IPV6_DSTOPTS = 0x32 IPV6_FLOWID = 0x43 IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWTYPE = 0x44 IPV6_FRAGTTL = 0x78 @@ -857,13 +875,13 @@ const ( IPV6_MAX_GROUP_SRC_FILTER = 0x200 IPV6_MAX_MEMBERSHIPS = 0xfff IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 IPV6_PATHMTU = 0x2c IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe @@ -875,6 +893,7 @@ const ( IPV6_RECVFLOWID = 0x46 IPV6_RECVHOPLIMIT = 0x25 IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 IPV6_RECVPATHMTU = 0x2b IPV6_RECVPKTINFO = 0x24 IPV6_RECVRSSBUCKETID = 0x47 @@ -894,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -935,10 +955,8 @@ const ( IP_MAX_MEMBERSHIPS = 0xfff IP_MAX_SOCK_MUTE_FILTER = 0x80 IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MAX_SOURCE_FILTER = 0x400 IP_MF = 0x2000 IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f IP_MSFILTER = 0x4a IP_MSS = 0x240 IP_MULTICAST_IF = 0x9 @@ -948,6 +966,7 @@ const ( IP_OFFMASK = 0x1fff IP_ONESBCAST = 0x17 IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b IP_PORTRANGE = 0x13 IP_PORTRANGE_DEFAULT = 0x0 IP_PORTRANGE_HIGH = 0x1 @@ -956,6 +975,7 @@ const ( IP_RECVFLOWID = 0x5d IP_RECVIF = 0x14 IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b IP_RECVRETOPTS = 0x6 IP_RECVRSSBUCKETID = 0x5e IP_RECVTOS = 0x44 @@ -972,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -983,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1071,10 +1094,12 @@ const ( MNT_SUSPEND = 0x4 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 MNT_UPDATE = 0x10000 - MNT_UPDATEMASK = 0x2d8d0807e + MNT_UPDATEMASK = 0xad8d0807e MNT_USER = 0x8000 - MNT_VISFLAGMASK = 0x3fef0ffff + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 @@ -1103,6 +1128,7 @@ const ( NFDBITS = 0x20 NOFLSH = 
0x80000000 NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 NOTE_CLOSE = 0x100 @@ -1159,6 +1185,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1169,6 +1197,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1176,6 +1208,53 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETVFPREGS = 0x40 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SETVFPREGS = 0x41 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1257,7 +1336,6 @@ const ( RTV_WEIGHT = 0x100 RT_ALL_FIBS = -0x1 RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 RT_DEFAULT_FIB = 0x0 RT_HAS_GW = 0x80 RT_HAS_HEADER = 0x10 @@ -1267,15 +1345,17 @@ const ( RT_LLE_CACHE = 0x100 RT_MAY_LOOP = 0x8 RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 RT_REJECT = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_BINTIME = 0x4 SCM_CREDS = 0x3 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 SEEK_CUR = 0x1 SEEK_DATA = 0x3 SEEK_END = 0x2 @@ -1299,10 +1379,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0086924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1318,8 +1400,11 @@ const ( SIOCGIFPDSTADDR = 0xc0206948 SIOCGIFPHYS = 0xc0206935 SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 SIOCGIFSTATUS = 0xc331693b SIOCGIFXMEDIA = 0xc028698b + SIOCGLANPCP = 0xc0206998 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCGPRIVATE_0 = 0xc0206950 @@ -1350,6 +1435,7 @@ const ( SIOCSIFPHYS = 0x80206936 SIOCSIFRVNET = 0xc020695b SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 SIOCSTUNFIB = 0x8020695f @@ -1369,6 +1455,7 @@ const ( SO_BINTIME = 0x2000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1377,6 +1464,7 @@ const ( SO_LISTENINCQLEN = 0x1013 SO_LISTENQLEN = 0x1012 SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 SO_NOSIGPIPE = 0x800 SO_NO_DDP = 0x8000 SO_NO_OFFLOAD = 0x4000 @@ -1387,13 +1475,22 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 + 
SO_REUSEPORT_LB = 0x10000 SO_SETFIB = 0x1014 SO_SNDBUF = 0x1001 SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 @@ -1437,10 +1534,69 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DELACK = 0x48 TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 TCP_FUNCTION_BLK = 0x2000 TCP_FUNCTION_NAME_LEN_MAX = 0x20 TCP_INFO = 0x20 @@ -1448,6 +1604,12 @@ const ( TCP_KEEPIDLE = 0x100 TCP_KEEPINIT = 0x80 TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOG_ID_LEN = 0x40 TCP_MAXBURST = 0x4 TCP_MAXHLEN = 0x3c TCP_MAXOLEN = 0x28 @@ -1463,8 +1625,30 @@ const ( TCP_NOPUSH = 0x4 TCP_PCAP_IN = 0x1000 TCP_PCAP_OUT = 0x800 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 TCP_VENDOR = 0x80000000 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCONS = 0x80047462 @@ -1528,6 +1712,8 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UTIME_NOW = -0x1 + 
UTIME_OMIT = -0x2 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1592,12 +1778,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1740,7 +1927,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1802,6 +1989,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 4c837711493f..c557ac2db317 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80104267 BIOCSETFNR = 0x80104282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8010427b BIOCSETZBUF = 0x80184281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1180,6 +1186,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1190,6 +1198,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1197,6 +1209,51 @@ const ( 
PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1321,10 +1378,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0106924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1415,6 +1474,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1473,22 +1533,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1497,12 +1575,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1542,6 +1626,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1555,7 +1640,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a 
TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1694,12 +1778,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1842,7 +1927,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1904,6 +1989,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go new file mode 100644 index 000000000000..341b4d96265b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -0,0 +1,2148 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_HYPERV = 0x2b + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2b + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 + AF_VENDOR32 = 0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + ALTWERASE = 0x200 + B0 = 0x0 + B1000000 = 0xf4240 + B110 = 0x6e + 
B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1500000 = 0x16e360 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B2000000 = 0x1e8480 + B230400 = 0x38400 + B2400 = 0x960 + B2500000 = 0x2625a0 + B28800 = 0x7080 + B300 = 0x12c + B3000000 = 0x2dc6c0 + B3500000 = 0x3567e0 + B38400 = 0x9600 + B4000000 = 0x3d0900 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B500000 = 0x7a120 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0104279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4008427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x40184280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x80104282 + BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 + BIOCSETWF = 0x8010427b + BIOCSETZBUF = 0x80184281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x8 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + CAP_ACCEPT = 0x200000020000000 + CAP_ACL_CHECK = 0x400000000010000 + CAP_ACL_DELETE = 0x400000000020000 + CAP_ACL_GET = 0x400000000040000 + CAP_ACL_SET = 0x400000000080000 + CAP_ALL0 = 0x20007ffffffffff + CAP_ALL1 = 0x4000000001fffff + CAP_BIND = 0x200000040000000 + CAP_BINDAT = 0x200008000000400 + CAP_CHFLAGSAT = 0x200000000001400 + CAP_CONNECT = 0x200000080000000 + CAP_CONNECTAT = 0x200010000000400 + CAP_CREATE = 0x200000000000040 + CAP_EVENT = 0x400000000000020 + CAP_EXTATTR_DELETE = 0x400000000001000 + CAP_EXTATTR_GET = 0x400000000002000 + CAP_EXTATTR_LIST = 0x400000000004000 + CAP_EXTATTR_SET = 0x400000000008000 + CAP_FCHDIR = 
0x200000000000800 + CAP_FCHFLAGS = 0x200000000001000 + CAP_FCHMOD = 0x200000000002000 + CAP_FCHMODAT = 0x200000000002400 + CAP_FCHOWN = 0x200000000004000 + CAP_FCHOWNAT = 0x200000000004400 + CAP_FCNTL = 0x200000000008000 + CAP_FCNTL_ALL = 0x78 + CAP_FCNTL_GETFL = 0x8 + CAP_FCNTL_GETOWN = 0x20 + CAP_FCNTL_SETFL = 0x10 + CAP_FCNTL_SETOWN = 0x40 + CAP_FEXECVE = 0x200000000000080 + CAP_FLOCK = 0x200000000010000 + CAP_FPATHCONF = 0x200000000020000 + CAP_FSCK = 0x200000000040000 + CAP_FSTAT = 0x200000000080000 + CAP_FSTATAT = 0x200000000080400 + CAP_FSTATFS = 0x200000000100000 + CAP_FSYNC = 0x200000000000100 + CAP_FTRUNCATE = 0x200000000000200 + CAP_FUTIMES = 0x200000000200000 + CAP_FUTIMESAT = 0x200000000200400 + CAP_GETPEERNAME = 0x200000100000000 + CAP_GETSOCKNAME = 0x200000200000000 + CAP_GETSOCKOPT = 0x200000400000000 + CAP_IOCTL = 0x400000000000080 + CAP_IOCTLS_ALL = 0x7fffffffffffffff + CAP_KQUEUE = 0x400000000100040 + CAP_KQUEUE_CHANGE = 0x400000000100000 + CAP_KQUEUE_EVENT = 0x400000000000040 + CAP_LINKAT_SOURCE = 0x200020000000400 + CAP_LINKAT_TARGET = 0x200000000400400 + CAP_LISTEN = 0x200000800000000 + CAP_LOOKUP = 0x200000000000400 + CAP_MAC_GET = 0x400000000000001 + CAP_MAC_SET = 0x400000000000002 + CAP_MKDIRAT = 0x200000000800400 + CAP_MKFIFOAT = 0x200000001000400 + CAP_MKNODAT = 0x200000002000400 + CAP_MMAP = 0x200000000000010 + CAP_MMAP_R = 0x20000000000001d + CAP_MMAP_RW = 0x20000000000001f + CAP_MMAP_RWX = 0x20000000000003f + CAP_MMAP_RX = 0x20000000000003d + CAP_MMAP_W = 0x20000000000001e + CAP_MMAP_WX = 0x20000000000003e + CAP_MMAP_X = 0x20000000000003c + CAP_PDGETPID = 0x400000000000200 + CAP_PDKILL = 0x400000000000800 + CAP_PDWAIT = 0x400000000000400 + CAP_PEELOFF = 0x200001000000000 + CAP_POLL_EVENT = 0x400000000000020 + CAP_PREAD = 0x20000000000000d + CAP_PWRITE = 0x20000000000000e + CAP_READ = 0x200000000000001 + CAP_RECV = 0x200000000000001 + CAP_RENAMEAT_SOURCE = 0x200000004000400 + CAP_RENAMEAT_TARGET = 0x200040000000400 + CAP_RIGHTS_VERSION = 0x0 + CAP_RIGHTS_VERSION_00 = 0x0 + CAP_SEEK = 0x20000000000000c + CAP_SEEK_TELL = 0x200000000000004 + CAP_SEM_GETVALUE = 0x400000000000004 + CAP_SEM_POST = 0x400000000000008 + CAP_SEM_WAIT = 0x400000000000010 + CAP_SEND = 0x200000000000002 + CAP_SETSOCKOPT = 0x200002000000000 + CAP_SHUTDOWN = 0x200004000000000 + CAP_SOCK_CLIENT = 0x200007780000003 + CAP_SOCK_SERVER = 0x200007f60000003 + CAP_SYMLINKAT = 0x200000008000400 + CAP_TTYHOOK = 0x400000000000100 + CAP_UNLINKAT = 0x200000010000400 + CAP_UNUSED0_44 = 0x200080000000000 + CAP_UNUSED0_57 = 0x300000000000000 + CAP_UNUSED1_22 = 0x400000000200000 + CAP_UNUSED1_57 = 0x500000000000000 + CAP_WRITE = 0x200000000000002 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x5 + CLOCK_MONOTONIC = 0x4 + CLOCK_MONOTONIC_COARSE = 0xc + CLOCK_MONOTONIC_FAST = 0xc + CLOCK_MONOTONIC_PRECISE = 0xb + CLOCK_PROCESS_CPUTIME_ID = 0xf + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_COARSE = 0xa + CLOCK_REALTIME_FAST = 0xa + CLOCK_REALTIME_PRECISE = 0x9 + CLOCK_SECOND = 0xd + CLOCK_THREAD_CPUTIME_ID = 0xe + CLOCK_UPTIME = 0x5 + CLOCK_UPTIME_FAST = 0x8 + CLOCK_UPTIME_PRECISE = 0x7 + CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DIOCGATTR = 0xc148648e + DIOCGDELETE = 
0x80106488 + DIOCGFLUSH = 0x20006487 + DIOCGFWHEADS = 0x40046483 + DIOCGFWSECTORS = 0x40046482 + DIOCGIDENT = 0x41006489 + DIOCGKERNELDUMP = 0xc0986492 + DIOCGMEDIASIZE = 0x40086481 + DIOCGPHYSPATH = 0x4400648d + DIOCGPROVIDERNAME = 0x4400648a + DIOCGSECTORSIZE = 0x40046480 + DIOCGSTRIPEOFFSET = 0x4008648c + DIOCGSTRIPESIZE = 0x4008648b + DIOCSKERNELDUMP = 0x80986491 + DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 + DIOCSKERNELDUMP_FREEBSD12 = 0x80506490 + DIOCZONECMD = 0xc080648f + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_BREDR_BB = 0xff + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_BLUETOOTH_LE_LL = 0xfb + DLT_BLUETOOTH_LE_LL_WITH_PHDR = 0x100 + DLT_BLUETOOTH_LINUX_MONITOR = 0xfe + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_CLASS_NETBSD_RAWAF = 0x2240000 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 + DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_EPON = 0x103 + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_INFINIBAND = 0xf7 + DLT_IPFILTER = 0x74 + DLT_IPMB_KONTRON = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPMI_HPM_2 = 0x104 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 0x108 + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 + DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x114 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT 
= 0xf1 + DLT_NETLINK = 0xfd + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x79 + DLT_PKTAP = 0x102 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PROFIBUS_DL = 0x101 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RDS = 0x109 + DLT_REDBACK_SMARTEDGE = 0x20 + DLT_RIO = 0x7c + DLT_RTAC_SERIAL = 0xfa + DLT_SCCP = 0x8e + DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a + DLT_USB_FREEBSD = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f + DLT_WATTSTOPPER_DLM = 0x107 + DLT_WIHART = 0xdf + DLT_WIRESHARK_UPPER_PDU = 0xfc + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EHE_DEAD_PRIORITY = -0x1 + EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_PROCDESC = -0x8 + EVFILT_READ = -0x1 + EVFILT_SENDFILE = -0xc + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xd + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_FLAG2 = 0x4000 + EV_FORCEONESHOT = 0x100 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_NONE = -0xc8 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_ADD_SEALS = 0x13 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_GET_SEALS = 0x14 + F_ISUNIONSTACK = 0x15 + F_KINFO = 0x16 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f72 + 
IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_KNOWSEPOCH = 0x20 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_IEEE1394 = 0x90 + IFT_INFINIBAND = 0xc7 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_PPP = 0x17 + IFT_PROPVIRTUAL = 0x35 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_NETMASK_DEFAULT = 0xffffff00 + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + 
IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDMULTI = 0x41 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FLOWID = 0x43 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOWTYPE = 0x44 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVFLOWID = 0x46 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRSSBUCKETID = 0x47 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RSSBUCKETID = 0x45 + IPV6_RSS_LISTEN_BUCKET = 0x42 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BINDMULTI = 0x19 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FLOWID = 0x5a + IP_FLOWTYPE = 0x5b + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS 
= 0x1 + IP_ORIGDSTADDR = 0x1b + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVFLOWID = 0x5d + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b + IP_RECVRETOPTS = 0x6 + IP_RECVRSSBUCKETID = 0x5e + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSSBUCKETID = 0x5c + IP_RSS_LISTEN_BUCKET = 0x1a + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b + ISIG = 0x80 + ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_32BIT = 0x80000 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GUARD = 0x2000 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RESERVED0020 = 0x20 + MAP_RESERVED0040 = 0x40 + MAP_RESERVED0080 = 0x80 + MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0xfc000000 + MFD_HUGE_SHIFT = 0x1a + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0x300d0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EMPTYDIR = 0x2000000000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_EXTLS = 0x4000000000 + MNT_EXTLSCERT = 0x8000000000 + MNT_EXTLSCERTUSER = 0x10000000000 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NOCOVER = 0x1000000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + 
MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0xad8d0807e + MNT_USER = 0x8000 + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff + MNT_WAIT = 0x1 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x80000 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NET_RT_NHGRP = 0x7 + NET_RT_NHOP = 0x6 + NFDBITS = 0x40 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_CLOSE = 0x100 + NOTE_CLOSE_WRITE = 0x200 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FILE_POLL = 0x2 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MSECONDS = 0x2 + NOTE_NSECONDS = 0x8 + NOTE_OPEN = 0x80 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_READ = 0x400 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x4 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x1000000 + O_EMPTY_PATH = 0x2000000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_PATH = 0x400000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_VERIFY = 0x200000 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_COREDUMP = 0x1d + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + 
RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FIXEDMTU = 0x80000 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_BLACKHOLE = 0x40 + RT_DEFAULT_FIB = 0x0 + RT_DEFAULT_WEIGHT = 0x1 + RT_HAS_GW = 0x80 + RT_HAS_HEADER = 0x10 + RT_HAS_HEADER_BIT = 0x4 + RT_L2_ME = 0x4 + RT_L2_ME_BIT = 0x2 + RT_LLE_CACHE = 0x100 + RT_MAX_WEIGHT = 0xffffff + RT_MAY_LOOP = 0x8 + RT_MAY_LOOP_BIT = 0x3 + RT_REJECT = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_CREDS2 = 0x8 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPHYADDR = 0x80206949 + SIOCGDRVSPEC = 0xc028697b + SIOCGETSGCNT = 0xc0207210 + SIOCGETVIFCNT = 0xc028720f + SIOCGHIWAT = 0x40047301 + SIOCGHWADDR = 0xc020693e + SIOCGI2C = 0xc020693d + SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0x8020692c + SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0306938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 + SIOCGIFSTATUS = 0xc331693b + SIOCGIFXMEDIA = 0xc030698b + SIOCGLANPCP = 0xc0206998 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCGTUNFIB = 0xc020695e + SIOCIFCREATE = 0xc020697a + 
SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSDRVSPEC = 0x8028697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSTUNFIB = 0x8020695f + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 + SO_SETFIB = 0x1014 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB3 = 0x4 + TABDLY = 0x4 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 + 
TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_INIT_RATE = 0x458 + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_USE_RACK_RR = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 + TCP_CA_NAME_MAX = 0x10 + TCP_CCALGOOPT = 0x41 + TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DEFER_OPTIONS = 0x470 + TCP_DELACK = 0x48 + TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 + TCP_FAST_RSM_HACK = 0x471 + TCP_FIN_IS_RST = 0x49 + TCP_FUNCTION_BLK = 0x2000 + TCP_FUNCTION_NAME_LEN_MAX = 0x20 + TCP_HDWR_RATE_CAP = 0x46a + TCP_HDWR_UP_ONLY = 0x46c + TCP_IDLE_REDUCE = 0x46 + TCP_INFO = 0x20 + TCP_IWND_NB = 0x2b + TCP_IWND_NSEG = 0x2c + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOGID_CNT = 0x2e + TCP_LOG_ID_LEN = 0x40 + TCP_LOG_LIMIT = 0x4a + TCP_LOG_TAG = 0x2f + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXPEAKRATE = 0x45 + TCP_MAXSEG = 0x2 + TCP_MAXUNACKTIME = 0x44 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NO_PRR = 0x462 + TCP_PACING_RATE_CAP = 0x46b + TCP_PCAP_IN = 0x1000 + TCP_PCAP_OUT = 0x800 + TCP_PERF_INFO = 0x4e + TCP_PROC_ACCOUNTING = 0x4c + TCP_RACK_ABC_VAL = 0x46d + TCP_RACK_CHEAT_NOT_CONF_RATE = 0x459 + TCP_RACK_DO_DETECTION = 0x449 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_FORCE_MSEG = 0x45d + TCP_RACK_GP_INCREASE = 0x446 + TCP_RACK_GP_INCREASE_CA = 0x45a + TCP_RACK_GP_INCREASE_REC = 0x45c + TCP_RACK_GP_INCREASE_SS = 0x45b + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MBUF_QUEUE = 0x41a + TCP_RACK_MEASURE_CNT = 0x46f + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_NONRXT_CFG_RATE = 0x463 + TCP_RACK_NO_PUSH_AT_MAX = 0x466 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_RATE_CA = 0x45e + TCP_RACK_PACE_RATE_REC = 0x460 + TCP_RACK_PACE_RATE_SS = 0x45f + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PACE_TO_FILL = 0x467 + TCP_RACK_PACING_BETA = 0x472 + TCP_RACK_PACING_BETA_ECN = 0x473 + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROFILE = 0x469 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_RR_CONF = 0x459 + TCP_RACK_TIMER_SLOP = 0x474 + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 + TCP_REC_ABC_VAL = 0x46e + TCP_REMOTE_UDP_ENCAPS_PORT = 0x47 + TCP_REUSPORT_LB_NUMA = 0x402 + TCP_REUSPORT_LB_NUMA_CURDOM = -0x1 + TCP_REUSPORT_LB_NUMA_NODOM = -0x2 + TCP_RXTLS_ENABLE = 0x29 + TCP_RXTLS_MODE = 0x2a + 
TCP_SHARED_CWND_ALLOWED = 0x4b + TCP_SHARED_CWND_ENABLE = 0x464 + TCP_SHARED_CWND_TIME_LIMIT = 0x468 + TCP_STATS = 0x21 + TCP_TIMELY_DYN_ADJ = 0x465 + TCP_TLS_MODE_IFNET = 0x2 + TCP_TLS_MODE_NONE = 0x0 + TCP_TLS_MODE_SW = 0x1 + TCP_TLS_MODE_TOE = 0x3 + TCP_TXTLS_ENABLE = 0x27 + TCP_TXTLS_MODE = 0x28 + TCP_USER_LOG = 0x30 + TCP_USE_CMP_ACKS = 0x4d + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECAPMODE = syscall.Errno(0x5e) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) + EINTR = syscall.Errno(0x4) + EINVAL = 
syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x61) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCAPABLE = syscall.Errno(0x5d) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5f) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x60) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGLIBRT = syscall.Signal(0x21) + SIGLWP = syscall.Signal(0x20) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, 
"EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. 
not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "ECANCELED", "operation canceled"}, + {86, "EILSEQ", "illegal byte sequence"}, + {87, "ENOATTR", "attribute not found"}, + {88, "EDOOFUS", "programming error"}, + {89, "EBADMSG", "bad message"}, + {90, "EMULTIHOP", "multihop attempted"}, + {91, "ENOLINK", "link has been severed"}, + {92, "EPROTO", "protocol error"}, + {93, "ENOTCAPABLE", "capabilities insufficient"}, + {94, "ECAPMODE", "not permitted in capability mode"}, + {95, "ENOTRECOVERABLE", "state not recoverable"}, + {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "unknown signal"}, + {33, "SIGLIBRT", "unknown signal"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 664db640ae72..785d693eb328 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -140,6 +140,306 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUDIT_ADD = 0x3eb + AUDIT_ADD_RULE = 0x3f3 + AUDIT_ALWAYS = 0x2 + AUDIT_ANOM_ABEND = 0x6a5 + AUDIT_ANOM_CREAT = 0x6a7 + AUDIT_ANOM_LINK = 0x6a6 + AUDIT_ANOM_PROMISCUOUS = 0x6a4 + AUDIT_ARCH = 0xb + AUDIT_ARCH_AARCH64 = 0xc00000b7 + AUDIT_ARCH_ALPHA = 0xc0009026 + AUDIT_ARCH_ARCOMPACT = 0x4000005d + AUDIT_ARCH_ARCOMPACTBE = 0x5d + AUDIT_ARCH_ARCV2 = 0x400000c3 + AUDIT_ARCH_ARCV2BE = 0xc3 + AUDIT_ARCH_ARM = 0x40000028 + AUDIT_ARCH_ARMEB = 0x28 + AUDIT_ARCH_C6X = 0x4000008c + AUDIT_ARCH_C6XBE = 0x8c + AUDIT_ARCH_CRIS = 0x4000004c + AUDIT_ARCH_CSKY = 0x400000fc + AUDIT_ARCH_FRV = 0x5441 + AUDIT_ARCH_H8300 = 0x2e + AUDIT_ARCH_HEXAGON = 0xa4 + AUDIT_ARCH_I386 = 0x40000003 + AUDIT_ARCH_IA64 = 0xc0000032 + AUDIT_ARCH_LOONGARCH32 = 0x40000102 + AUDIT_ARCH_LOONGARCH64 = 0xc0000102 + 
AUDIT_ARCH_M32R = 0x58 + AUDIT_ARCH_M68K = 0x4 + AUDIT_ARCH_MICROBLAZE = 0xbd + AUDIT_ARCH_MIPS = 0x8 + AUDIT_ARCH_MIPS64 = 0x80000008 + AUDIT_ARCH_MIPS64N32 = 0xa0000008 + AUDIT_ARCH_MIPSEL = 0x40000008 + AUDIT_ARCH_MIPSEL64 = 0xc0000008 + AUDIT_ARCH_MIPSEL64N32 = 0xe0000008 + AUDIT_ARCH_NDS32 = 0x400000a7 + AUDIT_ARCH_NDS32BE = 0xa7 + AUDIT_ARCH_NIOS2 = 0x40000071 + AUDIT_ARCH_OPENRISC = 0x5c + AUDIT_ARCH_PARISC = 0xf + AUDIT_ARCH_PARISC64 = 0x8000000f + AUDIT_ARCH_PPC = 0x14 + AUDIT_ARCH_PPC64 = 0x80000015 + AUDIT_ARCH_PPC64LE = 0xc0000015 + AUDIT_ARCH_RISCV32 = 0x400000f3 + AUDIT_ARCH_RISCV64 = 0xc00000f3 + AUDIT_ARCH_S390 = 0x16 + AUDIT_ARCH_S390X = 0x80000016 + AUDIT_ARCH_SH = 0x2a + AUDIT_ARCH_SH64 = 0x8000002a + AUDIT_ARCH_SHEL = 0x4000002a + AUDIT_ARCH_SHEL64 = 0xc000002a + AUDIT_ARCH_SPARC = 0x2 + AUDIT_ARCH_SPARC64 = 0x8000002b + AUDIT_ARCH_TILEGX = 0xc00000bf + AUDIT_ARCH_TILEGX32 = 0x400000bf + AUDIT_ARCH_TILEPRO = 0x400000bc + AUDIT_ARCH_UNICORE = 0x4000006e + AUDIT_ARCH_X86_64 = 0xc000003e + AUDIT_ARCH_XTENSA = 0x5e + AUDIT_ARG0 = 0xc8 + AUDIT_ARG1 = 0xc9 + AUDIT_ARG2 = 0xca + AUDIT_ARG3 = 0xcb + AUDIT_AVC = 0x578 + AUDIT_AVC_PATH = 0x57a + AUDIT_BITMASK_SIZE = 0x40 + AUDIT_BIT_MASK = 0x8000000 + AUDIT_BIT_TEST = 0x48000000 + AUDIT_BPF = 0x536 + AUDIT_BPRM_FCAPS = 0x529 + AUDIT_CAPSET = 0x52a + AUDIT_CLASS_CHATTR = 0x2 + AUDIT_CLASS_CHATTR_32 = 0x3 + AUDIT_CLASS_DIR_WRITE = 0x0 + AUDIT_CLASS_DIR_WRITE_32 = 0x1 + AUDIT_CLASS_READ = 0x4 + AUDIT_CLASS_READ_32 = 0x5 + AUDIT_CLASS_SIGNAL = 0x8 + AUDIT_CLASS_SIGNAL_32 = 0x9 + AUDIT_CLASS_WRITE = 0x6 + AUDIT_CLASS_WRITE_32 = 0x7 + AUDIT_COMPARE_AUID_TO_EUID = 0x10 + AUDIT_COMPARE_AUID_TO_FSUID = 0xe + AUDIT_COMPARE_AUID_TO_OBJ_UID = 0x5 + AUDIT_COMPARE_AUID_TO_SUID = 0xf + AUDIT_COMPARE_EGID_TO_FSGID = 0x17 + AUDIT_COMPARE_EGID_TO_OBJ_GID = 0x4 + AUDIT_COMPARE_EGID_TO_SGID = 0x18 + AUDIT_COMPARE_EUID_TO_FSUID = 0x12 + AUDIT_COMPARE_EUID_TO_OBJ_UID = 0x3 + AUDIT_COMPARE_EUID_TO_SUID = 0x11 + AUDIT_COMPARE_FSGID_TO_OBJ_GID = 0x9 + AUDIT_COMPARE_FSUID_TO_OBJ_UID = 0x8 + AUDIT_COMPARE_GID_TO_EGID = 0x14 + AUDIT_COMPARE_GID_TO_FSGID = 0x15 + AUDIT_COMPARE_GID_TO_OBJ_GID = 0x2 + AUDIT_COMPARE_GID_TO_SGID = 0x16 + AUDIT_COMPARE_SGID_TO_FSGID = 0x19 + AUDIT_COMPARE_SGID_TO_OBJ_GID = 0x7 + AUDIT_COMPARE_SUID_TO_FSUID = 0x13 + AUDIT_COMPARE_SUID_TO_OBJ_UID = 0x6 + AUDIT_COMPARE_UID_TO_AUID = 0xa + AUDIT_COMPARE_UID_TO_EUID = 0xb + AUDIT_COMPARE_UID_TO_FSUID = 0xc + AUDIT_COMPARE_UID_TO_OBJ_UID = 0x1 + AUDIT_COMPARE_UID_TO_SUID = 0xd + AUDIT_CONFIG_CHANGE = 0x519 + AUDIT_CWD = 0x51b + AUDIT_DAEMON_ABORT = 0x4b2 + AUDIT_DAEMON_CONFIG = 0x4b3 + AUDIT_DAEMON_END = 0x4b1 + AUDIT_DAEMON_START = 0x4b0 + AUDIT_DEL = 0x3ec + AUDIT_DEL_RULE = 0x3f4 + AUDIT_DEVMAJOR = 0x64 + AUDIT_DEVMINOR = 0x65 + AUDIT_DIR = 0x6b + AUDIT_DM_CTRL = 0x53a + AUDIT_DM_EVENT = 0x53b + AUDIT_EGID = 0x6 + AUDIT_EOE = 0x528 + AUDIT_EQUAL = 0x40000000 + AUDIT_EUID = 0x2 + AUDIT_EVENT_LISTENER = 0x537 + AUDIT_EXE = 0x70 + AUDIT_EXECVE = 0x51d + AUDIT_EXIT = 0x67 + AUDIT_FAIL_PANIC = 0x2 + AUDIT_FAIL_PRINTK = 0x1 + AUDIT_FAIL_SILENT = 0x0 + AUDIT_FANOTIFY = 0x533 + AUDIT_FD_PAIR = 0x525 + AUDIT_FEATURE_BITMAP_ALL = 0x7f + AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT = 0x1 + AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME = 0x2 + AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND = 0x8 + AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH = 0x4 + AUDIT_FEATURE_BITMAP_FILTER_FS = 0x40 + AUDIT_FEATURE_BITMAP_LOST_RESET = 0x20 + AUDIT_FEATURE_BITMAP_SESSIONID_FILTER = 0x10 + AUDIT_FEATURE_CHANGE = 0x530 + 
AUDIT_FEATURE_LOGINUID_IMMUTABLE = 0x1 + AUDIT_FEATURE_ONLY_UNSET_LOGINUID = 0x0 + AUDIT_FEATURE_VERSION = 0x1 + AUDIT_FIELD_COMPARE = 0x6f + AUDIT_FILETYPE = 0x6c + AUDIT_FILTERKEY = 0xd2 + AUDIT_FILTER_ENTRY = 0x2 + AUDIT_FILTER_EXCLUDE = 0x5 + AUDIT_FILTER_EXIT = 0x4 + AUDIT_FILTER_FS = 0x6 + AUDIT_FILTER_PREPEND = 0x10 + AUDIT_FILTER_TASK = 0x1 + AUDIT_FILTER_TYPE = 0x5 + AUDIT_FILTER_URING_EXIT = 0x7 + AUDIT_FILTER_USER = 0x0 + AUDIT_FILTER_WATCH = 0x3 + AUDIT_FIRST_KERN_ANOM_MSG = 0x6a4 + AUDIT_FIRST_USER_MSG = 0x44c + AUDIT_FIRST_USER_MSG2 = 0x834 + AUDIT_FSGID = 0x8 + AUDIT_FSTYPE = 0x1a + AUDIT_FSUID = 0x4 + AUDIT_GET = 0x3e8 + AUDIT_GET_FEATURE = 0x3fb + AUDIT_GID = 0x5 + AUDIT_GREATER_THAN = 0x20000000 + AUDIT_GREATER_THAN_OR_EQUAL = 0x60000000 + AUDIT_INODE = 0x66 + AUDIT_INTEGRITY_DATA = 0x708 + AUDIT_INTEGRITY_EVM_XATTR = 0x70e + AUDIT_INTEGRITY_HASH = 0x70b + AUDIT_INTEGRITY_METADATA = 0x709 + AUDIT_INTEGRITY_PCR = 0x70c + AUDIT_INTEGRITY_POLICY_RULE = 0x70f + AUDIT_INTEGRITY_RULE = 0x70d + AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_IPC = 0x517 + AUDIT_IPC_SET_PERM = 0x51f + AUDIT_KERNEL = 0x7d0 + AUDIT_KERNEL_OTHER = 0x524 + AUDIT_KERN_MODULE = 0x532 + AUDIT_LAST_FEATURE = 0x1 + AUDIT_LAST_KERN_ANOM_MSG = 0x707 + AUDIT_LAST_USER_MSG = 0x4af + AUDIT_LAST_USER_MSG2 = 0xbb7 + AUDIT_LESS_THAN = 0x10000000 + AUDIT_LESS_THAN_OR_EQUAL = 0x50000000 + AUDIT_LIST = 0x3ea + AUDIT_LIST_RULES = 0x3f5 + AUDIT_LOGIN = 0x3ee + AUDIT_LOGINUID = 0x9 + AUDIT_LOGINUID_SET = 0x18 + AUDIT_MAC_CALIPSO_ADD = 0x58a + AUDIT_MAC_CALIPSO_DEL = 0x58b + AUDIT_MAC_CIPSOV4_ADD = 0x57f + AUDIT_MAC_CIPSOV4_DEL = 0x580 + AUDIT_MAC_CONFIG_CHANGE = 0x57d + AUDIT_MAC_IPSEC_ADDSA = 0x583 + AUDIT_MAC_IPSEC_ADDSPD = 0x585 + AUDIT_MAC_IPSEC_DELSA = 0x584 + AUDIT_MAC_IPSEC_DELSPD = 0x586 + AUDIT_MAC_IPSEC_EVENT = 0x587 + AUDIT_MAC_MAP_ADD = 0x581 + AUDIT_MAC_MAP_DEL = 0x582 + AUDIT_MAC_POLICY_LOAD = 0x57b + AUDIT_MAC_STATUS = 0x57c + AUDIT_MAC_UNLBL_ALLOW = 0x57e + AUDIT_MAC_UNLBL_STCADD = 0x588 + AUDIT_MAC_UNLBL_STCDEL = 0x589 + AUDIT_MAKE_EQUIV = 0x3f7 + AUDIT_MAX_FIELDS = 0x40 + AUDIT_MAX_FIELD_COMPARE = 0x19 + AUDIT_MAX_KEY_LEN = 0x100 + AUDIT_MESSAGE_TEXT_MAX = 0x2170 + AUDIT_MMAP = 0x52b + AUDIT_MQ_GETSETATTR = 0x523 + AUDIT_MQ_NOTIFY = 0x522 + AUDIT_MQ_OPEN = 0x520 + AUDIT_MQ_SENDRECV = 0x521 + AUDIT_MSGTYPE = 0xc + AUDIT_NEGATE = 0x80000000 + AUDIT_NETFILTER_CFG = 0x52d + AUDIT_NETFILTER_PKT = 0x52c + AUDIT_NEVER = 0x0 + AUDIT_NLGRP_MAX = 0x1 + AUDIT_NOT_EQUAL = 0x30000000 + AUDIT_NR_FILTERS = 0x8 + AUDIT_OBJ_GID = 0x6e + AUDIT_OBJ_LEV_HIGH = 0x17 + AUDIT_OBJ_LEV_LOW = 0x16 + AUDIT_OBJ_PID = 0x526 + AUDIT_OBJ_ROLE = 0x14 + AUDIT_OBJ_TYPE = 0x15 + AUDIT_OBJ_UID = 0x6d + AUDIT_OBJ_USER = 0x13 + AUDIT_OPENAT2 = 0x539 + AUDIT_OPERATORS = 0x78000000 + AUDIT_PATH = 0x516 + AUDIT_PERM = 0x6a + AUDIT_PERM_ATTR = 0x8 + AUDIT_PERM_EXEC = 0x1 + AUDIT_PERM_READ = 0x4 + AUDIT_PERM_WRITE = 0x2 + AUDIT_PERS = 0xa + AUDIT_PID = 0x0 + AUDIT_POSSIBLE = 0x1 + AUDIT_PPID = 0x12 + AUDIT_PROCTITLE = 0x52f + AUDIT_REPLACE = 0x531 + AUDIT_SADDR_FAM = 0x71 + AUDIT_SECCOMP = 0x52e + AUDIT_SELINUX_ERR = 0x579 + AUDIT_SESSIONID = 0x19 + AUDIT_SET = 0x3e9 + AUDIT_SET_FEATURE = 0x3fa + AUDIT_SGID = 0x7 + AUDIT_SID_UNSET = 0xffffffff + AUDIT_SIGNAL_INFO = 0x3f2 + AUDIT_SOCKADDR = 0x51a + AUDIT_SOCKETCALL = 0x518 + AUDIT_STATUS_BACKLOG_LIMIT = 0x10 + AUDIT_STATUS_BACKLOG_WAIT_TIME = 0x20 + AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL = 0x80 + AUDIT_STATUS_ENABLED = 0x1 + AUDIT_STATUS_FAILURE = 0x2 + AUDIT_STATUS_LOST = 0x40 + AUDIT_STATUS_PID = 0x4 + 
AUDIT_STATUS_RATE_LIMIT = 0x8 + AUDIT_SUBJ_CLR = 0x11 + AUDIT_SUBJ_ROLE = 0xe + AUDIT_SUBJ_SEN = 0x10 + AUDIT_SUBJ_TYPE = 0xf + AUDIT_SUBJ_USER = 0xd + AUDIT_SUCCESS = 0x68 + AUDIT_SUID = 0x3 + AUDIT_SYSCALL = 0x514 + AUDIT_SYSCALL_CLASSES = 0x10 + AUDIT_TIME_ADJNTPVAL = 0x535 + AUDIT_TIME_INJOFFSET = 0x534 + AUDIT_TRIM = 0x3f6 + AUDIT_TTY = 0x527 + AUDIT_TTY_GET = 0x3f8 + AUDIT_TTY_SET = 0x3f9 + AUDIT_UID = 0x1 + AUDIT_UID_UNSET = 0xffffffff + AUDIT_UNUSED_BITS = 0x7fffc00 + AUDIT_URINGOP = 0x538 + AUDIT_USER = 0x3ed + AUDIT_USER_AVC = 0x453 + AUDIT_USER_TTY = 0x464 + AUDIT_VERSION_BACKLOG_LIMIT = 0x1 + AUDIT_VERSION_BACKLOG_WAIT_TIME = 0x2 + AUDIT_VERSION_LATEST = 0x7f + AUDIT_WATCH = 0x69 + AUDIT_WATCH_INS = 0x3ef + AUDIT_WATCH_LIST = 0x3f1 + AUDIT_WATCH_REM = 0x3f0 AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B110 = 0x3 @@ -184,6 +484,7 @@ const ( BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_KPROBE_MULTI_RETURN = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 @@ -191,6 +492,8 @@ const ( BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 + BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 + BPF_F_XDP_HAS_FRAGS = 0x20 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -260,6 +563,17 @@ const ( BUS_USB = 0x3 BUS_VIRTUAL = 0x6 CAN_BCM = 0x2 + CAN_CTRLMODE_3_SAMPLES = 0x4 + CAN_CTRLMODE_BERR_REPORTING = 0x10 + CAN_CTRLMODE_CC_LEN8_DLC = 0x100 + CAN_CTRLMODE_FD = 0x20 + CAN_CTRLMODE_FD_NON_ISO = 0x80 + CAN_CTRLMODE_LISTENONLY = 0x2 + CAN_CTRLMODE_LOOPBACK = 0x1 + CAN_CTRLMODE_ONE_SHOT = 0x8 + CAN_CTRLMODE_PRESUME_ACK = 0x40 + CAN_CTRLMODE_TDC_AUTO = 0x200 + CAN_CTRLMODE_TDC_MANUAL = 0x400 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d CAN_EFF_MASK = 0x1fffffff @@ -337,6 +651,7 @@ const ( CAN_RTR_FLAG = 0x40000000 CAN_SFF_ID_BITS = 0xb CAN_SFF_MASK = 0x7ff + CAN_TERMINATION_DISABLED = 0x0 CAN_TP16 = 0x3 CAN_TP20 = 0x4 CAP_AUDIT_CONTROL = 0x1e @@ -381,9 +696,11 @@ const ( CAP_SYS_TIME = 0x19 CAP_SYS_TTY_CONFIG = 0x1a CAP_WAKE_ALARM = 0x23 + CEPH_SUPER_MAGIC = 0xc36400 CFLUSH = 0xf CGROUP2_SUPER_MAGIC = 0x63677270 CGROUP_SUPER_MAGIC = 0x27e0eb + CIFS_SUPER_MAGIC = 0xff534d42 CLOCK_BOOTTIME = 0x7 CLOCK_BOOTTIME_ALARM = 0x9 CLOCK_DEFAULT = 0x0 @@ -503,9 +820,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2021-03-22)" + DM_VERSION_EXTRA = "-ioctl (2022-02-22)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2d + DM_VERSION_MINOR = 0x2e DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -521,6 +838,55 @@ const ( EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EM_386 = 0x3 + EM_486 = 0x6 + EM_68K = 0x4 + EM_860 = 0x7 + EM_88K = 0x5 + EM_AARCH64 = 0xb7 + EM_ALPHA = 0x9026 + EM_ALTERA_NIOS2 = 0x71 + EM_ARCOMPACT = 0x5d + EM_ARCV2 = 0xc3 + EM_ARM = 0x28 + EM_BLACKFIN = 0x6a + EM_BPF = 0xf7 + EM_CRIS = 0x4c + EM_CSKY = 0xfc + EM_CYGNUS_M32R = 0x9041 + EM_CYGNUS_MN10300 = 0xbeef + EM_FRV = 0x5441 + EM_H8_300 = 0x2e + EM_HEXAGON = 0xa4 + EM_IA_64 = 0x32 + EM_LOONGARCH = 0x102 + EM_M32 = 0x1 + EM_M32R = 0x58 + EM_MICROBLAZE = 0xbd + EM_MIPS = 0x8 + EM_MIPS_RS3_LE = 0xa + EM_MIPS_RS4_BE = 0xa + EM_MN10300 = 0x59 + EM_NDS32 = 0xa7 + EM_NONE = 0x0 + EM_OPENRISC = 0x5c + EM_PARISC = 0xf + EM_PPC = 0x14 + EM_PPC64 = 0x15 + EM_RISCV = 0xf3 + EM_S390 = 0x16 + EM_S390_OLD = 0xa390 + EM_SH = 0x2a + EM_SPARC = 0x2 + EM_SPARC32PLUS = 0x12 + EM_SPARCV9 = 0x2b + EM_SPU = 0x17 + EM_TILEGX = 0xbf + EM_TILEPRO = 0xbc + EM_TI_C6000 = 0x8c + EM_UNICORE = 0x6e + 
EM_X86_64 = 0x3e + EM_XTENSA = 0x5e ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -698,6 +1064,7 @@ const ( ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be ETH_P_ERSPAN2 = 0x22eb + ETH_P_ETHERCAT = 0x88a4 ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -735,6 +1102,7 @@ const ( ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 ETH_P_PREAUTH = 0x88c7 + ETH_P_PROFINET = 0x8892 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -772,6 +1140,7 @@ const ( EV_SYN = 0x0 EV_VERSION = 0x10001 EXABYTE_ENABLE_NEST = 0xf0 + EXFAT_SUPER_MAGIC = 0x2011bab0 EXT2_SUPER_MAGIC = 0xef53 EXT3_SUPER_MAGIC = 0xef53 EXT4_SUPER_MAGIC = 0xef53 @@ -814,12 +1183,15 @@ const ( FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc + FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 FAN_MARK_ADD = 0x1 FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_EVICTABLE = 0x200 FAN_MARK_FILESYSTEM = 0x100 FAN_MARK_FLUSH = 0x80 FAN_MARK_IGNORED_MASK = 0x20 @@ -842,17 +1214,27 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_RENAME = 0x10000000 FAN_REPORT_DFID_NAME = 0xc00 + FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 + FAN_REPORT_TARGET_FID = 0x1000 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 + FIB_RULE_DEV_DETACHED = 0x8 + FIB_RULE_FIND_SADDR = 0x10000 + FIB_RULE_IIF_DETACHED = 0x8 + FIB_RULE_INVERT = 0x2 + FIB_RULE_OIF_DETACHED = 0x10 + FIB_RULE_PERMANENT = 0x1 + FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" @@ -915,6 +1297,7 @@ const ( FS_VERITY_METADATA_TYPE_DESCRIPTOR = 0x2 FS_VERITY_METADATA_TYPE_MERKLE_TREE = 0x1 FS_VERITY_METADATA_TYPE_SIGNATURE = 0x3 + FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -1027,7 +1410,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa + IFA_MAX = 0xb IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -1274,12 +1657,15 @@ const ( IUTF8 = 0x4000 IXANY = 0x800 JFFS2_SUPER_MAGIC = 0x72b6 + KCMPROTO_CONNECTED = 0x0 + KCM_RECV_DISABLE = 0x1 KEXEC_ARCH_386 = 0x30000 KEXEC_ARCH_68K = 0x40000 KEXEC_ARCH_AARCH64 = 0xb70000 KEXEC_ARCH_ARM = 0x280000 KEXEC_ARCH_DEFAULT = 0x0 KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_LOONGARCH = 0x1020000 KEXEC_ARCH_MASK = 0xffff0000 KEXEC_ARCH_MIPS = 0x80000 KEXEC_ARCH_MIPS_LE = 0xa0000 @@ -1372,6 +1758,7 @@ const ( LANDLOCK_ACCESS_FS_MAKE_SYM = 0x1000 LANDLOCK_ACCESS_FS_READ_DIR = 0x8 LANDLOCK_ACCESS_FS_READ_FILE = 0x4 + LANDLOCK_ACCESS_FS_REFER = 0x2000 LANDLOCK_ACCESS_FS_REMOVE_DIR = 0x10 LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 @@ -1481,6 +1868,7 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 MOUNT_ATTR_IDMAP = 0x100000 @@ -1726,6 +2114,7 @@ const ( NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_BULK = 0x200 NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 @@ -1835,6 +2224,9 @@ const ( PERF_MEM_BLK_NA = 0x1 PERF_MEM_BLK_SHIFT = 0x28 PERF_MEM_HOPS_0 = 0x1 + 
PERF_MEM_HOPS_1 = 0x2 + PERF_MEM_HOPS_2 = 0x3 + PERF_MEM_HOPS_3 = 0x4 PERF_MEM_HOPS_SHIFT = 0x2b PERF_MEM_LOCK_LOCKED = 0x2 PERF_MEM_LOCK_NA = 0x1 @@ -2038,6 +2430,13 @@ const ( PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SET_VMA = 0x53564d41 + PR_SET_VMA_ANON_NAME = 0x0 + PR_SME_GET_VL = 0x40 + PR_SME_SET_VL = 0x3f + PR_SME_SET_VL_ONEXEC = 0x40000 + PR_SME_VL_INHERIT = 0x20000 + PR_SME_VL_LEN_MASK = 0xffff PR_SPEC_DISABLE = 0x4 PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 @@ -2121,6 +2520,10 @@ const ( PTRACE_SYSCALL_INFO_NONE = 0x0 PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 + P_ALL = 0x0 + P_PGID = 0x2 + P_PID = 0x1 + P_PIDFD = 0x3 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 RAMFS_MAGIC = 0x858458f6 @@ -2186,8 +2589,9 @@ const ( RTC_FEATURE_ALARM = 0x0 RTC_FEATURE_ALARM_RES_2S = 0x3 RTC_FEATURE_ALARM_RES_MINUTE = 0x1 + RTC_FEATURE_ALARM_WAKEUP_ONLY = 0x7 RTC_FEATURE_BACKUP_SWITCH_MODE = 0x6 - RTC_FEATURE_CNT = 0x7 + RTC_FEATURE_CNT = 0x8 RTC_FEATURE_CORRECTION = 0x5 RTC_FEATURE_NEED_WEEK_DAY = 0x2 RTC_FEATURE_UPDATE_INTERRUPT = 0x4 @@ -2261,6 +2665,7 @@ const ( RTM_DELRULE = 0x21 RTM_DELTCLASS = 0x29 RTM_DELTFILTER = 0x2d + RTM_DELTUNNEL = 0x79 RTM_DELVLAN = 0x71 RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 @@ -2293,8 +2698,9 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e + RTM_GETTUNNEL = 0x7a RTM_GETVLAN = 0x72 - RTM_MAX = 0x77 + RTM_MAX = 0x7b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -2318,11 +2724,13 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x1a - RTM_NR_MSGTYPES = 0x68 + RTM_NEWTUNNEL = 0x78 + RTM_NR_FAMILIES = 0x1b + RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 + RTM_SETSTATS = 0x5f RTNH_ALIGNTO = 0x4 RTNH_COMPARE_MASK = 0x59 RTNH_F_DEAD = 0x1 @@ -2446,6 +2854,9 @@ const ( SIOCGSTAMPNS = 0x8907 SIOCGSTAMPNS_OLD = 0x8907 SIOCGSTAMP_OLD = 0x8906 + SIOCKCMATTACH = 0x89e0 + SIOCKCMCLONE = 0x89e2 + SIOCKCMUNATTACH = 0x89e1 SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d @@ -2488,6 +2899,7 @@ const ( SMART_STATUS = 0xda SMART_WRITE_LOG_SECTOR = 0xd6 SMART_WRITE_THRESHOLDS = 0xd7 + SMB2_SUPER_MAGIC = 0xfe534d42 SMB_SUPER_MAGIC = 0x517b SOCKFS_MAGIC = 0x534f434b SOCK_BUF_LOCK_MASK = 0x3 @@ -2499,6 +2911,9 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_SNDBUF_LOCK = 0x1 + SOCK_TXREHASH_DEFAULT = 0xff + SOCK_TXREHASH_DISABLED = 0x0 + SOCK_TXREHASH_ENABLED = 0x1 SOL_AAL = 0x109 SOL_ALG = 0x117 SOL_ATM = 0x108 @@ -2514,6 +2929,8 @@ const ( SOL_IUCV = 0x115 SOL_KCM = 0x119 SOL_LLC = 0x10c + SOL_MCTP = 0x11d + SOL_MPTCP = 0x11c SOL_NETBEUI = 0x10b SOL_NETLINK = 0x10e SOL_NFC = 0x118 @@ -2523,6 +2940,7 @@ const ( SOL_RAW = 0xff SOL_RDS = 0x114 SOL_RXRPC = 0x110 + SOL_SMC = 0x11e SOL_TCP = 0x6 SOL_TIPC = 0x10f SOL_TLS = 0x11a @@ -2629,7 +3047,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xa + TASKSTATS_VERSION = 0xd TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 234fd4a5d1ad..36c0dfc7c4cf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -m32 +// mkerrors.sh -Wall -Werror -static -I/tmp/386/include -m32 // Code generated by the command above; see README.md. 
DO NOT EDIT. //go:build 386 && linux // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -350,6 +351,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 58619b7589b0..4ff942703b7b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -m64 +// mkerrors.sh -Wall -Werror -static -I/tmp/amd64/include -m64 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go package unix @@ -327,6 +327,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3a64ff59dcec..3eaa0fb78e30 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/arm/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux // +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go package unix @@ -333,6 +333,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -357,6 +358,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index abe0b925789f..d7995bdc3a21 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char +// mkerrors.sh -Wall -Werror -static -I/tmp/arm64/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux // +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go package unix @@ -323,6 +323,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -347,6 +348,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 @@ -511,6 +513,7 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + ZA_MAGIC = 0x54366345 _HIDIOCGRAWNAME = 0x80804804 _HIDIOCGRAWPHYS = 0x80404805 _HIDIOCGRAWUNIQ = 0x80404808 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go new file mode 100644 index 000000000000..928e24c20535 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -0,0 +1,818 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/loong64/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build loong64 && linux +// +build loong64,linux + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go + +package unix + +import "syscall" + +const ( + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d + FLUSHO = 0x1000 + FPU_CTX_MAGIC = 0x46505501 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + 
MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 + OLCUC = 0x2 + ONLCR = 0x4 + OTPERASE = 0x400c4d19 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGIDLE32 = 0x8008743f + PPPIOCGIDLE64 = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 + PPPIOCXFERUNIT = 0x744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x40085203 + RNDADDTOENTCNT = 0x40045201 + RNDCLEARPOOL = 0x5206 + RNDGETENTCNT = 0x80045200 + RNDGETPOOL = 0x80085202 + RNDRESEEDCRNG = 0x5207 + RNDZAPENTCNT = 0x5204 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_PARAM_GET = 0x40187013 + RTC_PARAM_SET = 0x40187014 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + 
RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 + SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b + SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 + SO_RESERVE_MEM = 0x49 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x80285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + 
TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 +) + +// Errors +const ( + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + ECANCELED = syscall.Errno(0x7d) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EISCONN = syscall.Errno(0x6a) + EISNAM = 
syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTCONN = syscall.Errno(0x6b) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTUNIQ = syscall.Errno(0x4c) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPFNOSUPPORT = syscall.Errno(0x60) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGIO = syscall.Signal(0x1d) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + 
{12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not 
supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 14d7a84399de..179bffb474b4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/mips/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux // +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 99e7c4ac0b45..1fba17bd75cb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/mips64/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux // +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 496364c33cc6..b77dde31537e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/mips64le/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux // +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 3e40830857dd..78c6c751bfa5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/mipsle/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux // +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 1151a7dfab33..1c0d31f0b4c2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/ppc/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux // +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go package unix @@ -381,6 +381,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 @@ -405,6 +406,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index ed17f249e758..959dd9bb8fcc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/ppc64/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux // +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go package unix @@ -385,6 +385,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 @@ -409,6 +410,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index d84a37c1ac23..5a873cdbc9d2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/ppc64le/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux // +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go package unix @@ -385,6 +385,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 @@ -409,6 +410,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 5cafba83f6b4..e336d141e1f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/riscv64/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux // +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go package unix @@ -314,6 +314,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -338,6 +339,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 6d122da41c53..390c01d92a53 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char +// mkerrors.sh -Wall -Werror -static -I/tmp/s390x/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux // +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go package unix @@ -389,6 +389,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -413,6 +414,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 6bd19e51dbb9..98a6e5f11f50 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/sparc64/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux // +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go package unix @@ -380,6 +380,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 + SO_RCVMARK = 0x54 SO_RCVTIMEO = 0x2000 SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_OLD = 0x2000 @@ -404,6 +405,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x42 SO_TIMESTAMPNS_OLD = 0x21 SO_TIMESTAMP_NEW = 0x46 + SO_TXREHASH = 0x53 SO_TXTIME = 0x3f SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x25 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 6d56edc05ac3..af20e474b388 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -46,6 +46,7 @@ const ( AF_SNA = 0xb AF_UNIX = 0x1 AF_UNSPEC = 0x0 + ALTWERASE = 0x200 ARPHRD_ETHER = 0x1 ARPHRD_FRELAY = 0xf ARPHRD_IEEE1394 = 0x18 @@ -108,6 +109,15 @@ const ( BPF_DIRECTION_IN = 0x1 BPF_DIRECTION_OUT = 0x2 BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -136,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -147,6 +158,12 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 CPUSTATES = 0x6 CP_IDLE = 0x5 CP_INTR = 0x4 @@ -170,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc100445d + DIOCADDRULE = 0xccc84404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xccc8441a + DIOCCLRIFFLAG = 0xc024445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0d04412 + DIOCCLRSTATUS = 0xc0244416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1084460 + DIOCGETQUEUE = 0xc100445f + DIOCGETQUEUES = 0xc100445e + DIOCGETRULE = 0xccc84407 + DIOCGETRULES = 0xccc84406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0084454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0084419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + 
DIOCIGETIFACES = 0xc0244457 + DIOCKILLSRCNODES = 0xc068445b + DIOCKILLSTATES = 0xc0d04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc084444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0844450 + DIOCRADDADDRS = 0xc44c4443 + DIOCRADDTABLES = 0xc44c443d + DIOCRCLRADDRS = 0xc44c4442 + DIOCRCLRASTATS = 0xc44c4448 + DIOCRCLRTABLES = 0xc44c443c + DIOCRCLRTSTATS = 0xc44c4441 + DIOCRDELADDRS = 0xc44c4444 + DIOCRDELTABLES = 0xc44c443e + DIOCRGETADDRS = 0xc44c4446 + DIOCRGETASTATS = 0xc44c4447 + DIOCRGETTABLES = 0xc44c443f + DIOCRGETTSTATS = 0xc44c4440 + DIOCRINADEFINE = 0xc44c444d + DIOCRSETADDRS = 0xc44c4445 + DIOCRSETTFLAGS = 0xc44c444a + DIOCRTSTADDRS = 0xc44c4449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0244459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0244414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc00c4451 + DIOCXCOMMIT = 0xc00c4452 + DIOCXROLLBACK = 0xc00c4453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -186,6 +261,7 @@ const ( DLT_LOOP = 0xc DLT_MPLS = 0xdb DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PFLOG = 0x75 DLT_PFSYNC = 0x12 DLT_PPP = 0x9 @@ -196,6 +272,23 @@ const ( DLT_RAW = 0xe DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -215,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -267,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -298,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -326,15 +423,17 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 ETHERTYPE_PPP = 0x880b @@ -409,28 +508,40 @@ const ( ETHER_CRC_POLY_LE = 0xedb88320 ETHER_HDR_LEN = 0xe ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b ETHER_MAX_LEN = 0x5ee ETHER_MIN_LEN = 0x40 ETHER_TYPE_LEN = 0x2 ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x7 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 EV_ENABLE = 0x4 EV_EOF = 
0x8000 EV_ERROR = 0x4000 EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 - EV_SYSFLAGS = 0xf000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -443,6 +554,7 @@ const ( F_GETFL = 0x3 F_GETLK = 0x7 F_GETOWN = 0x5 + F_ISATTY = 0xb F_OK = 0x0 F_RDLCK = 0x1 F_SETFD = 0x2 @@ -460,7 +572,6 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 - IFA_ROUTE = 0x1 IFF_ALLMULTI = 0x200 IFF_BROADCAST = 0x2 IFF_CANTCHANGE = 0x8e52 @@ -471,12 +582,12 @@ const ( IFF_LOOPBACK = 0x8 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PROMISC = 0x100 IFF_RUNNING = 0x40 IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 IFF_UP = 0x1 IFNAMSIZ = 0x10 IFT_1822 = 0x2 @@ -605,6 +716,7 @@ const ( IFT_LINEGROUP = 0xd2 IFT_LOCALTALK = 0x2a IFT_LOOP = 0x18 + IFT_MBIM = 0xfa IFT_MEDIAMAILOVERIP = 0x8b IFT_MFSIGLINK = 0xa7 IFT_MIOX25 = 0x26 @@ -695,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -729,8 +842,6 @@ const ( IPPROTO_AH = 0x33 IPPROTO_CARP = 0x70 IPPROTO_DIVERT = 0x102 - IPPROTO_DIVERT_INIT = 0x2 - IPPROTO_DIVERT_RESP = 0x1 IPPROTO_DONE = 0x101 IPPROTO_DSTOPTS = 0x3c IPPROTO_EGP = 0x8 @@ -762,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -787,6 +900,7 @@ const ( IPV6_LEAVE_GROUP = 0xd IPV6_MAXHLIM = 0xff IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 IPV6_MMTU = 0x500 IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 @@ -826,12 +940,12 @@ const ( IP_DEFAULT_MULTICAST_LOOP = 0x1 IP_DEFAULT_MULTICAST_TTL = 0x1 IP_DF = 0x4000 - IP_DIVERTFL = 0x1022 IP_DROP_MEMBERSHIP = 0xd IP_ESP_NETWORK_LEVEL = 0x16 IP_ESP_TRANS_LEVEL = 0x15 IP_HDRINCL = 0x2 IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 IP_IPSECFLOWINFO = 0x24 IP_IPSEC_LOCAL_AUTH = 0x1b IP_IPSEC_LOCAL_CRED = 0x19 @@ -865,10 +979,15 @@ const ( IP_RETOPTS = 0x8 IP_RF = 0x8000 IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 IP_TOS = 0x3 IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -900,10 +1019,11 @@ const ( MAP_INHERIT_COPY = 0x1 MAP_INHERIT_NONE = 0x2 MAP_INHERIT_SHARE = 0x0 - MAP_NOEXTEND = 0x100 - MAP_NORESERVE = 0x40 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 + MAP_RENAME = 0x0 MAP_SHARED = 0x1 MAP_STACK = 0x4000 MAP_TRYFIXED = 0x0 @@ -922,6 +1042,7 @@ const ( MNT_NOATIME = 0x8000 MNT_NODEV = 0x10 MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 MNT_NOSUID = 0x8 MNT_NOWAIT = 0x2 MNT_QUOTA = 0x2000 @@ -929,13 +1050,29 @@ const ( MNT_RELOAD = 0x40000 MNT_ROOTFS = 0x4000 MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 MNT_SYNCHRONOUS = 0x2 MNT_UPDATE = 0x10000 MNT_VISFLAGMASK = 0x400ffff MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 MSG_DONTWAIT = 0x80 @@ -946,6 +1083,7 
@@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -953,12 +1091,16 @@ const ( NET_RT_DUMP = 0x1 NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 - NET_RT_MAXID = 0x6 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 NOTE_CHILD = 0x4 NOTE_DELETE = 0x1 NOTE_EOF = 0x2 @@ -968,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -977,11 +1120,13 @@ const ( NOTE_TRUNCATE = 0x80 NOTE_WRITE = 0x2 OCRNL = 0x10 + OLCUC = 0x20 ONLCR = 0x2 ONLRET = 0x80 ONOCR = 0x40 ONOEOT = 0x8 OPOST = 0x1 + OXTABS = 0x4 O_ACCMODE = 0x3 O_APPEND = 0x8 O_ASYNC = 0x40 @@ -1015,7 +1160,6 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 - PT_MASK = 0x3ff000 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 @@ -1027,19 +1171,25 @@ const ( RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb RTAX_BRD = 0x7 + RTAX_DNS = 0xc RTAX_DST = 0x0 RTAX_GATEWAY = 0x1 RTAX_GENMASK = 0x3 RTAX_IFA = 0x5 RTAX_IFP = 0x4 RTAX_LABEL = 0xa - RTAX_MAX = 0xb + RTAX_MAX = 0xf RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe RTAX_SRC = 0x8 RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 RTA_BRD = 0x80 + RTA_DNS = 0x1000 RTA_DST = 0x1 RTA_GATEWAY = 0x2 RTA_GENMASK = 0x8 @@ -1047,49 +1197,57 @@ const ( RTA_IFP = 0x10 RTA_LABEL = 0x400 RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 RTA_SRC = 0x100 RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 RTF_CLONED = 0x10000 RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 - RTF_FMASK = 0x10f808 + RTF_FMASK = 0x110fc08 RTF_GATEWAY = 0x2 RTF_HOST = 0x4 RTF_LLINFO = 0x400 - RTF_MASK = 0x80 + RTF_LOCAL = 0x200000 RTF_MODIFIED = 0x20 RTF_MPATH = 0x40000 RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 RTF_PERMANENT_ARP = 0x2000 RTF_PROTO1 = 0x8000 RTF_PROTO2 = 0x4000 RTF_PROTO3 = 0x2000 RTF_REJECT = 0x8 - RTF_SOURCE = 0x20000 RTF_STATIC = 0x800 - RTF_TUNNEL = 0x100000 RTF_UP = 0x1 RTF_USETRAILERS = 0x8000 - RTF_XRESOLVE = 0x200 + RTM_80211INFO = 0x15 RTM_ADD = 0x1 + RTM_BFD = 0x12 RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 RTM_DELADDR = 0xd RTM_DELETE = 0x2 RTM_DESYNC = 0x10 RTM_GET = 0x4 RTM_IFANNOUNCE = 0xf RTM_IFINFO = 0xe - RTM_LOCK = 0x8 + RTM_INVALIDATE = 0x11 RTM_LOSING = 0x5 RTM_MAXSIZE = 0x800 RTM_MISS = 0x7 RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1099,67 +1257,74 @@ const ( RTV_RTTVAR = 0x80 RTV_SPIPE = 0x10 RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff RT_TABLEID_MAX = 0xff RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 SIOCADDMULTI = 0x80206931 SIOCAIFADDR = 0x8040691a SIOCAIFGROUP = 0x80246987 - SIOCALIFADDR = 0x8218691c SIOCATMARK = 0x40047307 - SIOCBRDGADD = 0x8054693c - SIOCBRDGADDS = 0x80546941 - SIOCBRDGARL = 0x806e694d + SIOCBRDGADD = 0x805c693c + SIOCBRDGADDL = 0x805c6949 + SIOCBRDGADDS = 0x805c6941 + SIOCBRDGARL = 0x808c694d SIOCBRDGDADDR = 0x81286947 
- SIOCBRDGDEL = 0x8054693d - SIOCBRDGDELS = 0x80546942 - SIOCBRDGFLUSH = 0x80546948 - SIOCBRDGFRL = 0x806e694e + SIOCBRDGDEL = 0x805c693d + SIOCBRDGDELS = 0x805c6942 + SIOCBRDGFLUSH = 0x805c6948 + SIOCBRDGFRL = 0x808c694e SIOCBRDGGCACHE = 0xc0146941 SIOCBRDGGFD = 0xc0146952 SIOCBRDGGHT = 0xc0146951 - SIOCBRDGGIFFLGS = 0xc054693e + SIOCBRDGGIFFLGS = 0xc05c693e SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc03c6958 SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc028694f - SIOCBRDGGSIFS = 0xc054693c SIOCBRDGGTO = 0xc0146946 - SIOCBRDGIFS = 0xc0546942 + SIOCBRDGIFS = 0xc05c6942 SIOCBRDGRTS = 0xc0186943 SIOCBRDGSADDR = 0xc1286944 SIOCBRDGSCACHE = 0x80146940 SIOCBRDGSFD = 0x80146952 SIOCBRDGSHT = 0x80146951 - SIOCBRDGSIFCOST = 0x80546955 - SIOCBRDGSIFFLGS = 0x8054693f - SIOCBRDGSIFPRIO = 0x80546954 + SIOCBRDGSIFCOST = 0x805c6955 + SIOCBRDGSIFFLGS = 0x805c693f + SIOCBRDGSIFPRIO = 0x805c6954 + SIOCBRDGSIFPROT = 0x805c694a SIOCBRDGSMA = 0x80146953 SIOCBRDGSPRI = 0x80146950 SIOCBRDGSPROTO = 0x8014695a SIOCBRDGSTO = 0x80146945 SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 SIOCDIFGROUP = 0x80246989 + SIOCDIFPARENT = 0x802069b4 SIOCDIFPHYADDR = 0x80206949 - SIOCDLIFADDR = 0x8218691e + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af SIOCGETKALIVE = 0xc01869a4 SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae SIOCGETPFLOW = 0xc02069fe SIOCGETPFSYNC = 0xc02069f8 SIOCGETSGCNT = 0xc0147534 SIOCGETVIFCNT = 0xc0147533 SIOCGETVLAN = 0xc0206990 - SIOCGHIWAT = 0x40047301 SIOCGIFADDR = 0xc0206921 - SIOCGIFASYNCMAP = 0xc020697c SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCONF = 0xc0086924 SIOCGIFDATA = 0xc020691b @@ -1168,40 +1333,53 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGATTR = 0xc024698b SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc024698d SIOCGIFGMEMB = 0xc024698a SIOCGIFGROUP = 0xc0246988 SIOCGIFHARDMTU = 0xc02069a5 - SIOCGIFMEDIA = 0xc0286936 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0386938 SIOCGIFMETRIC = 0xc0206917 SIOCGIFMTU = 0xc020697e SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 SIOCGIFPRIORITY = 0xc020699c - SIOCGIFPSRCADDR = 0xc0206947 SIOCGIFRDOMAIN = 0xc02069a0 SIOCGIFRTLABEL = 0xc0206983 - SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 SIOCGIFXFLAGS = 0xc020699e - SIOCGLIFADDR = 0xc218691d SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 SIOCGLIFPHYRTABLE = 0xc02069a2 SIOCGLIFPHYTTL = 0xc02069a9 - SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac SIOCIFCREATE = 0x8020697a SIOCIFDESTROY = 0x80206979 SIOCIFGCLONERS = 0xc00c6978 SIOCSETKALIVE = 0x801869a3 SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad SIOCSETPFLOW = 0x802069fd SIOCSETPFSYNC = 0x802069f7 SIOCSETVLAN = 0x8020698f - SIOCSHIWAT = 0x80047300 SIOCSIFADDR = 0x8020690c - SIOCSIFASYNCMAP = 0x8020697d SIOCSIFBRDADDR = 0x80206913 SIOCSIFDESCR = 0x80206980 SIOCSIFDSTADDR = 0x8020690e @@ -1209,25 +1387,37 @@ const ( SIOCSIFGATTR = 0x8024698c SIOCSIFGENERIC = 0x80206939 SIOCSIFLLADDR = 0x8020691f - SIOCSIFMEDIA = 0xc0206935 + 
SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 SIOCSIFMETRIC = 0x80206918 SIOCSIFMTU = 0x8020697f SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 SIOCSIFPRIORITY = 0x8020699b SIOCSIFRDOMAIN = 0x8020699f SIOCSIFRTLABEL = 0x80206982 - SIOCSIFTIMESLOT = 0x80206985 SIOCSIFXFLAGS = 0x8020699d SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 SIOCSLIFPHYRTABLE = 0x802069a1 SIOCSLIFPHYTTL = 0x802069a8 - SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 + SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 SOCK_RAW = 0x3 SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 @@ -1238,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1245,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1258,6 +1450,7 @@ const ( SO_TIMESTAMP = 0x800 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1287,9 +1480,24 @@ const ( S_IXOTH = 0x1 S_IXUSR = 0x40 TCIFLUSH = 0x1 + TCIOFF = 0x3 TCIOFLUSH = 0x3 + TCION = 0x4 TCOFLUSH = 0x2 - TCP_MAXBURST = 0x4 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1298,11 +1506,15 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 - TCP_NSTATES = 0xb + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d TIOCCONS = 0x80047462 TIOCDRAIN = 0x2000745e TIOCEXCL = 0x2000740d @@ -1357,17 +1569,21 @@ const ( TIOCSETAF = 0x802c7416 TIOCSETAW = 0x802c7415 TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c TIOCSFLAGS = 0x8004745c TIOCSIG = 0x8004745f TIOCSPGRP = 0x80047476 TIOCSTART = 0x2000746e - TIOCSTAT = 0x80047465 - TIOCSTI = 0x80017472 + TIOCSTAT = 0x20007465 TIOCSTOP = 0x2000746f TIOCSTSTAMP = 0x8008745a TIOCSWINSZ = 0x80087467 TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1378,6 +1594,19 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc @@ -1390,8 +1619,8 @@ const ( WCONTINUED = 0x8 WCOREFLAG = 0x80 WNOHANG = 0x1 - WSTOPPED = 0x7f WUNTRACED = 0x2 + XCASE = 0x1000000 ) // Errors @@ -1405,6 +1634,7 @@ const ( EALREADY = syscall.Errno(0x25) EAUTH = syscall.Errno(0x50) EBADF = 
syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) EBADRPC = syscall.Errno(0x48) EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x58) @@ -1431,7 +1661,7 @@ const ( EIPSEC = syscall.Errno(0x52) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x5b) + ELAST = syscall.Errno(0x5f) ELOOP = syscall.Errno(0x3e) EMEDIUMTYPE = syscall.Errno(0x56) EMFILE = syscall.Errno(0x18) @@ -1459,12 +1689,14 @@ const ( ENOTCONN = syscall.Errno(0x39) ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) ENOTSOCK = syscall.Errno(0x26) ENOTSUP = syscall.Errno(0x5b) ENOTTY = syscall.Errno(0x19) ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x2d) EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x2e) EPIPE = syscall.Errno(0x20) @@ -1472,6 +1704,7 @@ const ( EPROCUNAVAIL = syscall.Errno(0x4c) EPROGMISMATCH = syscall.Errno(0x4b) EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) EPROTONOSUPPORT = syscall.Errno(0x2b) EPROTOTYPE = syscall.Errno(0x29) ERANGE = syscall.Errno(0x22) @@ -1568,7 +1801,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {35, "EAGAIN", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1624,7 +1857,11 @@ var errorList = [...]struct { {88, "ECANCELED", "operation canceled"}, {89, "EIDRM", "identifier removed"}, {90, "ENOMSG", "no message of desired type"}, - {91, "ELAST", "not supported"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, } // Signal table @@ -1638,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1665,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 25cb6094813c..6015fcb2bf69 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -109,6 +109,15 @@ const ( BPF_DIRECTION_IN = 0x1 BPF_DIRECTION_OUT = 0x2 BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -137,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -177,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + 
DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -240,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -292,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -323,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -351,15 +423,17 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 ETHERTYPE_PPP = 0x880b @@ -441,10 +515,11 @@ const ( ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x8 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 @@ -466,7 +541,7 @@ const ( EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -732,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -797,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 
IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -906,6 +984,9 @@ const ( IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 @@ -970,12 +1051,26 @@ const ( MNT_ROOTFS = 0x4000 MNT_SOFTDEP = 0x4000000 MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 MNT_SYNCHRONOUS = 0x2 MNT_UPDATE = 0x10000 MNT_VISFLAGMASK = 0x400ffff MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -988,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -996,7 +1092,8 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_IFNAMES = 0x6 - NET_RT_MAXID = 0x7 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 @@ -1013,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -1130,9 +1228,11 @@ const ( RTF_STATIC = 0x800 RTF_UP = 0x1 RTF_USETRAILERS = 0x8000 + RTM_80211INFO = 0x15 RTM_ADD = 0x1 RTM_BFD = 0x12 RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 RTM_DELADDR = 0xd RTM_DELETE = 0x2 RTM_DESYNC = 0x10 @@ -1140,7 +1240,6 @@ const ( RTM_IFANNOUNCE = 0xf RTM_IFINFO = 0xe RTM_INVALIDATE = 0x11 - RTM_LOCK = 0x8 RTM_LOSING = 0x5 RTM_MAXSIZE = 0x800 RTM_MISS = 0x7 @@ -1148,7 +1247,7 @@ const ( RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1166,6 +1265,9 @@ const ( RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1182,35 +1284,37 @@ const ( SIOCBRDGDELS = 0x80606942 SIOCBRDGFLUSH = 0x80606948 SIOCBRDGFRL = 0x808c694e - SIOCBRDGGCACHE = 0xc0186941 - SIOCBRDGGFD = 0xc0186952 - SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 SIOCBRDGGIFFLGS = 0xc060693e - SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc0406958 - SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc030694f - SIOCBRDGGTO = 0xc0186946 + SIOCBRDGGTO = 0xc0146946 SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0206943 SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80186940 - SIOCBRDGSFD = 0x80186952 - SIOCBRDGSHT = 0x80186951 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 SIOCBRDGSIFCOST = 0x80606955 SIOCBRDGSIFFLGS = 0x8060693f SIOCBRDGSIFPRIO = 0x80606954 SIOCBRDGSIFPROT = 0x8060694a - SIOCBRDGSMA = 0x80186953 - SIOCBRDGSPRI = 0x80186950 - SIOCBRDGSPROTO = 0x8018695a - SIOCBRDGSTO = 0x80186945 - SIOCBRDGSTXHC = 0x80186959 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 SIOCDIFGROUP = 0x80286989 SIOCDIFPARENT = 0x802069b4 SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de SIOCDVNETID = 0x802069af SIOCGETKALIVE = 0xc01869a4 SIOCGETLABEL = 
0x8020699a @@ -1229,6 +1333,7 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGATTR = 0xc028698b SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d SIOCGIFGMEMB = 0xc028698a SIOCGIFGROUP = 0xc0286988 SIOCGIFHARDMTU = 0xc02069a5 @@ -1243,13 +1348,21 @@ const ( SIOCGIFRDOMAIN = 0xc02069a0 SIOCGIFRTLABEL = 0xc0206983 SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 SIOCGIFXFLAGS = 0xc020699e SIOCGLIFPHYADDR = 0xc218694b SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 SIOCGLIFPHYRTABLE = 0xc02069a2 SIOCGLIFPHYTTL = 0xc02069a9 SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 SIOCGUMBINFO = 0xc02069be SIOCGUMBPARAM = 0xc02069c0 SIOCGVH = 0xc02069f6 @@ -1287,19 +1400,20 @@ const ( SIOCSIFXFLAGS = 0x8020699d SIOCSLIFPHYADDR = 0x8218694a SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 SIOCSLIFPHYRTABLE = 0x802069a1 SIOCSLIFPHYTTL = 0x802069a8 SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 - SIOCSWGDPID = 0xc018695b - SIOCSWGMAXFLOW = 0xc0186960 - SIOCSWGMAXGROUP = 0xc018695d - SIOCSWSDPID = 0x8018695c - SIOCSWSPORTNO = 0xc060695f SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 SOCK_DNS = 0x1000 @@ -1314,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1321,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1370,7 +1486,18 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 - TCP_MAXBURST = 0x4 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1379,8 +1506,11 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCHKVERAUTH = 0x2000741e @@ -1445,7 +1575,6 @@ const ( TIOCSPGRP = 0x80047476 TIOCSTART = 0x2000746e TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 TIOCSTOP = 0x2000746f TIOCSTSTAMP = 0x8008745a TIOCSWINSZ = 0x80087467 @@ -1467,7 +1596,8 @@ const ( VMIN = 0x10 VM_ANONMIN = 0x7 VM_LOADAVG = 0x2 - VM_MAXID = 0xc + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd VM_MAXSLP = 0xa VM_METER = 0x1 VM_NKMEMPAGES = 0x6 @@ -1745,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1772,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index aef6c085609a..8d44955e44d8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -46,6 +46,7 @@ const ( AF_SNA = 0xb AF_UNIX = 0x1 AF_UNSPEC = 0x0 + ALTWERASE = 0x200 ARPHRD_ETHER = 0x1 ARPHRD_FRELAY = 0xf ARPHRD_IEEE1394 = 0x18 @@ -82,7 +83,7 @@ const ( BIOCGFILDROP = 0x40044278 BIOCGHDRCMPLT = 0x40044274 BIOCGRSIG = 0x40044273 - BIOCGRTIMEOUT = 0x400c426e + BIOCGRTIMEOUT = 0x4010426e BIOCGSTATS = 0x4008426f BIOCIMMEDIATE = 0x80044270 BIOCLOCK = 0x20004276 @@ -96,7 +97,7 @@ const ( BIOCSFILDROP = 0x80044279 BIOCSHDRCMPLT = 0x80044275 BIOCSRSIG = 0x80044272 - BIOCSRTIMEOUT = 0x800c426d + BIOCSRTIMEOUT = 0x8010426d BIOCVERSION = 0x40044271 BPF_A = 0x10 BPF_ABS = 0x20 @@ -108,6 +109,15 @@ const ( BPF_DIRECTION_IN = 0x1 BPF_DIRECTION_OUT = 0x2 BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -136,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -147,6 +158,12 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 CPUSTATES = 0x6 CP_IDLE = 0x5 CP_INTR = 0x4 @@ -170,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc100445d + DIOCADDRULE = 0xcce04404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcce0441a + DIOCCLRIFFLAG = 0xc024445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0d04412 + DIOCCLRSTATUS = 0xc0244416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1084460 + DIOCGETQUEUE = 0xc100445f + DIOCGETQUEUES = 0xc100445e + DIOCGETRULE = 0xcce04407 + DIOCGETRULES = 0xcce04406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0084454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0084419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0244457 + DIOCKILLSRCNODES = 0xc068445b + DIOCKILLSTATES = 0xc0d04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc44c4443 + DIOCRADDTABLES = 0xc44c443d + DIOCRCLRADDRS = 0xc44c4442 + DIOCRCLRASTATS = 0xc44c4448 + DIOCRCLRTABLES = 0xc44c443c + DIOCRCLRTSTATS = 0xc44c4441 + DIOCRDELADDRS = 0xc44c4444 + DIOCRDELTABLES = 0xc44c443e + DIOCRGETADDRS = 0xc44c4446 + DIOCRGETASTATS = 0xc44c4447 + DIOCRGETTABLES = 0xc44c443f + DIOCRGETTSTATS = 0xc44c4440 + DIOCRINADEFINE = 0xc44c444d + DIOCRSETADDRS = 0xc44c4445 + DIOCRSETTFLAGS = 0xc44c444a + DIOCRTSTADDRS = 0xc44c4449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0244459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0244414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc00c4451 + DIOCXCOMMIT = 0xc00c4452 + DIOCXROLLBACK = 0xc00c4453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -186,6 +261,7 @@ const ( DLT_LOOP = 0xc DLT_MPLS = 0xdb DLT_NULL = 0x0 + DLT_OPENFLOW = 
0x10b DLT_PFLOG = 0x75 DLT_PFSYNC = 0x12 DLT_PPP = 0x9 @@ -196,6 +272,23 @@ const ( DLT_RAW = 0xe DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -215,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -267,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -298,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -326,15 +423,17 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 ETHERTYPE_PPP = 0x880b @@ -409,28 +508,40 @@ const ( ETHER_CRC_POLY_LE = 0xedb88320 ETHER_HDR_LEN = 0xe ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b ETHER_MAX_LEN = 0x5ee ETHER_MIN_LEN = 0x40 ETHER_TYPE_LEN = 0x2 ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x7 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 EV_ENABLE = 0x4 EV_EOF = 0x8000 EV_ERROR = 0x4000 EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 - EV_SYSFLAGS = 0xf000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -443,6 +554,8 @@ const ( F_GETFL = 0x3 F_GETLK = 0x7 F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 F_RDLCK = 0x1 F_SETFD = 0x2 F_SETFL = 0x4 @@ -459,7 +572,6 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 - IFA_ROUTE = 0x1 IFF_ALLMULTI = 0x200 IFF_BROADCAST = 0x2 IFF_CANTCHANGE = 0x8e52 @@ -470,12 +582,12 @@ const ( IFF_LOOPBACK = 0x8 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PROMISC = 0x100 IFF_RUNNING = 0x40 IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 IFF_UP = 0x1 IFNAMSIZ = 0x10 IFT_1822 = 0x2 @@ -604,6 +716,7 @@ const ( IFT_LINEGROUP = 0xd2 IFT_LOCALTALK = 0x2a IFT_LOOP = 0x18 + IFT_MBIM = 0xfa IFT_MEDIAMAILOVERIP = 0x8b IFT_MFSIGLINK = 0xa7 IFT_MIOX25 = 0x26 @@ -694,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -728,8 +842,6 @@ const ( IPPROTO_AH = 0x33 IPPROTO_CARP = 0x70 IPPROTO_DIVERT = 0x102 - IPPROTO_DIVERT_INIT 
= 0x2 - IPPROTO_DIVERT_RESP = 0x1 IPPROTO_DONE = 0x101 IPPROTO_DSTOPTS = 0x3c IPPROTO_EGP = 0x8 @@ -761,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -786,6 +900,7 @@ const ( IPV6_LEAVE_GROUP = 0xd IPV6_MAXHLIM = 0xff IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 IPV6_MMTU = 0x500 IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 @@ -825,12 +940,12 @@ const ( IP_DEFAULT_MULTICAST_LOOP = 0x1 IP_DEFAULT_MULTICAST_TTL = 0x1 IP_DF = 0x4000 - IP_DIVERTFL = 0x1022 IP_DROP_MEMBERSHIP = 0xd IP_ESP_NETWORK_LEVEL = 0x16 IP_ESP_TRANS_LEVEL = 0x15 IP_HDRINCL = 0x2 IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 IP_IPSECFLOWINFO = 0x24 IP_IPSEC_LOCAL_AUTH = 0x1b IP_IPSEC_LOCAL_CRED = 0x19 @@ -864,10 +979,15 @@ const ( IP_RETOPTS = 0x8 IP_RF = 0x8000 IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 IP_TOS = 0x3 IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -922,6 +1042,7 @@ const ( MNT_NOATIME = 0x8000 MNT_NODEV = 0x10 MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 MNT_NOSUID = 0x8 MNT_NOWAIT = 0x2 MNT_QUOTA = 0x2000 @@ -929,12 +1050,27 @@ const ( MNT_RELOAD = 0x40000 MNT_ROOTFS = 0x4000 MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 MNT_SYNCHRONOUS = 0x2 MNT_UPDATE = 0x10000 MNT_VISFLAGMASK = 0x400ffff MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -947,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -954,12 +1091,16 @@ const ( NET_RT_DUMP = 0x1 NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 - NET_RT_MAXID = 0x6 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 NOTE_CHILD = 0x4 NOTE_DELETE = 0x1 NOTE_EOF = 0x2 @@ -969,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -978,11 +1120,13 @@ const ( NOTE_TRUNCATE = 0x80 NOTE_WRITE = 0x2 OCRNL = 0x10 + OLCUC = 0x20 ONLCR = 0x2 ONLRET = 0x80 ONOCR = 0x40 ONOEOT = 0x8 OPOST = 0x1 + OXTABS = 0x4 O_ACCMODE = 0x3 O_APPEND = 0x8 O_ASYNC = 0x40 @@ -1027,19 +1171,25 @@ const ( RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb RTAX_BRD = 0x7 + RTAX_DNS = 0xc RTAX_DST = 0x0 RTAX_GATEWAY = 0x1 RTAX_GENMASK = 0x3 RTAX_IFA = 0x5 RTAX_IFP = 0x4 RTAX_LABEL = 0xa - RTAX_MAX = 0xb + RTAX_MAX = 0xf RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe RTAX_SRC = 0x8 RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 RTA_BRD = 0x80 + RTA_DNS = 0x1000 RTA_DST = 0x1 RTA_GATEWAY = 0x2 RTA_GENMASK = 0x8 @@ -1047,24 +1197,29 @@ const ( RTA_IFP = 0x10 RTA_LABEL = 0x400 RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 RTA_SRC = 0x100 RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 RTF_ANNOUNCE = 
0x4000 + RTF_BFD = 0x1000000 RTF_BLACKHOLE = 0x1000 RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 RTF_CLONED = 0x10000 RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 - RTF_FMASK = 0x70f808 + RTF_FMASK = 0x110fc08 RTF_GATEWAY = 0x2 RTF_HOST = 0x4 RTF_LLINFO = 0x400 RTF_LOCAL = 0x200000 - RTF_MASK = 0x80 RTF_MODIFIED = 0x20 RTF_MPATH = 0x40000 RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 RTF_PERMANENT_ARP = 0x2000 RTF_PROTO1 = 0x8000 RTF_PROTO2 = 0x4000 @@ -1073,23 +1228,26 @@ const ( RTF_STATIC = 0x800 RTF_UP = 0x1 RTF_USETRAILERS = 0x8000 - RTF_XRESOLVE = 0x200 + RTM_80211INFO = 0x15 RTM_ADD = 0x1 + RTM_BFD = 0x12 RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 RTM_DELADDR = 0xd RTM_DELETE = 0x2 RTM_DESYNC = 0x10 RTM_GET = 0x4 RTM_IFANNOUNCE = 0xf RTM_IFINFO = 0xe - RTM_LOCK = 0x8 + RTM_INVALIDATE = 0x11 RTM_LOSING = 0x5 RTM_MAXSIZE = 0x800 RTM_MISS = 0x7 RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1099,67 +1257,74 @@ const ( RTV_RTTVAR = 0x80 RTV_SPIPE = 0x10 RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff RT_TABLEID_MAX = 0xff RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 SIOCADDMULTI = 0x80206931 SIOCAIFADDR = 0x8040691a SIOCAIFGROUP = 0x80246987 - SIOCALIFADDR = 0x8218691c SIOCATMARK = 0x40047307 - SIOCBRDGADD = 0x8054693c - SIOCBRDGADDS = 0x80546941 - SIOCBRDGARL = 0x806e694d + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d SIOCBRDGDADDR = 0x81286947 - SIOCBRDGDEL = 0x8054693d - SIOCBRDGDELS = 0x80546942 - SIOCBRDGFLUSH = 0x80546948 - SIOCBRDGFRL = 0x806e694e + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e SIOCBRDGGCACHE = 0xc0146941 SIOCBRDGGFD = 0xc0146952 SIOCBRDGGHT = 0xc0146951 - SIOCBRDGGIFFLGS = 0xc054693e + SIOCBRDGGIFFLGS = 0xc060693e SIOCBRDGGMA = 0xc0146953 - SIOCBRDGGPARAM = 0xc03c6958 + SIOCBRDGGPARAM = 0xc0406958 SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc028694f - SIOCBRDGGSIFS = 0xc054693c SIOCBRDGGTO = 0xc0146946 - SIOCBRDGIFS = 0xc0546942 + SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0186943 SIOCBRDGSADDR = 0xc1286944 SIOCBRDGSCACHE = 0x80146940 SIOCBRDGSFD = 0x80146952 SIOCBRDGSHT = 0x80146951 - SIOCBRDGSIFCOST = 0x80546955 - SIOCBRDGSIFFLGS = 0x8054693f - SIOCBRDGSIFPRIO = 0x80546954 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a SIOCBRDGSMA = 0x80146953 SIOCBRDGSPRI = 0x80146950 SIOCBRDGSPROTO = 0x8014695a SIOCBRDGSTO = 0x80146945 SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 SIOCDIFGROUP = 0x80246989 + SIOCDIFPARENT = 0x802069b4 SIOCDIFPHYADDR = 0x80206949 - SIOCDLIFADDR = 0x8218691e + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af SIOCGETKALIVE = 0xc01869a4 SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae SIOCGETPFLOW = 0xc02069fe SIOCGETPFSYNC = 0xc02069f8 SIOCGETSGCNT = 0xc0147534 SIOCGETVIFCNT = 0xc0147533 SIOCGETVLAN = 0xc0206990 - SIOCGHIWAT = 0x40047301 SIOCGIFADDR = 0xc0206921 - SIOCGIFASYNCMAP = 0xc020697c SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCONF = 0xc0086924 SIOCGIFDATA = 0xc020691b @@ -1168,41 +1333,53 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGATTR = 
0xc024698b SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc024698d SIOCGIFGMEMB = 0xc024698a SIOCGIFGROUP = 0xc0246988 SIOCGIFHARDMTU = 0xc02069a5 - SIOCGIFMEDIA = 0xc0286936 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0386938 SIOCGIFMETRIC = 0xc0206917 SIOCGIFMTU = 0xc020697e SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 SIOCGIFPRIORITY = 0xc020699c - SIOCGIFPSRCADDR = 0xc0206947 SIOCGIFRDOMAIN = 0xc02069a0 SIOCGIFRTLABEL = 0xc0206983 SIOCGIFRXR = 0x802069aa - SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFSFFPAGE = 0xc1126939 SIOCGIFXFLAGS = 0xc020699e - SIOCGLIFADDR = 0xc218691d SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 SIOCGLIFPHYRTABLE = 0xc02069a2 SIOCGLIFPHYTTL = 0xc02069a9 - SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac SIOCIFCREATE = 0x8020697a SIOCIFDESTROY = 0x80206979 SIOCIFGCLONERS = 0xc00c6978 SIOCSETKALIVE = 0x801869a3 SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad SIOCSETPFLOW = 0x802069fd SIOCSETPFSYNC = 0x802069f7 SIOCSETVLAN = 0x8020698f - SIOCSHIWAT = 0x80047300 SIOCSIFADDR = 0x8020690c - SIOCSIFASYNCMAP = 0x8020697d SIOCSIFBRDADDR = 0x80206913 SIOCSIFDESCR = 0x80206980 SIOCSIFDSTADDR = 0x8020690e @@ -1210,26 +1387,36 @@ const ( SIOCSIFGATTR = 0x8024698c SIOCSIFGENERIC = 0x80206939 SIOCSIFLLADDR = 0x8020691f - SIOCSIFMEDIA = 0xc0206935 + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 SIOCSIFMETRIC = 0x80206918 SIOCSIFMTU = 0x8020697f SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 SIOCSIFPRIORITY = 0x8020699b SIOCSIFRDOMAIN = 0x8020699f SIOCSIFRTLABEL = 0x80206982 - SIOCSIFTIMESLOT = 0x80206985 SIOCSIFXFLAGS = 0x8020699d SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 SIOCSLIFPHYRTABLE = 0x802069a1 SIOCSLIFPHYTTL = 0x802069a8 - SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 SOCK_NONBLOCK = 0x4000 SOCK_RAW = 0x3 SOCK_RDM = 0x4 @@ -1241,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1248,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1261,6 +1450,7 @@ const ( SO_TIMESTAMP = 0x800 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1290,9 +1480,24 @@ const ( S_IXOTH = 0x1 S_IXUSR = 0x40 TCIFLUSH = 0x1 + TCIOFF = 0x3 TCIOFLUSH = 0x3 + TCION = 0x4 TCOFLUSH = 0x2 - TCP_MAXBURST = 0x4 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + 
TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1301,11 +1506,15 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 - TCP_NSTATES = 0xb + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d TIOCCONS = 0x80047462 TIOCDRAIN = 0x2000745e TIOCEXCL = 0x2000740d @@ -1321,7 +1530,7 @@ const ( TIOCGFLAGS = 0x4004745d TIOCGPGRP = 0x40047477 TIOCGSID = 0x40047463 - TIOCGTSTAMP = 0x400c745b + TIOCGTSTAMP = 0x4010745b TIOCGWINSZ = 0x40087468 TIOCMBIC = 0x8004746b TIOCMBIS = 0x8004746c @@ -1360,17 +1569,21 @@ const ( TIOCSETAF = 0x802c7416 TIOCSETAW = 0x802c7415 TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c TIOCSFLAGS = 0x8004745c TIOCSIG = 0x8004745f TIOCSPGRP = 0x80047476 TIOCSTART = 0x2000746e - TIOCSTAT = 0x80047465 - TIOCSTI = 0x80017472 + TIOCSTAT = 0x20007465 TIOCSTOP = 0x2000746f TIOCSTSTAMP = 0x8008745a TIOCSWINSZ = 0x80087467 TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1381,6 +1594,19 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc @@ -1394,6 +1620,7 @@ const ( WCOREFLAG = 0x80 WNOHANG = 0x1 WUNTRACED = 0x2 + XCASE = 0x1000000 ) // Errors @@ -1407,6 +1634,7 @@ const ( EALREADY = syscall.Errno(0x25) EAUTH = syscall.Errno(0x50) EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) EBADRPC = syscall.Errno(0x48) EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x58) @@ -1433,7 +1661,7 @@ const ( EIPSEC = syscall.Errno(0x52) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x5b) + ELAST = syscall.Errno(0x5f) ELOOP = syscall.Errno(0x3e) EMEDIUMTYPE = syscall.Errno(0x56) EMFILE = syscall.Errno(0x18) @@ -1461,12 +1689,14 @@ const ( ENOTCONN = syscall.Errno(0x39) ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) ENOTSOCK = syscall.Errno(0x26) ENOTSUP = syscall.Errno(0x5b) ENOTTY = syscall.Errno(0x19) ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x2d) EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x2e) EPIPE = syscall.Errno(0x20) @@ -1474,6 +1704,7 @@ const ( EPROCUNAVAIL = syscall.Errno(0x4c) EPROGMISMATCH = syscall.Errno(0x4b) EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) EPROTONOSUPPORT = syscall.Errno(0x2b) EPROTOTYPE = syscall.Errno(0x29) ERANGE = syscall.Errno(0x22) @@ -1570,7 +1801,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {35, "EAGAIN", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1626,7 +1857,11 @@ 
var errorList = [...]struct { {88, "ECANCELED", "operation canceled"}, {89, "EIDRM", "identifier removed"}, {90, "ENOMSG", "no message of desired type"}, - {91, "ELAST", "not supported"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, } // Signal table @@ -1640,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1667,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index 90de7dfc33a3..ae16fe7542ae 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -112,6 +112,12 @@ const ( BPF_FILDROP_CAPTURE = 0x1 BPF_FILDROP_DROP = 0x2 BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -140,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -180,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -243,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + 
ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -295,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -326,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -354,15 +423,16 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 @@ -445,10 +515,11 @@ const ( ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x8 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 @@ -470,7 +541,7 @@ const ( EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -736,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -801,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -910,6 +984,9 @@ const ( IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 @@ -981,6 +1058,19 @@ const ( MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -993,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -1001,7 +1092,8 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_IFNAMES = 0x6 - NET_RT_MAXID = 0x7 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 @@ -1018,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -1154,7 +1247,7 @@ const ( RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1172,6 +1265,9 @@ const ( RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1188,30 +1284,30 @@ const ( SIOCBRDGDELS = 0x80606942 SIOCBRDGFLUSH = 0x80606948 SIOCBRDGFRL = 0x808c694e - SIOCBRDGGCACHE = 
0xc0186941 - SIOCBRDGGFD = 0xc0186952 - SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 SIOCBRDGGIFFLGS = 0xc060693e - SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc0406958 - SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc030694f - SIOCBRDGGTO = 0xc0186946 + SIOCBRDGGTO = 0xc0146946 SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0206943 SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80186940 - SIOCBRDGSFD = 0x80186952 - SIOCBRDGSHT = 0x80186951 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 SIOCBRDGSIFCOST = 0x80606955 SIOCBRDGSIFFLGS = 0x8060693f SIOCBRDGSIFPRIO = 0x80606954 SIOCBRDGSIFPROT = 0x8060694a - SIOCBRDGSMA = 0x80186953 - SIOCBRDGSPRI = 0x80186950 - SIOCBRDGSPROTO = 0x8018695a - SIOCBRDGSTO = 0x80186945 - SIOCBRDGSTXHC = 0x80186959 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 @@ -1264,6 +1360,7 @@ const ( SIOCGPWE3CTRLWORD = 0xc02069dc SIOCGPWE3FAT = 0xc02069dd SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 SIOCGTXHPRIO = 0xc02069c6 SIOCGUMBINFO = 0xc02069be @@ -1310,17 +1407,13 @@ const ( SIOCSPWE3CTRLWORD = 0x802069dc SIOCSPWE3FAT = 0x802069dd SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 SIOCSTXHPRIO = 0x802069c5 SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 - SIOCSWGDPID = 0xc018695b - SIOCSWGMAXFLOW = 0xc0186960 - SIOCSWGMAXGROUP = 0xc018695d - SIOCSWSDPID = 0x8018695c - SIOCSWSPORTNO = 0xc060695f SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 SOCK_DNS = 0x1000 @@ -1335,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1342,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1391,7 +1486,18 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 - TCP_MAXBURST = 0x4 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1400,6 +1506,7 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 TIMER_ABSTIME = 0x1 @@ -1768,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1795,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index f1154ff56f6c..03d90fe35501 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -112,6 +112,12 @@ const ( BPF_FILDROP_CAPTURE = 0x1 BPF_FILDROP_DROP = 0x2 BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -140,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -301,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -353,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -413,15 +423,16 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 @@ -504,10 +515,11 @@ const ( ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x8 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 @@ -529,7 +541,7 @@ const ( EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -795,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -860,6 +873,7 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -970,6 +984,9 @@ const ( IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 @@ -1041,6 +1058,19 @@ const ( MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -1053,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -1061,7 +1092,8 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_IFNAMES = 0x6 - NET_RT_MAXID = 0x7 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 @@ -1078,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -1214,7 +1247,7 @@ const ( RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1232,6 +1265,9 @@ const ( RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 
SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1248,30 +1284,30 @@ const ( SIOCBRDGDELS = 0x80606942 SIOCBRDGFLUSH = 0x80606948 SIOCBRDGFRL = 0x808c694e - SIOCBRDGGCACHE = 0xc0186941 - SIOCBRDGGFD = 0xc0186952 - SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 SIOCBRDGGIFFLGS = 0xc060693e - SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc0406958 - SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc030694f - SIOCBRDGGTO = 0xc0186946 + SIOCBRDGGTO = 0xc0146946 SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0206943 SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80186940 - SIOCBRDGSFD = 0x80186952 - SIOCBRDGSHT = 0x80186951 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 SIOCBRDGSIFCOST = 0x80606955 SIOCBRDGSIFFLGS = 0x8060693f SIOCBRDGSIFPRIO = 0x80606954 SIOCBRDGSIFPROT = 0x8060694a - SIOCBRDGSMA = 0x80186953 - SIOCBRDGSPRI = 0x80186950 - SIOCBRDGSPROTO = 0x8018695a - SIOCBRDGSTO = 0x80186945 - SIOCBRDGSTXHC = 0x80186959 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 @@ -1378,11 +1414,6 @@ const ( SIOCSVH = 0xc02069f5 SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 - SIOCSWGDPID = 0xc018695b - SIOCSWGMAXFLOW = 0xc0186960 - SIOCSWGMAXGROUP = 0xc018695d - SIOCSWSDPID = 0x8018695c - SIOCSWSPORTNO = 0xc060695f SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 SOCK_DNS = 0x1000 @@ -1455,7 +1486,18 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 - TCP_MAXBURST = 0x4 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1833,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1860,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {81920, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go new file mode 100644 index 000000000000..8e2c51b1eec0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go @@ -0,0 +1,1905 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ALTWERASE = 0x200 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc010427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80104277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x8010426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RND = 0xc0 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 + CREAD = 0x800 + CRTSCTS = 0x10000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + 
DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f + DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + 
ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PBB = 0x88e7 + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + 
ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x9 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + 
IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MBIM = 0xfa + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD 
= 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + 
IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0xfff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x0 + MAP_SHARED = 0x1 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_DOOMED = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x8000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 + MNT_SYNCHRONOUS = 0x2 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x400ffff + MNT_WAIT = 0x1 + MNT_WANTRDWR = 0x2000000 + MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + 
NFDBITS = 0x20 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OLCUC = 0x20 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb + RTAX_BRD = 0x7 + RTAX_DNS = 0xc + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xf + RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd + RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 + RTA_BRD = 0x80 + RTA_DNS = 0x1000 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 + RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x110fc08 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTM_80211INFO = 0x15 + RTM_ADD = 0x1 + RTM_BFD = 0x12 + RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_INVALIDATE = 0x11 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_SOURCE = 0x16 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 
0x80286987 + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 + SIOCBRDGGIFFLGS = 0xc060693e + SIOCBRDGGMA = 0xc0146953 + SIOCBRDGGPARAM = 0xc0406958 + SIOCBRDGGPRI = 0xc0146950 + SIOCBRDGGRL = 0xc030694f + SIOCBRDGGTO = 0xc0146946 + SIOCBRDGIFS = 0xc0606942 + SIOCBRDGRTS = 0xc0206943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPARENT = 0x802069b4 + SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGETVLAN = 0xc0206990 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc028698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0406938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 + SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 + SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8028698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFXFLAGS = 
0x8020699d + SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db + SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf + SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 + SIOCSVNETID = 0x802069a6 + SOCK_CLOEXEC = 0x8000 + SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x4010745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + 
TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b + TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WUNTRACED = 0x2 + XCASE = 0x1000000 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5f) + ELOOP = syscall.Errno(0x3e) + EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW 
= syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", 
"operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended 
(signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go new file mode 100644 index 000000000000..13d403031ed6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go @@ -0,0 +1,1904 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ALTWERASE = 0x200 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc010427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80104277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x8010426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + 
BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RND = 0xc0 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 + CREAD = 0x800 + CRTSCTS = 0x10000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f + DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DT_BLK = 0x6 
+ DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 + 
ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PBB = 0x88e7 + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x9 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 
0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MBIM = 0xfa + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + 
IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + 
IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0xfff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x0 + MAP_SHARED = 0x1 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_DOOMED = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x8000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + 
MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 + MNT_SYNCHRONOUS = 0x2 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x400ffff + MNT_WAIT = 0x1 + MNT_WANTRDWR = 0x2000000 + MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + NFDBITS = 0x20 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OLCUC = 0x20 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb + RTAX_BRD = 0x7 + RTAX_DNS = 0xc + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xf + RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd + RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 + RTA_BRD = 0x80 + RTA_DNS = 0x1000 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 + RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x110fc08 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 
0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTM_80211INFO = 0x15 + RTM_ADD = 0x1 + RTM_BFD = 0x12 + RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_INVALIDATE = 0x11 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_SOURCE = 0x16 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 + SIOCBRDGGIFFLGS = 0xc060693e + SIOCBRDGGMA = 0xc0146953 + SIOCBRDGGPARAM = 0xc0406958 + SIOCBRDGGPRI = 0xc0146950 + SIOCBRDGGRL = 0xc030694f + SIOCBRDGGTO = 0xc0146946 + SIOCBRDGIFS = 0xc0606942 + SIOCBRDGRTS = 0xc0206943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPARENT = 0x802069b4 + SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGETVLAN = 0xc0206990 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc028698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0406938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 
0xc02069db + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 + SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 + SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8028698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFXFLAGS = 0x8020699d + SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db + SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf + SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 + SIOCSVNETID = 0x802069a6 + SOCK_CLOEXEC = 0x8000 + SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 
0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x4010745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b + TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WUNTRACED = 0x2 + XCASE = 0x1000000 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5f) + ELOOP = syscall.Errno(0x3e) + 
EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + 
{11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large 
to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index 85e0cc386678..870215d2c479 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -975,7 +975,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] @@ -992,7 +992,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index f1d4a73b0898..a89b0bfa53ca 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -931,7 +931,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] @@ -946,7 +946,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go deleted 
file mode 100644 index a06eb0932420..000000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go +++ /dev/null @@ -1,40 +0,0 @@ -// go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -//go:build darwin && amd64 && go1.13 -// +build darwin,amd64,go1.13 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_closedir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - res = Errno(r0) - return -} - -var libc_readdir_r_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s deleted file mode 100644 index d6c3e25c018a..000000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s +++ /dev/null @@ -1,25 +0,0 @@ -// go run mkasm_darwin.go amd64 -// Code generated by the command above; DO NOT EDIT. - -//go:build go1.13 -// +build go1.13 - -#include "textflag.h" - -TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fdopendir(SB) - -GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) - -TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_closedir(SB) - -GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) - -TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_readdir_r(SB) - -GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 -DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 0ae0ed4cb8af..c2461c496797 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1,8 +1,8 @@ -// go run mksyscall.go -tags darwin,amd64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go +// go run mksyscall.go -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
-//go:build darwin && amd64 && go1.12 -// +build darwin,amd64,go1.12 +//go:build darwin && amd64 +// +build darwin,amd64 package unix @@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_closedir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +var libc_readdir_r_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]int32) (err error) { _, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -643,17 +669,22 @@ var libc_flistxattr_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setattrlist_trampoline_addr uintptr +var libc_utimensat_trampoline_addr uintptr -//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic libc_utimensat utimensat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1638,6 +1669,30 @@ var libc_mknod_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1698,7 +1753,7 @@ var libc_pathconf_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1719,7 +1774,7 @@ 
var libc_pread_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index eac6ca806f4d..95fe4c0eb962 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -1,11 +1,14 @@ -// go run mkasm_darwin.go amd64 +// go run mkasm.go darwin amd64 // Code generated by the command above; DO NOT EDIT. -//go:build go1.12 -// +build go1.12 - #include "textflag.h" +TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) + +GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) + TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) @@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) +TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) + +GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) + +TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) + +GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) + TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) @@ -228,11 +243,11 @@ TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) -TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setattrlist(SB) +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) -GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) @@ -600,6 +615,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) + +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go deleted file mode 100644 index cec595d553a4..000000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go +++ /dev/null @@ -1,40 +0,0 @@ -// go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -//go:build darwin && arm64 && go1.13 -// +build darwin,arm64,go1.13 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_closedir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - res = Errno(r0) - return -} - -var libc_readdir_r_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s deleted file mode 100644 index 357989722cfb..000000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s +++ /dev/null @@ -1,25 +0,0 @@ -// go run mkasm_darwin.go arm64 -// Code generated by the command above; DO NOT EDIT. - -//go:build go1.13 -// +build go1.13 - -#include "textflag.h" - -TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fdopendir(SB) - -GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) - -TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_closedir(SB) - -GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) - -TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_readdir_r(SB) - -GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 -DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index cf71be3edb3b..26a0fdc505bb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1,8 +1,8 @@ -// go run mksyscall.go -tags darwin,arm64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go +// go run mksyscall.go -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
-//go:build darwin && arm64 && go1.12 -// +build darwin,arm64,go1.12 +//go:build darwin && arm64 +// +build darwin,arm64 package unix @@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_closedir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +var libc_readdir_r_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]int32) (err error) { _, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -643,17 +669,22 @@ var libc_flistxattr_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setattrlist_trampoline_addr uintptr +var libc_utimensat_trampoline_addr uintptr -//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic libc_utimensat utimensat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1638,6 +1669,30 @@ var libc_mknod_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1698,7 +1753,7 @@ var libc_pathconf_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1719,7 +1774,7 @@ 
var libc_pread_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 4ebcf2175854..efa5b4c987c5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -1,11 +1,14 @@ -// go run mkasm_darwin.go arm64 +// go run mkasm.go darwin arm64 // Code generated by the command above; DO NOT EDIT. -//go:build go1.12 -// +build go1.12 - #include "textflag.h" +TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) + +GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) + TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) @@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) +TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) + +GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) + +TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) + +GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) + TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) @@ -228,11 +243,11 @@ TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) -TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setattrlist(SB) +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) -GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) @@ -600,6 +615,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) + +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 1b6eedfa6115..54749f9c5ed7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -552,6 +552,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 3e9bddb7b224..77479d458155 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -544,6 +544,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -912,7 +922,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +932,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +947,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +957,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +977,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { 
var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +994,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1215,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,43 +1260,13 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), uintptr(dev>>32), 0) + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), uintptr(dev>>32), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1420,7 +1333,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1437,7 +1350,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1753,22 +1666,7 @@ 
func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1681,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index c72a462b91e1..2e966d4d7a6c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -544,6 +544,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -912,7 +922,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +932,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +947,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 
0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +957,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +977,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +994,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1215,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,22 +1260,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1347,21 +1275,6 @@ func mknodat(fd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1420,7 +1333,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1437,7 +1350,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1753,22 +1666,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1681,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 530d5df90c0c..d65a7c0fa6e9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -351,22 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ 
-404,6 +388,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data int) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -544,6 +544,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -912,7 +922,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +932,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +947,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +957,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +977,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +994,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1215,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,43 +1260,13 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, uintptr(dev), uintptr(dev>>32)) if e1 != 0 { err = errnoErr(e1) } @@ -1420,7 +1333,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 
0 { _p0 = unsafe.Pointer(&p[0]) @@ -1437,7 +1350,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1753,22 +1666,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1681,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 71e7df9e8558..6f0b97c6db3a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -544,6 +544,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -912,7 +922,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +932,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +947,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, 
flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +957,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +977,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +994,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1215,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,22 +1260,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { +func Mknodat(fd 
int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1347,21 +1275,6 @@ func mknodat(fd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1420,7 +1333,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1437,7 +1350,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1753,22 +1666,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1681,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go new file mode 100644 index 000000000000..e1c23b527236 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -0,0 +1,1899 @@ +// go run mksyscall.go -tags freebsd,riscv64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_riscv64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build freebsd && riscv64 +// +build freebsd,riscv64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CapEnter() (err error) { + _, _, e1 := Syscall(SYS_CAP_ENTER, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsGet(version int, fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS___CAP_RIGHTS_GET, uintptr(version), uintptr(fd), uintptr(unsafe.Pointer(rightsp))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsLimit(fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS_CAP_RIGHTS_LIMIT, uintptr(fd), uintptr(unsafe.Pointer(rightsp)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := 
Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + 
_p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, 
length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := 
RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err 
error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(fdat), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + 
if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, 
err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index af5cb064ec4f..b57c7050d7a8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -15,25 +15,19 @@ import ( //go:cgo_import_dynamic libc_writev writev "libc.so" //go:cgo_import_dynamic libc_pwritev pwritev "libc.so" //go:cgo_import_dynamic libc_accept4 accept4 "libsocket.so" -//go:cgo_import_dynamic libc_putmsg putmsg "libc.so" -//go:cgo_import_dynamic libc_getmsg getmsg "libc.so" //go:linkname procreadv libc_readv //go:linkname procpreadv libc_preadv //go:linkname procwritev libc_writev //go:linkname procpwritev libc_pwritev //go:linkname procaccept4 libc_accept4 -//go:linkname procputmsg libc_putmsg -//go:linkname procgetmsg libc_getmsg var ( procreadv, procpreadv, procwritev, procpwritev, - procaccept4, - procputmsg, - procgetmsg syscallFunc + procaccept4 syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -106,23 +100,3 @@ func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 30fa4055ec1f..293cf36804e9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -231,6 +231,16 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) { + _, _, e1 := Syscall6(SYS_WAITID, uintptr(idType), uintptr(id), uintptr(unsafe.Pointer(info)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) ret = int(r0) @@ -818,6 +828,49 @@ func Fsync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) { + r0, _, e1 := Syscall(SYS_FSMOUNT, uintptr(fd), uintptr(flags), uintptr(mountAttrs)) + fsfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsopen(fsName string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_FSOPEN, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_FSPICK, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -1195,6 +1248,26 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MoveMount(fromDirfd int, fromPathName string, toDirfd int, toPathName string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fromPathName) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(toPathName) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOVE_MOUNT, uintptr(fromDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(toDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1205,6 +1278,22 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func OpenTree(dfd int, fileName string, flags uint) (r int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fileName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN_TREE, uintptr(dfd), 
uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + r = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) fd = int(r0) @@ -1992,6 +2081,16 @@ func PidfdGetfd(pidfd int, targetfd int, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PidfdSendSignal(pidfd int, sig Signal, info *Siginfo, flags int) (err error) { + _, _, e1 := Syscall6(SYS_PIDFD_SEND_SIGNAL, uintptr(pidfd), uintptr(sig), uintptr(unsafe.Pointer(info)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { r0, _, e1 := Syscall(SYS_SHMAT, uintptr(id), uintptr(addr), uintptr(flag)) ret = uintptr(r0) @@ -2052,3 +2151,13 @@ func setitimer(which int, newValue *Itimerval, oldValue *Itimerval) (err error) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func rtSigprocmask(how int, set *Sigset_t, oldset *Sigset_t, sigsetsize uintptr) (err error) { + _, _, e1 := RawSyscall6(SYS_RT_SIGPROCMASK, uintptr(how), uintptr(unsafe.Pointer(set)), uintptr(unsafe.Pointer(oldset)), uintptr(sigsetsize), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 2fc6271f47cf..c81b0ad47772 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -200,7 +200,7 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -217,7 +217,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -287,46 +287,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), 
uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 43d9f0128146..2206bce7f4dd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -215,6 +215,17 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pause() (err error) { _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) if e1 != 0 { @@ -225,7 +236,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -242,7 +253,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -323,36 +334,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -363,16 +344,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 7df0cb179586..edf6b39f1615 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -412,46 +412,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -538,7 +498,7 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -555,7 +515,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 076e8f1c5bbb..190609f2140d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -180,7 +180,18 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -197,7 +208,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -278,36 +289,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - 
err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -318,16 +299,6 @@ func setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go new file mode 100644 index 000000000000..806ffd1e125e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -0,0 +1,487 @@ +// go run mksyscall.go -tags linux,loong64 syscall_linux.go syscall_linux_loong64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build linux && loong64 +// +build linux,loong64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err 
= errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsuid(uid int) (prev 
int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 7b3c84746780..5f984cbb1ca7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -150,7 +150,7 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -167,7 +167,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -248,46 +248,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 0d3c45fbddad..46fc380a40e5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -180,7 +180,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) 
@@ -197,7 +197,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -278,36 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -318,16 +288,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index cb46b2aaa227..cbd0d4dadbad 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -180,7 +180,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -197,7 +197,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -278,36 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, 
suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -318,16 +288,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 21c9baa6ab14..0c13d15f07cf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -150,7 +150,7 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -167,7 +167,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -248,46 +248,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index 02b8f08871e4..e01432aed51f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -210,7 +210,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) 
(n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -227,7 +227,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -308,46 +308,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index ac8cb09ba350..13c7ee7baff6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -240,7 +240,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -257,7 +257,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -349,36 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -389,16 +359,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index bd08d887a140..02d0c0fd61ec 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -240,7 +240,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -257,7 +257,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -349,36 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -389,16 +359,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index a834d2173042..9fee3b1d2396 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ 
-180,7 +180,18 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -197,7 +208,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -258,36 +269,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -298,16 +279,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 9e462a96fbe3..647bbfecd6aa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -210,7 +210,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -227,7 +227,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -319,36 +319,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -359,16 +329,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 96d340242c40..ada057f89144 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -220,7 +220,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -237,7 +237,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -329,36 +329,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, 
uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -369,16 +339,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 51d0c0742bfa..79f7389963ec 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -521,6 +521,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -1330,7 +1340,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1347,7 +1357,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index df2efb6db3fa..fb161f3a2636 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -521,6 +521,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -1330,7 +1340,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1347,7 +1357,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index c8536c2c9f09..4c8ac993a880 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -521,6 +521,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -1330,7 +1340,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1347,7 +1357,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 8b981bfc2eb9..76dd8ec4fdb9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -521,6 +521,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -1330,7 +1340,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1347,7 +1357,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 8f80f4ade511..caeb807bd4e8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go +// go run mksyscall.go -l32 -openbsd -libc -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && 386 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path 
string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,49 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +733,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +784,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +845,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), 
uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +878,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +914,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +940,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), uintptr(length>>32)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1040,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), 
uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1091,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1120,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1149,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), 
uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1216,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1228,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1252,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1276,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1309,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode 
uint32) (err error) { @@ -986,13 +1328,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1347,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1366,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1385,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1404,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1423,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, 
leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1456,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1464,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1476,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1484,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1496,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,16 +1504,20 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,16 +1525,20 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset 
int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1546,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1559,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1567,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1585,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1593,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1611,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1619,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1636,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1660,17 @@ func Renameat(fromfd int, from 
string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1679,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1698,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + r0, r1, e1 := syscall_syscall6(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) newoffset = int64(int64(r1)<<32 | int64(r0)) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1720,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1735,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := 
syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1789,133 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, 
e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1923,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1963,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1982,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" 
+ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +2006,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2030,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2063,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2094,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2113,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2132,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2152,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2160,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + r0, _, e1 := syscall_syscall9(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2175,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2207,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2223,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s new file mode 100644 index 000000000000..087444250c9a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -0,0 +1,669 @@ +// go run mkasm.go openbsd 386 +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4 +DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4 +DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4 +DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL ·libc_recvfrom_trampoline_addr(SB), 
RODATA, $4 +DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4 +DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +GLOBL 
·libc_ioctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4 +DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_clock_gettime_trampoline_addr(SB)/4, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4 +DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL ·libc_fchflags_trampoline_addr(SB), 
RODATA, $4 +DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP 
libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4 +DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4 +DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdirat_trampoline_addr(SB)/4, 
$libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 +DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4 +DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4 +DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4 +DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4 +DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB) + +TEXT 
libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4 +DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4 +DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 3a47aca7bf70..a05e5f4fff6d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go +// go run mksyscall.go -openbsd -libc -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && amd64 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path 
string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,49 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +733,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +784,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +845,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), 
uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +878,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +914,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +940,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1040,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = 
errnoErr(e1) @@ -809,20 +1091,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1120,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1149,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } 
return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1216,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1228,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1252,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1276,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1309,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1328,17 @@ func Mkdir(path 
string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1347,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1366,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1385,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1404,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1423,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := 
Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1456,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1464,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1476,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1484,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1496,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,16 +1504,20 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,16 +1525,20 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = 
unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1546,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1559,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1567,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1585,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1593,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1611,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1619,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1636,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1660,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, 
uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1679,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1698,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1720,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1735,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1789,133 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1923,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1963,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1982,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +2006,17 @@ func Symlink(path string, link string) (err error) { if err != nil { 
return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2030,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2063,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2094,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2113,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 
+2132,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2152,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2160,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2175,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2207,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2223,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s new file mode 100644 index 000000000000..5782cd108447 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -0,0 +1,669 @@ +// go run mkasm.go openbsd amd64 +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT 
libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, 
$libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, 
$libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL 
·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT 
libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, 
$libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT 
libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 883a9b45e8e2..b2da8e50cc7a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go +// go run mksyscall.go -l32 -openbsd -arm -libc -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path 
string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,49 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +733,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +784,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +845,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), 
uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +878,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +914,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +940,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall6(libc_ftruncate_trampoline_addr, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1040,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), 
uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1091,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1120,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1149,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), 
uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1216,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1228,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1252,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1276,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1309,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode 
uint32) (err error) { @@ -986,13 +1328,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1347,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1366,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1385,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1404,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1423,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, 
leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1456,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1464,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1476,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1484,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1496,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,16 +1504,20 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,16 +1525,20 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset 
int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1546,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1559,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1567,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1585,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1593,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1611,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1619,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1636,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1660,17 @@ func Renameat(fromfd int, from 
string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1679,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1698,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + r0, r1, e1 := syscall_syscall6(libc_lseek_trampoline_addr, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) newoffset = int64(int64(r1)<<32 | int64(r0)) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1720,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1735,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := 
syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1789,133 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, 
e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1923,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1963,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1982,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" 
+ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +2006,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2030,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2063,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall6(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2094,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2113,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 
0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2132,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2152,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2160,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + r0, _, e1 := syscall_syscall9(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2175,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2207,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2223,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s new file mode 100644 index 000000000000..cf310420c942 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -0,0 +1,669 @@ +// go run mkasm.go openbsd arm +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4 +DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4 +DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4 +DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL 
·libc_recvfrom_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4 +DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 
+ JMP libc_ioctl(SB) +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4 +DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_clock_gettime_trampoline_addr(SB)/4, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4 +DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL 
·libc_fchflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB) + +TEXT 
libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4 +DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4 +DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 +DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4 +DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4 +DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4 +DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), 
RODATA, $4 +DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4 +DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_stat_trampoline_addr(SB)/4, 
$libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4 +DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4 +DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index aac7fdc95e28..048b2655e6f8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go +// go run mksyscall.go -openbsd -libc -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
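Editor's note (not part of the vendored patch): the hunks below regenerate zsyscall_openbsd_arm64.go with the new -libc flag, replacing direct SYS_* syscall-number invocations with calls through libc trampolines. The sketch below, using Close as the example, shows the shape of a wrapper after this change; syscall_syscall and errnoErr are internal helpers of golang.org/x/sys/unix, and the trampoline address is resolved by the linker from the cgo_import_dynamic directive together with the matching assembly stub in zsyscall_openbsd_arm64.s. This is an illustrative excerpt of the generated pattern, not new code to apply.

func Close(fd int) (err error) {
	// Call libc's close(2) wrapper through the dynamically resolved trampoline
	// instead of trapping into the kernel with SYS_CLOSE directly.
	_, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}

// Filled in at link time with the address of the assembly trampoline that
// jumps to libc's close symbol.
var libc_close_trampoline_addr uintptr

//go:cgo_import_dynamic libc_close close "libc.so"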
//go:build openbsd && arm64 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path 
string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,49 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +733,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +784,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +845,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), 
uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +878,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +914,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +940,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1040,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = 
errnoErr(e1) @@ -809,20 +1091,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1120,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1149,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } 
return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1216,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1228,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1252,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1276,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1309,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1328,17 @@ func Mkdir(path 
string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1347,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1366,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1385,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1404,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1423,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := 
Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1456,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1464,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1476,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1484,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1496,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,16 +1504,20 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,16 +1525,20 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = 
unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1546,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1559,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1567,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1585,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1593,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1611,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1619,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1636,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1660,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, 
uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1679,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1698,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1720,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1735,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1789,133 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1923,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1963,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1982,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +2006,17 @@ func Symlink(path string, link string) (err error) { if err != nil { 
return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2030,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2063,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2094,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2113,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 
+2132,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2152,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2160,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2175,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2207,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2223,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s new file mode 100644 index 000000000000..484bb42e0a89 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -0,0 +1,669 @@ +// go run mkasm.go openbsd arm64 +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT 
libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, 
$libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, 
$libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL 
·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT 
libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, 
$libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT 
libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 8776187462b7..6f33e37e723f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go +// go run mksyscall.go -openbsd -libc -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && mips64 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path 
string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,49 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +733,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +784,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +845,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), 
uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +878,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +914,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +940,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1040,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = 
errnoErr(e1) @@ -809,20 +1091,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1120,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1149,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } 
return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1216,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1228,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1252,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1276,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1309,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1328,17 @@ func Mkdir(path 
string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1347,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1366,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1385,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1404,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1423,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := 
Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1456,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1464,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1476,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1484,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1496,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,16 +1504,20 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,16 +1525,20 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = 
unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1546,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1559,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1567,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1585,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1593,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1611,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1619,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1636,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1660,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, 
uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1679,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1698,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1720,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1735,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1789,133 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1923,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1963,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1982,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +2006,17 @@ func Symlink(path string, link string) (err error) { if err != nil { 
return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2030,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2063,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2094,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2113,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 
+2132,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2152,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2160,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2175,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2207,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2223,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s new file mode 100644 index 000000000000..55af27263ad7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -0,0 +1,669 @@ +// go run mkasm.go openbsd mips64 +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT 
libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, 
$libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, 
$libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL 
·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT 
libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, 
$libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT 
libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go new file mode 100644 index 000000000000..330cf7f7ac66 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -0,0 +1,2235 @@ +// go run mksyscall.go -openbsd -libc -tags openbsd,ppc64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_ppc64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build openbsd && ppc64 +// +build openbsd,ppc64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_getsockopt getsockopt "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mlock_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_mlock mlock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getcwd_trampoline_addr uintptr 
+ +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil 
{ + return + } + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) + return +} + +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := 
syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" 
+ +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) + egid = int(r0) + return +} + +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) + uid = int(r0) + return +} + +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) + gid = int(r0) + return +} + +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) + pgrp = int(r0) + return +} + +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp 
getpgrp "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) + pid = int(r0) + return +} + +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) + ppid = int(r0) + return +} + +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrtable() (rtable int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) + uid = int(r0) + return +} + +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid 
"libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if 
e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) 
+ } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, 
uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var 
libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrtable(rtable int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath 
string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s new file mode 100644 index 000000000000..4028255b0d5b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -0,0 +1,802 @@ +// go run mkasm.go openbsd ppc64 +// Code generated by the command above; DO NOT EDIT. 
+ +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getgroups(SB) + RET +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setgroups(SB) + RET +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_wait4(SB) + RET +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_accept(SB) + RET +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_bind(SB) + RET +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_connect(SB) + RET +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_socket(SB) + RET +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getsockopt(SB) + RET +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setsockopt(SB) + RET +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpeername(SB) + RET +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getsockname(SB) + RET +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_shutdown(SB) + RET +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_socketpair(SB) + RET +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_recvfrom(SB) + RET +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_sendto(SB) + RET +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_recvmsg(SB) + RET +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_sendmsg(SB) + RET +GLOBL ·libc_sendmsg_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_kevent(SB) + RET +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_utimes(SB) + RET +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_futimes(SB) + RET +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_poll(SB) + RET +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_madvise(SB) + RET +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mlock(SB) + RET +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mlockall(SB) + RET +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mprotect(SB) + RET +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_msync(SB) + RET +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_munlock(SB) + RET +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_munlockall(SB) + RET +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pipe2(SB) + RET +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getdents(SB) + RET +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getcwd(SB) + RET +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_ioctl(SB) + RET +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_sysctl(SB) + RET +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_ppoll(SB) + RET +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, 
$libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_access(SB) + RET +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_adjtime(SB) + RET +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chdir(SB) + RET +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chflags(SB) + RET +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chmod(SB) + RET +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chown(SB) + RET +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chroot(SB) + RET +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_clock_gettime(SB) + RET +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_close(SB) + RET +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_dup(SB) + RET +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_dup2(SB) + RET +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_dup3(SB) + RET +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_exit(SB) + RET +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_faccessat(SB) + RET +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchdir(SB) + RET +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchflags(SB) + RET +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchmod(SB) + RET +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + CALL 
libc_fchmodat(SB) + RET +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchown(SB) + RET +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchownat(SB) + RET +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_flock(SB) + RET +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fpathconf(SB) + RET +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fstat(SB) + RET +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fstatat(SB) + RET +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fstatfs(SB) + RET +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fsync(SB) + RET +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_ftruncate(SB) + RET +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getegid(SB) + RET +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_geteuid(SB) + RET +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getgid(SB) + RET +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpgid(SB) + RET +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpgrp(SB) + RET +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpid(SB) + RET +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getppid(SB) + RET +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpriority(SB) + RET 
+GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getrlimit(SB) + RET +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getrtable(SB) + RET +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getrusage(SB) + RET +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getsid(SB) + RET +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_gettimeofday(SB) + RET +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getuid(SB) + RET +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_issetugid(SB) + RET +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_kill(SB) + RET +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_kqueue(SB) + RET +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_lchown(SB) + RET +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_link(SB) + RET +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_linkat(SB) + RET +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_listen(SB) + RET +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_lstat(SB) + RET +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mkdir(SB) + RET +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mkdirat(SB) + RET +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mkfifo(SB) + RET +GLOBL 
·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mkfifoat(SB) + RET +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mknod(SB) + RET +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mknodat(SB) + RET +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_nanosleep(SB) + RET +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_open(SB) + RET +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_openat(SB) + RET +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pathconf(SB) + RET +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pread(SB) + RET +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pwrite(SB) + RET +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_read(SB) + RET +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_readlink(SB) + RET +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_readlinkat(SB) + RET +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_rename(SB) + RET +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_renameat(SB) + RET +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_revoke(SB) + RET +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_rmdir(SB) + RET +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_lseek(SB) + RET +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_select(SB) + RET +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setegid(SB) + RET +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_seteuid(SB) + RET +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setgid(SB) + RET +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setlogin(SB) + RET +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setpgid(SB) + RET +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setpriority(SB) + RET +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setregid(SB) + RET +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setreuid(SB) + RET +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setresgid(SB) + RET +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setresuid(SB) + RET +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setrlimit(SB) + RET +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setrtable(SB) + RET +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setsid(SB) + RET +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_settimeofday(SB) + RET +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setuid(SB) + RET +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_stat(SB) + RET +GLOBL 
·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_statfs(SB) + RET +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_symlink(SB) + RET +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_symlinkat(SB) + RET +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_sync(SB) + RET +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_truncate(SB) + RET +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_umask(SB) + RET +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unlink(SB) + RET +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unlinkat(SB) + RET +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unmount(SB) + RET +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_write(SB) + RET +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mmap(SB) + RET +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_munmap(SB) + RET +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_utimensat(SB) + RET +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go new file mode 100644 index 000000000000..5f24de0d9d76 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -0,0 +1,2235 @@ +// go run mksyscall.go -openbsd -libc -tags openbsd,riscv64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_riscv64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build openbsd && riscv64 +// +build openbsd,riscv64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_getsockopt getsockopt "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mlock_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_mlock mlock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getcwd_trampoline_addr uintptr 
+ +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil 
{ + return + } + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) + return +} + +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := 
syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" 
+ +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) + egid = int(r0) + return +} + +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) + uid = int(r0) + return +} + +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) + gid = int(r0) + return +} + +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) + pgrp = int(r0) + return +} + +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp 
getpgrp "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) + pid = int(r0) + return +} + +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) + ppid = int(r0) + return +} + +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrtable() (rtable int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) + uid = int(r0) + return +} + +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid 
"libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if 
e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) 
+ } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, 
uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var 
libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrtable(rtable int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath 
string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s new file mode 100644 index 000000000000..e1fbd4dfa8c8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -0,0 +1,669 @@ +// go run mkasm.go openbsd riscv64 +// Code generated by the command above; DO NOT EDIT. 
+ +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT 
libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + 
+TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT 
libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, 
$8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL 
·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP 
libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 
+DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index b5f926cee2a9..78d4a4240e9c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -38,6 +38,7 @@ import ( //go:cgo_import_dynamic libc_chmod chmod "libc.so" //go:cgo_import_dynamic libc_chown chown "libc.so" //go:cgo_import_dynamic libc_chroot chroot "libc.so" +//go:cgo_import_dynamic libc_clockgettime clockgettime "libc.so" //go:cgo_import_dynamic libc_close close "libc.so" //go:cgo_import_dynamic libc_creat creat "libc.so" //go:cgo_import_dynamic libc_dup dup "libc.so" @@ -66,6 +67,7 @@ import ( //go:cgo_import_dynamic libc_getpriority getpriority "libc.so" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" //go:cgo_import_dynamic libc_getrusage getrusage "libc.so" +//go:cgo_import_dynamic libc_getsid getsid "libc.so" //go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" //go:cgo_import_dynamic libc_getuid getuid "libc.so" //go:cgo_import_dynamic libc_kill kill "libc.so" @@ -146,6 +148,8 @@ import ( //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" //go:cgo_import_dynamic libc_port_get port_get "libc.so" //go:cgo_import_dynamic libc_port_getn port_getn "libc.so" +//go:cgo_import_dynamic libc_putmsg putmsg "libc.so" +//go:cgo_import_dynamic libc_getmsg getmsg "libc.so" //go:linkname procpipe libc_pipe //go:linkname procpipe2 libc_pipe2 @@ -174,6 +178,7 @@ import ( //go:linkname procChmod libc_chmod //go:linkname procChown libc_chown //go:linkname procChroot libc_chroot +//go:linkname procClockGettime libc_clockgettime 
//go:linkname procClose libc_close //go:linkname procCreat libc_creat //go:linkname procDup libc_dup @@ -202,6 +207,7 @@ import ( //go:linkname procGetpriority libc_getpriority //go:linkname procGetrlimit libc_getrlimit //go:linkname procGetrusage libc_getrusage +//go:linkname procGetsid libc_getsid //go:linkname procGettimeofday libc_gettimeofday //go:linkname procGetuid libc_getuid //go:linkname procKill libc_kill @@ -227,8 +233,8 @@ import ( //go:linkname procOpenat libc_openat //go:linkname procPathconf libc_pathconf //go:linkname procPause libc_pause -//go:linkname procPread libc_pread -//go:linkname procPwrite libc_pwrite +//go:linkname procpread libc_pread +//go:linkname procpwrite libc_pwrite //go:linkname procread libc_read //go:linkname procReadlink libc_readlink //go:linkname procRename libc_rename @@ -282,6 +288,8 @@ import ( //go:linkname procport_dissociate libc_port_dissociate //go:linkname procport_get libc_port_get //go:linkname procport_getn libc_port_getn +//go:linkname procputmsg libc_putmsg +//go:linkname procgetmsg libc_getmsg var ( procpipe, @@ -311,6 +319,7 @@ var ( procChmod, procChown, procChroot, + procClockGettime, procClose, procCreat, procDup, @@ -339,6 +348,7 @@ var ( procGetpriority, procGetrlimit, procGetrusage, + procGetsid, procGettimeofday, procGetuid, procKill, @@ -364,8 +374,8 @@ var ( procOpenat, procPathconf, procPause, - procPread, - procPwrite, + procpread, + procpwrite, procread, procReadlink, procRename, @@ -418,7 +428,9 @@ var ( procport_associate, procport_dissociate, procport_get, - procport_getn syscallFunc + procport_getn, + procputmsg, + procgetmsg syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -741,6 +753,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClockGettime)), 2, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1044,6 +1066,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0) + sid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1380,12 +1413,12 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] } - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = e1 @@ -1395,12 +1428,12 @@ func Pread(fd int, p []byte, offset int64) 
(n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] } - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = e1 @@ -2051,3 +2084,23 @@ func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Times } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 9e9d0b2a9c45..55e0484719c4 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -17,6 +17,7 @@ var sysctlMib = []mibentry{ {"ddb.max_line", []_C_int{9, 3}}, {"ddb.max_width", []_C_int{9, 2}}, {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, {"ddb.radix", []_C_int{9, 1}}, {"ddb.tab_stop_width", []_C_int{9, 4}}, {"ddb.trigger", []_C_int{9, 8}}, @@ -33,29 +34,37 @@ var sysctlMib = []mibentry{ {"hw.ncpufound", []_C_int{6, 21}}, {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, {"hw.usermem", []_C_int{6, 20}}, {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, - {"kern.arandom", []_C_int{1, 37}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, - {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.cpustats", []_C_int{1, 85}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, {"kern.fscale", []_C_int{1, 46}}, {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, {"kern.hostid", []_C_int{1, 11}}, {"kern.hostname", []_C_int{1, 10}}, {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, @@ -78,17 +87,16 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, 
{"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, - {"kern.random", []_C_int{1, 31}}, {"kern.rawpartition", []_C_int{1, 24}}, {"kern.saved_ids", []_C_int{1, 20}}, {"kern.securelevel", []_C_int{1, 9}}, @@ -106,21 +114,20 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, - {"kern.tty.maxptys", []_C_int{1, 44, 6}}, - {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, - {"kern.userasymcrypto", []_C_int{1, 60}}, - {"kern.usercrypto", []_C_int{1, 52}}, - {"kern.usermount", []_C_int{1, 30}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, - {"kern.vnode", []_C_int{1, 13}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, @@ -148,7 +155,9 @@ var sysctlMib = []mibentry{ {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, @@ -157,8 +166,10 @@ var sysctlMib = []mibentry{ {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, @@ -175,9 +186,7 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, - {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, @@ -191,6 +200,7 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + 
{"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, @@ -198,9 +208,12 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, @@ -213,13 +226,8 @@ var sysctlMib = []mibentry{ {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, - {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, - {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, - {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, - {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, - {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, @@ -232,20 +240,19 @@ var sysctlMib = []mibentry{ {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, - {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, - {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, - {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, - {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, {"net.key.sadb_dump", []_C_int{4, 30, 1}}, {"net.key.spd_dump", []_C_int{4, 30, 2}}, {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, @@ -254,12 +261,12 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, {"vm.anonmin", []_C_int{2, 7}}, {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, {"vm.maxslp", []_C_int{2, 10}}, {"vm.nkmempages", []_C_int{2, 6}}, {"vm.psstrings", 
[]_C_int{2, 3}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index adecd09667d0..d2243cf83f5b 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -36,23 +36,29 @@ var sysctlMib = []mibentry{ {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, {"hw.usermem", []_C_int{6, 20}}, {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, - {"kern.dnsjackport", []_C_int{1, 13}}, + {"kern.cpustats", []_C_int{1, 85}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, @@ -81,13 +87,13 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, @@ -108,15 +114,19 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, {"kern.wxabort", []_C_int{1, 74}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, @@ -176,7 +186,6 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, @@ -252,12 +261,12 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, 
{"net.pipex.enable", []_C_int{4, 35, 1}}, {"vm.anonmin", []_C_int{2, 7}}, {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, {"vm.maxslp", []_C_int{2, 10}}, {"vm.nkmempages", []_C_int{2, 6}}, {"vm.psstrings", []_C_int{2, 3}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 8ea52a4a1810..82dc51bd8b57 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -17,6 +17,7 @@ var sysctlMib = []mibentry{ {"ddb.max_line", []_C_int{9, 3}}, {"ddb.max_width", []_C_int{9, 2}}, {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, {"ddb.radix", []_C_int{9, 1}}, {"ddb.tab_stop_width", []_C_int{9, 4}}, {"ddb.trigger", []_C_int{9, 8}}, @@ -33,29 +34,37 @@ var sysctlMib = []mibentry{ {"hw.ncpufound", []_C_int{6, 21}}, {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, {"hw.usermem", []_C_int{6, 20}}, {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, - {"kern.arandom", []_C_int{1, 37}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, - {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.cpustats", []_C_int{1, 85}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, {"kern.fscale", []_C_int{1, 46}}, {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, {"kern.hostid", []_C_int{1, 11}}, {"kern.hostname", []_C_int{1, 10}}, {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, @@ -78,17 +87,16 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, - {"kern.random", []_C_int{1, 31}}, {"kern.rawpartition", []_C_int{1, 24}}, {"kern.saved_ids", []_C_int{1, 20}}, {"kern.securelevel", []_C_int{1, 9}}, @@ -106,21 +114,20 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, - {"kern.tty.maxptys", []_C_int{1, 44, 6}}, - {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, - 
{"kern.userasymcrypto", []_C_int{1, 60}}, - {"kern.usercrypto", []_C_int{1, 52}}, - {"kern.usermount", []_C_int{1, 30}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, - {"kern.vnode", []_C_int{1, 13}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, @@ -148,7 +155,9 @@ var sysctlMib = []mibentry{ {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, @@ -157,8 +166,10 @@ var sysctlMib = []mibentry{ {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, @@ -175,9 +186,7 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, - {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, @@ -191,6 +200,7 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, @@ -198,9 +208,12 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, @@ -213,13 +226,8 @@ var sysctlMib = []mibentry{ {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, - {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, - 
{"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, - {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, - {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, - {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, @@ -232,20 +240,19 @@ var sysctlMib = []mibentry{ {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, - {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, - {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, - {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, - {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, {"net.key.sadb_dump", []_C_int{4, 30, 1}}, {"net.key.spd_dump", []_C_int{4, 30, 2}}, {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, @@ -254,12 +261,12 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, {"vm.anonmin", []_C_int{2, 7}}, {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, {"vm.maxslp", []_C_int{2, 10}}, {"vm.nkmempages", []_C_int{2, 6}}, {"vm.psstrings", []_C_int{2, 3}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index 154b57ae3e2a..cbdda1a4ae24 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -36,6 +36,7 @@ var sysctlMib = []mibentry{ {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, @@ -44,6 +45,7 @@ var sysctlMib = []mibentry{ {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, {"kern.audio", []_C_int{1, 84}}, @@ -51,6 +53,8 @@ var sysctlMib = []mibentry{ {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, @@ -83,13 +87,13 @@ 
var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, @@ -110,13 +114,16 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, {"kern.witnesswatch", []_C_int{1, 53}}, @@ -179,7 +186,6 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, @@ -255,7 +261,6 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index d96bb2ba4db6..f55eae1a8211 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -36,6 +36,7 @@ var sysctlMib = []mibentry{ {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, @@ -86,7 +87,6 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, @@ -123,6 +123,7 @@ var sysctlMib = []mibentry{ {"kern.ttycount", []_C_int{1, 57}}, {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, {"kern.witnesswatch", []_C_int{1, 53}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go new file mode 100644 index 000000000000..e44054470b7e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go @@ -0,0 +1,281 @@ +// go run mksysctl_openbsd.go +// Code 
generated by the command above; DO NOT EDIT. + +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cpustats", []_C_int{1, 85}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 
61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + 
{"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + 
{"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go new file mode 100644 index 000000000000..a0db82fce206 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go @@ -0,0 +1,282 @@ +// go run mksysctl_openbsd.go +// Code generated by the command above; DO NOT EDIT. 
+ +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cpustats", []_C_int{1, 85}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 
61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + 
{"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + 
{"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 59d5dfc20922..4e0d96107b9e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - 
SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 
301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int 
nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, 
void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo 
*sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, 
cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 342d471d2eb1..01636b838d30 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int 
mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t 
len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { 
int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + 
SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int 
mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct 
timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index e2e3d72c5b04..ad99bc106a86 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - 
SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 
301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int 
nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, 
void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo 
*sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, 
cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 61ad5ca3c19b..89dcc4274765 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int 
mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t 
len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { 
int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + 
SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int 
mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct 
timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go new file mode 100644 index 000000000000..ee37aaa0c906 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go @@ -0,0 +1,394 @@ +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +package unix + +const ( + // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int + SYS_EXIT = 1 // { void sys_exit(int rval); } exit sys_exit_args void + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); } + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_BREAK = 17 // { caddr_t break(char *nsize); } + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, struct sockaddr * __restrict from, __socklen_t * __restrict fromlenaddr); } + SYS_ACCEPT = 30 // { int accept(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen); } + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } + SYS_ACCESS = 33 // { int access(char *path, int amode); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); } + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); } + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); } + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } + SYS_UMASK = 60 // { int umask(int newmask); } + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } + SYS_VFORK = 66 // { int vfork(void); } + 
SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); } + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); } + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); } + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); } + SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); } + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); } + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); } + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); } + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); } + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); } + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); } + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); } + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); } + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); } + SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); } + SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); } + SYS_LGETFH = 160 // { int lgetfh(char *fname, struct fhandle *fhp); } + SYS_GETFH = 161 // { 
int getfh(char *fname, struct fhandle *fhp); } + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); } + SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } + SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } + SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } + SYS_SETFIB = 175 // { int setfib(int fibnum); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); } + SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } + SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } + SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); } + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct itimerspec *value); } + SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, 
clockid_t *clock_id); } + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } + SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } + SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); } + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); } + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, sigset_t *oset); } + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const 
sigset_t *set, siginfo_t *info, const struct timespec *timeout); } + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, siginfo_t *info); } + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, acl_type_t type); } + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); } + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, struct mac *mac_p); } + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, struct mac *mac_p); } + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, struct mac *mac_p); } + SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); } + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); } + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } + SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } + SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } + SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } + 
SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); } + SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, unsigned int value); } + SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, const char *name, int oflag, mode_t mode, unsigned int value); } + SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); } + SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); } + SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); } + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } + SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, acl_type_t type); } + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, int *sig); } + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, int flags); } + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, u_int length); } + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + 
SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, lwpid_t lwpid, struct rtprio *rtp); } + SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, int whence); } + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, mode_t mode); } + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, cpusetid_t setid); } + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); } + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); } + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *mask); } + SYS_FACCESSAT = 489 // { int faccessat(int fd, char 
*path, int amode, int flag); } + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, int fd, cap_rights_t *rightsp); } + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *sm); } + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, size_t namelen); } + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, off_t offset, off_t len); } + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, off_t len, int advice); } + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); } + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, cap_rights_t *rightsp); } + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, const u_long 
*cmds, size_t ncmds); } + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, u_long *cmds, size_t maxcmds); } + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, uint32_t fcntlrights); } + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, uint32_t *fcntlrightsp); } + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, int namelen); } + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, int namelen); } + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, u_long flags, int atflag); } + SYS_ACCEPT4 = 541 // { int accept4(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen, int flags); } + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, int com, void *data); } + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } + SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } + SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index cac1f758bf7e..c9c4ad0314f9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m32 /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static 
-I/tmp/386/include -m32 /tmp/386/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux @@ -446,4 +446,5 @@ const ( SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f327e4a0bccb..12ff3417c5fd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m64 /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/amd64/include -m64 /tmp/amd64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux @@ -368,4 +368,5 @@ const ( SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index fb06a08d4ee8..c3fb5e77ab43 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/arm/include /tmp/arm/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux @@ -410,4 +410,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 58285646eb79..358c847a40c5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/arm64/include -fsigned-char /tmp/arm64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux @@ -313,4 +313,5 @@ const ( SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go new file mode 100644 index 000000000000..81c4849b1619 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -0,0 +1,311 @@ +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/loong64/include /tmp/loong64/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build loong64 && linux +// +build loong64,linux + +package unix + +const ( + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 
162 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 + SYS_RSEQ = 293 + SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 3b0418e68944..202a57e90086 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run 
linux/mksysnum.go -Wall -Werror -static -I/tmp/mips/include /tmp/mips/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux @@ -430,4 +430,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 4446 SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 + SYS_SET_MEMPOLICY_HOME_NODE = 4450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 314ebf166ab9..1fbceb52d7cf 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mips64/include /tmp/mips64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux @@ -360,4 +360,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 5446 SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 + SYS_SET_MEMPOLICY_HOME_NODE = 5450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index b8fbb937a333..b4ffb7a207d5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mips64le/include /tmp/mips64le/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux @@ -360,4 +360,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 5446 SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 + SYS_SET_MEMPOLICY_HOME_NODE = 5450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index ee309b2bac96..867985f9b440 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mipsle/include /tmp/mipsle/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux @@ -430,4 +430,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 4446 SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 + SYS_SET_MEMPOLICY_HOME_NODE = 4450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index ac3748104ed0..a8cce69ede2f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc/include /tmp/ppc/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc && linux @@ -437,4 +437,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5aa472111041..d44c5b39d79d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc64/include /tmp/ppc64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux @@ -409,4 +409,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0793ac1a65be..4214dd9c03a7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc64le/include /tmp/ppc64le/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux @@ -409,4 +409,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a520962e3954..3e594a8c0910 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/riscv64/include /tmp/riscv64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux @@ -309,6 +309,8 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index d1738586b4f6..7ea465204b7c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/s390x/include -fsigned-char /tmp/s390x/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build s390x && linux @@ -374,4 +374,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index dfd5660f9741..92f628ef4f23 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/sparc64/include /tmp/sparc64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux @@ -388,4 +388,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 817edbf95c0a..597733813e37 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index ea453614e697..16af29189940 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index 467971eed661..f59b18a97795 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index 32eec5ed56f1..721ef5910321 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index a37f77375636..01c43a01fda7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. 
const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go new file mode 100644 index 000000000000..f258cfa24ed4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go @@ -0,0 +1,218 @@ +// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +package unix + +const ( + SYS_EXIT = 1 // { void sys_exit(int rval); } + SYS_FORK = 2 // { int sys_fork(void); } + SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int sys_open(const char *path, int flags, ... mode_t mode); } + SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } + SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, size_t psize); } + SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int sys_unlink(const char *path); } + SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_CHDIR = 12 // { int sys_chdir(const char *path); } + SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } + SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, dev_t dev); } + SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, gid_t gid); } + SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break + SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } + SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, struct rusage *rusage); } + SYS_GETPID = 20 // { pid_t sys_getpid(void); } + SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, int flags, void *data); } + SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int sys_setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t sys_getuid(void); } + SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } + SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } + SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } + SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } + SYS_SYNC = 36 // { void sys_sync(void); } + SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } + SYS_GETPPID = 39 // { pid_t sys_getppid(void); } + SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } + SYS_DUP = 41 // { int sys_dup(int fd); } + SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, struct stat 
*buf, int flag); } + SYS_GETEGID = 43 // { gid_t sys_getegid(void); } + SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_SIGACTION = 46 // { int sys_sigaction(int signum, const struct sigaction *nsa, struct sigaction *osa); } + SYS_GETGID = 47 // { gid_t sys_getgid(void); } + SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } + SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int sys_acct(const char *path); } + SYS_SIGPENDING = 52 // { int sys_sigpending(void); } + SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } + SYS_IOCTL = 54 // { int sys_ioctl(int fd, u_long com, ... void *data); } + SYS_REBOOT = 55 // { int sys_reboot(int opt); } + SYS_REVOKE = 56 // { int sys_revoke(const char *path); } + SYS_SYMLINK = 57 // { int sys_symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int sys_execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } + SYS_CHROOT = 61 // { int sys_chroot(const char *path); } + SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, int flags); } + SYS_STATFS = 63 // { int sys_statfs(const char *path, struct statfs *buf); } + SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, struct statfs *buf); } + SYS_VFORK = 66 // { int sys_vfork(void); } + SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, const struct timezone *tzp); } + SYS_SETITIMER = 69 // { int sys_setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 70 // { int sys_getitimer(int which, struct itimerval *itv); } + SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_KEVENT = 72 // { int sys_kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, int behav); } + SYS_UTIMES = 76 // { int sys_utimes(const char *path, const struct timeval *tptr); } + SYS_FUTIMES = 77 // { int sys_futimes(int fd, const struct timeval *tptr); } + SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int sys_getpgrp(void); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } + SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, const struct timespec *timeout, uint32_t *g); } + SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, const struct timespec *times, int flag); } + SYS_FUTIMENS = 85 // { int sys_futimens(int fd, const struct timespec *times); } + SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, size_t psize, int64_t proc_cookie); } + SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 88 // { int 
sys_clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } + SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... void *arg); } + SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, socklen_t *anamelen, int flags); } + SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, clockid_t clock_id, const struct timespec *tp, void *lock, const int *abort); } + SYS_FSYNC = 95 // { int sys_fsync(int fd); } + SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } + SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } + SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } + SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } + SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } + SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, u_int flags, int atflags); } + SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, const char *execpromises); } + SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, int flags); } + SYS_UNVEIL = 114 // { int sys_unveil(const char *path, const char *permissions); } + SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } + SYS_READV = 120 // { ssize_t sys_readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_KILL = 122 // { int sys_kill(int pid, int signum); } + SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } + SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 
// { int sys_rmdir(const char *path); } + SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); } + SYS_SETSID = 147 // { int sys_setsid(void); } + SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, int uid, char *arg); } + SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); } + SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } + SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } + SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } + SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } + SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } + SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } + SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } + SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, off_t length); } + SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } + SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } + SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } + SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, size_t len); } + SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } + SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } + SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, int inherit); } + SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_ISSETUGID = 253 // { int sys_issetugid(void); } + SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); } + SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } + SYS_PIPE = 263 // { int sys_pipe(int *fdp); } + SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } + SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_KQUEUE = 269 // { int sys_kqueue(void); } + SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } + SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } + 
SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_MQUERY = 286 // { void *sys_mquery(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } + SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, struct sigaltstack *oss); } + SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } + SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, struct stat *sb); } + SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } + SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } + SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, int n); } + SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); } + SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, siginfo_t *info, const struct timespec *timeout); } + SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } + SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, int64_t *oldfreq); } + SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } + SYS_GETRTABLE = 311 // { int sys_getrtable(void); } + SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, uid_t uid, gid_t gid, int flag); } + SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, const char *path2, int flag); } + SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, mode_t mode); } + SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, mode_t mode, dev_t dev); } + SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, ... mode_t mode); } + SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, char *buf, size_t count); } + SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, const char *link); } + SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, int flag); } + SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } + SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go new file mode 100644 index 000000000000..07919e0eccd9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go @@ -0,0 +1,219 @@ +// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +package unix + +// Deprecated: Use libc wrappers instead of direct syscalls. +const ( + SYS_EXIT = 1 // { void sys_exit(int rval); } + SYS_FORK = 2 // { int sys_fork(void); } + SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int sys_open(const char *path, int flags, ... mode_t mode); } + SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } + SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, size_t psize); } + SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int sys_unlink(const char *path); } + SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_CHDIR = 12 // { int sys_chdir(const char *path); } + SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } + SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, dev_t dev); } + SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, gid_t gid); } + SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break + SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } + SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, struct rusage *rusage); } + SYS_GETPID = 20 // { pid_t sys_getpid(void); } + SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, int flags, void *data); } + SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int sys_setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t sys_getuid(void); } + SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } + SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } + SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } + SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } + SYS_SYNC = 36 // { void sys_sync(void); } + SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } + SYS_GETPPID = 39 // { pid_t sys_getppid(void); } + SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } + SYS_DUP = 41 // { int sys_dup(int fd); } + SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, struct stat *buf, int flag); } + SYS_GETEGID = 43 // { gid_t sys_getegid(void); } + SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_SIGACTION = 46 // { int sys_sigaction(int signum, const struct sigaction *nsa, struct sigaction *osa); } + SYS_GETGID = 47 // { gid_t sys_getgid(void); } + SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int 
how, sigset_t mask); } + SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int sys_acct(const char *path); } + SYS_SIGPENDING = 52 // { int sys_sigpending(void); } + SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } + SYS_IOCTL = 54 // { int sys_ioctl(int fd, u_long com, ... void *data); } + SYS_REBOOT = 55 // { int sys_reboot(int opt); } + SYS_REVOKE = 56 // { int sys_revoke(const char *path); } + SYS_SYMLINK = 57 // { int sys_symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int sys_execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } + SYS_CHROOT = 61 // { int sys_chroot(const char *path); } + SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, int flags); } + SYS_STATFS = 63 // { int sys_statfs(const char *path, struct statfs *buf); } + SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, struct statfs *buf); } + SYS_VFORK = 66 // { int sys_vfork(void); } + SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, const struct timezone *tzp); } + SYS_SETITIMER = 69 // { int sys_setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 70 // { int sys_getitimer(int which, struct itimerval *itv); } + SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_KEVENT = 72 // { int sys_kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, int behav); } + SYS_UTIMES = 76 // { int sys_utimes(const char *path, const struct timeval *tptr); } + SYS_FUTIMES = 77 // { int sys_futimes(int fd, const struct timeval *tptr); } + SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int sys_getpgrp(void); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } + SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, const struct timespec *timeout, uint32_t *g); } + SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, const struct timespec *times, int flag); } + SYS_FUTIMENS = 85 // { int sys_futimens(int fd, const struct timespec *times); } + SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, size_t psize, int64_t proc_cookie); } + SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } + SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... 
void *arg); } + SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, socklen_t *anamelen, int flags); } + SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, clockid_t clock_id, const struct timespec *tp, void *lock, const int *abort); } + SYS_FSYNC = 95 // { int sys_fsync(int fd); } + SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } + SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } + SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } + SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } + SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } + SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, u_int flags, int atflags); } + SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, const char *execpromises); } + SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, int flags); } + SYS_UNVEIL = 114 // { int sys_unveil(const char *path, const char *permissions); } + SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } + SYS_READV = 120 // { ssize_t sys_readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_KILL = 122 // { int sys_kill(int pid, int signum); } + SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } + SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } + SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); } + SYS_SETSID = 147 // { int sys_setsid(void); } + SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, int uid, char *arg); } + SYS_NFSSVC 
= 155 // { int sys_nfssvc(int flag, void *argp); } + SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } + SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } + SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } + SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } + SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } + SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } + SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } + SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, off_t length); } + SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } + SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } + SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } + SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, size_t len); } + SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } + SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } + SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, int inherit); } + SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_ISSETUGID = 253 // { int sys_issetugid(void); } + SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); } + SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } + SYS_PIPE = 263 // { int sys_pipe(int *fdp); } + SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } + SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_KQUEUE = 269 // { int sys_kqueue(void); } + SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } + SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } + SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_MQUERY = 286 // { void 
*sys_mquery(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } + SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, struct sigaltstack *oss); } + SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } + SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, struct stat *sb); } + SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } + SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } + SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, int n); } + SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); } + SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, siginfo_t *info, const struct timespec *timeout); } + SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } + SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, int64_t *oldfreq); } + SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } + SYS_GETRTABLE = 311 // { int sys_getrtable(void); } + SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, uid_t uid, gid_t gid, int flag); } + SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, const char *path2, int flag); } + SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, mode_t mode); } + SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, mode_t mode, dev_t dev); } + SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, ... 
mode_t mode); } + SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, char *buf, size_t count); } + SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, const char *link); } + SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, int flag); } + SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } + SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 885842c0eb40..e2a64f0991a0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -366,30 +366,57 @@ type ICMPv6Filter struct { Filt [8]uint32 } +type TCPConnectionInfo struct { + State uint8 + Snd_wscale uint8 + Rcv_wscale uint8 + _ uint8 + Options uint32 + Flags uint32 + Rto uint32 + Maxseg uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Snd_wnd uint32 + Snd_sbbytes uint32 + Rcv_wnd uint32 + Rttcur uint32 + Srtt uint32 + Rttvar uint32 + Txpackets uint64 + Txbytes uint64 + Txretransmitbytes uint64 + Rxpackets uint64 + Rxbytes uint64 + Rxoutoforderbytes uint64 + Txretransmitpackets uint64 +} + const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofSockaddrVM = 0xc - SizeofXvsockpcb = 0xa8 - SizeofXSocket = 0x64 - SizeofXSockbuf = 0x18 - SizeofXVSockPgen = 0x20 - SizeofXucred = 0x4c - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + SizeofXVSockPgen = 0x20 + SizeofXucred = 0x4c + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofTCPConnectionInfo = 0x70 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index b23c02337db3..34aa775219f0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -366,30 +366,57 @@ type ICMPv6Filter struct { Filt [8]uint32 } +type TCPConnectionInfo struct { + State uint8 + Snd_wscale uint8 + Rcv_wscale uint8 + _ uint8 + Options uint32 + Flags uint32 + Rto uint32 + Maxseg uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Snd_wnd uint32 + Snd_sbbytes uint32 + Rcv_wnd uint32 + Rttcur uint32 + Srtt uint32 + Rttvar uint32 + Txpackets uint64 + Txbytes uint64 + Txretransmitbytes uint64 + Rxpackets uint64 + Rxbytes uint64 + Rxoutoforderbytes uint64 + Txretransmitpackets uint64 +} + const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofSockaddrVM = 0xc - SizeofXvsockpcb = 0xa8 - 
SizeofXSocket = 0x64 - SizeofXSockbuf = 0x18 - SizeofXVSockPgen = 0x20 - SizeofXucred = 0x4c - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + SizeofXVSockPgen = 0x20 + SizeofXucred = 0x4c + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofTCPConnectionInfo = 0x70 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 4eec078e5249..d9c78cdcbc45 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -90,27 +90,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec - _ [8]byte -} - type Statfs_t struct { Version uint32 Type uint32 @@ -136,31 +115,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -181,14 +135,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -337,41 +283,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -380,7 +294,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -398,6 +312,17 @@ type __Siginfo struct { Value [4]byte _ [32]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr 
uintptr + Value [4]byte + _ [32]byte +} type Sigset_t struct { Val [4]uint32 @@ -432,10 +357,12 @@ type FpReg struct { Pad [64]uint8 } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 - Offs *byte - Addr *byte + Offs uintptr + Addr uintptr Len uint32 } @@ -444,8 +371,9 @@ type Kevent_t struct { Filter int16 Flags uint16 Fflags uint32 - Data int32 + Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7622904a532f..26991b165596 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -86,26 +86,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -131,31 +111,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -177,14 +132,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -333,41 +280,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -376,7 +291,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -395,6 +310,18 @@ type __Siginfo struct { _ [40]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [8]byte + _ [40]byte +} + type Sigset_t struct { Val [4]uint32 } @@ -435,10 +362,12 @@ type FpReg struct { Spare [12]uint64 } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 - Offs *byte - Addr *byte + Offs uintptr + Addr uintptr Len uint64 } @@ -449,6 +378,7 @@ type Kevent_t struct { Fflags uint32 Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 19223ce8ecf9..f8324e7e7f49 100644 
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -33,7 +33,7 @@ type Timeval struct { _ [4]byte } -type Time_t int32 +type Time_t int64 type Rusage struct { Utime Timeval @@ -88,26 +88,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -133,31 +113,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -179,14 +134,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -335,41 +282,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -378,7 +293,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -386,15 +301,27 @@ type PtraceLwpInfoStruct struct { } type __Siginfo struct { - Signo int32 - Errno int32 - Code int32 - Pid int32 - Uid uint32 - Status int32 - Addr *byte - Value [4]byte - X_reason [32]byte + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [4]byte + _ [32]byte +} + +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [4]byte + _ [32]byte } type Sigset_t struct { @@ -402,22 +329,28 @@ type Sigset_t struct { } type Reg struct { - R [13]uint32 - R_sp uint32 - R_lr uint32 - R_pc uint32 - R_cpsr uint32 + R [13]uint32 + Sp uint32 + Lr uint32 + Pc uint32 + Cpsr uint32 } type FpReg struct { - Fpr_fpsr uint32 - Fpr [8][3]uint32 + Fpsr uint32 + Fpr [8]FpExtendedPrecision +} + +type FpExtendedPrecision struct { + Exponent uint32 + Mantissa_hi uint32 + Mantissa_lo uint32 } type PtraceIoDesc struct { Op int32 - Offs *byte - Addr *byte + Offs uintptr + Addr uintptr Len uint32 } @@ -426,8 +359,11 @@ type Kevent_t struct { Filter int16 Flags uint16 Fflags uint32 - Data int32 + _ [4]byte + Data int64 Udata *byte + _ [4]byte + Ext [4]uint64 } type FdSet struct 
{ @@ -453,7 +389,7 @@ type ifMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Data ifData } @@ -464,7 +400,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -532,7 +467,7 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Metric int32 } @@ -543,7 +478,7 @@ type IfmaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 } type IfAnnounceMsghdr struct { @@ -560,7 +495,7 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte + _ uint16 Flags int32 Addrs int32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 8e3e33f67905..4220411f341a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -86,26 +86,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -131,31 +111,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -177,14 +132,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -333,39 +280,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -374,7 +291,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -393,6 +310,18 @@ type __Siginfo struct { _ [40]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [8]byte + _ [40]byte +} + type Sigset_t struct { Val [4]uint32 } @@ -413,10 +342,12 @@ type FpReg struct { _ [8]byte } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 - Offs *byte - Addr *byte + Offs uintptr + Addr uintptr Len uint64 } @@ -427,6 +358,7 @@ type Kevent_t struct { Fflags uint32 Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go new file mode 100644 index 000000000000..0660fd45c7c6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -0,0 +1,638 @@ +// cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Time_t int64 + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + _statfsVersion = 0x20140518 + _dirblksiz = 0x400 +) + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 +} + +type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 + Sysid int32 + _ [4]byte +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Pad0 uint8 + Namlen uint16 + Pad1 uint16 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [46]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface 
uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x58 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type PtraceLwpInfoStruct struct { + Lwpid int32 + Event int32 + Flags int32 + Sigmask Sigset_t + Siglist Sigset_t + Siginfo __PtraceSiginfo + Tdname [20]int8 + Child_pid int32 + Syscall_code uint32 + Syscall_narg uint32 +} + +type __Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [8]byte + _ [40]byte +} + +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [8]byte + _ [40]byte +} + +type Sigset_t struct { + Val [4]uint32 +} + +type Reg struct { + Ra uint64 + Sp uint64 + Gp uint64 + Tp uint64 + T [7]uint64 + S [12]uint64 + A [8]uint64 + Sepc uint64 + Sstatus uint64 +} + +type FpReg struct { + X [32][2]uint64 + Fcsr uint64 +} + +type FpExtendedPrecision struct{} + +type PtraceIoDesc struct { + Op int32 + Offs uintptr + Addr uintptr + Len uint64 +} + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte + Ext [4]uint64 +} + +type FdSet struct { + Bits [16]uint64 +} + +const ( + sizeofIfMsghdr = 0xa8 + SizeofIfMsghdr = 0xa8 + sizeofIfData = 0x98 + SizeofIfData = 0x98 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x98 + SizeofRtMetrics = 0x70 +) + +type ifMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 + Data ifData +} + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Data IfData +} + +type ifData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Datalen uint16 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Hwassist uint64 + _ [8]byte + _ [16]byte +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Spare_char1 uint8 + Spare_char2 uint8 + Datalen uint8 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Hwassist uint64 + Epoch int64 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 + Metric int32 +} + +type 
IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ uint16 + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint64 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Hopcount uint64 + Expire uint64 + Recvpipe uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Pksent uint64 + Weight uint64 + Nhidx uint64 + Filler [2]uint64 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfZbuf = 0x18 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x20 + SizeofBpfZbufHeader = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfZbuf struct { + Bufa *byte + Bufb *byte + Buflen uint64 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [6]byte +} + +type BpfZbufHeader struct { + Kernel_gen uint32 + Kernel_len uint32 + User_gen uint32 + _ [5]uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_EACCESS = 0x100 + AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type CapRights struct { + Rights [2]uint64 +} + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Spare int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go deleted file mode 100644 index 4c485261d6df..000000000000 --- a/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go +++ /dev/null @@ -1,42 +0,0 @@ -// cgo -godefs types_illumos.go | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -//go:build amd64 && illumos -// +build amd64,illumos - -package unix - -const ( - TUNNEWPPA = 0x540001 - TUNSETPPA = 0x540002 - - I_STR = 0x5308 - I_POP = 0x5303 - I_PUSH = 0x5302 - I_LINK = 0x530c - I_UNLINK = 0x530d - I_PLINK = 0x5316 - I_PUNLINK = 0x5317 - - IF_UNITSEL = -0x7ffb8cca -) - -type strbuf struct { - Maxlen int32 - Len int32 - Buf *int8 -} - -type Strioctl struct { - Cmd int32 - Timout int32 - Len int32 - Dp *int8 -} - -type Lifreq struct { - Name [32]int8 - Lifru1 [4]byte - Type uint32 - Lifru [336]byte -} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 824cadb419c1..ff6881167d97 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -754,6 +754,25 @@ const ( AT_SYMLINK_NOFOLLOW = 0x100 AT_EACCESS = 0x200 + + OPEN_TREE_CLONE = 0x1 + + MOVE_MOUNT_F_SYMLINKS = 0x1 + MOVE_MOUNT_F_AUTOMOUNTS = 0x2 + MOVE_MOUNT_F_EMPTY_PATH = 0x4 + MOVE_MOUNT_T_SYMLINKS = 0x10 + MOVE_MOUNT_T_AUTOMOUNTS = 0x20 + MOVE_MOUNT_T_EMPTY_PATH = 0x40 + MOVE_MOUNT_SET_GROUP = 0x100 + + FSOPEN_CLOEXEC = 0x1 + + FSPICK_CLOEXEC = 0x1 + FSPICK_SYMLINK_NOFOLLOW = 0x2 + FSPICK_NO_AUTOMOUNT = 0x4 + FSPICK_EMPTY_PATH = 0x8 + + FSMOUNT_CLOEXEC = 0x1 ) type OpenHow struct { @@ -926,6 +945,9 @@ type PerfEventAttr struct { Aux_watermark uint32 Sample_max_stack uint16 _ uint16 + Aux_sample_size uint32 + _ uint32 + Sig_data uint64 } type PerfEventMmapPage struct { @@ -1108,7 +1130,9 @@ const ( PERF_BR_SYSRET = 0x8 PERF_BR_COND_CALL = 0x9 PERF_BR_COND_RET = 0xa - PERF_BR_MAX = 0xb + PERF_BR_ERET = 0xb + PERF_BR_IRQ = 0xc + PERF_BR_MAX = 0xd PERF_SAMPLE_REGS_ABI_NONE = 0x0 PERF_SAMPLE_REGS_ABI_32 = 0x1 PERF_SAMPLE_REGS_ABI_64 = 0x2 @@ -1442,6 +1466,11 @@ const ( IFLA_ALT_IFNAME = 0x35 IFLA_PERM_ADDRESS = 0x36 IFLA_PROTO_DOWN_REASON = 0x37 + IFLA_PARENT_DEV_NAME = 0x38 + IFLA_PARENT_DEV_BUS_NAME = 0x39 + IFLA_GRO_MAX_SIZE = 0x3a + IFLA_TSO_MAX_SIZE = 0x3b + IFLA_TSO_MAX_SEGS = 0x3c IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -2950,7 +2979,7 @@ const ( DEVLINK_CMD_TRAP_POLICER_NEW = 0x47 DEVLINK_CMD_TRAP_POLICER_DEL = 0x48 DEVLINK_CMD_HEALTH_REPORTER_TEST = 0x49 - DEVLINK_CMD_MAX = 0x4d + DEVLINK_CMD_MAX = 0x51 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -3179,7 +3208,7 @@ const ( DEVLINK_ATTR_RATE_NODE_NAME = 0xa8 DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 0xa9 DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 0xaa - DEVLINK_ATTR_MAX = 0xaa + DEVLINK_ATTR_MAX = 0xae DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3619,7 +3648,11 @@ const ( ETHTOOL_A_RINGS_RX_MINI = 0x7 ETHTOOL_A_RINGS_RX_JUMBO = 0x8 ETHTOOL_A_RINGS_TX = 0x9 - ETHTOOL_A_RINGS_MAX = 0x9 + ETHTOOL_A_RINGS_RX_BUF_LEN = 0xa + ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb + ETHTOOL_A_RINGS_CQE_SIZE = 0xc + ETHTOOL_A_RINGS_TX_PUSH = 0xd + ETHTOOL_A_RINGS_MAX = 0xd ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -3771,6 +3804,8 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const SPEED_UNKNOWN = -0x1 + type EthtoolDrvinfo struct { Cmd uint32 Driver [32]byte @@ -4070,3 +4105,1505 @@ const ( NL_POLICY_TYPE_ATTR_MASK = 0xc NL_POLICY_TYPE_ATTR_MAX = 0xc ) + +type CANBitTiming struct { + Bitrate uint32 + Sample_point uint32 + Tq uint32 + Prop_seg uint32 + Phase_seg1 uint32 + Phase_seg2 uint32 + Sjw uint32 + Brp uint32 +} + +type 
CANBitTimingConst struct { + Name [16]uint8 + Tseg1_min uint32 + Tseg1_max uint32 + Tseg2_min uint32 + Tseg2_max uint32 + Sjw_max uint32 + Brp_min uint32 + Brp_max uint32 + Brp_inc uint32 +} + +type CANClock struct { + Freq uint32 +} + +type CANBusErrorCounters struct { + Txerr uint16 + Rxerr uint16 +} + +type CANCtrlMode struct { + Mask uint32 + Flags uint32 +} + +type CANDeviceStats struct { + Bus_error uint32 + Error_warning uint32 + Error_passive uint32 + Bus_off uint32 + Arbitration_lost uint32 + Restarts uint32 +} + +const ( + CAN_STATE_ERROR_ACTIVE = 0x0 + CAN_STATE_ERROR_WARNING = 0x1 + CAN_STATE_ERROR_PASSIVE = 0x2 + CAN_STATE_BUS_OFF = 0x3 + CAN_STATE_STOPPED = 0x4 + CAN_STATE_SLEEPING = 0x5 + CAN_STATE_MAX = 0x6 +) + +const ( + IFLA_CAN_UNSPEC = 0x0 + IFLA_CAN_BITTIMING = 0x1 + IFLA_CAN_BITTIMING_CONST = 0x2 + IFLA_CAN_CLOCK = 0x3 + IFLA_CAN_STATE = 0x4 + IFLA_CAN_CTRLMODE = 0x5 + IFLA_CAN_RESTART_MS = 0x6 + IFLA_CAN_RESTART = 0x7 + IFLA_CAN_BERR_COUNTER = 0x8 + IFLA_CAN_DATA_BITTIMING = 0x9 + IFLA_CAN_DATA_BITTIMING_CONST = 0xa + IFLA_CAN_TERMINATION = 0xb + IFLA_CAN_TERMINATION_CONST = 0xc + IFLA_CAN_BITRATE_CONST = 0xd + IFLA_CAN_DATA_BITRATE_CONST = 0xe + IFLA_CAN_BITRATE_MAX = 0xf +) + +type KCMAttach struct { + Fd int32 + Bpf_fd int32 +} + +type KCMUnattach struct { + Fd int32 +} + +type KCMClone struct { + Fd int32 +} + +const ( + NL80211_AC_BE = 0x2 + NL80211_AC_BK = 0x3 + NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED = 0x0 + NL80211_ACL_POLICY_DENY_UNLESS_LISTED = 0x1 + NL80211_AC_VI = 0x1 + NL80211_AC_VO = 0x0 + NL80211_ATTR_4ADDR = 0x53 + NL80211_ATTR_ACK = 0x5c + NL80211_ATTR_ACK_SIGNAL = 0x107 + NL80211_ATTR_ACL_POLICY = 0xa5 + NL80211_ATTR_ADMITTED_TIME = 0xd4 + NL80211_ATTR_AIRTIME_WEIGHT = 0x112 + NL80211_ATTR_AKM_SUITES = 0x4c + NL80211_ATTR_AP_ISOLATE = 0x60 + NL80211_ATTR_AUTH_DATA = 0x9c + NL80211_ATTR_AUTH_TYPE = 0x35 + NL80211_ATTR_BANDS = 0xef + NL80211_ATTR_BEACON_HEAD = 0xe + NL80211_ATTR_BEACON_INTERVAL = 0xc + NL80211_ATTR_BEACON_TAIL = 0xf + NL80211_ATTR_BG_SCAN_PERIOD = 0x98 + NL80211_ATTR_BSS_BASIC_RATES = 0x24 + NL80211_ATTR_BSS = 0x2f + NL80211_ATTR_BSS_CTS_PROT = 0x1c + NL80211_ATTR_BSS_HT_OPMODE = 0x6d + NL80211_ATTR_BSSID = 0xf5 + NL80211_ATTR_BSS_SELECT = 0xe3 + NL80211_ATTR_BSS_SHORT_PREAMBLE = 0x1d + NL80211_ATTR_BSS_SHORT_SLOT_TIME = 0x1e + NL80211_ATTR_CENTER_FREQ1 = 0xa0 + NL80211_ATTR_CENTER_FREQ1_OFFSET = 0x123 + NL80211_ATTR_CENTER_FREQ2 = 0xa1 + NL80211_ATTR_CHANNEL_WIDTH = 0x9f + NL80211_ATTR_CH_SWITCH_BLOCK_TX = 0xb8 + NL80211_ATTR_CH_SWITCH_COUNT = 0xb7 + NL80211_ATTR_CIPHER_SUITE_GROUP = 0x4a + NL80211_ATTR_CIPHER_SUITES = 0x39 + NL80211_ATTR_CIPHER_SUITES_PAIRWISE = 0x49 + NL80211_ATTR_CNTDWN_OFFS_BEACON = 0xba + NL80211_ATTR_CNTDWN_OFFS_PRESP = 0xbb + NL80211_ATTR_COALESCE_RULE = 0xb6 + NL80211_ATTR_COALESCE_RULE_CONDITION = 0x2 + NL80211_ATTR_COALESCE_RULE_DELAY = 0x1 + NL80211_ATTR_COALESCE_RULE_MAX = 0x3 + NL80211_ATTR_COALESCE_RULE_PKT_PATTERN = 0x3 + NL80211_ATTR_CONN_FAILED_REASON = 0x9b + NL80211_ATTR_CONTROL_PORT = 0x44 + NL80211_ATTR_CONTROL_PORT_ETHERTYPE = 0x66 + NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT = 0x67 + NL80211_ATTR_CONTROL_PORT_NO_PREAUTH = 0x11e + NL80211_ATTR_CONTROL_PORT_OVER_NL80211 = 0x108 + NL80211_ATTR_COOKIE = 0x58 + NL80211_ATTR_CQM_BEACON_LOSS_EVENT = 0x8 + NL80211_ATTR_CQM = 0x5e + NL80211_ATTR_CQM_MAX = 0x9 + NL80211_ATTR_CQM_PKT_LOSS_EVENT = 0x4 + NL80211_ATTR_CQM_RSSI_HYST = 0x2 + NL80211_ATTR_CQM_RSSI_LEVEL = 0x9 + NL80211_ATTR_CQM_RSSI_THOLD = 0x1 + NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT = 0x3 + 
NL80211_ATTR_CQM_TXE_INTVL = 0x7 + NL80211_ATTR_CQM_TXE_PKTS = 0x6 + NL80211_ATTR_CQM_TXE_RATE = 0x5 + NL80211_ATTR_CRIT_PROT_ID = 0xb3 + NL80211_ATTR_CSA_C_OFF_BEACON = 0xba + NL80211_ATTR_CSA_C_OFF_PRESP = 0xbb + NL80211_ATTR_CSA_C_OFFSETS_TX = 0xcd + NL80211_ATTR_CSA_IES = 0xb9 + NL80211_ATTR_DEVICE_AP_SME = 0x8d + NL80211_ATTR_DFS_CAC_TIME = 0x7 + NL80211_ATTR_DFS_REGION = 0x92 + NL80211_ATTR_DISABLE_HE = 0x12d + NL80211_ATTR_DISABLE_HT = 0x93 + NL80211_ATTR_DISABLE_VHT = 0xaf + NL80211_ATTR_DISCONNECTED_BY_AP = 0x47 + NL80211_ATTR_DONT_WAIT_FOR_ACK = 0x8e + NL80211_ATTR_DTIM_PERIOD = 0xd + NL80211_ATTR_DURATION = 0x57 + NL80211_ATTR_EXT_CAPA = 0xa9 + NL80211_ATTR_EXT_CAPA_MASK = 0xaa + NL80211_ATTR_EXTERNAL_AUTH_ACTION = 0x104 + NL80211_ATTR_EXTERNAL_AUTH_SUPPORT = 0x105 + NL80211_ATTR_EXT_FEATURES = 0xd9 + NL80211_ATTR_FEATURE_FLAGS = 0x8f + NL80211_ATTR_FILS_CACHE_ID = 0xfd + NL80211_ATTR_FILS_DISCOVERY = 0x126 + NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM = 0xfb + NL80211_ATTR_FILS_ERP_REALM = 0xfa + NL80211_ATTR_FILS_ERP_RRK = 0xfc + NL80211_ATTR_FILS_ERP_USERNAME = 0xf9 + NL80211_ATTR_FILS_KEK = 0xf2 + NL80211_ATTR_FILS_NONCES = 0xf3 + NL80211_ATTR_FRAME = 0x33 + NL80211_ATTR_FRAME_MATCH = 0x5b + NL80211_ATTR_FRAME_TYPE = 0x65 + NL80211_ATTR_FREQ_AFTER = 0x3b + NL80211_ATTR_FREQ_BEFORE = 0x3a + NL80211_ATTR_FREQ_FIXED = 0x3c + NL80211_ATTR_FREQ_RANGE_END = 0x3 + NL80211_ATTR_FREQ_RANGE_MAX_BW = 0x4 + NL80211_ATTR_FREQ_RANGE_START = 0x2 + NL80211_ATTR_FTM_RESPONDER = 0x10e + NL80211_ATTR_FTM_RESPONDER_STATS = 0x10f + NL80211_ATTR_GENERATION = 0x2e + NL80211_ATTR_HANDLE_DFS = 0xbf + NL80211_ATTR_HE_6GHZ_CAPABILITY = 0x125 + NL80211_ATTR_HE_BSS_COLOR = 0x11b + NL80211_ATTR_HE_CAPABILITY = 0x10d + NL80211_ATTR_HE_OBSS_PD = 0x117 + NL80211_ATTR_HIDDEN_SSID = 0x7e + NL80211_ATTR_HT_CAPABILITY = 0x1f + NL80211_ATTR_HT_CAPABILITY_MASK = 0x94 + NL80211_ATTR_IE_ASSOC_RESP = 0x80 + NL80211_ATTR_IE = 0x2a + NL80211_ATTR_IE_PROBE_RESP = 0x7f + NL80211_ATTR_IE_RIC = 0xb2 + NL80211_ATTR_IFACE_SOCKET_OWNER = 0xcc + NL80211_ATTR_IFINDEX = 0x3 + NL80211_ATTR_IFNAME = 0x4 + NL80211_ATTR_IFTYPE_AKM_SUITES = 0x11c + NL80211_ATTR_IFTYPE = 0x5 + NL80211_ATTR_IFTYPE_EXT_CAPA = 0xe6 + NL80211_ATTR_INACTIVITY_TIMEOUT = 0x96 + NL80211_ATTR_INTERFACE_COMBINATIONS = 0x78 + NL80211_ATTR_KEY_CIPHER = 0x9 + NL80211_ATTR_KEY = 0x50 + NL80211_ATTR_KEY_DATA = 0x7 + NL80211_ATTR_KEY_DEFAULT = 0xb + NL80211_ATTR_KEY_DEFAULT_MGMT = 0x28 + NL80211_ATTR_KEY_DEFAULT_TYPES = 0x6e + NL80211_ATTR_KEY_IDX = 0x8 + NL80211_ATTR_KEYS = 0x51 + NL80211_ATTR_KEY_SEQ = 0xa + NL80211_ATTR_KEY_TYPE = 0x37 + NL80211_ATTR_LOCAL_MESH_POWER_MODE = 0xa4 + NL80211_ATTR_LOCAL_STATE_CHANGE = 0x5f + NL80211_ATTR_MAC_ACL_MAX = 0xa7 + NL80211_ATTR_MAC_ADDRS = 0xa6 + NL80211_ATTR_MAC = 0x6 + NL80211_ATTR_MAC_HINT = 0xc8 + NL80211_ATTR_MAC_MASK = 0xd7 + NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca + NL80211_ATTR_MAX = 0x137 + NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 + NL80211_ATTR_MAX_CSA_COUNTERS = 0xce + NL80211_ATTR_MAX_MATCH_SETS = 0x85 + NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 + NL80211_ATTR_MAX_NUM_SCAN_SSIDS = 0x2b + NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS = 0xde + NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS = 0x7b + NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION = 0x6f + NL80211_ATTR_MAX_SCAN_IE_LEN = 0x38 + NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL = 0xdf + NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS = 0xe0 + NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN = 0x7c + NL80211_ATTR_MCAST_RATE = 0x6b + NL80211_ATTR_MDID = 0xb1 + NL80211_ATTR_MEASUREMENT_DURATION = 0xeb + 
NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY = 0xec + NL80211_ATTR_MESH_CONFIG = 0x23 + NL80211_ATTR_MESH_ID = 0x18 + NL80211_ATTR_MESH_PEER_AID = 0xed + NL80211_ATTR_MESH_SETUP = 0x70 + NL80211_ATTR_MGMT_SUBTYPE = 0x29 + NL80211_ATTR_MNTR_FLAGS = 0x17 + NL80211_ATTR_MPATH_INFO = 0x1b + NL80211_ATTR_MPATH_NEXT_HOP = 0x1a + NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED = 0xf4 + NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR = 0xe8 + NL80211_ATTR_MU_MIMO_GROUP_DATA = 0xe7 + NL80211_ATTR_NAN_FUNC = 0xf0 + NL80211_ATTR_NAN_MASTER_PREF = 0xee + NL80211_ATTR_NAN_MATCH = 0xf1 + NL80211_ATTR_NETNS_FD = 0xdb + NL80211_ATTR_NOACK_MAP = 0x95 + NL80211_ATTR_NSS = 0x106 + NL80211_ATTR_OFFCHANNEL_TX_OK = 0x6c + NL80211_ATTR_OPER_CLASS = 0xd6 + NL80211_ATTR_OPMODE_NOTIF = 0xc2 + NL80211_ATTR_P2P_CTWINDOW = 0xa2 + NL80211_ATTR_P2P_OPPPS = 0xa3 + NL80211_ATTR_PAD = 0xe5 + NL80211_ATTR_PBSS = 0xe2 + NL80211_ATTR_PEER_AID = 0xb5 + NL80211_ATTR_PEER_MEASUREMENTS = 0x111 + NL80211_ATTR_PID = 0x52 + NL80211_ATTR_PMK = 0xfe + NL80211_ATTR_PMKID = 0x55 + NL80211_ATTR_PMK_LIFETIME = 0x11f + NL80211_ATTR_PMKR0_NAME = 0x102 + NL80211_ATTR_PMK_REAUTH_THRESHOLD = 0x120 + NL80211_ATTR_PMKSA_CANDIDATE = 0x86 + NL80211_ATTR_PORT_AUTHORIZED = 0x103 + NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5 + NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6 + NL80211_ATTR_PREV_BSSID = 0x4f + NL80211_ATTR_PRIVACY = 0x46 + NL80211_ATTR_PROBE_RESP = 0x91 + NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90 + NL80211_ATTR_PROTOCOL_FEATURES = 0xad + NL80211_ATTR_PS_STATE = 0x5d + NL80211_ATTR_QOS_MAP = 0xc7 + NL80211_ATTR_RADAR_EVENT = 0xa8 + NL80211_ATTR_REASON_CODE = 0x36 + NL80211_ATTR_RECEIVE_MULTICAST = 0x121 + NL80211_ATTR_RECONNECT_REQUESTED = 0x12b + NL80211_ATTR_REG_ALPHA2 = 0x21 + NL80211_ATTR_REG_INDOOR = 0xdd + NL80211_ATTR_REG_INITIATOR = 0x30 + NL80211_ATTR_REG_RULE_FLAGS = 0x1 + NL80211_ATTR_REG_RULES = 0x22 + NL80211_ATTR_REG_TYPE = 0x31 + NL80211_ATTR_REKEY_DATA = 0x7a + NL80211_ATTR_REQ_IE = 0x4d + NL80211_ATTR_RESP_IE = 0x4e + NL80211_ATTR_ROAM_SUPPORT = 0x83 + NL80211_ATTR_RX_FRAME_TYPES = 0x64 + NL80211_ATTR_RXMGMT_FLAGS = 0xbc + NL80211_ATTR_RX_SIGNAL_DBM = 0x97 + NL80211_ATTR_S1G_CAPABILITY = 0x128 + NL80211_ATTR_S1G_CAPABILITY_MASK = 0x129 + NL80211_ATTR_SAE_DATA = 0x9c + NL80211_ATTR_SAE_PASSWORD = 0x115 + NL80211_ATTR_SAE_PWE = 0x12a + NL80211_ATTR_SAR_SPEC = 0x12c + NL80211_ATTR_SCAN_FLAGS = 0x9e + NL80211_ATTR_SCAN_FREQ_KHZ = 0x124 + NL80211_ATTR_SCAN_FREQUENCIES = 0x2c + NL80211_ATTR_SCAN_GENERATION = 0x2e + NL80211_ATTR_SCAN_SSIDS = 0x2d + NL80211_ATTR_SCAN_START_TIME_TSF_BSSID = 0xea + NL80211_ATTR_SCAN_START_TIME_TSF = 0xe9 + NL80211_ATTR_SCAN_SUPP_RATES = 0x7d + NL80211_ATTR_SCHED_SCAN_DELAY = 0xdc + NL80211_ATTR_SCHED_SCAN_INTERVAL = 0x77 + NL80211_ATTR_SCHED_SCAN_MATCH = 0x84 + NL80211_ATTR_SCHED_SCAN_MATCH_SSID = 0x1 + NL80211_ATTR_SCHED_SCAN_MAX_REQS = 0x100 + NL80211_ATTR_SCHED_SCAN_MULTI = 0xff + NL80211_ATTR_SCHED_SCAN_PLANS = 0xe1 + NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI = 0xf6 + NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST = 0xf7 + NL80211_ATTR_SMPS_MODE = 0xd5 + NL80211_ATTR_SOCKET_OWNER = 0xcc + NL80211_ATTR_SOFTWARE_IFTYPES = 0x79 + NL80211_ATTR_SPLIT_WIPHY_DUMP = 0xae + NL80211_ATTR_SSID = 0x34 + NL80211_ATTR_STA_AID = 0x10 + NL80211_ATTR_STA_CAPABILITY = 0xab + NL80211_ATTR_STA_EXT_CAPABILITY = 0xac + NL80211_ATTR_STA_FLAGS2 = 0x43 + NL80211_ATTR_STA_FLAGS = 0x11 + NL80211_ATTR_STA_INFO = 0x15 + NL80211_ATTR_STA_LISTEN_INTERVAL = 0x12 + NL80211_ATTR_STA_PLINK_ACTION = 0x19 + NL80211_ATTR_STA_PLINK_STATE = 0x74 + 
NL80211_ATTR_STA_SUPPORTED_CHANNELS = 0xbd + NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES = 0xbe + NL80211_ATTR_STA_SUPPORTED_RATES = 0x13 + NL80211_ATTR_STA_SUPPORT_P2P_PS = 0xe4 + NL80211_ATTR_STATUS_CODE = 0x48 + NL80211_ATTR_STA_TX_POWER = 0x114 + NL80211_ATTR_STA_TX_POWER_SETTING = 0x113 + NL80211_ATTR_STA_VLAN = 0x14 + NL80211_ATTR_STA_WME = 0x81 + NL80211_ATTR_SUPPORT_10_MHZ = 0xc1 + NL80211_ATTR_SUPPORT_5_MHZ = 0xc0 + NL80211_ATTR_SUPPORT_AP_UAPSD = 0x82 + NL80211_ATTR_SUPPORTED_COMMANDS = 0x32 + NL80211_ATTR_SUPPORTED_IFTYPES = 0x20 + NL80211_ATTR_SUPPORT_IBSS_RSN = 0x68 + NL80211_ATTR_SUPPORT_MESH_AUTH = 0x73 + NL80211_ATTR_SURVEY_INFO = 0x54 + NL80211_ATTR_SURVEY_RADIO_STATS = 0xda + NL80211_ATTR_TDLS_ACTION = 0x88 + NL80211_ATTR_TDLS_DIALOG_TOKEN = 0x89 + NL80211_ATTR_TDLS_EXTERNAL_SETUP = 0x8c + NL80211_ATTR_TDLS_INITIATOR = 0xcf + NL80211_ATTR_TDLS_OPERATION = 0x8a + NL80211_ATTR_TDLS_PEER_CAPABILITY = 0xcb + NL80211_ATTR_TDLS_SUPPORT = 0x8b + NL80211_ATTR_TESTDATA = 0x45 + NL80211_ATTR_TID_CONFIG = 0x11d + NL80211_ATTR_TIMED_OUT = 0x41 + NL80211_ATTR_TIMEOUT = 0x110 + NL80211_ATTR_TIMEOUT_REASON = 0xf8 + NL80211_ATTR_TSID = 0xd2 + NL80211_ATTR_TWT_RESPONDER = 0x116 + NL80211_ATTR_TX_FRAME_TYPES = 0x63 + NL80211_ATTR_TX_NO_CCK_RATE = 0x87 + NL80211_ATTR_TXQ_LIMIT = 0x10a + NL80211_ATTR_TXQ_MEMORY_LIMIT = 0x10b + NL80211_ATTR_TXQ_QUANTUM = 0x10c + NL80211_ATTR_TXQ_STATS = 0x109 + NL80211_ATTR_TX_RATES = 0x5a + NL80211_ATTR_UNSOL_BCAST_PROBE_RESP = 0x127 + NL80211_ATTR_UNSPEC = 0x0 + NL80211_ATTR_USE_MFP = 0x42 + NL80211_ATTR_USER_PRIO = 0xd3 + NL80211_ATTR_USER_REG_HINT_TYPE = 0x9a + NL80211_ATTR_USE_RRM = 0xd0 + NL80211_ATTR_VENDOR_DATA = 0xc5 + NL80211_ATTR_VENDOR_EVENTS = 0xc6 + NL80211_ATTR_VENDOR_ID = 0xc3 + NL80211_ATTR_VENDOR_SUBCMD = 0xc4 + NL80211_ATTR_VHT_CAPABILITY = 0x9d + NL80211_ATTR_VHT_CAPABILITY_MASK = 0xb0 + NL80211_ATTR_VLAN_ID = 0x11a + NL80211_ATTR_WANT_1X_4WAY_HS = 0x101 + NL80211_ATTR_WDEV = 0x99 + NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX = 0x72 + NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX = 0x71 + NL80211_ATTR_WIPHY_ANTENNA_RX = 0x6a + NL80211_ATTR_WIPHY_ANTENNA_TX = 0x69 + NL80211_ATTR_WIPHY_BANDS = 0x16 + NL80211_ATTR_WIPHY_CHANNEL_TYPE = 0x27 + NL80211_ATTR_WIPHY = 0x1 + NL80211_ATTR_WIPHY_COVERAGE_CLASS = 0x59 + NL80211_ATTR_WIPHY_DYN_ACK = 0xd1 + NL80211_ATTR_WIPHY_EDMG_BW_CONFIG = 0x119 + NL80211_ATTR_WIPHY_EDMG_CHANNELS = 0x118 + NL80211_ATTR_WIPHY_FRAG_THRESHOLD = 0x3f + NL80211_ATTR_WIPHY_FREQ = 0x26 + NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9 + NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122 + NL80211_ATTR_WIPHY_NAME = 0x2 + NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e + NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d + NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40 + NL80211_ATTR_WIPHY_SELF_MANAGED_REG = 0xd8 + NL80211_ATTR_WIPHY_TX_POWER_LEVEL = 0x62 + NL80211_ATTR_WIPHY_TX_POWER_SETTING = 0x61 + NL80211_ATTR_WIPHY_TXQ_PARAMS = 0x25 + NL80211_ATTR_WOWLAN_TRIGGERS = 0x75 + NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED = 0x76 + NL80211_ATTR_WPA_VERSIONS = 0x4b + NL80211_AUTHTYPE_AUTOMATIC = 0x8 + NL80211_AUTHTYPE_FILS_PK = 0x7 + NL80211_AUTHTYPE_FILS_SK = 0x5 + NL80211_AUTHTYPE_FILS_SK_PFS = 0x6 + NL80211_AUTHTYPE_FT = 0x2 + NL80211_AUTHTYPE_MAX = 0x7 + NL80211_AUTHTYPE_NETWORK_EAP = 0x3 + NL80211_AUTHTYPE_OPEN_SYSTEM = 0x0 + NL80211_AUTHTYPE_SAE = 0x4 + NL80211_AUTHTYPE_SHARED_KEY = 0x1 + NL80211_BAND_2GHZ = 0x0 + NL80211_BAND_5GHZ = 0x1 + NL80211_BAND_60GHZ = 0x2 + NL80211_BAND_6GHZ = 0x3 + NL80211_BAND_ATTR_EDMG_BW_CONFIG = 0xb + NL80211_BAND_ATTR_EDMG_CHANNELS = 0xa + NL80211_BAND_ATTR_FREQS = 0x1 + 
NL80211_BAND_ATTR_HT_AMPDU_DENSITY = 0x6 + NL80211_BAND_ATTR_HT_AMPDU_FACTOR = 0x5 + NL80211_BAND_ATTR_HT_CAPA = 0x4 + NL80211_BAND_ATTR_HT_MCS_SET = 0x3 + NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 + NL80211_BAND_ATTR_MAX = 0xb + NL80211_BAND_ATTR_RATES = 0x2 + NL80211_BAND_ATTR_VHT_CAPA = 0x8 + NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 + NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA = 0x6 + NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC = 0x2 + NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET = 0x4 + NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY = 0x3 + NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE = 0x5 + NL80211_BAND_IFTYPE_ATTR_IFTYPES = 0x1 + NL80211_BAND_IFTYPE_ATTR_MAX = 0xb + NL80211_BAND_S1GHZ = 0x4 + NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE = 0x2 + NL80211_BITRATE_ATTR_MAX = 0x2 + NL80211_BITRATE_ATTR_RATE = 0x1 + NL80211_BSS_BEACON_IES = 0xb + NL80211_BSS_BEACON_INTERVAL = 0x4 + NL80211_BSS_BEACON_TSF = 0xd + NL80211_BSS_BSSID = 0x1 + NL80211_BSS_CAPABILITY = 0x5 + NL80211_BSS_CHAIN_SIGNAL = 0x13 + NL80211_BSS_CHAN_WIDTH_10 = 0x1 + NL80211_BSS_CHAN_WIDTH_1 = 0x3 + NL80211_BSS_CHAN_WIDTH_20 = 0x0 + NL80211_BSS_CHAN_WIDTH_2 = 0x4 + NL80211_BSS_CHAN_WIDTH_5 = 0x2 + NL80211_BSS_CHAN_WIDTH = 0xc + NL80211_BSS_FREQUENCY = 0x2 + NL80211_BSS_FREQUENCY_OFFSET = 0x14 + NL80211_BSS_INFORMATION_ELEMENTS = 0x6 + NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf + NL80211_BSS_MAX = 0x14 + NL80211_BSS_PAD = 0x10 + NL80211_BSS_PARENT_BSSID = 0x12 + NL80211_BSS_PARENT_TSF = 0x11 + NL80211_BSS_PRESP_DATA = 0xe + NL80211_BSS_SEEN_MS_AGO = 0xa + NL80211_BSS_SELECT_ATTR_BAND_PREF = 0x2 + NL80211_BSS_SELECT_ATTR_MAX = 0x3 + NL80211_BSS_SELECT_ATTR_RSSI_ADJUST = 0x3 + NL80211_BSS_SELECT_ATTR_RSSI = 0x1 + NL80211_BSS_SIGNAL_MBM = 0x7 + NL80211_BSS_SIGNAL_UNSPEC = 0x8 + NL80211_BSS_STATUS_ASSOCIATED = 0x1 + NL80211_BSS_STATUS_AUTHENTICATED = 0x0 + NL80211_BSS_STATUS = 0x9 + NL80211_BSS_STATUS_IBSS_JOINED = 0x2 + NL80211_BSS_TSF = 0x3 + NL80211_CHAN_HT20 = 0x1 + NL80211_CHAN_HT40MINUS = 0x2 + NL80211_CHAN_HT40PLUS = 0x3 + NL80211_CHAN_NO_HT = 0x0 + NL80211_CHAN_WIDTH_10 = 0x7 + NL80211_CHAN_WIDTH_160 = 0x5 + NL80211_CHAN_WIDTH_16 = 0xc + NL80211_CHAN_WIDTH_1 = 0x8 + NL80211_CHAN_WIDTH_20 = 0x1 + NL80211_CHAN_WIDTH_20_NOHT = 0x0 + NL80211_CHAN_WIDTH_2 = 0x9 + NL80211_CHAN_WIDTH_40 = 0x2 + NL80211_CHAN_WIDTH_4 = 0xa + NL80211_CHAN_WIDTH_5 = 0x6 + NL80211_CHAN_WIDTH_80 = 0x3 + NL80211_CHAN_WIDTH_80P80 = 0x4 + NL80211_CHAN_WIDTH_8 = 0xb + NL80211_CMD_ABORT_SCAN = 0x72 + NL80211_CMD_ACTION = 0x3b + NL80211_CMD_ACTION_TX_STATUS = 0x3c + NL80211_CMD_ADD_NAN_FUNCTION = 0x75 + NL80211_CMD_ADD_TX_TS = 0x69 + NL80211_CMD_ASSOCIATE = 0x26 + NL80211_CMD_AUTHENTICATE = 0x25 + NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL = 0x38 + NL80211_CMD_CHANGE_NAN_CONFIG = 0x77 + NL80211_CMD_CHANNEL_SWITCH = 0x66 + NL80211_CMD_CH_SWITCH_NOTIFY = 0x58 + NL80211_CMD_CH_SWITCH_STARTED_NOTIFY = 0x6e + NL80211_CMD_CONNECT = 0x2e + NL80211_CMD_CONN_FAILED = 0x5b + NL80211_CMD_CONTROL_PORT_FRAME = 0x81 + NL80211_CMD_CONTROL_PORT_FRAME_TX_STATUS = 0x8b + NL80211_CMD_CRIT_PROTOCOL_START = 0x62 + NL80211_CMD_CRIT_PROTOCOL_STOP = 0x63 + NL80211_CMD_DEAUTHENTICATE = 0x27 + NL80211_CMD_DEL_BEACON = 0x10 + NL80211_CMD_DEL_INTERFACE = 0x8 + NL80211_CMD_DEL_KEY = 0xc + NL80211_CMD_DEL_MPATH = 0x18 + NL80211_CMD_DEL_NAN_FUNCTION = 0x76 + NL80211_CMD_DEL_PMK = 0x7c + NL80211_CMD_DEL_PMKSA = 0x35 + NL80211_CMD_DEL_STATION = 0x14 + NL80211_CMD_DEL_TX_TS = 0x6a + NL80211_CMD_DEL_WIPHY = 0x4 + NL80211_CMD_DISASSOCIATE = 0x28 + NL80211_CMD_DISCONNECT = 0x30 + NL80211_CMD_EXTERNAL_AUTH = 0x7f + NL80211_CMD_FLUSH_PMKSA = 0x36 + 
NL80211_CMD_FRAME = 0x3b + NL80211_CMD_FRAME_TX_STATUS = 0x3c + NL80211_CMD_FRAME_WAIT_CANCEL = 0x43 + NL80211_CMD_FT_EVENT = 0x61 + NL80211_CMD_GET_BEACON = 0xd + NL80211_CMD_GET_COALESCE = 0x64 + NL80211_CMD_GET_FTM_RESPONDER_STATS = 0x82 + NL80211_CMD_GET_INTERFACE = 0x5 + NL80211_CMD_GET_KEY = 0x9 + NL80211_CMD_GET_MESH_CONFIG = 0x1c + NL80211_CMD_GET_MESH_PARAMS = 0x1c + NL80211_CMD_GET_MPATH = 0x15 + NL80211_CMD_GET_MPP = 0x6b + NL80211_CMD_GET_POWER_SAVE = 0x3e + NL80211_CMD_GET_PROTOCOL_FEATURES = 0x5f + NL80211_CMD_GET_REG = 0x1f + NL80211_CMD_GET_SCAN = 0x20 + NL80211_CMD_GET_STATION = 0x11 + NL80211_CMD_GET_SURVEY = 0x32 + NL80211_CMD_GET_WIPHY = 0x1 + NL80211_CMD_GET_WOWLAN = 0x49 + NL80211_CMD_JOIN_IBSS = 0x2b + NL80211_CMD_JOIN_MESH = 0x44 + NL80211_CMD_JOIN_OCB = 0x6c + NL80211_CMD_LEAVE_IBSS = 0x2c + NL80211_CMD_LEAVE_MESH = 0x45 + NL80211_CMD_LEAVE_OCB = 0x6d + NL80211_CMD_MAX = 0x93 + NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 + NL80211_CMD_NAN_MATCH = 0x78 + NL80211_CMD_NEW_BEACON = 0xf + NL80211_CMD_NEW_INTERFACE = 0x7 + NL80211_CMD_NEW_KEY = 0xb + NL80211_CMD_NEW_MPATH = 0x17 + NL80211_CMD_NEW_PEER_CANDIDATE = 0x48 + NL80211_CMD_NEW_SCAN_RESULTS = 0x22 + NL80211_CMD_NEW_STATION = 0x13 + NL80211_CMD_NEW_SURVEY_RESULTS = 0x33 + NL80211_CMD_NEW_WIPHY = 0x3 + NL80211_CMD_NOTIFY_CQM = 0x40 + NL80211_CMD_NOTIFY_RADAR = 0x86 + NL80211_CMD_PEER_MEASUREMENT_COMPLETE = 0x85 + NL80211_CMD_PEER_MEASUREMENT_RESULT = 0x84 + NL80211_CMD_PEER_MEASUREMENT_START = 0x83 + NL80211_CMD_PMKSA_CANDIDATE = 0x50 + NL80211_CMD_PORT_AUTHORIZED = 0x7d + NL80211_CMD_PROBE_CLIENT = 0x54 + NL80211_CMD_PROBE_MESH_LINK = 0x88 + NL80211_CMD_RADAR_DETECT = 0x5e + NL80211_CMD_REG_BEACON_HINT = 0x2a + NL80211_CMD_REG_CHANGE = 0x24 + NL80211_CMD_REGISTER_ACTION = 0x3a + NL80211_CMD_REGISTER_BEACONS = 0x55 + NL80211_CMD_REGISTER_FRAME = 0x3a + NL80211_CMD_RELOAD_REGDB = 0x7e + NL80211_CMD_REMAIN_ON_CHANNEL = 0x37 + NL80211_CMD_REQ_SET_REG = 0x1b + NL80211_CMD_ROAM = 0x2f + NL80211_CMD_SCAN_ABORTED = 0x23 + NL80211_CMD_SCHED_SCAN_RESULTS = 0x4d + NL80211_CMD_SCHED_SCAN_STOPPED = 0x4e + NL80211_CMD_SET_BEACON = 0xe + NL80211_CMD_SET_BSS = 0x19 + NL80211_CMD_SET_CHANNEL = 0x41 + NL80211_CMD_SET_COALESCE = 0x65 + NL80211_CMD_SET_CQM = 0x3f + NL80211_CMD_SET_INTERFACE = 0x6 + NL80211_CMD_SET_KEY = 0xa + NL80211_CMD_SET_MAC_ACL = 0x5d + NL80211_CMD_SET_MCAST_RATE = 0x5c + NL80211_CMD_SET_MESH_CONFIG = 0x1d + NL80211_CMD_SET_MESH_PARAMS = 0x1d + NL80211_CMD_SET_MGMT_EXTRA_IE = 0x1e + NL80211_CMD_SET_MPATH = 0x16 + NL80211_CMD_SET_MULTICAST_TO_UNICAST = 0x79 + NL80211_CMD_SET_NOACK_MAP = 0x57 + NL80211_CMD_SET_PMK = 0x7b + NL80211_CMD_SET_PMKSA = 0x34 + NL80211_CMD_SET_POWER_SAVE = 0x3d + NL80211_CMD_SET_QOS_MAP = 0x68 + NL80211_CMD_SET_REG = 0x1a + NL80211_CMD_SET_REKEY_OFFLOAD = 0x4f + NL80211_CMD_SET_SAR_SPECS = 0x8c + NL80211_CMD_SET_STATION = 0x12 + NL80211_CMD_SET_TID_CONFIG = 0x89 + NL80211_CMD_SET_TX_BITRATE_MASK = 0x39 + NL80211_CMD_SET_WDS_PEER = 0x42 + NL80211_CMD_SET_WIPHY = 0x2 + NL80211_CMD_SET_WIPHY_NETNS = 0x31 + NL80211_CMD_SET_WOWLAN = 0x4a + NL80211_CMD_STA_OPMODE_CHANGED = 0x80 + NL80211_CMD_START_AP = 0xf + NL80211_CMD_START_NAN = 0x73 + NL80211_CMD_START_P2P_DEVICE = 0x59 + NL80211_CMD_START_SCHED_SCAN = 0x4b + NL80211_CMD_STOP_AP = 0x10 + NL80211_CMD_STOP_NAN = 0x74 + NL80211_CMD_STOP_P2P_DEVICE = 0x5a + NL80211_CMD_STOP_SCHED_SCAN = 0x4c + NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH = 0x70 + NL80211_CMD_TDLS_CHANNEL_SWITCH = 0x6f + NL80211_CMD_TDLS_MGMT = 0x52 + NL80211_CMD_TDLS_OPER = 0x51 + 
NL80211_CMD_TESTMODE = 0x2d + NL80211_CMD_TRIGGER_SCAN = 0x21 + NL80211_CMD_UNEXPECTED_4ADDR_FRAME = 0x56 + NL80211_CMD_UNEXPECTED_FRAME = 0x53 + NL80211_CMD_UNPROT_BEACON = 0x8a + NL80211_CMD_UNPROT_DEAUTHENTICATE = 0x46 + NL80211_CMD_UNPROT_DISASSOCIATE = 0x47 + NL80211_CMD_UNSPEC = 0x0 + NL80211_CMD_UPDATE_CONNECT_PARAMS = 0x7a + NL80211_CMD_UPDATE_FT_IES = 0x60 + NL80211_CMD_UPDATE_OWE_INFO = 0x87 + NL80211_CMD_VENDOR = 0x67 + NL80211_CMD_WIPHY_REG_CHANGE = 0x71 + NL80211_COALESCE_CONDITION_MATCH = 0x0 + NL80211_COALESCE_CONDITION_NO_MATCH = 0x1 + NL80211_CONN_FAIL_BLOCKED_CLIENT = 0x1 + NL80211_CONN_FAIL_MAX_CLIENTS = 0x0 + NL80211_CQM_RSSI_BEACON_LOSS_EVENT = 0x2 + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH = 0x1 + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW = 0x0 + NL80211_CQM_TXE_MAX_INTVL = 0x708 + NL80211_CRIT_PROTO_APIPA = 0x3 + NL80211_CRIT_PROTO_DHCP = 0x1 + NL80211_CRIT_PROTO_EAPOL = 0x2 + NL80211_CRIT_PROTO_MAX_DURATION = 0x1388 + NL80211_CRIT_PROTO_UNSPEC = 0x0 + NL80211_DFS_AVAILABLE = 0x2 + NL80211_DFS_ETSI = 0x2 + NL80211_DFS_FCC = 0x1 + NL80211_DFS_JP = 0x3 + NL80211_DFS_UNAVAILABLE = 0x1 + NL80211_DFS_UNSET = 0x0 + NL80211_DFS_USABLE = 0x0 + NL80211_EDMG_BW_CONFIG_MAX = 0xf + NL80211_EDMG_BW_CONFIG_MIN = 0x4 + NL80211_EDMG_CHANNELS_MAX = 0x3c + NL80211_EDMG_CHANNELS_MIN = 0x1 + NL80211_EXTERNAL_AUTH_ABORT = 0x1 + NL80211_EXTERNAL_AUTH_START = 0x0 + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK = 0x32 + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X = 0x10 + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK = 0xf + NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP = 0x12 + NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT = 0x1b + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21 + NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22 + NL80211_EXT_FEATURE_AQL = 0x28 + NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e + NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29 + NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36 + NL80211_EXT_FEATURE_BEACON_RATE_HT = 0x7 + NL80211_EXT_FEATURE_BEACON_RATE_LEGACY = 0x6 + NL80211_EXT_FEATURE_BEACON_RATE_VHT = 0x8 + NL80211_EXT_FEATURE_BSS_PARENT_TSF = 0x4 + NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 = 0x1f + NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH = 0x2a + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211 = 0x1a + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS = 0x30 + NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd + NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b + NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c + NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 + NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 + NL80211_EXT_FEATURE_FILS_DISCOVERY = 0x34 + NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME = 0x11 + NL80211_EXT_FEATURE_FILS_SK_OFFLOAD = 0xe + NL80211_EXT_FEATURE_FILS_STA = 0x9 + NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN = 0x18 + NL80211_EXT_FEATURE_LOW_POWER_SCAN = 0x17 + NL80211_EXT_FEATURE_LOW_SPAN_SCAN = 0x16 + NL80211_EXT_FEATURE_MFP_OPTIONAL = 0x15 + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA = 0xa + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED = 0xb + NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS = 0x2d + NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER = 0x2 + NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 + NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 + NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b + NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_RRM = 0x1 + NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 + NL80211_EXT_FEATURE_SAE_OFFLOAD = 0x26 + NL80211_EXT_FEATURE_SCAN_FREQ_KHZ = 
0x2f + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT = 0x1e + NL80211_EXT_FEATURE_SCAN_RANDOM_SN = 0x1d + NL80211_EXT_FEATURE_SCAN_START_TIME = 0x3 + NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23 + NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc + NL80211_EXT_FEATURE_SECURE_LTF = 0x37 + NL80211_EXT_FEATURE_SECURE_RTT = 0x38 + NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5 + NL80211_EXT_FEATURE_STA_TX_PWR = 0x25 + NL80211_EXT_FEATURE_TXQS = 0x1c + NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35 + NL80211_EXT_FEATURE_VHT_IBSS = 0x0 + NL80211_EXT_FEATURE_VLAN_OFFLOAD = 0x27 + NL80211_FEATURE_ACKTO_ESTIMATION = 0x800000 + NL80211_FEATURE_ACTIVE_MONITOR = 0x20000 + NL80211_FEATURE_ADVERTISE_CHAN_LIMITS = 0x4000 + NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = 0x40000 + NL80211_FEATURE_AP_SCAN = 0x100 + NL80211_FEATURE_CELL_BASE_REG_HINTS = 0x8 + NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES = 0x80000 + NL80211_FEATURE_DYNAMIC_SMPS = 0x2000000 + NL80211_FEATURE_FULL_AP_CLIENT_STATE = 0x8000 + NL80211_FEATURE_HT_IBSS = 0x2 + NL80211_FEATURE_INACTIVITY_TIMER = 0x4 + NL80211_FEATURE_LOW_PRIORITY_SCAN = 0x40 + NL80211_FEATURE_MAC_ON_CREATE = 0x8000000 + NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 0x80000000 + NL80211_FEATURE_NEED_OBSS_SCAN = 0x400 + NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL = 0x10 + NL80211_FEATURE_P2P_GO_CTWIN = 0x800 + NL80211_FEATURE_P2P_GO_OPPPS = 0x1000 + NL80211_FEATURE_QUIET = 0x200000 + NL80211_FEATURE_SAE = 0x20 + NL80211_FEATURE_SCAN_FLUSH = 0x80 + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 0x20000000 + NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 0x40000000 + NL80211_FEATURE_SK_TX_STATUS = 0x1 + NL80211_FEATURE_STATIC_SMPS = 0x1000000 + NL80211_FEATURE_SUPPORTS_WMM_ADMISSION = 0x4000000 + NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 0x10000000 + NL80211_FEATURE_TX_POWER_INSERTION = 0x400000 + NL80211_FEATURE_USERSPACE_MPM = 0x10000 + NL80211_FEATURE_VIF_TXPOWER = 0x200 + NL80211_FEATURE_WFA_TPC_IE_IN_PROBES = 0x100000 + NL80211_FILS_DISCOVERY_ATTR_INT_MAX = 0x2 + NL80211_FILS_DISCOVERY_ATTR_INT_MIN = 0x1 + NL80211_FILS_DISCOVERY_ATTR_MAX = 0x3 + NL80211_FILS_DISCOVERY_ATTR_TMPL = 0x3 + NL80211_FILS_DISCOVERY_TMPL_MIN_LEN = 0x2a + NL80211_FREQUENCY_ATTR_16MHZ = 0x19 + NL80211_FREQUENCY_ATTR_1MHZ = 0x15 + NL80211_FREQUENCY_ATTR_2MHZ = 0x16 + NL80211_FREQUENCY_ATTR_4MHZ = 0x17 + NL80211_FREQUENCY_ATTR_8MHZ = 0x18 + NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 0xd + NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7 + NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8 + NL80211_FREQUENCY_ATTR_DISABLED = 0x2 + NL80211_FREQUENCY_ATTR_FREQ = 0x1 + NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf + NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe + NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf + NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 + NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 + NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc + NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 + NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb + NL80211_FREQUENCY_ATTR_NO_HE = 0x13 + NL80211_FREQUENCY_ATTR_NO_HT40_MINUS = 0x9 + NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa + NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3 + NL80211_FREQUENCY_ATTR_NO_IR = 0x3 + NL80211_FREQUENCY_ATTR_OFFSET = 0x14 + NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3 + NL80211_FREQUENCY_ATTR_RADAR = 0x5 + NL80211_FREQUENCY_ATTR_WMM = 0x12 + NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3 + NL80211_FTM_RESP_ATTR_ENABLED = 0x1 + NL80211_FTM_RESP_ATTR_LCI = 0x2 + NL80211_FTM_RESP_ATTR_MAX = 0x3 + NL80211_FTM_STATS_ASAP_NUM = 0x4 + NL80211_FTM_STATS_FAILED_NUM = 0x3 + NL80211_FTM_STATS_MAX = 0xa + 
NL80211_FTM_STATS_NON_ASAP_NUM = 0x5 + NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM = 0x9 + NL80211_FTM_STATS_PAD = 0xa + NL80211_FTM_STATS_PARTIAL_NUM = 0x2 + NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM = 0x8 + NL80211_FTM_STATS_SUCCESS_NUM = 0x1 + NL80211_FTM_STATS_TOTAL_DURATION_MSEC = 0x6 + NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM = 0x7 + NL80211_GENL_NAME = "nl80211" + NL80211_HE_BSS_COLOR_ATTR_COLOR = 0x1 + NL80211_HE_BSS_COLOR_ATTR_DISABLED = 0x2 + NL80211_HE_BSS_COLOR_ATTR_MAX = 0x3 + NL80211_HE_BSS_COLOR_ATTR_PARTIAL = 0x3 + NL80211_HE_MAX_CAPABILITY_LEN = 0x36 + NL80211_HE_MIN_CAPABILITY_LEN = 0x10 + NL80211_HE_NSS_MAX = 0x8 + NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP = 0x4 + NL80211_HE_OBSS_PD_ATTR_MAX = 0x6 + NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET = 0x2 + NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET = 0x1 + NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET = 0x3 + NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP = 0x5 + NL80211_HE_OBSS_PD_ATTR_SR_CTRL = 0x6 + NL80211_HIDDEN_SSID_NOT_IN_USE = 0x0 + NL80211_HIDDEN_SSID_ZERO_CONTENTS = 0x2 + NL80211_HIDDEN_SSID_ZERO_LEN = 0x1 + NL80211_HT_CAPABILITY_LEN = 0x1a + NL80211_IFACE_COMB_BI_MIN_GCD = 0x7 + NL80211_IFACE_COMB_LIMITS = 0x1 + NL80211_IFACE_COMB_MAXNUM = 0x2 + NL80211_IFACE_COMB_NUM_CHANNELS = 0x4 + NL80211_IFACE_COMB_RADAR_DETECT_REGIONS = 0x6 + NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS = 0x5 + NL80211_IFACE_COMB_STA_AP_BI_MATCH = 0x3 + NL80211_IFACE_COMB_UNSPEC = 0x0 + NL80211_IFACE_LIMIT_MAX = 0x1 + NL80211_IFACE_LIMIT_TYPES = 0x2 + NL80211_IFACE_LIMIT_UNSPEC = 0x0 + NL80211_IFTYPE_ADHOC = 0x1 + NL80211_IFTYPE_AKM_ATTR_IFTYPES = 0x1 + NL80211_IFTYPE_AKM_ATTR_MAX = 0x2 + NL80211_IFTYPE_AKM_ATTR_SUITES = 0x2 + NL80211_IFTYPE_AP = 0x3 + NL80211_IFTYPE_AP_VLAN = 0x4 + NL80211_IFTYPE_MAX = 0xc + NL80211_IFTYPE_MESH_POINT = 0x7 + NL80211_IFTYPE_MONITOR = 0x6 + NL80211_IFTYPE_NAN = 0xc + NL80211_IFTYPE_OCB = 0xb + NL80211_IFTYPE_P2P_CLIENT = 0x8 + NL80211_IFTYPE_P2P_DEVICE = 0xa + NL80211_IFTYPE_P2P_GO = 0x9 + NL80211_IFTYPE_STATION = 0x2 + NL80211_IFTYPE_UNSPECIFIED = 0x0 + NL80211_IFTYPE_WDS = 0x5 + NL80211_KCK_EXT_LEN = 0x18 + NL80211_KCK_LEN = 0x10 + NL80211_KEK_EXT_LEN = 0x20 + NL80211_KEK_LEN = 0x10 + NL80211_KEY_CIPHER = 0x3 + NL80211_KEY_DATA = 0x1 + NL80211_KEY_DEFAULT_BEACON = 0xa + NL80211_KEY_DEFAULT = 0x5 + NL80211_KEY_DEFAULT_MGMT = 0x6 + NL80211_KEY_DEFAULT_TYPE_MULTICAST = 0x2 + NL80211_KEY_DEFAULT_TYPES = 0x8 + NL80211_KEY_DEFAULT_TYPE_UNICAST = 0x1 + NL80211_KEY_IDX = 0x2 + NL80211_KEY_MAX = 0xa + NL80211_KEY_MODE = 0x9 + NL80211_KEY_NO_TX = 0x1 + NL80211_KEY_RX_TX = 0x0 + NL80211_KEY_SEQ = 0x4 + NL80211_KEY_SET_TX = 0x2 + NL80211_KEY_TYPE = 0x7 + NL80211_KEYTYPE_GROUP = 0x0 + NL80211_KEYTYPE_PAIRWISE = 0x1 + NL80211_KEYTYPE_PEERKEY = 0x2 + NL80211_MAX_NR_AKM_SUITES = 0x2 + NL80211_MAX_NR_CIPHER_SUITES = 0x5 + NL80211_MAX_SUPP_HT_RATES = 0x4d + NL80211_MAX_SUPP_RATES = 0x20 + NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MESHCONF_ATTR_MAX = 0x1f + NL80211_MESHCONF_AUTO_OPEN_PLINKS = 0x7 + NL80211_MESHCONF_AWAKE_WINDOW = 0x1b + NL80211_MESHCONF_CONFIRM_TIMEOUT = 0x2 + NL80211_MESHCONF_CONNECTED_TO_AS = 0x1f + NL80211_MESHCONF_CONNECTED_TO_GATE = 0x1d + NL80211_MESHCONF_ELEMENT_TTL = 0xf + NL80211_MESHCONF_FORWARDING = 0x13 + NL80211_MESHCONF_GATE_ANNOUNCEMENTS = 0x11 + NL80211_MESHCONF_HOLDING_TIMEOUT = 0x3 + NL80211_MESHCONF_HT_OPMODE = 0x16 + NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT = 0xb + NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL = 0x19 + NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES = 0x8 + NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME = 0xd + 
NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT = 0x17 + NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL = 0x12 + NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL = 0xc + NL80211_MESHCONF_HWMP_RANN_INTERVAL = 0x10 + NL80211_MESHCONF_HWMP_ROOT_INTERVAL = 0x18 + NL80211_MESHCONF_HWMP_ROOTMODE = 0xe + NL80211_MESHCONF_MAX_PEER_LINKS = 0x4 + NL80211_MESHCONF_MAX_RETRIES = 0x5 + NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT = 0xa + NL80211_MESHCONF_NOLEARN = 0x1e + NL80211_MESHCONF_PATH_REFRESH_TIME = 0x9 + NL80211_MESHCONF_PLINK_TIMEOUT = 0x1c + NL80211_MESHCONF_POWER_MODE = 0x1a + NL80211_MESHCONF_RETRY_TIMEOUT = 0x1 + NL80211_MESHCONF_RSSI_THRESHOLD = 0x14 + NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR = 0x15 + NL80211_MESHCONF_TTL = 0x6 + NL80211_MESH_POWER_ACTIVE = 0x1 + NL80211_MESH_POWER_DEEP_SLEEP = 0x3 + NL80211_MESH_POWER_LIGHT_SLEEP = 0x2 + NL80211_MESH_POWER_MAX = 0x3 + NL80211_MESH_POWER_UNKNOWN = 0x0 + NL80211_MESH_SETUP_ATTR_MAX = 0x8 + NL80211_MESH_SETUP_AUTH_PROTOCOL = 0x8 + NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC = 0x2 + NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL = 0x1 + NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC = 0x6 + NL80211_MESH_SETUP_IE = 0x3 + NL80211_MESH_SETUP_USERSPACE_AMPE = 0x5 + NL80211_MESH_SETUP_USERSPACE_AUTH = 0x4 + NL80211_MESH_SETUP_USERSPACE_MPM = 0x7 + NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE = 0x3 + NL80211_MFP_NO = 0x0 + NL80211_MFP_OPTIONAL = 0x2 + NL80211_MFP_REQUIRED = 0x1 + NL80211_MIN_REMAIN_ON_CHANNEL_TIME = 0xa + NL80211_MNTR_FLAG_ACTIVE = 0x6 + NL80211_MNTR_FLAG_CONTROL = 0x3 + NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 + NL80211_MNTR_FLAG_FCSFAIL = 0x1 + NL80211_MNTR_FLAG_MAX = 0x6 + NL80211_MNTR_FLAG_OTHER_BSS = 0x4 + NL80211_MNTR_FLAG_PLCPFAIL = 0x2 + NL80211_MPATH_FLAG_ACTIVE = 0x1 + NL80211_MPATH_FLAG_FIXED = 0x8 + NL80211_MPATH_FLAG_RESOLVED = 0x10 + NL80211_MPATH_FLAG_RESOLVING = 0x2 + NL80211_MPATH_FLAG_SN_VALID = 0x4 + NL80211_MPATH_INFO_DISCOVERY_RETRIES = 0x7 + NL80211_MPATH_INFO_DISCOVERY_TIMEOUT = 0x6 + NL80211_MPATH_INFO_EXPTIME = 0x4 + NL80211_MPATH_INFO_FLAGS = 0x5 + NL80211_MPATH_INFO_FRAME_QLEN = 0x1 + NL80211_MPATH_INFO_HOP_COUNT = 0x8 + NL80211_MPATH_INFO_MAX = 0x9 + NL80211_MPATH_INFO_METRIC = 0x3 + NL80211_MPATH_INFO_PATH_CHANGE = 0x9 + NL80211_MPATH_INFO_SN = 0x2 + NL80211_MULTICAST_GROUP_CONFIG = "config" + NL80211_MULTICAST_GROUP_MLME = "mlme" + NL80211_MULTICAST_GROUP_NAN = "nan" + NL80211_MULTICAST_GROUP_REG = "regulatory" + NL80211_MULTICAST_GROUP_SCAN = "scan" + NL80211_MULTICAST_GROUP_TESTMODE = "testmode" + NL80211_MULTICAST_GROUP_VENDOR = "vendor" + NL80211_NAN_FUNC_ATTR_MAX = 0x10 + NL80211_NAN_FUNC_CLOSE_RANGE = 0x9 + NL80211_NAN_FUNC_FOLLOW_UP = 0x2 + NL80211_NAN_FUNC_FOLLOW_UP_DEST = 0x8 + NL80211_NAN_FUNC_FOLLOW_UP_ID = 0x6 + NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID = 0x7 + NL80211_NAN_FUNC_INSTANCE_ID = 0xf + NL80211_NAN_FUNC_MAX_TYPE = 0x2 + NL80211_NAN_FUNC_PUBLISH_BCAST = 0x4 + NL80211_NAN_FUNC_PUBLISH = 0x0 + NL80211_NAN_FUNC_PUBLISH_TYPE = 0x3 + NL80211_NAN_FUNC_RX_MATCH_FILTER = 0xd + NL80211_NAN_FUNC_SERVICE_ID = 0x2 + NL80211_NAN_FUNC_SERVICE_ID_LEN = 0x6 + NL80211_NAN_FUNC_SERVICE_INFO = 0xb + NL80211_NAN_FUNC_SERVICE_SPEC_INFO_MAX_LEN = 0xff + NL80211_NAN_FUNC_SRF = 0xc + NL80211_NAN_FUNC_SRF_MAX_LEN = 0xff + NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE = 0x5 + NL80211_NAN_FUNC_SUBSCRIBE = 0x1 + NL80211_NAN_FUNC_TERM_REASON = 0x10 + NL80211_NAN_FUNC_TERM_REASON_ERROR = 0x2 + NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED = 0x1 + NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST = 0x0 + NL80211_NAN_FUNC_TTL = 0xa + NL80211_NAN_FUNC_TX_MATCH_FILTER = 0xe + NL80211_NAN_FUNC_TYPE = 
0x1 + NL80211_NAN_MATCH_ATTR_MAX = 0x2 + NL80211_NAN_MATCH_FUNC_LOCAL = 0x1 + NL80211_NAN_MATCH_FUNC_PEER = 0x2 + NL80211_NAN_SOLICITED_PUBLISH = 0x1 + NL80211_NAN_SRF_ATTR_MAX = 0x4 + NL80211_NAN_SRF_BF = 0x2 + NL80211_NAN_SRF_BF_IDX = 0x3 + NL80211_NAN_SRF_INCLUDE = 0x1 + NL80211_NAN_SRF_MAC_ADDRS = 0x4 + NL80211_NAN_UNSOLICITED_PUBLISH = 0x2 + NL80211_NUM_ACS = 0x4 + NL80211_P2P_PS_SUPPORTED = 0x1 + NL80211_P2P_PS_UNSUPPORTED = 0x0 + NL80211_PKTPAT_MASK = 0x1 + NL80211_PKTPAT_OFFSET = 0x3 + NL80211_PKTPAT_PATTERN = 0x2 + NL80211_PLINK_ACTION_BLOCK = 0x2 + NL80211_PLINK_ACTION_NO_ACTION = 0x0 + NL80211_PLINK_ACTION_OPEN = 0x1 + NL80211_PLINK_BLOCKED = 0x6 + NL80211_PLINK_CNF_RCVD = 0x3 + NL80211_PLINK_ESTAB = 0x4 + NL80211_PLINK_HOLDING = 0x5 + NL80211_PLINK_LISTEN = 0x0 + NL80211_PLINK_OPN_RCVD = 0x2 + NL80211_PLINK_OPN_SNT = 0x1 + NL80211_PMKSA_CANDIDATE_BSSID = 0x2 + NL80211_PMKSA_CANDIDATE_INDEX = 0x1 + NL80211_PMKSA_CANDIDATE_PREAUTH = 0x3 + NL80211_PMSR_ATTR_MAX = 0x5 + NL80211_PMSR_ATTR_MAX_PEERS = 0x1 + NL80211_PMSR_ATTR_PEERS = 0x5 + NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR = 0x3 + NL80211_PMSR_ATTR_REPORT_AP_TSF = 0x2 + NL80211_PMSR_ATTR_TYPE_CAPA = 0x4 + NL80211_PMSR_FTM_CAPA_ATTR_ASAP = 0x1 + NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS = 0x6 + NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT = 0x7 + NL80211_PMSR_FTM_CAPA_ATTR_MAX = 0xa + NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST = 0x8 + NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP = 0x2 + NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED = 0xa + NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES = 0x5 + NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC = 0x4 + NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI = 0x3 + NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED = 0x9 + NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS = 0x7 + NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP = 0x5 + NL80211_PMSR_FTM_FAILURE_NO_RESPONSE = 0x1 + NL80211_PMSR_FTM_FAILURE_PEER_BUSY = 0x6 + NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE = 0x4 + NL80211_PMSR_FTM_FAILURE_REJECTED = 0x2 + NL80211_PMSR_FTM_FAILURE_UNSPECIFIED = 0x0 + NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL = 0x3 + NL80211_PMSR_FTM_REQ_ATTR_ASAP = 0x1 + NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION = 0x5 + NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD = 0x4 + NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST = 0x6 + NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK = 0xc + NL80211_PMSR_FTM_REQ_ATTR_MAX = 0xd + NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED = 0xb + NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP = 0x3 + NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES = 0x7 + NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE = 0x2 + NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC = 0x9 + NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI = 0x8 + NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED = 0xa + NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION = 0x7 + NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX = 0x2 + NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME = 0x5 + NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC = 0x14 + NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG = 0x10 + NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD = 0x12 + NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE = 0x11 + NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON = 0x1 + NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST = 0x8 + NL80211_PMSR_FTM_RESP_ATTR_LCI = 0x13 + NL80211_PMSR_FTM_RESP_ATTR_MAX = 0x15 + NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP = 0x6 + NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS = 0x3 + NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES = 0x4 + NL80211_PMSR_FTM_RESP_ATTR_PAD = 0x15 + NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG = 0x9 + NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD = 0xa + NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG = 0xd + 
NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD = 0xf + NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE = 0xe + NL80211_PMSR_FTM_RESP_ATTR_RX_RATE = 0xc + NL80211_PMSR_FTM_RESP_ATTR_TX_RATE = 0xb + NL80211_PMSR_PEER_ATTR_ADDR = 0x1 + NL80211_PMSR_PEER_ATTR_CHAN = 0x2 + NL80211_PMSR_PEER_ATTR_MAX = 0x4 + NL80211_PMSR_PEER_ATTR_REQ = 0x3 + NL80211_PMSR_PEER_ATTR_RESP = 0x4 + NL80211_PMSR_REQ_ATTR_DATA = 0x1 + NL80211_PMSR_REQ_ATTR_GET_AP_TSF = 0x2 + NL80211_PMSR_REQ_ATTR_MAX = 0x2 + NL80211_PMSR_RESP_ATTR_AP_TSF = 0x4 + NL80211_PMSR_RESP_ATTR_DATA = 0x1 + NL80211_PMSR_RESP_ATTR_FINAL = 0x5 + NL80211_PMSR_RESP_ATTR_HOST_TIME = 0x3 + NL80211_PMSR_RESP_ATTR_MAX = 0x6 + NL80211_PMSR_RESP_ATTR_PAD = 0x6 + NL80211_PMSR_RESP_ATTR_STATUS = 0x2 + NL80211_PMSR_STATUS_FAILURE = 0x3 + NL80211_PMSR_STATUS_REFUSED = 0x1 + NL80211_PMSR_STATUS_SUCCESS = 0x0 + NL80211_PMSR_STATUS_TIMEOUT = 0x2 + NL80211_PMSR_TYPE_FTM = 0x1 + NL80211_PMSR_TYPE_INVALID = 0x0 + NL80211_PMSR_TYPE_MAX = 0x1 + NL80211_PREAMBLE_DMG = 0x3 + NL80211_PREAMBLE_HE = 0x4 + NL80211_PREAMBLE_HT = 0x1 + NL80211_PREAMBLE_LEGACY = 0x0 + NL80211_PREAMBLE_VHT = 0x2 + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U = 0x8 + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P = 0x4 + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 = 0x2 + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS = 0x1 + NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP = 0x1 + NL80211_PS_DISABLED = 0x0 + NL80211_PS_ENABLED = 0x1 + NL80211_RADAR_CAC_ABORTED = 0x2 + NL80211_RADAR_CAC_FINISHED = 0x1 + NL80211_RADAR_CAC_STARTED = 0x5 + NL80211_RADAR_DETECTED = 0x0 + NL80211_RADAR_NOP_FINISHED = 0x3 + NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 + NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb + NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 + NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc + NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 + NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 + NL80211_RATE_INFO_BITRATE32 = 0x5 + NL80211_RATE_INFO_BITRATE = 0x1 + NL80211_RATE_INFO_HE_1XLTF = 0x0 + NL80211_RATE_INFO_HE_2XLTF = 0x1 + NL80211_RATE_INFO_HE_4XLTF = 0x2 + NL80211_RATE_INFO_HE_DCM = 0x10 + NL80211_RATE_INFO_HE_GI_0_8 = 0x0 + NL80211_RATE_INFO_HE_GI_1_6 = 0x1 + NL80211_RATE_INFO_HE_GI_3_2 = 0x2 + NL80211_RATE_INFO_HE_GI = 0xf + NL80211_RATE_INFO_HE_MCS = 0xd + NL80211_RATE_INFO_HE_NSS = 0xe + NL80211_RATE_INFO_HE_RU_ALLOC_106 = 0x2 + NL80211_RATE_INFO_HE_RU_ALLOC_242 = 0x3 + NL80211_RATE_INFO_HE_RU_ALLOC_26 = 0x0 + NL80211_RATE_INFO_HE_RU_ALLOC_2x996 = 0x6 + NL80211_RATE_INFO_HE_RU_ALLOC_484 = 0x4 + NL80211_RATE_INFO_HE_RU_ALLOC_52 = 0x1 + NL80211_RATE_INFO_HE_RU_ALLOC_996 = 0x5 + NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 + NL80211_RATE_INFO_MAX = 0x16 + NL80211_RATE_INFO_MCS = 0x2 + NL80211_RATE_INFO_SHORT_GI = 0x4 + NL80211_RATE_INFO_VHT_MCS = 0x6 + NL80211_RATE_INFO_VHT_NSS = 0x7 + NL80211_REGDOM_SET_BY_CORE = 0x0 + NL80211_REGDOM_SET_BY_COUNTRY_IE = 0x3 + NL80211_REGDOM_SET_BY_DRIVER = 0x2 + NL80211_REGDOM_SET_BY_USER = 0x1 + NL80211_REGDOM_TYPE_COUNTRY = 0x0 + NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 + NL80211_REGDOM_TYPE_INTERSECTION = 0x3 + NL80211_REGDOM_TYPE_WORLD = 0x1 + NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REKEY_DATA_AKM = 0x4 + NL80211_REKEY_DATA_KCK = 0x2 + NL80211_REKEY_DATA_KEK = 0x1 + NL80211_REKEY_DATA_REPLAY_CTR = 0x3 + NL80211_REPLAY_CTR_LEN = 0x8 + NL80211_RRF_AUTO_BW = 0x800 + NL80211_RRF_DFS = 0x10 + NL80211_RRF_GO_CONCURRENT = 0x1000 + NL80211_RRF_IR_CONCURRENT = 0x1000 + NL80211_RRF_NO_160MHZ = 0x10000 + NL80211_RRF_NO_80MHZ = 0x8000 + NL80211_RRF_NO_CCK = 0x2 + NL80211_RRF_NO_HE = 0x20000 + NL80211_RRF_NO_HT40 = 0x6000 + 
NL80211_RRF_NO_HT40MINUS = 0x2000 + NL80211_RRF_NO_HT40PLUS = 0x4000 + NL80211_RRF_NO_IBSS = 0x80 + NL80211_RRF_NO_INDOOR = 0x4 + NL80211_RRF_NO_IR_ALL = 0x180 + NL80211_RRF_NO_IR = 0x80 + NL80211_RRF_NO_OFDM = 0x1 + NL80211_RRF_NO_OUTDOOR = 0x8 + NL80211_RRF_PASSIVE_SCAN = 0x80 + NL80211_RRF_PTMP_ONLY = 0x40 + NL80211_RRF_PTP_ONLY = 0x20 + NL80211_RXMGMT_FLAG_ANSWERED = 0x1 + NL80211_RXMGMT_FLAG_EXTERNAL_AUTH = 0x2 + NL80211_SAE_PWE_BOTH = 0x3 + NL80211_SAE_PWE_HASH_TO_ELEMENT = 0x2 + NL80211_SAE_PWE_HUNT_AND_PECK = 0x1 + NL80211_SAE_PWE_UNSPECIFIED = 0x0 + NL80211_SAR_ATTR_MAX = 0x2 + NL80211_SAR_ATTR_SPECS = 0x2 + NL80211_SAR_ATTR_SPECS_END_FREQ = 0x4 + NL80211_SAR_ATTR_SPECS_MAX = 0x4 + NL80211_SAR_ATTR_SPECS_POWER = 0x1 + NL80211_SAR_ATTR_SPECS_RANGE_INDEX = 0x2 + NL80211_SAR_ATTR_SPECS_START_FREQ = 0x3 + NL80211_SAR_ATTR_TYPE = 0x1 + NL80211_SAR_TYPE_POWER = 0x0 + NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP = 0x20 + NL80211_SCAN_FLAG_AP = 0x4 + NL80211_SCAN_FLAG_COLOCATED_6GHZ = 0x4000 + NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME = 0x10 + NL80211_SCAN_FLAG_FLUSH = 0x2 + NL80211_SCAN_FLAG_FREQ_KHZ = 0x2000 + NL80211_SCAN_FLAG_HIGH_ACCURACY = 0x400 + NL80211_SCAN_FLAG_LOW_POWER = 0x200 + NL80211_SCAN_FLAG_LOW_PRIORITY = 0x1 + NL80211_SCAN_FLAG_LOW_SPAN = 0x100 + NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 0x1000 + NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x80 + NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE = 0x40 + NL80211_SCAN_FLAG_RANDOM_ADDR = 0x8 + NL80211_SCAN_FLAG_RANDOM_SN = 0x800 + NL80211_SCAN_RSSI_THOLD_OFF = -0x12c + NL80211_SCHED_SCAN_MATCH_ATTR_BSSID = 0x5 + NL80211_SCHED_SCAN_MATCH_ATTR_MAX = 0x6 + NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI = 0x3 + NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST = 0x4 + NL80211_SCHED_SCAN_MATCH_ATTR_RSSI = 0x2 + NL80211_SCHED_SCAN_MATCH_ATTR_SSID = 0x1 + NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI = 0x6 + NL80211_SCHED_SCAN_PLAN_INTERVAL = 0x1 + NL80211_SCHED_SCAN_PLAN_ITERATIONS = 0x2 + NL80211_SCHED_SCAN_PLAN_MAX = 0x2 + NL80211_SMPS_DYNAMIC = 0x2 + NL80211_SMPS_MAX = 0x2 + NL80211_SMPS_OFF = 0x0 + NL80211_SMPS_STATIC = 0x1 + NL80211_STA_BSS_PARAM_BEACON_INTERVAL = 0x5 + NL80211_STA_BSS_PARAM_CTS_PROT = 0x1 + NL80211_STA_BSS_PARAM_DTIM_PERIOD = 0x4 + NL80211_STA_BSS_PARAM_MAX = 0x5 + NL80211_STA_BSS_PARAM_SHORT_PREAMBLE = 0x2 + NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME = 0x3 + NL80211_STA_FLAG_ASSOCIATED = 0x7 + NL80211_STA_FLAG_AUTHENTICATED = 0x5 + NL80211_STA_FLAG_AUTHORIZED = 0x1 + NL80211_STA_FLAG_MAX = 0x7 + NL80211_STA_FLAG_MAX_OLD_API = 0x6 + NL80211_STA_FLAG_MFP = 0x4 + NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 + NL80211_STA_FLAG_TDLS_PEER = 0x6 + NL80211_STA_FLAG_WME = 0x3 + NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23 + NL80211_STA_INFO_ACK_SIGNAL = 0x22 + NL80211_STA_INFO_AIRTIME_LINK_METRIC = 0x29 + NL80211_STA_INFO_AIRTIME_WEIGHT = 0x28 + NL80211_STA_INFO_ASSOC_AT_BOOTTIME = 0x2a + NL80211_STA_INFO_BEACON_LOSS = 0x12 + NL80211_STA_INFO_BEACON_RX = 0x1d + NL80211_STA_INFO_BEACON_SIGNAL_AVG = 0x1e + NL80211_STA_INFO_BSS_PARAM = 0xf + NL80211_STA_INFO_CHAIN_SIGNAL_AVG = 0x1a + NL80211_STA_INFO_CHAIN_SIGNAL = 0x19 + NL80211_STA_INFO_CONNECTED_TIME = 0x10 + NL80211_STA_INFO_CONNECTED_TO_AS = 0x2b + NL80211_STA_INFO_CONNECTED_TO_GATE = 0x26 + NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG = 0x23 + NL80211_STA_INFO_EXPECTED_THROUGHPUT = 0x1b + NL80211_STA_INFO_FCS_ERROR_COUNT = 0x25 + NL80211_STA_INFO_INACTIVE_TIME = 0x1 + NL80211_STA_INFO_LLID = 0x4 + NL80211_STA_INFO_LOCAL_PM = 0x14 + NL80211_STA_INFO_MAX = 0x2b + NL80211_STA_INFO_NONPEER_PM = 0x16 + 
NL80211_STA_INFO_PAD = 0x21 + NL80211_STA_INFO_PEER_PM = 0x15 + NL80211_STA_INFO_PLID = 0x5 + NL80211_STA_INFO_PLINK_STATE = 0x6 + NL80211_STA_INFO_RX_BITRATE = 0xe + NL80211_STA_INFO_RX_BYTES64 = 0x17 + NL80211_STA_INFO_RX_BYTES = 0x2 + NL80211_STA_INFO_RX_DROP_MISC = 0x1c + NL80211_STA_INFO_RX_DURATION = 0x20 + NL80211_STA_INFO_RX_MPDUS = 0x24 + NL80211_STA_INFO_RX_PACKETS = 0x9 + NL80211_STA_INFO_SIGNAL_AVG = 0xd + NL80211_STA_INFO_SIGNAL = 0x7 + NL80211_STA_INFO_STA_FLAGS = 0x11 + NL80211_STA_INFO_TID_STATS = 0x1f + NL80211_STA_INFO_T_OFFSET = 0x13 + NL80211_STA_INFO_TX_BITRATE = 0x8 + NL80211_STA_INFO_TX_BYTES64 = 0x18 + NL80211_STA_INFO_TX_BYTES = 0x3 + NL80211_STA_INFO_TX_DURATION = 0x27 + NL80211_STA_INFO_TX_FAILED = 0xc + NL80211_STA_INFO_TX_PACKETS = 0xa + NL80211_STA_INFO_TX_RETRIES = 0xb + NL80211_STA_WME_MAX = 0x2 + NL80211_STA_WME_MAX_SP = 0x2 + NL80211_STA_WME_UAPSD_QUEUES = 0x1 + NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY = 0x5 + NL80211_SURVEY_INFO_CHANNEL_TIME = 0x4 + NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY = 0x6 + NL80211_SURVEY_INFO_CHANNEL_TIME_RX = 0x7 + NL80211_SURVEY_INFO_CHANNEL_TIME_TX = 0x8 + NL80211_SURVEY_INFO_FREQUENCY = 0x1 + NL80211_SURVEY_INFO_FREQUENCY_OFFSET = 0xc + NL80211_SURVEY_INFO_IN_USE = 0x3 + NL80211_SURVEY_INFO_MAX = 0xc + NL80211_SURVEY_INFO_NOISE = 0x2 + NL80211_SURVEY_INFO_PAD = 0xa + NL80211_SURVEY_INFO_TIME_BSS_RX = 0xb + NL80211_SURVEY_INFO_TIME_BUSY = 0x5 + NL80211_SURVEY_INFO_TIME = 0x4 + NL80211_SURVEY_INFO_TIME_EXT_BUSY = 0x6 + NL80211_SURVEY_INFO_TIME_RX = 0x7 + NL80211_SURVEY_INFO_TIME_SCAN = 0x9 + NL80211_SURVEY_INFO_TIME_TX = 0x8 + NL80211_TDLS_DISABLE_LINK = 0x4 + NL80211_TDLS_DISCOVERY_REQ = 0x0 + NL80211_TDLS_ENABLE_LINK = 0x3 + NL80211_TDLS_PEER_HE = 0x8 + NL80211_TDLS_PEER_HT = 0x1 + NL80211_TDLS_PEER_VHT = 0x2 + NL80211_TDLS_PEER_WMM = 0x4 + NL80211_TDLS_SETUP = 0x1 + NL80211_TDLS_TEARDOWN = 0x2 + NL80211_TID_CONFIG_ATTR_AMPDU_CTRL = 0x9 + NL80211_TID_CONFIG_ATTR_AMSDU_CTRL = 0xb + NL80211_TID_CONFIG_ATTR_MAX = 0xd + NL80211_TID_CONFIG_ATTR_NOACK = 0x6 + NL80211_TID_CONFIG_ATTR_OVERRIDE = 0x4 + NL80211_TID_CONFIG_ATTR_PAD = 0x1 + NL80211_TID_CONFIG_ATTR_PEER_SUPP = 0x3 + NL80211_TID_CONFIG_ATTR_RETRY_LONG = 0x8 + NL80211_TID_CONFIG_ATTR_RETRY_SHORT = 0x7 + NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL = 0xa + NL80211_TID_CONFIG_ATTR_TIDS = 0x5 + NL80211_TID_CONFIG_ATTR_TX_RATE = 0xd + NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE = 0xc + NL80211_TID_CONFIG_ATTR_VIF_SUPP = 0x2 + NL80211_TID_CONFIG_DISABLE = 0x1 + NL80211_TID_CONFIG_ENABLE = 0x0 + NL80211_TID_STATS_MAX = 0x6 + NL80211_TID_STATS_PAD = 0x5 + NL80211_TID_STATS_RX_MSDU = 0x1 + NL80211_TID_STATS_TX_MSDU = 0x2 + NL80211_TID_STATS_TX_MSDU_FAILED = 0x4 + NL80211_TID_STATS_TX_MSDU_RETRIES = 0x3 + NL80211_TID_STATS_TXQ_STATS = 0x6 + NL80211_TIMEOUT_ASSOC = 0x3 + NL80211_TIMEOUT_AUTH = 0x2 + NL80211_TIMEOUT_SCAN = 0x1 + NL80211_TIMEOUT_UNSPECIFIED = 0x0 + NL80211_TKIP_DATA_OFFSET_ENCR_KEY = 0x0 + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY = 0x18 + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY = 0x10 + NL80211_TX_POWER_AUTOMATIC = 0x0 + NL80211_TX_POWER_FIXED = 0x2 + NL80211_TX_POWER_LIMITED = 0x1 + NL80211_TXQ_ATTR_AC = 0x1 + NL80211_TXQ_ATTR_AIFS = 0x5 + NL80211_TXQ_ATTR_CWMAX = 0x4 + NL80211_TXQ_ATTR_CWMIN = 0x3 + NL80211_TXQ_ATTR_MAX = 0x5 + NL80211_TXQ_ATTR_QUEUE = 0x1 + NL80211_TXQ_ATTR_TXOP = 0x2 + NL80211_TXQ_Q_BE = 0x2 + NL80211_TXQ_Q_BK = 0x3 + NL80211_TXQ_Q_VI = 0x1 + NL80211_TXQ_Q_VO = 0x0 + NL80211_TXQ_STATS_BACKLOG_BYTES = 0x1 + NL80211_TXQ_STATS_BACKLOG_PACKETS = 0x2 + NL80211_TXQ_STATS_COLLISIONS 
= 0x8 + NL80211_TXQ_STATS_DROPS = 0x4 + NL80211_TXQ_STATS_ECN_MARKS = 0x5 + NL80211_TXQ_STATS_FLOWS = 0x3 + NL80211_TXQ_STATS_MAX = 0xb + NL80211_TXQ_STATS_MAX_FLOWS = 0xb + NL80211_TXQ_STATS_OVERLIMIT = 0x6 + NL80211_TXQ_STATS_OVERMEMORY = 0x7 + NL80211_TXQ_STATS_TX_BYTES = 0x9 + NL80211_TXQ_STATS_TX_PACKETS = 0xa + NL80211_TX_RATE_AUTOMATIC = 0x0 + NL80211_TXRATE_DEFAULT_GI = 0x0 + NL80211_TX_RATE_FIXED = 0x2 + NL80211_TXRATE_FORCE_LGI = 0x2 + NL80211_TXRATE_FORCE_SGI = 0x1 + NL80211_TXRATE_GI = 0x4 + NL80211_TXRATE_HE = 0x5 + NL80211_TXRATE_HE_GI = 0x6 + NL80211_TXRATE_HE_LTF = 0x7 + NL80211_TXRATE_HT = 0x2 + NL80211_TXRATE_LEGACY = 0x1 + NL80211_TX_RATE_LIMITED = 0x1 + NL80211_TXRATE_MAX = 0x7 + NL80211_TXRATE_MCS = 0x2 + NL80211_TXRATE_VHT = 0x3 + NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT = 0x1 + NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX = 0x2 + NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL = 0x2 + NL80211_USER_REG_HINT_CELL_BASE = 0x1 + NL80211_USER_REG_HINT_INDOOR = 0x2 + NL80211_USER_REG_HINT_USER = 0x0 + NL80211_VENDOR_ID_IS_LINUX = 0x80000000 + NL80211_VHT_CAPABILITY_LEN = 0xc + NL80211_VHT_NSS_MAX = 0x8 + NL80211_WIPHY_NAME_MAXLEN = 0x40 + NL80211_WMMR_AIFSN = 0x3 + NL80211_WMMR_CW_MAX = 0x2 + NL80211_WMMR_CW_MIN = 0x1 + NL80211_WMMR_MAX = 0x4 + NL80211_WMMR_TXOP = 0x4 + NL80211_WOWLAN_PKTPAT_MASK = 0x1 + NL80211_WOWLAN_PKTPAT_OFFSET = 0x3 + NL80211_WOWLAN_PKTPAT_PATTERN = 0x2 + NL80211_WOWLAN_TCP_DATA_INTERVAL = 0x9 + NL80211_WOWLAN_TCP_DATA_PAYLOAD = 0x6 + NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ = 0x7 + NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN = 0x8 + NL80211_WOWLAN_TCP_DST_IPV4 = 0x2 + NL80211_WOWLAN_TCP_DST_MAC = 0x3 + NL80211_WOWLAN_TCP_DST_PORT = 0x5 + NL80211_WOWLAN_TCP_SRC_IPV4 = 0x1 + NL80211_WOWLAN_TCP_SRC_PORT = 0x4 + NL80211_WOWLAN_TCP_WAKE_MASK = 0xb + NL80211_WOWLAN_TCP_WAKE_PAYLOAD = 0xa + NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE = 0x8 + NL80211_WOWLAN_TRIG_ANY = 0x1 + NL80211_WOWLAN_TRIG_DISCONNECT = 0x2 + NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST = 0x7 + NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE = 0x6 + NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED = 0x5 + NL80211_WOWLAN_TRIG_MAGIC_PKT = 0x3 + NL80211_WOWLAN_TRIG_NET_DETECT = 0x12 + NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS = 0x13 + NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4 + NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9 + NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe + NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa + NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb + NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc + NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN = 0xd + NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST = 0x10 + NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH = 0xf + NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS = 0x11 + NL80211_WPA_VERSION_1 = 0x1 + NL80211_WPA_VERSION_2 = 0x2 + NL80211_WPA_VERSION_3 = 0x4 +) + +const ( + FRA_UNSPEC = 0x0 + FRA_DST = 0x1 + FRA_SRC = 0x2 + FRA_IIFNAME = 0x3 + FRA_GOTO = 0x4 + FRA_UNUSED2 = 0x5 + FRA_PRIORITY = 0x6 + FRA_UNUSED3 = 0x7 + FRA_UNUSED4 = 0x8 + FRA_UNUSED5 = 0x9 + FRA_FWMARK = 0xa + FRA_FLOW = 0xb + FRA_TUN_ID = 0xc + FRA_SUPPRESS_IFGROUP = 0xd + FRA_SUPPRESS_PREFIXLEN = 0xe + FRA_TABLE = 0xf + FRA_FWMASK = 0x10 + FRA_OIFNAME = 0x11 + FRA_PAD = 0x12 + FRA_L3MDEV = 0x13 + FRA_UID_RANGE = 0x14 + FRA_PROTOCOL = 0x15 + FRA_IP_PROTO = 0x16 + FRA_SPORT_RANGE = 0x17 + FRA_DPORT_RANGE = 0x18 + FR_ACT_UNSPEC = 0x0 + FR_ACT_TO_TBL = 0x1 + FR_ACT_GOTO = 0x2 + FR_ACT_NOP = 0x3 + FR_ACT_RES3 = 0x4 + FR_ACT_RES4 = 0x5 + FR_ACT_BLACKHOLE = 0x6 + FR_ACT_UNREACHABLE = 0x7 + FR_ACT_PROHIBIT = 0x8 +) + +const ( + AUDIT_NLGRP_NONE = 0x0 + AUDIT_NLGRP_READLOG = 0x1 
+) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index bea2549455ea..89c516a29acf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/386/cgo -- -Wall -Werror -static -I/tmp/386/include -m32 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux @@ -240,6 +240,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -250,6 +254,19 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -311,6 +328,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index b8c8f2894335..62b4fb269963 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/amd64/cgo -- -Wall -Werror -static -I/tmp/amd64/include -m64 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux @@ -255,6 +255,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -265,6 +269,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -324,6 +342,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 4db44301632b..e86b35893ece 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/arm/cgo -- -Wall -Werror -static -I/tmp/arm/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
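As a rough sketch of how the mount-API constants vendored in the hunks above (OPEN_TREE_CLONE, OPEN_TREE_CLOEXEC, MOVE_MOUNT_F_EMPTY_PATH) can be used — assuming this golang.org/x/sys/unix revision also exposes the OpenTree and MoveMount wrappers, which the diff itself does not show — a detached bind mount looks roughly like the following; the paths are placeholders and the calls need CAP_SYS_ADMIN on a kernel with the new mount API (5.2+):

    package main

    import (
        "log"

        "golang.org/x/sys/unix"
    )

    // bindMount clones the subtree at src into a detached mount and then
    // attaches it at dst, using the constants added by this dependency bump.
    func bindMount(src, dst string) error {
        // OPEN_TREE_CLONE detaches a copy of the subtree; OPEN_TREE_CLOEXEC
        // keeps the returned fd close-on-exec.
        fd, err := unix.OpenTree(unix.AT_FDCWD, src,
            unix.OPEN_TREE_CLONE|unix.OPEN_TREE_CLOEXEC)
        if err != nil {
            return err
        }
        defer unix.Close(fd)

        // MOVE_MOUNT_F_EMPTY_PATH means the empty "from" path refers to the
        // fd itself, so the detached tree is attached at dst.
        return unix.MoveMount(fd, "", unix.AT_FDCWD, dst,
            unix.MOVE_MOUNT_F_EMPTY_PATH)
    }

    func main() {
        // Placeholder paths for illustration only.
        if err := bindMount("/srv/data", "/mnt/data"); err != nil {
            log.Fatal(err)
        }
    }
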
//go:build arm && linux @@ -231,6 +231,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -241,6 +245,19 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -302,6 +319,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 3ebcad8a8873..6c6be4c911d8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/arm64/cgo -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux @@ -234,6 +234,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -244,6 +248,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -303,6 +321,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go new file mode 100644 index 000000000000..4982ea355a28 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -0,0 +1,691 @@ +// cgo -godefs -objdir=/tmp/loong64/cgo -- -Wall -Werror -static -I/tmp/loong64/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build loong64 && linux +// +build loong64,linux + +package unix + +const ( + SizeofPtr = 0x8 + SizeofLong = 0x8 +) + +type ( + _C_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint64 + Size int64 + Blksize int32 + _ int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ [2]int32 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte +} + +type Flock_t struct { + Type int16 + Whence int16 + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + +const ( + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + +const ( + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 +) + +const ( + SizeofSockFprog = 0x10 +) + +type PtraceRegs struct { + Regs [32]uint64 + Orig_a0 uint64 + Era uint64 + Badv uint64 + Reserved [10]uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + _ [0]int8 + _ [4]byte +} + +type Ustat_t struct { + Tfree int32 + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + +const ( + POLLRDHUP = 0x2000 +) + +type Sigset_t struct { + Val [16]uint64 +} + +const _C__NSIG = 0x41 + +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Taskstats struct { + 
Version uint16 + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 + Thrashing_count uint64 + Thrashing_delay_total uint64 + Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 +} + +type cpuMask uint64 + +const ( + _NCPUBITS = 0x40 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type HDGeometry struct { + Heads 
uint8 + Sectors uint8 + Cylinders uint16 + Start uint64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +const ( + SizeofTpacketHdr = 0x20 +) + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x1269 +) + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + 
Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 3eb33e48ab53..173141a67032 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mips/cgo -- -Wall -Werror -static -I/tmp/mips/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux @@ -236,6 +236,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -246,6 +250,19 @@ type Sigset_t struct { const _C__NSIG = 0x80 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x3 +) + +type Siginfo struct { + Signo int32 + Code int32 + Errno int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -307,6 +324,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 79a94467252f..93ae4c51673d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mips64/cgo -- -Wall -Werror -static -I/tmp/mips64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64 && linux @@ -237,6 +237,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -247,6 +251,20 @@ type Sigset_t struct { const _C__NSIG = 0x80 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x3 +) + +type Siginfo struct { + Signo int32 + Code int32 + Errno int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -306,6 +324,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 8f4b107cad36..4e4e510ca519 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mips64le/cgo -- -Wall -Werror -static -I/tmp/mips64le/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux @@ -237,6 +237,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -247,6 +251,20 @@ type Sigset_t struct { const _C__NSIG = 0x80 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x3 +) + +type Siginfo struct { + Signo int32 + Code int32 + Errno int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -306,6 +324,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index e4eb2179811f..3f5ba013d995 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mipsle/cgo -- -Wall -Werror -static -I/tmp/mipsle/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mipsle && linux @@ -236,6 +236,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -246,6 +250,19 @@ type Sigset_t struct { const _C__NSIG = 0x80 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x3 +) + +type Siginfo struct { + Signo int32 + Code int32 + Errno int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -307,6 +324,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index d5b21f0f7da5..71dfe7cdb47a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/ppc/cgo -- -Wall -Werror -static -I/tmp/ppc/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux @@ -243,6 +243,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -253,6 +257,19 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -314,6 +331,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 5188d142b9f5..3a2b7f0a666e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/ppc64/cgo -- -Wall -Werror -static -I/tmp/ppc64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64 && linux @@ -244,6 +244,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -254,6 +258,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -313,6 +331,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index de4dd4c736e8..a52d62756328 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/ppc64le/cgo -- -Wall -Werror -static -I/tmp/ppc64le/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux @@ -244,6 +244,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -254,6 +258,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -313,6 +331,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index dccbf9b06040..dfc007d8a691 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/riscv64/cgo -- -Wall -Werror -static -I/tmp/riscv64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && linux @@ -262,6 +262,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -272,6 +276,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -331,6 +349,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 6358806106f0..b53cb9103d30 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/s390x/cgo -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux @@ -210,8 +210,8 @@ type PtraceFpregs struct { } type PtracePer struct { - _ [0]uint64 - _ [32]byte + Control_regs [3]uint64 + _ [8]byte Starting_addr uint64 Ending_addr uint64 Perc_atmid uint16 @@ -257,6 +257,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -267,6 +271,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -326,6 +344,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 765edc13ff25..fe0aa3547280 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/sparc64/cgo -- -Wall -Werror -static -I/tmp/sparc64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build sparc64 && linux @@ -239,6 +239,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x400000 +) + const ( POLLRDHUP = 0x800 ) @@ -249,6 +253,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x4 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -308,6 +326,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 2fd2060e617a..9bc4c8f9d889 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -491,6 +491,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 6a5a1a8ae556..bb05f655d225 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -499,6 +499,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + 
Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 84cc8d01e656..db40e3a19c66 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -496,6 +496,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index c844e7096ff5..11121151ccf0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -499,6 +499,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks 
int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index baf5fe650444..26eba23b729f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -58,22 +58,22 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Mode uint32 - Dev int32 - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev int32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - X__st_birthtim Timespec + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec } type Statfs_t struct { @@ -94,11 +94,11 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 - Pad_cgo_0 [2]byte + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte + _ [2]byte Mount_info [160]byte } @@ -111,13 +111,13 @@ type Flock_t struct { } type Dirent struct { - Fileno uint64 - Off int64 - Reclen uint16 - Type uint8 - Namlen uint8 - X__d_padding [4]uint8 - Name [256]int8 + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 } type Fsid struct { @@ -262,8 +262,8 @@ type FdSet struct { } const ( - SizeofIfMsghdr = 0xec - SizeofIfData = 0xd4 + SizeofIfMsghdr = 0xa0 + SizeofIfData = 0x88 SizeofIfaMsghdr = 0x18 SizeofIfAnnounceMsghdr = 0x1a SizeofRtMsghdr = 0x60 @@ -292,7 +292,7 @@ type IfData struct { Link_state uint8 Mtu uint32 Metric uint32 - Pad uint32 + Rdomain uint32 Baudrate uint64 Ipackets uint64 Ierrors uint64 @@ -304,10 +304,10 @@ type IfData struct { Imcasts uint64 Omcasts uint64 Iqdrops uint64 + Oqdrops uint64 Noproto uint64 Capabilities uint32 Lastchange Timeval - Mclpool [7]Mclpool } type IfaMsghdr struct { @@ -368,20 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct { - Grown int32 - Alive uint16 - Hwm uint16 - Cwm uint16 - Lwm uint16 -} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x8 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -407,11 +399,14 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp BpfTimeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { @@ -488,7 +483,7 @@ type Uvmexp 
struct { Zeropages int32 Reserve_pagedaemon int32 Reserve_kernel int32 - Anonpages int32 + Unused01 int32 Vnodepages int32 Vtextpages int32 Freemin int32 @@ -507,8 +502,8 @@ type Uvmexp struct { Swpgonly int32 Nswget int32 Nanon int32 - Nanonneeded int32 - Nfreeanon int32 + Unused05 int32 + Unused06 int32 Faults int32 Traps int32 Intrs int32 @@ -516,8 +511,8 @@ type Uvmexp struct { Softs int32 Syscalls int32 Pageins int32 - Obsolete_swapins int32 - Obsolete_swapouts int32 + Unused07 int32 + Unused08 int32 Pgswapin int32 Pgswapout int32 Forks int32 @@ -525,7 +520,7 @@ type Uvmexp struct { Forks_sharevm int32 Pga_zerohit int32 Pga_zeromiss int32 - Zeroaborts int32 + Unused09 int32 Fltnoram int32 Fltnoanon int32 Fltnoamap int32 @@ -557,9 +552,9 @@ type Uvmexp struct { Pdpageouts int32 Pdpending int32 Pddeact int32 - Pdreanon int32 - Pdrevnode int32 - Pdrevtext int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 Fpswtch int32 Kmapent int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index e21ae8ecfa6f..5a5479886989 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -73,7 +73,6 @@ type Stat_t struct { Blksize int32 Flags uint32 Gen uint32 - _ [4]byte _ Timespec } @@ -81,7 +80,6 @@ type Statfs_t struct { F_flags uint32 F_bsize uint32 F_iosize uint32 - _ [4]byte F_blocks uint64 F_bfree uint64 F_bavail int64 @@ -96,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } @@ -200,10 +198,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen uint32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -311,7 +307,6 @@ type IfData struct { Oqdrops uint64 Noproto uint64 Capabilities uint32 - _ [4]byte Lastchange Timeval } @@ -373,14 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x10 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -395,7 +388,6 @@ type BpfStat struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } @@ -411,7 +403,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { @@ -488,7 +483,7 @@ type Uvmexp struct { Zeropages int32 Reserve_pagedaemon int32 Reserve_kernel int32 - Anonpages int32 + Unused01 int32 Vnodepages int32 Vtextpages int32 Freemin int32 @@ -507,8 +502,8 @@ type Uvmexp struct { Swpgonly int32 Nswget int32 Nanon int32 - Nanonneeded int32 - Nfreeanon int32 + Unused05 int32 + Unused06 int32 Faults int32 Traps int32 Intrs int32 @@ -516,8 +511,8 @@ type Uvmexp struct { Softs int32 Syscalls int32 Pageins int32 - Obsolete_swapins int32 - Obsolete_swapouts int32 + Unused07 int32 + Unused08 int32 Pgswapin int32 Pgswapout int32 Forks int32 @@ -525,7 +520,7 @@ type Uvmexp struct { Forks_sharevm int32 Pga_zerohit int32 Pga_zeromiss int32 - Zeroaborts int32 + Unused09 int32 Fltnoram int32 Fltnoanon int32 Fltnoamap int32 @@ -557,9 +552,9 @@ type Uvmexp struct { Pdpageouts int32 Pdpending int32 Pddeact int32 - Pdreanon int32 - Pdrevnode int32 - Pdrevtext 
int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 Fpswtch int32 Kmapent int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index f190651cd964..be58c4e1ff8b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -98,10 +98,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } @@ -375,14 +375,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x8 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -412,7 +410,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 84747c582cfc..52338266cb3e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } @@ -368,14 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x10 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -405,7 +403,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index ac5c8b6370b1..605cfdb12b1d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } @@ -368,14 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x10 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -405,7 +403,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go new file mode 100644 index 000000000000..d6724c0102c8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go @@ -0,0 +1,571 @@ +// cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go +// Code generated by the command above; see 
README.md. DO NOT EDIT. + +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte + _ [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + 
PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xa8 + SizeofIfData = 0x90 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Rdomain uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Capabilities uint32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct{} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x18 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_EACCESS = 0x1 + AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Sigset_t uint32 + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofUvmexp = 0x158 + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Unused01 int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin 
int32 + Vtextmin int32 + Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + Nswget int32 + Nanon int32 + Unused05 int32 + Unused06 int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Unused07 int32 + Unused08 int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Unused09 int32 + Fltnoram int32 + Fltnoanon int32 + Fltnoamap int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 + Fpswtch int32 + Kmapent int32 +} + +const SizeofClockinfo = 0x10 + +type Clockinfo struct { + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go new file mode 100644 index 000000000000..ddfd27a434a1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go @@ -0,0 +1,571 @@ +// cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte + _ [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + 
Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xa8 + SizeofIfData = 0x90 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Rdomain uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Capabilities uint32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct{} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x18 
+) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_EACCESS = 0x1 + AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Sigset_t uint32 + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofUvmexp = 0x158 + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Unused01 int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + Nswget int32 + Nanon int32 + Unused05 int32 + Unused06 int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Unused07 int32 + Unused08 int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Unused09 int32 + Fltnoram int32 + Fltnoanon int32 + Fltnoamap int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 + Fpswtch int32 + Kmapent int32 +} + +const SizeofClockinfo = 0x10 + +type Clockinfo struct { + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index ad4aad279686..0400747c67d4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -178,7 +178,7 @@ type Linger struct { } type Iovec struct { - Base *int8 + Base *byte Len uint64 } @@ -480,3 +480,38 @@ const ( MOUNTEDOVER = 0x40000000 FILE_EXCEPTION = 0x60000070 ) + +const ( + TUNNEWPPA = 0x540001 + TUNSETPPA = 0x540002 + + I_STR = 0x5308 + I_POP = 0x5303 + I_PUSH = 0x5302 + I_LINK = 0x530c + I_UNLINK = 0x530d + I_PLINK = 0x5316 + I_PUNLINK = 0x5317 + + IF_UNITSEL = -0x7ffb8cca 
+) + +type strbuf struct { + Maxlen int32 + Len int32 + Buf *int8 +} + +type Strioctl struct { + Cmd int32 + Timout int32 + Len int32 + Dp *int8 +} + +type Lifreq struct { + Name [32]int8 + Lifru1 [4]byte + Type uint32 + Lifru [336]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index 4ab638cb94c7..aec1efcb306a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -339,7 +339,7 @@ type Statfs_t struct { Flags uint64 } -type Dirent struct { +type direntLE struct { Reclen uint16 Namlen uint16 Ino uint32 @@ -347,6 +347,15 @@ type Dirent struct { Name [256]byte } +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + _ [5]byte +} + type FdSet struct { Bits [64]int32 } diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index 855698bb2828..75980fd44ad7 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -15,11 +15,11 @@ import ( // in http://msdn.microsoft.com/en-us/library/ms880421. // This function returns "" (2 double quotes) if s is empty. // Alternatively, these transformations are done: -// - every back slash (\) is doubled, but only if immediately -// followed by double quote ("); -// - every double quote (") is escaped by back slash (\); -// - finally, s is wrapped with double quotes (arg -> "arg"), -// but only if there is space or tab inside s. +// - every back slash (\) is doubled, but only if immediately +// followed by double quote ("); +// - every double quote (") is escaped by back slash (\); +// - finally, s is wrapped with double quotes (arg -> "arg"), +// but only if there is space or tab inside s. func EscapeArg(s string) string { if len(s) == 0 { return "\"\"" diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go index 906325e095a8..6c8d97b6a590 100644 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -20,7 +20,6 @@ // log.Fatal(err) // } // fmt.Printf("Windows system root is %q\n", s) -// package registry import ( diff --git a/vendor/golang.org/x/sys/windows/setupapi_windows.go b/vendor/golang.org/x/sys/windows/setupapi_windows.go index 14027da3f3f1..f8126482fa5a 100644 --- a/vendor/golang.org/x/sys/windows/setupapi_windows.go +++ b/vendor/golang.org/x/sys/windows/setupapi_windows.go @@ -296,7 +296,7 @@ const ( // Flag to indicate that the sorting from the INF file should be used. DI_INF_IS_SORTED DI_FLAGS = 0x00008000 - // Flag to indicate that only the the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched. + // Flag to indicate that only the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched. DI_ENUMSINGLEINF DI_FLAGS = 0x00010000 // Flag that prevents ConfigMgr from removing/re-enumerating devices during device diff --git a/vendor/golang.org/x/sys/windows/svc/eventlog/log.go b/vendor/golang.org/x/sys/windows/svc/eventlog/log.go index a1796fbb5df0..f37b4b5107a4 100644 --- a/vendor/golang.org/x/sys/windows/svc/eventlog/log.go +++ b/vendor/golang.org/x/sys/windows/svc/eventlog/log.go @@ -6,7 +6,6 @@ // +build windows // Package eventlog implements access to Windows event log. 
-// package eventlog import ( diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index 72074d582f10..8732cdb957f3 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -30,8 +30,6 @@ import ( "strings" "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) // ByteSliceFromString returns a NUL-terminated slice of bytes @@ -83,13 +81,7 @@ func BytePtrToString(p *byte) string { ptr = unsafe.Pointer(uintptr(ptr) + 1) } - var s []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - h.Data = unsafe.Pointer(p) - h.Len = n - h.Cap = n - - return string(s) + return string(unsafe.Slice(p, n)) } // Single-word zero for use when we need a valid pointer to 0 bytes. diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index cf44e6933790..a49853e9d3af 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -10,6 +10,7 @@ import ( errorspkg "errors" "fmt" "runtime" + "strings" "sync" "syscall" "time" @@ -86,10 +87,8 @@ func StringToUTF16(s string) []uint16 { // s, with a terminating NUL added. If s contains a NUL byte at any // location, it returns (nil, syscall.EINVAL). func UTF16FromString(s string) ([]uint16, error) { - for i := 0; i < len(s); i++ { - if s[i] == 0 { - return nil, syscall.EINVAL - } + if strings.IndexByte(s, 0) != -1 { + return nil, syscall.EINVAL } return utf16.Encode([]rune(s + "\x00")), nil } @@ -139,13 +138,7 @@ func UTF16PtrToString(p *uint16) string { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - var s []uint16 - h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - h.Data = unsafe.Pointer(p) - h.Len = n - h.Cap = n - - return string(utf16.Decode(s)) + return string(utf16.Decode(unsafe.Slice(p, n))) } func Getpagesize() int { return 4096 } @@ -186,8 +179,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState -//sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) -//sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) +//sys readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) = ReadFile +//sys writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) = WriteFile //sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) //sys SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff] //sys CloseHandle(handle Handle) (err error) @@ -365,6 +358,16 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) +//sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows +//sys 
EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) = user32.EnumChildWindows +//sys GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) = user32.GetClassNameW +//sys GetDesktopWindow() (hwnd HWND) = user32.GetDesktopWindow +//sys GetForegroundWindow() (hwnd HWND) = user32.GetForegroundWindow +//sys IsWindow(hwnd HWND) (isWindow bool) = user32.IsWindow +//sys IsWindowUnicode(hwnd HWND) (isUnicode bool) = user32.IsWindowUnicode +//sys IsWindowVisible(hwnd HWND) (isVisible bool) = user32.IsWindowVisible +//sys GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) = user32.GetGUIThreadInfo +//sys GetLargePageMinimum() (size uintptr) // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -418,6 +421,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation //sys GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) = psapi.GetModuleFileNameExW //sys GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) = psapi.GetModuleBaseNameW +//sys QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSetEx // NT Native APIs //sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb @@ -439,6 +443,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) = ntdll.RtlAddFunctionTable //sys RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) = ntdll.RtlDeleteFunctionTable +// Desktop Window Manager API (Dwmapi) +//sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute +//sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. 
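The Dwmapi wrappers declared above take a DWMWA_* attribute constant and a pointer to a caller-owned buffer. As a rough, illustrative sketch only (not part of this patch; it simply exercises the DwmGetWindowAttribute, GetForegroundWindow, and DWMWA_CLOAKED declarations that this update adds), querying whether the foreground window is cloaked might look like:

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	// Illustrative only: query DWMWA_CLOAKED for the foreground window using
	// the DwmGetWindowAttribute and GetForegroundWindow wrappers added here.
	hwnd := windows.GetForegroundWindow()
	var cloaked uint32 // DWMWA_CLOAKED yields a DWORD-sized value
	err := windows.DwmGetWindowAttribute(hwnd, windows.DWMWA_CLOAKED,
		unsafe.Pointer(&cloaked), uint32(unsafe.Sizeof(cloaked)))
	if err != nil {
		fmt.Println("DwmGetWindowAttribute:", err)
		return
	}
	fmt.Println("foreground window cloaked:", cloaked != 0)
}

The wrapper returns a syscall.Errno for any non-zero HRESULT, so the error check above is the whole failure path.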
@@ -549,12 +557,6 @@ func Read(fd Handle, p []byte) (n int, err error) { } return 0, e } - if raceenabled { - if done > 0 { - raceWriteRange(unsafe.Pointer(&p[0]), int(done)) - } - raceAcquire(unsafe.Pointer(&ioSync)) - } return int(done), nil } @@ -567,12 +569,31 @@ func Write(fd Handle, p []byte) (n int, err error) { if e != nil { return 0, e } - if raceenabled && done > 0 { - raceReadRange(unsafe.Pointer(&p[0]), int(done)) - } return int(done), nil } +func ReadFile(fd Handle, p []byte, done *uint32, overlapped *Overlapped) error { + err := readFile(fd, p, done, overlapped) + if raceenabled { + if *done > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), int(*done)) + } + raceAcquire(unsafe.Pointer(&ioSync)) + } + return err +} + +func WriteFile(fd Handle, p []byte, done *uint32, overlapped *Overlapped) error { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + err := writeFile(fd, p, done, overlapped) + if raceenabled && *done > 0 { + raceReadRange(unsafe.Pointer(&p[0]), int(*done)) + } + return err +} + var ioSync int64 func Seek(fd Handle, offset int64, whence int) (newoffset int64, err error) { @@ -611,7 +632,6 @@ var ( func getStdHandle(stdhandle uint32) (fd Handle) { r, _ := GetStdHandle(stdhandle) - CloseOnExec(r) return r } @@ -736,7 +756,7 @@ func Utimes(path string, tv []Timeval) (err error) { if e != nil { return e } - defer Close(h) + defer CloseHandle(h) a := NsecToFiletime(tv[0].Nanoseconds()) w := NsecToFiletime(tv[1].Nanoseconds()) return SetFileTime(h, nil, &a, &w) @@ -756,7 +776,7 @@ func UtimesNano(path string, ts []Timespec) (err error) { if e != nil { return e } - defer Close(h) + defer CloseHandle(h) a := NsecToFiletime(TimespecToNsec(ts[0])) w := NsecToFiletime(TimespecToNsec(ts[1])) return SetFileTime(h, nil, &a, &w) @@ -850,6 +870,7 @@ const socket_error = uintptr(^uint32(0)) //sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar +//sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. 
@@ -959,6 +980,32 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { return unsafe.Pointer(&sa.raw), sl, nil } +type RawSockaddrBth struct { + AddressFamily [2]byte + BtAddr [8]byte + ServiceClassId [16]byte + Port [4]byte +} + +type SockaddrBth struct { + BtAddr uint64 + ServiceClassId GUID + Port uint32 + + raw RawSockaddrBth +} + +func (sa *SockaddrBth) sockaddr() (unsafe.Pointer, int32, error) { + family := AF_BTH + sa.raw = RawSockaddrBth{ + AddressFamily: *(*[2]byte)(unsafe.Pointer(&family)), + BtAddr: *(*[8]byte)(unsafe.Pointer(&sa.BtAddr)), + Port: *(*[4]byte)(unsafe.Pointer(&sa.Port)), + ServiceClassId: *(*[16]byte)(unsafe.Pointer(&sa.ServiceClassId)), + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { switch rsa.Addr.Family { case AF_UNIX: @@ -1034,6 +1081,14 @@ func Connect(fd Handle, sa Sockaddr) (err error) { return connect(fd, ptr, n) } +func GetBestInterfaceEx(sa Sockaddr, pdwBestIfIndex *uint32) (err error) { + ptr, _, err := sa.sockaddr() + if err != nil { + return err + } + return getBestInterfaceEx(ptr, pdwBestIfIndex) +} + func Getsockname(fd Handle) (sa Sockaddr, err error) { var rsa RawSockaddrAny l := int32(unsafe.Sizeof(rsa)) @@ -1061,9 +1116,13 @@ func Shutdown(fd Handle, how int) (err error) { } func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) { - rsa, l, err := to.sockaddr() - if err != nil { - return err + var rsa unsafe.Pointer + var l int32 + if to != nil { + rsa, l, err = to.sockaddr() + if err != nil { + return err + } } return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine) } @@ -1687,3 +1746,71 @@ func LoadResourceData(module, resInfo Handle) (data []byte, err error) { h.Cap = int(size) return } + +// PSAPI_WORKING_SET_EX_BLOCK contains extended working set information for a page. +type PSAPI_WORKING_SET_EX_BLOCK uint64 + +// Valid returns the validity of this page. +// If this bit is 1, the subsequent members are valid; otherwise they should be ignored. +func (b PSAPI_WORKING_SET_EX_BLOCK) Valid() bool { + return (b & 1) == 1 +} + +// ShareCount is the number of processes that share this page. The maximum value of this member is 7. +func (b PSAPI_WORKING_SET_EX_BLOCK) ShareCount() uint64 { + return b.intField(1, 3) +} + +// Win32Protection is the memory protection attributes of the page. For a list of values, see +// https://docs.microsoft.com/en-us/windows/win32/memory/memory-protection-constants +func (b PSAPI_WORKING_SET_EX_BLOCK) Win32Protection() uint64 { + return b.intField(4, 11) +} + +// Shared returns the shared status of this page. +// If this bit is 1, the page can be shared. +func (b PSAPI_WORKING_SET_EX_BLOCK) Shared() bool { + return (b & (1 << 15)) == 1 +} + +// Node is the NUMA node. The maximum value of this member is 63. +func (b PSAPI_WORKING_SET_EX_BLOCK) Node() uint64 { + return b.intField(16, 6) +} + +// Locked returns the locked status of this page. +// If this bit is 1, the virtual page is locked in physical memory. +func (b PSAPI_WORKING_SET_EX_BLOCK) Locked() bool { + return (b & (1 << 22)) == 1 +} + +// LargePage returns the large page status of this page. +// If this bit is 1, the page is a large page. +func (b PSAPI_WORKING_SET_EX_BLOCK) LargePage() bool { + return (b & (1 << 23)) == 1 +} + +// Bad returns the bad status of this page. 
+// If this bit is 1, the page is has been reported as bad. +func (b PSAPI_WORKING_SET_EX_BLOCK) Bad() bool { + return (b & (1 << 31)) == 1 +} + +// intField extracts an integer field in the PSAPI_WORKING_SET_EX_BLOCK union. +func (b PSAPI_WORKING_SET_EX_BLOCK) intField(start, length int) uint64 { + var mask PSAPI_WORKING_SET_EX_BLOCK + for pos := start; pos < start+length; pos++ { + mask |= (1 << pos) + } + + masked := b & mask + return uint64(masked >> start) +} + +// PSAPI_WORKING_SET_EX_INFORMATION contains extended working set information for a process. +type PSAPI_WORKING_SET_EX_INFORMATION struct { + // The virtual address. + VirtualAddress Pointer + // A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress. + VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index e19471c6a852..0c4add974106 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -160,6 +160,10 @@ const ( MAX_COMPUTERNAME_LENGTH = 15 + MAX_DHCPV6_DUID_LENGTH = 130 + + MAX_DNS_SUFFIX_STRING_LENGTH = 256 + TIME_ZONE_ID_UNKNOWN = 0 TIME_ZONE_ID_STANDARD = 1 @@ -2000,27 +2004,62 @@ type IpAdapterPrefix struct { } type IpAdapterAddresses struct { - Length uint32 - IfIndex uint32 - Next *IpAdapterAddresses - AdapterName *byte - FirstUnicastAddress *IpAdapterUnicastAddress - FirstAnycastAddress *IpAdapterAnycastAddress - FirstMulticastAddress *IpAdapterMulticastAddress - FirstDnsServerAddress *IpAdapterDnsServerAdapter - DnsSuffix *uint16 - Description *uint16 - FriendlyName *uint16 - PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte - PhysicalAddressLength uint32 - Flags uint32 - Mtu uint32 - IfType uint32 - OperStatus uint32 - Ipv6IfIndex uint32 - ZoneIndices [16]uint32 - FirstPrefix *IpAdapterPrefix - /* more fields might be present here. 
*/ + Length uint32 + IfIndex uint32 + Next *IpAdapterAddresses + AdapterName *byte + FirstUnicastAddress *IpAdapterUnicastAddress + FirstAnycastAddress *IpAdapterAnycastAddress + FirstMulticastAddress *IpAdapterMulticastAddress + FirstDnsServerAddress *IpAdapterDnsServerAdapter + DnsSuffix *uint16 + Description *uint16 + FriendlyName *uint16 + PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte + PhysicalAddressLength uint32 + Flags uint32 + Mtu uint32 + IfType uint32 + OperStatus uint32 + Ipv6IfIndex uint32 + ZoneIndices [16]uint32 + FirstPrefix *IpAdapterPrefix + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + FirstWinsServerAddress *IpAdapterWinsServerAddress + FirstGatewayAddress *IpAdapterGatewayAddress + Ipv4Metric uint32 + Ipv6Metric uint32 + Luid uint64 + Dhcpv4Server SocketAddress + CompartmentId uint32 + NetworkGuid GUID + ConnectionType uint32 + TunnelType uint32 + Dhcpv6Server SocketAddress + Dhcpv6ClientDuid [MAX_DHCPV6_DUID_LENGTH]byte + Dhcpv6ClientDuidLength uint32 + Dhcpv6Iaid uint32 + FirstDnsSuffix *IpAdapterDNSSuffix +} + +type IpAdapterWinsServerAddress struct { + Length uint32 + Reserved uint32 + Next *IpAdapterWinsServerAddress + Address SocketAddress +} + +type IpAdapterGatewayAddress struct { + Length uint32 + Reserved uint32 + Next *IpAdapterGatewayAddress + Address SocketAddress +} + +type IpAdapterDNSSuffix struct { + Next *IpAdapterDNSSuffix + String [MAX_DNS_SUFFIX_STRING_LENGTH]uint16 } const ( @@ -3174,3 +3213,48 @@ type ModuleInfo struct { } const ALL_PROCESSOR_GROUPS = 0xFFFF + +type Rect struct { + Left int32 + Top int32 + Right int32 + Bottom int32 +} + +type GUIThreadInfo struct { + Size uint32 + Flags uint32 + Active HWND + Focus HWND + Capture HWND + MenuOwner HWND + MoveSize HWND + CaretHandle HWND + CaretRect Rect +} + +const ( + DWMWA_NCRENDERING_ENABLED = 1 + DWMWA_NCRENDERING_POLICY = 2 + DWMWA_TRANSITIONS_FORCEDISABLED = 3 + DWMWA_ALLOW_NCPAINT = 4 + DWMWA_CAPTION_BUTTON_BOUNDS = 5 + DWMWA_NONCLIENT_RTL_LAYOUT = 6 + DWMWA_FORCE_ICONIC_REPRESENTATION = 7 + DWMWA_FLIP3D_POLICY = 8 + DWMWA_EXTENDED_FRAME_BOUNDS = 9 + DWMWA_HAS_ICONIC_BITMAP = 10 + DWMWA_DISALLOW_PEEK = 11 + DWMWA_EXCLUDED_FROM_PEEK = 12 + DWMWA_CLOAK = 13 + DWMWA_CLOAKED = 14 + DWMWA_FREEZE_REPRESENTATION = 15 + DWMWA_PASSIVE_UPDATE_MODE = 16 + DWMWA_USE_HOSTBACKDROPBRUSH = 17 + DWMWA_USE_IMMERSIVE_DARK_MODE = 20 + DWMWA_WINDOW_CORNER_PREFERENCE = 33 + DWMWA_BORDER_COLOR = 34 + DWMWA_CAPTION_COLOR = 35 + DWMWA_TEXT_COLOR = 36 + DWMWA_VISIBLE_FRAME_BORDER_THICKNESS = 37 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9ea1a44f04de..ac60052e44a7 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -40,6 +40,7 @@ var ( modadvapi32 = NewLazySystemDLL("advapi32.dll") modcrypt32 = NewLazySystemDLL("crypt32.dll") moddnsapi = NewLazySystemDLL("dnsapi.dll") + moddwmapi = NewLazySystemDLL("dwmapi.dll") modiphlpapi = NewLazySystemDLL("iphlpapi.dll") modkernel32 = NewLazySystemDLL("kernel32.dll") modmswsock = NewLazySystemDLL("mswsock.dll") @@ -175,8 +176,11 @@ var ( procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") + procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") procGetAdaptersAddresses = 
modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -248,6 +252,7 @@ var ( procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLargePageMinimum = modkernel32.NewProc("GetLargePageMinimum") procGetLastError = modkernel32.NewProc("GetLastError") procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") @@ -407,6 +412,7 @@ var ( procGetModuleBaseNameW = modpsapi.NewProc("GetModuleBaseNameW") procGetModuleFileNameExW = modpsapi.NewProc("GetModuleFileNameExW") procGetModuleInformation = modpsapi.NewProc("GetModuleInformation") + procQueryWorkingSetEx = modpsapi.NewProc("QueryWorkingSetEx") procSubscribeServiceChangeNotifications = modsechost.NewProc("SubscribeServiceChangeNotifications") procUnsubscribeServiceChangeNotifications = modsechost.NewProc("UnsubscribeServiceChangeNotifications") procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") @@ -442,9 +448,18 @@ var ( procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procEnumChildWindows = moduser32.NewProc("EnumChildWindows") + procEnumWindows = moduser32.NewProc("EnumWindows") procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") + procGetClassNameW = moduser32.NewProc("GetClassNameW") + procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") + procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") + procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") procGetShellWindow = moduser32.NewProc("GetShellWindow") procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") + procIsWindow = moduser32.NewProc("IsWindow") + procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") + procIsWindowVisible = moduser32.NewProc("IsWindowVisible") procMessageBoxW = moduser32.NewProc("MessageBoxW") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") @@ -1523,6 +1538,22 @@ func DnsRecordListFree(rl *DNSRecord, freetype uint32) { return } +func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { + r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { + r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), 
uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1539,6 +1570,14 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { return } +func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetIfEntry(pIfRow *MibIfRow) (errcode error) { r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) if r0 != 0 { @@ -2142,6 +2181,12 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( return } +func GetLargePageMinimum() (size uintptr) { + r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + size = uintptr(r0) + return +} + func GetLastError() (lasterr error) { r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) if r0 != 0 { @@ -2761,7 +2806,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree return } -func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { +func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { var _p0 *byte if len(buf) > 0 { _p0 = &buf[0] @@ -3203,7 +3248,7 @@ func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, return } -func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { +func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { var _p0 *byte if len(buf) > 0 { _p0 = &buf[0] @@ -3495,6 +3540,14 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb return } +func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) { + r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) { ret = procSubscribeServiceChangeNotifications.Find() if ret != nil { @@ -3784,6 +3837,19 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui return } +func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) { + syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param)) + return +} + +func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { + r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitWindowsEx(flags uint32, reason uint32) (err error) { r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) if r1 == 0 { @@ -3792,6 +3858,35 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) { return } +func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) { + r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) + copied = int32(r0) + if copied == 0 { + err = errnoErr(e1) + } + return +} + +func GetDesktopWindow() (hwnd HWND) { + r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0) + hwnd = HWND(r0) + return +} + +func GetForegroundWindow() (hwnd HWND) { + r0, _, _ := 
syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0) + hwnd = HWND(r0) + return +} + +func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetShellWindow() (shellWindow HWND) { r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) shellWindow = HWND(r0) @@ -3807,6 +3902,24 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { return } +func IsWindow(hwnd HWND) (isWindow bool) { + r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0) + isWindow = r0 != 0 + return +} + +func IsWindowUnicode(hwnd HWND) (isUnicode bool) { + r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0) + isUnicode = r0 != 0 + return +} + +func IsWindowVisible(hwnd HWND) (isVisible bool) { + r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0) + isVisible = r0 != 0 + return +} + func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) ret = int32(r0) diff --git a/vendor/golang.org/x/term/AUTHORS b/vendor/golang.org/x/term/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vendor/golang.org/x/term/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/term/CONTRIBUTORS b/vendor/golang.org/x/term/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vendor/golang.org/x/term/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/term/term.go b/vendor/golang.org/x/term/term.go index d5927088082b..1a40d1012563 100644 --- a/vendor/golang.org/x/term/term.go +++ b/vendor/golang.org/x/term/term.go @@ -7,11 +7,11 @@ // // Putting a terminal into raw mode is the most common requirement: // -// oldState, err := term.MakeRaw(int(os.Stdin.Fd())) -// if err != nil { -// panic(err) -// } -// defer term.Restore(int(os.Stdin.Fd()), oldState) +// oldState, err := term.MakeRaw(int(os.Stdin.Fd())) +// if err != nil { +// panic(err) +// } +// defer term.Restore(int(os.Stdin.Fd()), oldState) // // Note that on non-Unix systems os.Stdin.Fd() may not be 0. package term diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go index 535ab8257c41..f636667fb042 100644 --- a/vendor/golang.org/x/term/terminal.go +++ b/vendor/golang.org/x/term/terminal.go @@ -233,7 +233,6 @@ func (t *Terminal) queue(data []rune) { t.outBuf = append(t.outBuf, []byte(string(data))...) } -var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} var space = []rune{' '} func isPrintable(key rune) bool { @@ -935,7 +934,7 @@ func (s *stRingBuffer) Add(a string) { // next most recent, and so on. If such an element doesn't exist then ok is // false. 
func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { - if n >= s.size { + if n < 0 || n >= s.size { return "", false } index := s.head - n diff --git a/vendor/golang.org/x/text/AUTHORS b/vendor/golang.org/x/text/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vendor/golang.org/x/text/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/text/CONTRIBUTORS b/vendor/golang.org/x/text/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vendor/golang.org/x/text/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index e4c0811016c2..9d2ae547b5ed 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -193,14 +193,14 @@ func (p *paragraph) run() { // // At the end of this function: // -// - The member variable matchingPDI is set to point to the index of the -// matching PDI character for each isolate initiator character. If there is -// no matching PDI, it is set to the length of the input text. For other -// characters, it is set to -1. -// - The member variable matchingIsolateInitiator is set to point to the -// index of the matching isolate initiator character for each PDI character. -// If there is no matching isolate initiator, or the character is not a PDI, -// it is set to -1. +// - The member variable matchingPDI is set to point to the index of the +// matching PDI character for each isolate initiator character. If there is +// no matching PDI, it is set to the length of the input text. For other +// characters, it is set to -1. +// - The member variable matchingIsolateInitiator is set to point to the +// index of the matching isolate initiator character for each PDI character. +// If there is no matching isolate initiator, or the character is not a PDI, +// it is set to -1. func (p *paragraph) determineMatchingIsolates() { p.matchingPDI = make([]int, p.Len()) p.matchingIsolateInitiator = make([]int, p.Len()) @@ -435,7 +435,7 @@ func maxLevel(a, b level) level { } // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, -// either L or R, for each isolating run sequence. +// either L or R, for each isolating run sequence. func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { length := len(indexes) types := make([]Class, length) @@ -495,9 +495,9 @@ func (s *isolatingRunSequence) resolveWeakTypes() { if t == NSM { s.types[i] = precedingCharacterType } else { - if t.in(LRI, RLI, FSI, PDI) { - precedingCharacterType = ON - } + // if t.in(LRI, RLI, FSI, PDI) { + // precedingCharacterType = ON + // } precedingCharacterType = t } } @@ -905,7 +905,7 @@ func (p *paragraph) getLevels(linebreaks []int) []level { // Lines are concatenated from left to right. 
So for example, the fifth // character from the left on the third line is // -// getReordering(linebreaks)[linebreaks[1] + 4] +// getReordering(linebreaks)[linebreaks[1] + 4] // // (linebreaks[1] is the position after the last character of the second // line, which is also the index of the first character on the third line, diff --git a/vendor/golang.org/x/text/unicode/bidi/trieval.go b/vendor/golang.org/x/text/unicode/bidi/trieval.go index 4c459c4b72e0..6a796e2214c6 100644 --- a/vendor/golang.org/x/text/unicode/bidi/trieval.go +++ b/vendor/golang.org/x/text/unicode/bidi/trieval.go @@ -37,18 +37,6 @@ const ( unknownClass = ^Class(0) ) -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - // A trie entry has the following bits: // 7..5 XOR mask for brackets // 4 1: Bracket open, 0: Bracket close diff --git a/vendor/golang.org/x/text/unicode/norm/forminfo.go b/vendor/golang.org/x/text/unicode/norm/forminfo.go index 526c7033ac46..d69ccb4f9761 100644 --- a/vendor/golang.org/x/text/unicode/norm/forminfo.go +++ b/vendor/golang.org/x/text/unicode/norm/forminfo.go @@ -110,10 +110,11 @@ func (p Properties) BoundaryAfter() bool { } // We pack quick check data in 4 bits: -// 5: Combines forward (0 == false, 1 == true) -// 4..3: NFC_QC Yes(00), No (10), or Maybe (11) -// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition. -// 1..0: Number of trailing non-starters. +// +// 5: Combines forward (0 == false, 1 == true) +// 4..3: NFC_QC Yes(00), No (10), or Maybe (11) +// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition. +// 1..0: Number of trailing non-starters. // // When all 4 bits are zero, the character is inert, meaning it is never // influenced by normalization. diff --git a/vendor/golang.org/x/text/unicode/norm/normalize.go b/vendor/golang.org/x/text/unicode/norm/normalize.go index 95efcf26e81d..4747ad07a839 100644 --- a/vendor/golang.org/x/text/unicode/norm/normalize.go +++ b/vendor/golang.org/x/text/unicode/norm/normalize.go @@ -18,16 +18,17 @@ import ( // A Form denotes a canonical representation of Unicode code points. // The Unicode-defined normalization and equivalence forms are: // -// NFC Unicode Normalization Form C -// NFD Unicode Normalization Form D -// NFKC Unicode Normalization Form KC -// NFKD Unicode Normalization Form KD +// NFC Unicode Normalization Form C +// NFD Unicode Normalization Form D +// NFKC Unicode Normalization Form KC +// NFKD Unicode Normalization Form KD // // For a Form f, this documentation uses the notation f(x) to mean // the bytes or string x converted to the given form. // A position n in x is called a boundary if conversion to the form can // proceed independently on both sides: -// f(x) == append(f(x[0:n]), f(x[n:])...) +// +// f(x) == append(f(x[0:n]), f(x[n:])...) // // References: https://unicode.org/reports/tr15/ and // https://unicode.org/notes/tn5/. 
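The Form documentation above lists the four Unicode normalization forms and the boundary property f(x) == append(f(x[0:n]), f(x[n:])...). As an illustrative sketch only (not part of this patch; norm.NFC and norm.NFD and their String method are the package's existing exported API), composing and decomposing an accented letter looks like:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// "e" followed by the combining acute accent U+0301.
	decomposed := "e\u0301"
	// NFC composes the pair into the single precomposed code point U+00E9.
	fmt.Println(norm.NFC.String(decomposed) == "\u00e9") // true
	// NFD goes the other way: it decomposes U+00E9 back into e + U+0301.
	fmt.Println(norm.NFD.String("\u00e9") == decomposed) // true
}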
diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index 96a130d30e9e..9115ef257e83 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -7315,7 +7315,7 @@ const recompMapPacked = "" + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E - "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00v\x03#\x00\x00\x1e\x7f" + // 0x00760323: 0x00001E7F "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 @@ -7342,7 +7342,7 @@ const recompMapPacked = "" + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 - "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x01\x7f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 diff --git a/vendor/golang.org/x/text/width/kind_string.go b/vendor/golang.org/x/text/width/kind_string.go deleted file mode 100644 index dd3febd43b27..000000000000 --- a/vendor/golang.org/x/text/width/kind_string.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type=Kind"; DO NOT EDIT. - -package width - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[Neutral-0] - _ = x[EastAsianAmbiguous-1] - _ = x[EastAsianWide-2] - _ = x[EastAsianNarrow-3] - _ = x[EastAsianFullwidth-4] - _ = x[EastAsianHalfwidth-5] -} - -const _Kind_name = "NeutralEastAsianAmbiguousEastAsianWideEastAsianNarrowEastAsianFullwidthEastAsianHalfwidth" - -var _Kind_index = [...]uint8{0, 7, 25, 38, 53, 71, 89} - -func (i Kind) String() string { - if i < 0 || i >= Kind(len(_Kind_index)-1) { - return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] -} diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go deleted file mode 100644 index 186b1d4efac5..000000000000 --- a/vendor/golang.org/x/text/width/tables10.0.0.go +++ /dev/null @@ -1,1319 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "10.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
- case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. 
- } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c59df54630d3dc4a. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 101 blocks, 6464 entries, 12928 bytes -// The third block is the zero block. -var widthValues = [6464]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 
0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 
0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 
0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 
0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 
0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 
0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 
0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, - 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 
0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 
0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, - 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, - 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, - 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, - 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, - 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, - 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, - 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, - 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, - 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, - // Block 0x3e, offset 0xf80 - 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, - 0xf86: 0x4000, - // Block 0x3f, offset 0xfc0 - 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, - 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, - 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, - 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, - 0xffc: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, - 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, - 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, - 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, - 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, - 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, - 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, - 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, - 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, - // Block 0x42, offset 0x1080 - 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, - 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, - 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, - 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, - 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, - 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 
0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, - 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, - 0x10aa: 0x4000, 0x10ab: 0x4000, - // Block 0x43, offset 0x10c0 - 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, - 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, - 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, - 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, - 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, - 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, - 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, - 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, - 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, - 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, - 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, - // Block 0x44, offset 0x1100 - 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, - 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, - 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, - 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, - 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, - 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, - 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, - 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, - 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, - 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, - 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, - // Block 0x45, offset 0x1140 - 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, - 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, - 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, - 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, - 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, - 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, - 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, - 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, - 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, - 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, - 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, - // Block 0x46, offset 0x1180 - 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, - 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, - 0x118c: 0xb083, 0x118d: 
0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, - 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, - 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, - 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, - 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, - 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, - 0x11bd: 0x2000, - // Block 0x47, offset 0x11c0 - 0x11e0: 0x4000, 0x11e1: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, - 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, - 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, - 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, - 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, - 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, - 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, - 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, - 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, - 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, - 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, - 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, - // Block 0x4a, offset 0x1280 - 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, - 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, - 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, - 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, - 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, - 0x129e: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, - 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, - 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, - // Block 0x4c, offset 0x1300 - 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, - 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, - 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, - 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, - 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, - 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 
0x1322: 0x4000, 0x1323: 0x4000, - 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, - 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, - 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, - 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1344: 0x4000, - // Block 0x4e, offset 0x1380 - 0x138f: 0x4000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, - 0x13d0: 0x2000, 0x13d1: 0x2000, - 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, - 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, - 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, - 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, - 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, - 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, - 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, - 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, - 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, - 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, - 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, - 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, - 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, - 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, - // Block 0x51, offset 0x1440 - 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, - 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, - 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, - 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, - 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, - 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, - 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, - 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, - 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 
0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, - 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, - 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, - 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, - 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, - 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, - 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, - 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, - 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, - 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, - 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, - 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 
0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, - 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, - 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f4: 0x4000, - 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, - 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, - // Block 0x59, offset 0x1640 - 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, - 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, - 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, - 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, - 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, - 0x167c: 0x4000, 0x167f: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, - 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, - 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, - 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, - 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, - 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, - 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 
0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, - 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, - 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, - 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, - 0x16bc: 0x4000, 0x16bd: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16cb: 0x4000, - 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, - 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, - 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, - 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, - 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, - 0x16fa: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1715: 0x4000, 0x1716: 0x4000, - 0x1724: 0x4000, - // Block 0x5d, offset 0x1740 - 0x177b: 0x4000, - 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, - // Block 0x5e, offset 0x1780 - 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, - 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, - 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, - 0x17d2: 0x4000, - 0x17eb: 0x4000, 0x17ec: 0x4000, - 0x17f4: 0x4000, 0x17f5: 0x4000, - 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, - // Block 0x60, offset 0x1800 - 0x1810: 0x4000, 0x1811: 0x4000, - 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, - 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, - 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, - 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, - 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, - 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, - 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, - 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, - // Block 0x61, offset 0x1840 - 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, - 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000, - 0x184c: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, - 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, - 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, - 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, - 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, - 0x186a: 0x4000, 0x186b: 0x4000, - // Block 0x62, offset 0x1880 - 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, - 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, - 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, - 0x1892: 0x4000, 0x1893: 0x4000, 
0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x4000, - 0x18d0: 0x4000, 0x18d1: 0x4000, - 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, - 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, - 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, - 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, - // Block 0x64, offset 0x1900 - 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, - 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, - 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, - 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, - 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, - 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, - 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 0x1929: 0x2000, - 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, - 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, - 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, - 0x193c: 0x2000, 0x193d: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. -var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 
0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, - 0x265: 0x3d, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 
0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, - // Block 0xd, offset 0x340 - 0x37f: 0x45, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, - 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, - // Block 0x10, offset 0x400 - 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, - 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, - 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, - 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, - 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 
0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// <0 padding> -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. -var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - 
{0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 14936 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go deleted file mode 100644 index 990f7622f175..000000000000 --- a/vendor/golang.org/x/text/width/tables11.0.0.go +++ /dev/null @@ -1,1331 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "11.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. 
- } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c0f7712776e71cd4. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 101 blocks, 6464 entries, 12928 bytes -// The third block is the zero block. -var widthValues = [6464]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 
0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 
0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 
0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 
0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 
0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 
0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 
0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 0x4000, - 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 
0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, 0xf47: 
0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, - 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, - 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, - 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, - 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, - 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, - 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, - 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, - 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, - 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, - // Block 0x3e, offset 0xf80 - 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, - 0xf86: 0x4000, - // Block 0x3f, offset 0xfc0 - 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, - 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, - 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, - 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, - 0xffc: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, - 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, - 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, - 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, - 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, - 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, - 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, - 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, - 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, - // Block 0x42, offset 0x1080 - 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, - 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, - 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, - 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, - 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, - 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, - 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, - 0x10aa: 0x4000, 0x10ab: 0x4000, - // Block 0x43, offset 0x10c0 - 0x10c1: 0x9012, 0x10c2: 
0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, - 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, - 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, - 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, - 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, - 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, - 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, - 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, - 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, - 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, - 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, - // Block 0x44, offset 0x1100 - 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, - 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, - 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, - 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, - 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, - 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, - 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, - 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, - 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, - 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, - 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, - // Block 0x45, offset 0x1140 - 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, - 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, - 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, - 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, - 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, - 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, - 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, - 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, - 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, - 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, - 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, - // Block 0x46, offset 0x1180 - 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, - 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, - 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, - 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, - 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, - 0x11a0: 0x908b, 0x11a1: 0x908b, 
0x11a2: 0x908c, 0x11a3: 0x908d, - 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, - 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, - 0x11bd: 0x2000, - // Block 0x47, offset 0x11c0 - 0x11e0: 0x4000, 0x11e1: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, - 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, - 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, - 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, - 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000, - 0x1230: 0x4000, 0x1231: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, - 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, - 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, - 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, - 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, - 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, - 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, - 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, - // Block 0x4a, offset 0x1280 - 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, - 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, - 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, - 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, - 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, - 0x129e: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, - 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, - 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, - // Block 0x4c, offset 0x1300 - 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, - 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, - 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, - 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, - 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, - 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, - 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, - 0x132a: 
0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, - 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, - 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1344: 0x4000, - // Block 0x4e, offset 0x1380 - 0x138f: 0x4000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, - 0x13d0: 0x2000, 0x13d1: 0x2000, - 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, - 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, - 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, - 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, - 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, - 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, - 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, - 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, - 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, - 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, - 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, - 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, - 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, - 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, - // Block 0x51, offset 0x1440 - 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, - 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, - 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, - 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, - 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, - 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, - 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, - 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, - 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, - 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 
0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, - 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, - 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, - 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, - 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, - 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, - 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, - 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, - 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, - 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, - 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 
0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, - 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, - 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f4: 0x4000, - 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, - 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, - // Block 0x59, offset 0x1640 - 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, - 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, - 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, - 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, - 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, - 0x167c: 0x4000, 0x167f: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, - 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, - 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, - 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, - 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, - 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, - 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, - 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, 
- 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, - 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, - 0x16bc: 0x4000, 0x16bd: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16cb: 0x4000, - 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, - 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, - 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, - 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, - 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, - 0x16fa: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1715: 0x4000, 0x1716: 0x4000, - 0x1724: 0x4000, - // Block 0x5d, offset 0x1740 - 0x177b: 0x4000, - 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, - // Block 0x5e, offset 0x1780 - 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, - 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, - 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, - 0x17d2: 0x4000, - 0x17eb: 0x4000, 0x17ec: 0x4000, - 0x17f4: 0x4000, 0x17f5: 0x4000, - 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000, - // Block 0x60, offset 0x1800 - 0x1810: 0x4000, 0x1811: 0x4000, - 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, - 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, - 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, - 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, - 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, - 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, - 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, - 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, - // Block 0x61, offset 0x1840 - 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, - 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000, - 0x184c: 0x4000, 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, - 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, - 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, - 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, - 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, - 0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000, - 0x1870: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000, - 0x1876: 0x4000, 0x187a: 0x4000, - 0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000, - // Block 0x62, offset 0x1880 - 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, - 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 
0x188b: 0x4000, - 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, - 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, - 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, - 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, - 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, - 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, - 0x18d0: 0x4000, 0x18d1: 0x4000, - 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, - 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, - 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, - 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, - 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, - 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, - 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, - 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, - // Block 0x64, offset 0x1900 - 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, - 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, - 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, - 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, - 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, - 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, - 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 0x1929: 0x2000, - 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, - 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, - 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, - 0x193c: 0x2000, 0x193d: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. 
-var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, - 0x265: 0x3d, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, - // Block 0xd, offset 0x340 - 0x37f: 0x45, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, - 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, - // Block 0x10, 
offset 0x400 - 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, - 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, - 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, - 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, - 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// <0 padding> -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. 
-var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 
0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 14936 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables12.0.0.go b/vendor/golang.org/x/text/width/tables12.0.0.go deleted file mode 100644 index 85296297e38c..000000000000 --- a/vendor/golang.org/x/text/width/tables12.0.0.go +++ /dev/null @@ -1,1351 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "12.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14720 bytes (14.38 KiB). Checksum: 3f4f2516ded5489b. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 104 blocks, 6656 entries, 13312 bytes -// The third block is the zero block. 
-var widthValues = [6656]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // 
Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, - 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 
0xea3: 0x4000, - 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, - 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, - 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, - 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, - 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, - 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, - 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, - 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, - 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, - 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, - // Block 0x3e, offset 0xf80 - 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, - 0xf86: 
0x4000, - // Block 0x3f, offset 0xfc0 - 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, - 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, - 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, - 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, - 0xffc: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, - 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, - 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, - 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, - 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, - 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, - 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, - 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, - 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, - // Block 0x42, offset 0x1080 - 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, - 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, - 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, - 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, - 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, - 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, - 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, - 0x10aa: 0x4000, 0x10ab: 0x4000, - // Block 0x43, offset 0x10c0 - 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, - 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, - 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, - 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, - 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, - 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, - 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, - 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, - 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, - 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, - 0x10fc: 0x9049, 
0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, - // Block 0x44, offset 0x1100 - 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, - 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, - 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, - 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, - 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, - 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, - 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, - 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, - 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, - 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, - 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, - // Block 0x45, offset 0x1140 - 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, - 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, - 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, - 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, - 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, - 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, - 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, - 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, - 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, - 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, - 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, - // Block 0x46, offset 0x1180 - 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, - 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, - 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, - 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, - 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, - 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, - 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, - 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, - 0x11bd: 0x2000, - // Block 0x47, offset 0x11c0 - 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, - 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, - 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, - 0x1224: 0x4000, 0x1225: 
0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, - 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000, - 0x1230: 0x4000, 0x1231: 0x4000, 0x1232: 0x4000, 0x1233: 0x4000, 0x1234: 0x4000, 0x1235: 0x4000, - 0x1236: 0x4000, 0x1237: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, - 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, - 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, - 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, - 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, - 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, - 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, - 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, - // Block 0x4a, offset 0x1280 - 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, - 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, - 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, - 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, - 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, - 0x129e: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12d0: 0x4000, 0x12d1: 0x4000, - 0x12d2: 0x4000, - 0x12e4: 0x4000, 0x12e5: 0x4000, 0x12e6: 0x4000, 0x12e7: 0x4000, - 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, - 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, - 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, - // Block 0x4c, offset 0x1300 - 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, - 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, - 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, - 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, - 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, - 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, - 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, - 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, - 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, - 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1344: 0x4000, - // Block 0x4e, offset 0x1380 - 0x138f: 0x4000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, - 0x13d0: 0x2000, 0x13d1: 0x2000, - 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 
0x13d7: 0x2000, - 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, - 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, - 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, - 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, - 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, - 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, - 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, - 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, - 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, - 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, - 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, - 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, - 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, - // Block 0x51, offset 0x1440 - 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, - 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, - 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, - 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, - 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, - 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, - 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, - 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, - 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, - 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, - 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, - 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 
0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, - 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, - 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, - 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, - 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, - 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, - 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, - 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, - 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, - 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, - 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f4: 0x4000, - 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 
0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, - 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, - // Block 0x59, offset 0x1640 - 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, - 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, - 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, - 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, - 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, - 0x167c: 0x4000, 0x167f: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, - 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, - 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, - 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, - 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, - 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, - 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, - 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, - 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, - 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, - 0x16bc: 0x4000, 0x16bd: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16cb: 0x4000, - 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, - 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, - 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, - 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, - 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 
0x16e7: 0x4000, - 0x16fa: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1715: 0x4000, 0x1716: 0x4000, - 0x1724: 0x4000, - // Block 0x5d, offset 0x1740 - 0x177b: 0x4000, - 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, - // Block 0x5e, offset 0x1780 - 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, - 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, - 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, - 0x17d2: 0x4000, 0x17d5: 0x4000, - 0x17eb: 0x4000, 0x17ec: 0x4000, - 0x17f4: 0x4000, 0x17f5: 0x4000, - 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000, 0x17fa: 0x4000, - // Block 0x60, offset 0x1800 - 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, - 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, - 0x182a: 0x4000, 0x182b: 0x4000, - // Block 0x61, offset 0x1840 - 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, - 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, - 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, - 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, - 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, - 0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000, - 0x1870: 0x4000, 0x1871: 0x4000, 0x1872: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000, - 0x1876: 0x4000, 0x1877: 0x4000, 0x1878: 0x4000, 0x1879: 0x4000, 0x187a: 0x4000, 0x187b: 0x4000, - 0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000, - // Block 0x62, offset 0x1880 - 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, - 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, - 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, - 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, - 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, - 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 0x4000, - 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, - 0x18aa: 0x4000, 0x18ab: 0x4000, 0x18ac: 0x4000, 0x18ad: 0x4000, 0x18ae: 0x4000, 0x18af: 0x4000, - 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, - 0x18b6: 0x4000, 0x18ba: 0x4000, 0x18bb: 0x4000, - 0x18bc: 0x4000, 0x18bd: 0x4000, 0x18be: 0x4000, 0x18bf: 0x4000, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, 0x18c3: 0x4000, 0x18c4: 0x4000, 0x18c5: 0x4000, - 0x18c6: 0x4000, 0x18c7: 0x4000, 0x18c8: 0x4000, 0x18c9: 0x4000, 0x18ca: 0x4000, 0x18cb: 0x4000, - 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, - 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, - 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, - 0x18de: 0x4000, 0x18df: 0x4000, 
0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, - 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, - 0x18ea: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, - 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, - 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, - 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, - // Block 0x64, offset 0x1900 - 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, - 0x1906: 0x4000, 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, - 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, - 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, - 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, - 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 0x1923: 0x4000, - 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, - 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, - 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, - 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, - 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, - // Block 0x65, offset 0x1940 - 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, - 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, - // Block 0x66, offset 0x1980 - 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, - 0x1990: 0x4000, 0x1991: 0x4000, - 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, - // Block 0x67, offset 0x19c0 - 0x19c0: 0x2000, 0x19c1: 0x2000, 0x19c2: 0x2000, 0x19c3: 0x2000, 0x19c4: 0x2000, 0x19c5: 0x2000, - 0x19c6: 0x2000, 0x19c7: 0x2000, 0x19c8: 0x2000, 0x19c9: 0x2000, 0x19ca: 0x2000, 0x19cb: 0x2000, - 0x19cc: 0x2000, 0x19cd: 0x2000, 0x19ce: 0x2000, 0x19cf: 0x2000, 0x19d0: 0x2000, 0x19d1: 0x2000, - 0x19d2: 0x2000, 0x19d3: 0x2000, 0x19d4: 0x2000, 0x19d5: 0x2000, 0x19d6: 0x2000, 0x19d7: 0x2000, - 0x19d8: 0x2000, 0x19d9: 0x2000, 0x19da: 0x2000, 0x19db: 0x2000, 0x19dc: 0x2000, 0x19dd: 0x2000, - 0x19de: 0x2000, 0x19df: 0x2000, 0x19e0: 0x2000, 0x19e1: 0x2000, 0x19e2: 0x2000, 0x19e3: 0x2000, - 0x19e4: 0x2000, 0x19e5: 0x2000, 0x19e6: 0x2000, 0x19e7: 0x2000, 0x19e8: 0x2000, 0x19e9: 0x2000, - 0x19ea: 0x2000, 0x19eb: 0x2000, 0x19ec: 0x2000, 0x19ed: 0x2000, 0x19ee: 0x2000, 0x19ef: 0x2000, - 0x19f0: 0x2000, 0x19f1: 0x2000, 0x19f2: 0x2000, 0x19f3: 0x2000, 0x19f4: 0x2000, 0x19f5: 0x2000, - 0x19f6: 0x2000, 0x19f7: 0x2000, 0x19f8: 0x2000, 0x19f9: 0x2000, 0x19fa: 0x2000, 0x19fb: 0x2000, - 0x19fc: 0x2000, 0x19fd: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. 
-var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, - 0x265: 0x3d, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, - // Block 0xd, offset 0x340 - 0x37f: 0x45, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, - 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, - // Block 0x10, 
offset 0x400 - 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, - 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, - 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, - 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, 0x41f: 0x5e, - 0x424: 0x5f, 0x425: 0x60, 0x426: 0x61, 0x427: 0x62, - 0x429: 0x63, 0x42a: 0x64, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x65, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// <0 padding> -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. 
-var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 
0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 15320 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables13.0.0.go b/vendor/golang.org/x/text/width/tables13.0.0.go deleted file mode 100644 index bac3f1aee341..000000000000 --- a/vendor/golang.org/x/text/width/tables13.0.0.go +++ /dev/null @@ -1,1352 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.16 -// +build go1.16 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "13.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14848 bytes (14.50 KiB). Checksum: 17e24343536472f6. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 105 blocks, 6720 entries, 13440 bytes -// The third block is the zero block. 
-var widthValues = [6720]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // 
Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, - 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, 0xe3b: 0x4000, - 0xe3c: 0x4000, 0xe3d: 0x4000, 0xe3e: 0x4000, 0xe3f: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 
0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, 0xf3f: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, - // Block 0x3e, offset 0xf80 - 0xfa0: 0x4000, 0xfa1: 0x4000, 0xfa2: 0x4000, 0xfa3: 0x4000, - 0xfa4: 0x4000, 0xfa5: 0x4000, 0xfa6: 0x4000, 0xfa7: 0x4000, 0xfa8: 0x4000, 0xfa9: 0x4000, - 0xfaa: 0x4000, 0xfab: 0x4000, 0xfac: 0x4000, 0xfad: 0x4000, 0xfae: 0x4000, 0xfaf: 0x4000, - 0xfb0: 0x4000, 0xfb1: 0x4000, 0xfb2: 0x4000, 0xfb3: 0x4000, 0xfb4: 0x4000, 0xfb5: 0x4000, - 0xfb6: 0x4000, 0xfb7: 0x4000, 0xfb8: 0x4000, 0xfb9: 0x4000, 0xfba: 0x4000, 0xfbb: 0x4000, - 0xfbc: 0x4000, - // Block 0x3f, offset 0xfc0 - 0xfc0: 0x4000, 0xfc1: 0x4000, 0xfc2: 0x4000, 0xfc3: 0x4000, 0xfc4: 0x4000, 0xfc5: 0x4000, - 0xfc6: 0x4000, 0xfc7: 0x4000, 0xfc8: 0x4000, 0xfc9: 0x4000, 0xfca: 0x4000, 0xfcb: 0x4000, - 0xfcc: 0x4000, 0xfcd: 0x4000, 0xfce: 0x4000, 0xfcf: 0x4000, 0xfd0: 0x4000, 0xfd1: 0x4000, - 0xfd2: 0x4000, 0xfd3: 0x4000, 0xfd4: 0x4000, 0xfd5: 0x4000, 0xfd6: 0x4000, 0xfd7: 0x4000, - 0xfd8: 0x4000, 0xfd9: 0x4000, 0xfda: 
0x4000, 0xfdb: 0x4000, 0xfdc: 0x4000, 0xfdd: 0x4000, - 0xfde: 0x4000, 0xfdf: 0x4000, 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x2000, 0x1001: 0x2000, 0x1002: 0x2000, 0x1003: 0x2000, 0x1004: 0x2000, 0x1005: 0x2000, - 0x1006: 0x2000, 0x1007: 0x2000, 0x1008: 0x2000, 0x1009: 0x2000, 0x100a: 0x2000, 0x100b: 0x2000, - 0x100c: 0x2000, 0x100d: 0x2000, 0x100e: 0x2000, 0x100f: 0x2000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, - 0x1030: 0x4000, 0x1031: 0x4000, 0x1032: 0x4000, 0x1033: 0x4000, 0x1034: 0x4000, 0x1035: 0x4000, - 0x1036: 0x4000, 0x1037: 0x4000, 0x1038: 0x4000, 0x1039: 0x4000, 0x103a: 0x4000, 0x103b: 0x4000, - 0x103c: 0x4000, 0x103d: 0x4000, 0x103e: 0x4000, 0x103f: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x4000, 0x1041: 0x4000, 0x1042: 0x4000, 0x1043: 0x4000, 0x1044: 0x4000, 0x1045: 0x4000, - 0x1046: 0x4000, 0x1047: 0x4000, 0x1048: 0x4000, 0x1049: 0x4000, 0x104a: 0x4000, 0x104b: 0x4000, - 0x104c: 0x4000, 0x104d: 0x4000, 0x104e: 0x4000, 0x104f: 0x4000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, 0x105a: 0x4000, 0x105b: 0x4000, 0x105c: 0x4000, 0x105d: 0x4000, - 0x105e: 0x4000, 0x105f: 0x4000, 0x1060: 0x4000, 0x1061: 0x4000, 0x1062: 0x4000, 0x1063: 0x4000, - 0x1064: 0x4000, 0x1065: 0x4000, 0x1066: 0x4000, 0x1068: 0x4000, 0x1069: 0x4000, - 0x106a: 0x4000, 0x106b: 0x4000, - // Block 0x42, offset 0x1080 - 0x1081: 0x9012, 0x1082: 0x9012, 0x1083: 0x9012, 0x1084: 0x9012, 0x1085: 0x9012, - 0x1086: 0x9012, 0x1087: 0x9012, 0x1088: 0x9012, 0x1089: 0x9012, 0x108a: 0x9012, 0x108b: 0x9012, - 0x108c: 0x9012, 0x108d: 0x9012, 0x108e: 0x9012, 0x108f: 0x9012, 0x1090: 0x9012, 0x1091: 0x9012, - 0x1092: 0x9012, 0x1093: 0x9012, 0x1094: 0x9012, 0x1095: 0x9012, 0x1096: 0x9012, 0x1097: 0x9012, - 0x1098: 0x9012, 0x1099: 0x9012, 0x109a: 0x9012, 0x109b: 0x9012, 0x109c: 0x9012, 0x109d: 0x9012, - 0x109e: 0x9012, 0x109f: 0x9012, 0x10a0: 0x9049, 0x10a1: 0x9049, 0x10a2: 0x9049, 0x10a3: 0x9049, - 0x10a4: 0x9049, 0x10a5: 0x9049, 0x10a6: 0x9049, 0x10a7: 0x9049, 0x10a8: 0x9049, 0x10a9: 0x9049, - 0x10aa: 0x9049, 0x10ab: 0x9049, 0x10ac: 0x9049, 0x10ad: 0x9049, 0x10ae: 0x9049, 0x10af: 0x9049, - 0x10b0: 0x9049, 0x10b1: 0x9049, 0x10b2: 0x9049, 0x10b3: 0x9049, 0x10b4: 0x9049, 0x10b5: 0x9049, - 0x10b6: 0x9049, 0x10b7: 0x9049, 0x10b8: 0x9049, 0x10b9: 0x9049, 0x10ba: 0x9049, 0x10bb: 0x9049, - 0x10bc: 0x9049, 0x10bd: 0x9049, 0x10be: 0x9049, 0x10bf: 0x9049, - // Block 0x43, offset 0x10c0 - 0x10c0: 0x9049, 0x10c1: 0x9049, 0x10c2: 0x9049, 0x10c3: 0x9049, 0x10c4: 0x9049, 0x10c5: 0x9049, - 0x10c6: 0x9049, 0x10c7: 0x9049, 0x10c8: 0x9049, 0x10c9: 0x9049, 0x10ca: 0x9049, 0x10cb: 0x9049, - 0x10cc: 0x9049, 0x10cd: 0x9049, 0x10ce: 0x9049, 0x10cf: 0x9049, 0x10d0: 0x9049, 0x10d1: 0x9049, - 0x10d2: 0x9049, 0x10d3: 0x9049, 0x10d4: 0x9049, 0x10d5: 0x9049, 0x10d6: 0x9049, 0x10d7: 0x9049, - 0x10d8: 0x9049, 0x10d9: 0x9049, 0x10da: 0x9049, 0x10db: 0x9049, 0x10dc: 0x9049, 0x10dd: 0x9049, - 0x10de: 0x9049, 0x10df: 0x904a, 0x10e0: 0x904b, 0x10e1: 0xb04c, 0x10e2: 0xb04d, 0x10e3: 0xb04d, - 0x10e4: 0xb04e, 0x10e5: 0xb04f, 0x10e6: 0xb050, 0x10e7: 0xb051, 0x10e8: 0xb052, 0x10e9: 0xb053, - 0x10ea: 0xb054, 0x10eb: 0xb055, 0x10ec: 0xb056, 0x10ed: 0xb057, 0x10ee: 0xb058, 0x10ef: 0xb059, - 0x10f0: 0xb05a, 0x10f1: 0xb05b, 0x10f2: 0xb05c, 0x10f3: 0xb05d, 0x10f4: 0xb05e, 0x10f5: 
0xb05f, - 0x10f6: 0xb060, 0x10f7: 0xb061, 0x10f8: 0xb062, 0x10f9: 0xb063, 0x10fa: 0xb064, 0x10fb: 0xb065, - 0x10fc: 0xb052, 0x10fd: 0xb066, 0x10fe: 0xb067, 0x10ff: 0xb055, - // Block 0x44, offset 0x1100 - 0x1100: 0xb068, 0x1101: 0xb069, 0x1102: 0xb06a, 0x1103: 0xb06b, 0x1104: 0xb05a, 0x1105: 0xb056, - 0x1106: 0xb06c, 0x1107: 0xb06d, 0x1108: 0xb06b, 0x1109: 0xb06e, 0x110a: 0xb06b, 0x110b: 0xb06f, - 0x110c: 0xb06f, 0x110d: 0xb070, 0x110e: 0xb070, 0x110f: 0xb071, 0x1110: 0xb056, 0x1111: 0xb072, - 0x1112: 0xb073, 0x1113: 0xb072, 0x1114: 0xb074, 0x1115: 0xb073, 0x1116: 0xb075, 0x1117: 0xb075, - 0x1118: 0xb076, 0x1119: 0xb076, 0x111a: 0xb077, 0x111b: 0xb077, 0x111c: 0xb073, 0x111d: 0xb078, - 0x111e: 0xb079, 0x111f: 0xb067, 0x1120: 0xb07a, 0x1121: 0xb07b, 0x1122: 0xb07b, 0x1123: 0xb07b, - 0x1124: 0xb07b, 0x1125: 0xb07b, 0x1126: 0xb07b, 0x1127: 0xb07b, 0x1128: 0xb07b, 0x1129: 0xb07b, - 0x112a: 0xb07b, 0x112b: 0xb07b, 0x112c: 0xb07b, 0x112d: 0xb07b, 0x112e: 0xb07b, 0x112f: 0xb07b, - 0x1130: 0xb07c, 0x1131: 0xb07c, 0x1132: 0xb07c, 0x1133: 0xb07c, 0x1134: 0xb07c, 0x1135: 0xb07c, - 0x1136: 0xb07c, 0x1137: 0xb07c, 0x1138: 0xb07c, 0x1139: 0xb07c, 0x113a: 0xb07c, 0x113b: 0xb07c, - 0x113c: 0xb07c, 0x113d: 0xb07c, 0x113e: 0xb07c, - // Block 0x45, offset 0x1140 - 0x1142: 0xb07d, 0x1143: 0xb07e, 0x1144: 0xb07f, 0x1145: 0xb080, - 0x1146: 0xb07f, 0x1147: 0xb07e, 0x114a: 0xb081, 0x114b: 0xb082, - 0x114c: 0xb083, 0x114d: 0xb07f, 0x114e: 0xb080, 0x114f: 0xb07f, - 0x1152: 0xb084, 0x1153: 0xb085, 0x1154: 0xb084, 0x1155: 0xb086, 0x1156: 0xb084, 0x1157: 0xb087, - 0x115a: 0xb088, 0x115b: 0xb089, 0x115c: 0xb08a, - 0x1160: 0x908b, 0x1161: 0x908b, 0x1162: 0x908c, 0x1163: 0x908d, - 0x1164: 0x908b, 0x1165: 0x908e, 0x1166: 0x908f, 0x1168: 0xb090, 0x1169: 0xb091, - 0x116a: 0xb092, 0x116b: 0xb091, 0x116c: 0xb093, 0x116d: 0xb094, 0x116e: 0xb095, - 0x117d: 0x2000, - // Block 0x46, offset 0x1180 - 0x11a0: 0x4000, 0x11a1: 0x4000, 0x11a2: 0x4000, 0x11a3: 0x4000, - 0x11a4: 0x4000, - 0x11b0: 0x4000, 0x11b1: 0x4000, - // Block 0x47, offset 0x11c0 - 0x11c0: 0x4000, 0x11c1: 0x4000, 0x11c2: 0x4000, 0x11c3: 0x4000, 0x11c4: 0x4000, 0x11c5: 0x4000, - 0x11c6: 0x4000, 0x11c7: 0x4000, 0x11c8: 0x4000, 0x11c9: 0x4000, 0x11ca: 0x4000, 0x11cb: 0x4000, - 0x11cc: 0x4000, 0x11cd: 0x4000, 0x11ce: 0x4000, 0x11cf: 0x4000, 0x11d0: 0x4000, 0x11d1: 0x4000, - 0x11d2: 0x4000, 0x11d3: 0x4000, 0x11d4: 0x4000, 0x11d5: 0x4000, 0x11d6: 0x4000, 0x11d7: 0x4000, - 0x11d8: 0x4000, 0x11d9: 0x4000, 0x11da: 0x4000, 0x11db: 0x4000, 0x11dc: 0x4000, 0x11dd: 0x4000, - 0x11de: 0x4000, 0x11df: 0x4000, 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, - 0x11e4: 0x4000, 0x11e5: 0x4000, 0x11e6: 0x4000, 0x11e7: 0x4000, 0x11e8: 0x4000, 0x11e9: 0x4000, - 0x11ea: 0x4000, 0x11eb: 0x4000, 0x11ec: 0x4000, 0x11ed: 0x4000, 0x11ee: 0x4000, 0x11ef: 0x4000, - 0x11f0: 0x4000, 0x11f1: 0x4000, 0x11f2: 0x4000, 0x11f3: 0x4000, 0x11f4: 0x4000, 0x11f5: 0x4000, - 0x11f6: 0x4000, 0x11f7: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, - // Block 0x4a, offset 0x1280 
- 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, - 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, - 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, - 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, - 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, - 0x129e: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12d0: 0x4000, 0x12d1: 0x4000, - 0x12d2: 0x4000, - 0x12e4: 0x4000, 0x12e5: 0x4000, 0x12e6: 0x4000, 0x12e7: 0x4000, - 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, - 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, - 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, - // Block 0x4c, offset 0x1300 - 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, - 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, - 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, - 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, - 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, - 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, - 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, - 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, - 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, - 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1344: 0x4000, - // Block 0x4e, offset 0x1380 - 0x138f: 0x4000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, - 0x13d0: 0x2000, 0x13d1: 0x2000, - 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, - 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, - 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, - 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, - 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, - 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, - 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, - 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, - 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, - 0x141e: 0x2000, 0x141f: 0x2000, 
0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, - 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, - 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, - 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, - 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, - // Block 0x51, offset 0x1440 - 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, - 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, - 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, - 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, - 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, - 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, - 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, - 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, - 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, - 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, - 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, - 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, - 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, - 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, - 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, - 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 
0x4000, 0x1556: 0x4000, 0x1557: 0x4000, - 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, - 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, - 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, - 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, - 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, - 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f4: 0x4000, - 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, - 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, - // Block 0x59, offset 0x1640 - 0x1640: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, - 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 
0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, - 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, - 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, - 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, - 0x167c: 0x4000, 0x167d: 0x4000, 0x167e: 0x4000, 0x167f: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, - 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, - 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, - 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, - 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, - 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, - 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, - 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, - 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, - 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, - 0x16bc: 0x4000, 0x16bf: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16c0: 0x4000, 0x16c1: 0x4000, 0x16c2: 0x4000, 0x16c3: 0x4000, 0x16c4: 0x4000, 0x16c5: 0x4000, - 0x16c6: 0x4000, 0x16c7: 0x4000, 0x16c8: 0x4000, 0x16c9: 0x4000, 0x16ca: 0x4000, 0x16cb: 0x4000, - 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16cf: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, - 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, - 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, - 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, - 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, 0x16e8: 0x4000, 0x16e9: 0x4000, - 0x16ea: 0x4000, 0x16eb: 0x4000, 0x16ec: 0x4000, 0x16ed: 0x4000, 0x16ee: 0x4000, 0x16ef: 0x4000, - 0x16f0: 0x4000, 0x16f1: 0x4000, 0x16f2: 0x4000, 0x16f3: 0x4000, 0x16f4: 0x4000, 0x16f5: 0x4000, - 0x16f6: 0x4000, 0x16f7: 0x4000, 0x16f8: 0x4000, 0x16f9: 0x4000, 0x16fa: 0x4000, 0x16fb: 0x4000, - 0x16fc: 0x4000, 0x16fd: 0x4000, - // Block 0x5c, offset 0x1700 - 0x170b: 0x4000, - 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x1710: 0x4000, 0x1711: 0x4000, - 0x1712: 0x4000, 0x1713: 0x4000, 0x1714: 0x4000, 0x1715: 0x4000, 0x1716: 0x4000, 0x1717: 0x4000, - 0x1718: 0x4000, 0x1719: 0x4000, 0x171a: 0x4000, 0x171b: 0x4000, 0x171c: 0x4000, 0x171d: 0x4000, - 0x171e: 0x4000, 0x171f: 0x4000, 0x1720: 0x4000, 0x1721: 0x4000, 0x1722: 0x4000, 0x1723: 0x4000, - 0x1724: 0x4000, 0x1725: 0x4000, 0x1726: 0x4000, 0x1727: 0x4000, - 0x173a: 0x4000, - // Block 0x5d, offset 0x1740 - 0x1755: 0x4000, 0x1756: 0x4000, - 0x1764: 0x4000, - // Block 
0x5e, offset 0x1780 - 0x17bb: 0x4000, - 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, 0x17bf: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, - 0x17cc: 0x4000, 0x17cd: 0x4000, 0x17ce: 0x4000, 0x17cf: 0x4000, - // Block 0x60, offset 0x1800 - 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, - 0x180c: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, - 0x1812: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, - 0x182b: 0x4000, 0x182c: 0x4000, - 0x1834: 0x4000, 0x1835: 0x4000, - 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, - 0x183c: 0x4000, - // Block 0x61, offset 0x1840 - 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, - 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, - 0x186a: 0x4000, 0x186b: 0x4000, - // Block 0x62, offset 0x1880 - 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, - 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, - 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, - 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 0x4000, - 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, - 0x18aa: 0x4000, 0x18ab: 0x4000, 0x18ac: 0x4000, 0x18ad: 0x4000, 0x18ae: 0x4000, 0x18af: 0x4000, - 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, - 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, 0x18ba: 0x4000, - 0x18bc: 0x4000, 0x18bd: 0x4000, 0x18be: 0x4000, 0x18bf: 0x4000, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, 0x18c3: 0x4000, 0x18c4: 0x4000, 0x18c5: 0x4000, - 0x18c7: 0x4000, 0x18c8: 0x4000, 0x18c9: 0x4000, 0x18ca: 0x4000, 0x18cb: 0x4000, - 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, - 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, - 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, - 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, - 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, - 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, - 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, - 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, - 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, - // Block 0x64, offset 0x1900 - 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, - 0x1906: 0x4000, 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, 0x190b: 0x4000, - 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, - 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, - 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, - 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 
0x1923: 0x4000, - 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, - 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, - 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, - 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, - 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, - // Block 0x65, offset 0x1940 - 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, 0x1974: 0x4000, - 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, - // Block 0x66, offset 0x1980 - 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, 0x1983: 0x4000, 0x1984: 0x4000, 0x1985: 0x4000, - 0x1986: 0x4000, - 0x1990: 0x4000, 0x1991: 0x4000, - 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, 0x1996: 0x4000, 0x1997: 0x4000, - 0x1998: 0x4000, 0x1999: 0x4000, 0x199a: 0x4000, 0x199b: 0x4000, 0x199c: 0x4000, 0x199d: 0x4000, - 0x199e: 0x4000, 0x199f: 0x4000, 0x19a0: 0x4000, 0x19a1: 0x4000, 0x19a2: 0x4000, 0x19a3: 0x4000, - 0x19a4: 0x4000, 0x19a5: 0x4000, 0x19a6: 0x4000, 0x19a7: 0x4000, 0x19a8: 0x4000, - 0x19b0: 0x4000, 0x19b1: 0x4000, 0x19b2: 0x4000, 0x19b3: 0x4000, 0x19b4: 0x4000, 0x19b5: 0x4000, - 0x19b6: 0x4000, - // Block 0x67, offset 0x19c0 - 0x19c0: 0x4000, 0x19c1: 0x4000, 0x19c2: 0x4000, - 0x19d0: 0x4000, 0x19d1: 0x4000, - 0x19d2: 0x4000, 0x19d3: 0x4000, 0x19d4: 0x4000, 0x19d5: 0x4000, 0x19d6: 0x4000, - // Block 0x68, offset 0x1a00 - 0x1a00: 0x2000, 0x1a01: 0x2000, 0x1a02: 0x2000, 0x1a03: 0x2000, 0x1a04: 0x2000, 0x1a05: 0x2000, - 0x1a06: 0x2000, 0x1a07: 0x2000, 0x1a08: 0x2000, 0x1a09: 0x2000, 0x1a0a: 0x2000, 0x1a0b: 0x2000, - 0x1a0c: 0x2000, 0x1a0d: 0x2000, 0x1a0e: 0x2000, 0x1a0f: 0x2000, 0x1a10: 0x2000, 0x1a11: 0x2000, - 0x1a12: 0x2000, 0x1a13: 0x2000, 0x1a14: 0x2000, 0x1a15: 0x2000, 0x1a16: 0x2000, 0x1a17: 0x2000, - 0x1a18: 0x2000, 0x1a19: 0x2000, 0x1a1a: 0x2000, 0x1a1b: 0x2000, 0x1a1c: 0x2000, 0x1a1d: 0x2000, - 0x1a1e: 0x2000, 0x1a1f: 0x2000, 0x1a20: 0x2000, 0x1a21: 0x2000, 0x1a22: 0x2000, 0x1a23: 0x2000, - 0x1a24: 0x2000, 0x1a25: 0x2000, 0x1a26: 0x2000, 0x1a27: 0x2000, 0x1a28: 0x2000, 0x1a29: 0x2000, - 0x1a2a: 0x2000, 0x1a2b: 0x2000, 0x1a2c: 0x2000, 0x1a2d: 0x2000, 0x1a2e: 0x2000, 0x1a2f: 0x2000, - 0x1a30: 0x2000, 0x1a31: 0x2000, 0x1a32: 0x2000, 0x1a33: 0x2000, 0x1a34: 0x2000, 0x1a35: 0x2000, - 0x1a36: 0x2000, 0x1a37: 0x2000, 0x1a38: 0x2000, 0x1a39: 0x2000, 0x1a3a: 0x2000, 0x1a3b: 0x2000, - 0x1a3c: 0x2000, 0x1a3d: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. 
-var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x0e, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3a, 0x253: 0x3b, - 0x265: 0x3c, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3d, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3e, 0x339: 0x3f, 0x33c: 0x40, 0x33d: 0x41, 0x33e: 0x42, 0x33f: 0x43, - // Block 0xd, offset 0x340 - 0x37f: 0x44, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x45, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x0e, 0x3ac: 0x0e, 0x3ad: 0x0e, 0x3ae: 0x0e, 0x3af: 0x0e, - 0x3b0: 0x0e, 0x3b1: 0x0e, 0x3b2: 0x0e, 0x3b3: 0x46, 0x3b4: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 
0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, - 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, - // Block 0x10, offset 0x400 - 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, - 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, - 0x410: 0x56, 0x411: 0x57, 0x412: 0x0e, 0x413: 0x58, 0x414: 0x59, 0x415: 0x5a, 0x416: 0x5b, 0x417: 0x5c, - 0x418: 0x0e, 0x419: 0x5d, 0x41a: 0x0e, 0x41b: 0x5e, 0x41f: 0x5f, - 0x424: 0x60, 0x425: 0x61, 0x426: 0x0e, 0x427: 0x62, - 0x429: 0x63, 0x42a: 0x64, 0x42b: 0x65, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x66, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// <0 padding> -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. 
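The worked example in the comment above (E0 ^ A1 = 41) can be checked with a few lines of Go. The sketch below is illustrative only: decodeInverse is a hypothetical helper that assumes the entry layout described in the comment (a length byte, the mapped rune's leading UTF-8 bytes stored verbatim, and a final byte that is xor-ed with the last byte of the original rune's encoding); it is not part of the x/text/width package.

package main

import "fmt"

// decodeInverse applies an inverseData-style entry to the UTF-8 encoding of
// the original rune. Hypothetical helper for illustration, not the package's
// actual decoder.
func decodeInverse(entry [4]byte, orig []byte) []byte {
	n := int(entry[0])                      // byte length of the mapped rune's UTF-8 form
	out := make([]byte, n)
	copy(out, entry[1:1+n])                 // leading bytes are stored verbatim
	out[n-1] = entry[n] ^ orig[len(orig)-1] // xor recovers the final byte
	return out
}

func main() {
	// U+FF21 (fullwidth A) encodes as EF BC A1; with the entry
	// {0x01, 0xE0, 0x00, 0x00} quoted above, 0xE0 ^ 0xA1 = 0x41, i.e. "A".
	fmt.Printf("%s\n", decodeInverse([4]byte{0x01, 0xE0, 0x00, 0x00}, []byte{0xEF, 0xBC, 0xA1}))
}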
-var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 
0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 15448 bytes (15KiB) diff --git a/vendor/golang.org/x/text/width/tables9.0.0.go b/vendor/golang.org/x/text/width/tables9.0.0.go deleted file mode 100644 index b3db84f6f9b6..000000000000 --- a/vendor/golang.org/x/text/width/tables9.0.0.go +++ /dev/null @@ -1,1287 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build !go1.10 -// +build !go1.10 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "9.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14080 bytes (13.75 KiB). Checksum: 3b8aeb3dc03667a3. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 99 blocks, 6336 entries, 12672 bytes -// The third block is the zero block. 
-var widthValues = [6336]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // 
Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, - 0xdb1: 0x403e, 0xdb2: 
0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 0xea4: 0x4000, 
0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, - 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, - 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, - 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, - 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, - 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, - 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, - 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, - 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, - 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, - // Block 0x3e, offset 0xf80 - 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, - 0xf86: 0x4000, - // Block 0x3f, offset 
0xfc0 - 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, - 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, - 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, - 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, - 0xffc: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, - 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, - 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, - 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, - 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, - 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, - 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, - 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, - 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, - // Block 0x42, offset 0x1080 - 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, - 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, - 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, - 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, - 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, - 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, - 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, - 0x10aa: 0x4000, 0x10ab: 0x4000, - // Block 0x43, offset 0x10c0 - 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, - 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, - 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, - 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, - 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, - 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, - 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, - 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, - 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, - 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, - 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 
0x10ff: 0x9049, - // Block 0x44, offset 0x1100 - 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, - 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, - 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, - 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, - 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, - 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, - 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, - 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, - 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, - 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, - 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, - // Block 0x45, offset 0x1140 - 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, - 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, - 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, - 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, - 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, - 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, - 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, - 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, - 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, - 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, - 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, - // Block 0x46, offset 0x1180 - 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, - 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, - 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, - 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, - 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, - 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, - 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, - 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, - 0x11bd: 0x2000, - // Block 0x47, offset 0x11c0 - 0x11e0: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, - 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, - 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, - 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, - 0x122a: 
0x4000, 0x122b: 0x4000, 0x122c: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, - 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, - 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, - 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, - 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, - 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, - 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, - 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, - // Block 0x4a, offset 0x1280 - 0x1280: 0x4000, 0x1281: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12c4: 0x4000, - // Block 0x4c, offset 0x1300 - 0x130f: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1340: 0x2000, 0x1341: 0x2000, 0x1342: 0x2000, 0x1343: 0x2000, 0x1344: 0x2000, 0x1345: 0x2000, - 0x1346: 0x2000, 0x1347: 0x2000, 0x1348: 0x2000, 0x1349: 0x2000, 0x134a: 0x2000, - 0x1350: 0x2000, 0x1351: 0x2000, - 0x1352: 0x2000, 0x1353: 0x2000, 0x1354: 0x2000, 0x1355: 0x2000, 0x1356: 0x2000, 0x1357: 0x2000, - 0x1358: 0x2000, 0x1359: 0x2000, 0x135a: 0x2000, 0x135b: 0x2000, 0x135c: 0x2000, 0x135d: 0x2000, - 0x135e: 0x2000, 0x135f: 0x2000, 0x1360: 0x2000, 0x1361: 0x2000, 0x1362: 0x2000, 0x1363: 0x2000, - 0x1364: 0x2000, 0x1365: 0x2000, 0x1366: 0x2000, 0x1367: 0x2000, 0x1368: 0x2000, 0x1369: 0x2000, - 0x136a: 0x2000, 0x136b: 0x2000, 0x136c: 0x2000, 0x136d: 0x2000, - 0x1370: 0x2000, 0x1371: 0x2000, 0x1372: 0x2000, 0x1373: 0x2000, 0x1374: 0x2000, 0x1375: 0x2000, - 0x1376: 0x2000, 0x1377: 0x2000, 0x1378: 0x2000, 0x1379: 0x2000, 0x137a: 0x2000, 0x137b: 0x2000, - 0x137c: 0x2000, 0x137d: 0x2000, 0x137e: 0x2000, 0x137f: 0x2000, - // Block 0x4e, offset 0x1380 - 0x1380: 0x2000, 0x1381: 0x2000, 0x1382: 0x2000, 0x1383: 0x2000, 0x1384: 0x2000, 0x1385: 0x2000, - 0x1386: 0x2000, 0x1387: 0x2000, 0x1388: 0x2000, 0x1389: 0x2000, 0x138a: 0x2000, 0x138b: 0x2000, - 0x138c: 0x2000, 0x138d: 0x2000, 0x138e: 0x2000, 0x138f: 0x2000, 0x1390: 0x2000, 0x1391: 0x2000, - 0x1392: 0x2000, 0x1393: 0x2000, 0x1394: 0x2000, 0x1395: 0x2000, 0x1396: 0x2000, 0x1397: 0x2000, - 0x1398: 0x2000, 0x1399: 0x2000, 0x139a: 0x2000, 0x139b: 0x2000, 0x139c: 0x2000, 0x139d: 0x2000, - 0x139e: 0x2000, 0x139f: 0x2000, 0x13a0: 0x2000, 0x13a1: 0x2000, 0x13a2: 0x2000, 0x13a3: 0x2000, - 0x13a4: 0x2000, 0x13a5: 0x2000, 0x13a6: 0x2000, 0x13a7: 0x2000, 0x13a8: 0x2000, 0x13a9: 0x2000, - 0x13b0: 0x2000, 0x13b1: 0x2000, 0x13b2: 0x2000, 0x13b3: 0x2000, 0x13b4: 0x2000, 0x13b5: 0x2000, - 0x13b6: 0x2000, 0x13b7: 0x2000, 0x13b8: 0x2000, 0x13b9: 0x2000, 0x13ba: 0x2000, 0x13bb: 0x2000, - 0x13bc: 0x2000, 0x13bd: 0x2000, 0x13be: 0x2000, 0x13bf: 0x2000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, 0x13cb: 0x2000, - 0x13cc: 0x2000, 0x13cd: 0x2000, 0x13ce: 0x4000, 0x13cf: 0x2000, 0x13d0: 0x2000, 0x13d1: 0x4000, - 0x13d2: 0x4000, 0x13d3: 0x4000, 0x13d4: 0x4000, 0x13d5: 0x4000, 0x13d6: 0x4000, 0x13d7: 0x4000, - 0x13d8: 0x4000, 0x13d9: 0x4000, 0x13da: 0x4000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 
0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x4000, 0x1401: 0x4000, 0x1402: 0x4000, - 0x1410: 0x4000, 0x1411: 0x4000, - 0x1412: 0x4000, 0x1413: 0x4000, 0x1414: 0x4000, 0x1415: 0x4000, 0x1416: 0x4000, 0x1417: 0x4000, - 0x1418: 0x4000, 0x1419: 0x4000, 0x141a: 0x4000, 0x141b: 0x4000, 0x141c: 0x4000, 0x141d: 0x4000, - 0x141e: 0x4000, 0x141f: 0x4000, 0x1420: 0x4000, 0x1421: 0x4000, 0x1422: 0x4000, 0x1423: 0x4000, - 0x1424: 0x4000, 0x1425: 0x4000, 0x1426: 0x4000, 0x1427: 0x4000, 0x1428: 0x4000, 0x1429: 0x4000, - 0x142a: 0x4000, 0x142b: 0x4000, 0x142c: 0x4000, 0x142d: 0x4000, 0x142e: 0x4000, 0x142f: 0x4000, - 0x1430: 0x4000, 0x1431: 0x4000, 0x1432: 0x4000, 0x1433: 0x4000, 0x1434: 0x4000, 0x1435: 0x4000, - 0x1436: 0x4000, 0x1437: 0x4000, 0x1438: 0x4000, 0x1439: 0x4000, 0x143a: 0x4000, 0x143b: 0x4000, - // Block 0x51, offset 0x1440 - 0x1440: 0x4000, 0x1441: 0x4000, 0x1442: 0x4000, 0x1443: 0x4000, 0x1444: 0x4000, 0x1445: 0x4000, - 0x1446: 0x4000, 0x1447: 0x4000, 0x1448: 0x4000, - 0x1450: 0x4000, 0x1451: 0x4000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, 0x1483: 0x4000, 0x1484: 0x4000, 0x1485: 0x4000, - 0x1486: 0x4000, 0x1487: 0x4000, 0x1488: 0x4000, 0x1489: 0x4000, 0x148a: 0x4000, 0x148b: 0x4000, - 0x148c: 0x4000, 0x148d: 0x4000, 0x148e: 0x4000, 0x148f: 0x4000, 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, - 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - 0x14bc: 0x4000, 0x14bd: 0x4000, 0x14be: 0x4000, 0x14bf: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, 0x14c9: 0x4000, 0x14ca: 0x4000, 0x14cb: 0x4000, - 0x14cc: 0x4000, 0x14cd: 0x4000, 0x14ce: 0x4000, 0x14cf: 0x4000, 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000, - 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000, - 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000, - 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000, - 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000, - 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000, - 0x14fc: 0x4000, 0x14fe: 0x4000, 0x14ff: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, - 0x1520: 0x4000, 0x1521: 
0x4000, 0x1522: 0x4000, 0x1523: 0x4000, - 0x1524: 0x4000, 0x1525: 0x4000, 0x1526: 0x4000, 0x1527: 0x4000, 0x1528: 0x4000, 0x1529: 0x4000, - 0x152a: 0x4000, 0x152b: 0x4000, 0x152c: 0x4000, 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1536: 0x4000, 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, - 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, - 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1574: 0x4000, - 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000, - 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000, - 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000, - 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, 0x15d4: 0x4000, 0x15d5: 0x4000, 0x15d6: 0x4000, 0x15d7: 0x4000, - 0x15d8: 0x4000, 0x15d9: 0x4000, 0x15da: 0x4000, 0x15db: 0x4000, 0x15dc: 0x4000, 0x15dd: 0x4000, - 0x15de: 0x4000, 0x15df: 0x4000, 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000, - 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, 
- 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, - // Block 0x59, offset 0x1640 - 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, - 0x167a: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1695: 0x4000, 0x1696: 0x4000, - 0x16a4: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16fb: 0x4000, - 0x16fc: 0x4000, 0x16fd: 0x4000, 0x16fe: 0x4000, 0x16ff: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000, - 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000, - 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000, - // Block 0x5d, offset 0x1740 - 0x1740: 0x4000, 0x1741: 0x4000, 0x1742: 0x4000, 0x1743: 0x4000, 0x1744: 0x4000, 0x1745: 0x4000, - 0x174c: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000, - 0x1752: 0x4000, - 0x176b: 0x4000, 0x176c: 0x4000, - 0x1774: 0x4000, 0x1775: 0x4000, - 0x1776: 0x4000, - // Block 0x5e, offset 0x1780 - 0x1790: 0x4000, 0x1791: 0x4000, - 0x1792: 0x4000, 0x1793: 0x4000, 0x1794: 0x4000, 0x1795: 0x4000, 0x1796: 0x4000, 0x1797: 0x4000, - 0x1798: 0x4000, 0x1799: 0x4000, 0x179a: 0x4000, 0x179b: 0x4000, 0x179c: 0x4000, 0x179d: 0x4000, - 0x179e: 0x4000, 0x17a0: 0x4000, 0x17a1: 0x4000, 0x17a2: 0x4000, 0x17a3: 0x4000, - 0x17a4: 0x4000, 0x17a5: 0x4000, 0x17a6: 0x4000, 0x17a7: 0x4000, - 0x17b0: 0x4000, 0x17b3: 0x4000, 0x17b4: 0x4000, 0x17b5: 0x4000, - 0x17b6: 0x4000, 0x17b7: 0x4000, 0x17b8: 0x4000, 0x17b9: 0x4000, 0x17ba: 0x4000, 0x17bb: 0x4000, - 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, - 0x17d0: 0x4000, 0x17d1: 0x4000, - 0x17d2: 0x4000, 0x17d3: 0x4000, 0x17d4: 0x4000, 0x17d5: 0x4000, 0x17d6: 0x4000, 0x17d7: 0x4000, - 0x17d8: 0x4000, 0x17d9: 0x4000, 0x17da: 0x4000, 0x17db: 0x4000, 0x17dc: 0x4000, 0x17dd: 0x4000, - 0x17de: 0x4000, - // Block 0x60, offset 0x1800 - 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, - 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000, - 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, - // Block 0x61, offset 0x1840 
- 0x1840: 0x4000, - // Block 0x62, offset 0x1880 - 0x1880: 0x2000, 0x1881: 0x2000, 0x1882: 0x2000, 0x1883: 0x2000, 0x1884: 0x2000, 0x1885: 0x2000, - 0x1886: 0x2000, 0x1887: 0x2000, 0x1888: 0x2000, 0x1889: 0x2000, 0x188a: 0x2000, 0x188b: 0x2000, - 0x188c: 0x2000, 0x188d: 0x2000, 0x188e: 0x2000, 0x188f: 0x2000, 0x1890: 0x2000, 0x1891: 0x2000, - 0x1892: 0x2000, 0x1893: 0x2000, 0x1894: 0x2000, 0x1895: 0x2000, 0x1896: 0x2000, 0x1897: 0x2000, - 0x1898: 0x2000, 0x1899: 0x2000, 0x189a: 0x2000, 0x189b: 0x2000, 0x189c: 0x2000, 0x189d: 0x2000, - 0x189e: 0x2000, 0x189f: 0x2000, 0x18a0: 0x2000, 0x18a1: 0x2000, 0x18a2: 0x2000, 0x18a3: 0x2000, - 0x18a4: 0x2000, 0x18a5: 0x2000, 0x18a6: 0x2000, 0x18a7: 0x2000, 0x18a8: 0x2000, 0x18a9: 0x2000, - 0x18aa: 0x2000, 0x18ab: 0x2000, 0x18ac: 0x2000, 0x18ad: 0x2000, 0x18ae: 0x2000, 0x18af: 0x2000, - 0x18b0: 0x2000, 0x18b1: 0x2000, 0x18b2: 0x2000, 0x18b3: 0x2000, 0x18b4: 0x2000, 0x18b5: 0x2000, - 0x18b6: 0x2000, 0x18b7: 0x2000, 0x18b8: 0x2000, 0x18b9: 0x2000, 0x18ba: 0x2000, 0x18bb: 0x2000, - 0x18bc: 0x2000, 0x18bd: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. -var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 
0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, - 0x265: 0x3d, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 
0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, - // Block 0xd, offset 0x340 - 0x37f: 0x45, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x48, - // Block 0x10, offset 0x400 - 0x400: 0x49, 0x403: 0x4a, 0x404: 0x4b, 0x405: 0x4c, 0x406: 0x4d, - 0x408: 0x4e, 0x409: 0x4f, 0x40c: 0x50, 0x40d: 0x51, 0x40e: 0x52, 0x40f: 0x53, - 0x410: 0x3a, 0x411: 0x54, 0x412: 0x0e, 0x413: 0x55, 0x414: 0x56, 0x415: 0x57, 0x416: 0x58, 0x417: 0x59, - 0x418: 0x0e, 0x419: 0x5a, 0x41a: 0x0e, 0x41b: 0x5b, - 0x424: 0x5c, 0x425: 0x5d, 0x426: 0x5e, 0x427: 0x5f, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x60, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// <0 padding> -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding 
of the original rune. Mappings often have the following -// pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. -var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 
0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 14680 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/transform.go b/vendor/golang.org/x/text/width/transform.go deleted file mode 100644 index 0049f700a2f6..000000000000 --- a/vendor/golang.org/x/text/width/transform.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package width - -import ( - "unicode/utf8" - - "golang.org/x/text/transform" -) - -type foldTransform struct { - transform.NopResetter -} - -func (foldTransform) Span(src []byte, atEOF bool) (n int, err error) { - for n < len(src) { - if src[n] < utf8.RuneSelf { - // ASCII fast path. - for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { - } - continue - } - v, size := trie.lookup(src[n:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - err = transform.ErrShortSrc - } else { - n = len(src) - } - break - } - if elem(v)&tagNeedsFold != 0 { - err = transform.ErrEndOfSpan - break - } - n += size - } - return n, err -} - -func (foldTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - if src[nSrc] < utf8.RuneSelf { - // ASCII fast path. 
- start, end := nSrc, len(src) - if d := len(dst) - nDst; d < end-start { - end = nSrc + d - } - for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { - } - n := copy(dst[nDst:], src[start:nSrc]) - if nDst += n; nDst == len(dst) { - nSrc = start + n - if nSrc == len(src) { - return nDst, nSrc, nil - } - if src[nSrc] < utf8.RuneSelf { - return nDst, nSrc, transform.ErrShortDst - } - } - continue - } - v, size := trie.lookup(src[nSrc:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - return nDst, nSrc, transform.ErrShortSrc - } - size = 1 // gobble 1 byte - } - if elem(v)&tagNeedsFold == 0 { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += size - } else { - data := inverseData[byte(v)] - if len(dst)-nDst < int(data[0]) { - return nDst, nSrc, transform.ErrShortDst - } - i := 1 - for end := int(data[0]); i < end; i++ { - dst[nDst] = data[i] - nDst++ - } - dst[nDst] = data[i] ^ src[nSrc+size-1] - nDst++ - } - nSrc += size - } - return nDst, nSrc, nil -} - -type narrowTransform struct { - transform.NopResetter -} - -func (narrowTransform) Span(src []byte, atEOF bool) (n int, err error) { - for n < len(src) { - if src[n] < utf8.RuneSelf { - // ASCII fast path. - for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { - } - continue - } - v, size := trie.lookup(src[n:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - err = transform.ErrShortSrc - } else { - n = len(src) - } - break - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { - } else { - err = transform.ErrEndOfSpan - break - } - n += size - } - return n, err -} - -func (narrowTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - if src[nSrc] < utf8.RuneSelf { - // ASCII fast path. - start, end := nSrc, len(src) - if d := len(dst) - nDst; d < end-start { - end = nSrc + d - } - for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { - } - n := copy(dst[nDst:], src[start:nSrc]) - if nDst += n; nDst == len(dst) { - nSrc = start + n - if nSrc == len(src) { - return nDst, nSrc, nil - } - if src[nSrc] < utf8.RuneSelf { - return nDst, nSrc, transform.ErrShortDst - } - } - continue - } - v, size := trie.lookup(src[nSrc:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - return nDst, nSrc, transform.ErrShortSrc - } - size = 1 // gobble 1 byte - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += size - } else { - data := inverseData[byte(v)] - if len(dst)-nDst < int(data[0]) { - return nDst, nSrc, transform.ErrShortDst - } - i := 1 - for end := int(data[0]); i < end; i++ { - dst[nDst] = data[i] - nDst++ - } - dst[nDst] = data[i] ^ src[nSrc+size-1] - nDst++ - } - nSrc += size - } - return nDst, nSrc, nil -} - -type wideTransform struct { - transform.NopResetter -} - -func (wideTransform) Span(src []byte, atEOF bool) (n int, err error) { - for n < len(src) { - // TODO: Consider ASCII fast path. Special-casing ASCII handling can - // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably - // not enough to warrant the extra code and complexity. 
- v, size := trie.lookup(src[n:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - err = transform.ErrShortSrc - } else { - n = len(src) - } - break - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { - } else { - err = transform.ErrEndOfSpan - break - } - n += size - } - return n, err -} - -func (wideTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - // TODO: Consider ASCII fast path. Special-casing ASCII handling can - // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably - // not enough to warrant the extra code and complexity. - v, size := trie.lookup(src[nSrc:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - return nDst, nSrc, transform.ErrShortSrc - } - size = 1 // gobble 1 byte - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += size - } else { - data := inverseData[byte(v)] - if len(dst)-nDst < int(data[0]) { - return nDst, nSrc, transform.ErrShortDst - } - i := 1 - for end := int(data[0]); i < end; i++ { - dst[nDst] = data[i] - nDst++ - } - dst[nDst] = data[i] ^ src[nSrc+size-1] - nDst++ - } - nSrc += size - } - return nDst, nSrc, nil -} diff --git a/vendor/golang.org/x/text/width/trieval.go b/vendor/golang.org/x/text/width/trieval.go deleted file mode 100644 index ca8e45fd19ef..000000000000 --- a/vendor/golang.org/x/text/width/trieval.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -package width - -// elem is an entry of the width trie. The high byte is used to encode the type -// of the rune. The low byte is used to store the index to a mapping entry in -// the inverseData array. -type elem uint16 - -const ( - tagNeutral elem = iota << typeShift - tagAmbiguous - tagWide - tagNarrow - tagFullwidth - tagHalfwidth -) - -const ( - numTypeBits = 3 - typeShift = 16 - numTypeBits - - // tagNeedsFold is true for all fullwidth and halfwidth runes except for - // the Won sign U+20A9. - tagNeedsFold = 0x1000 - - // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide - // variant. - wonSign rune = 0x20A9 -) diff --git a/vendor/golang.org/x/text/width/width.go b/vendor/golang.org/x/text/width/width.go deleted file mode 100644 index 29c7509be7ca..000000000000 --- a/vendor/golang.org/x/text/width/width.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate stringer -type=Kind -//go:generate go run gen.go gen_common.go gen_trieval.go - -// Package width provides functionality for handling different widths in text. -// -// Wide characters behave like ideographs; they tend to allow line breaks after -// each character and remain upright in vertical text layout. Narrow characters -// are kept together in words or runs that are rotated sideways in vertical text -// layout. -// -// For more information, see https://unicode.org/reports/tr11/. -package width // import "golang.org/x/text/width" - -import ( - "unicode/utf8" - - "golang.org/x/text/transform" -) - -// TODO -// 1) Reduce table size by compressing blocks. -// 2) API proposition for computing display length -// (approximation, fixed pitch only). -// 3) Implement display length. 
- -// Kind indicates the type of width property as defined in https://unicode.org/reports/tr11/. -type Kind int - -const ( - // Neutral characters do not occur in legacy East Asian character sets. - Neutral Kind = iota - - // EastAsianAmbiguous characters that can be sometimes wide and sometimes - // narrow and require additional information not contained in the character - // code to further resolve their width. - EastAsianAmbiguous - - // EastAsianWide characters are wide in its usual form. They occur only in - // the context of East Asian typography. These runes may have explicit - // halfwidth counterparts. - EastAsianWide - - // EastAsianNarrow characters are narrow in its usual form. They often have - // fullwidth counterparts. - EastAsianNarrow - - // Note: there exist Narrow runes that do not have fullwidth or wide - // counterparts, despite what the definition says (e.g. U+27E6). - - // EastAsianFullwidth characters have a compatibility decompositions of type - // wide that map to a narrow counterpart. - EastAsianFullwidth - - // EastAsianHalfwidth characters have a compatibility decomposition of type - // narrow that map to a wide or ambiguous counterpart, plus U+20A9 ₩ WON - // SIGN. - EastAsianHalfwidth - - // Note: there exist runes that have a halfwidth counterparts but that are - // classified as Ambiguous, rather than wide (e.g. U+2190). -) - -// TODO: the generated tries need to return size 1 for invalid runes for the -// width to be computed correctly (each byte should render width 1) - -var trie = newWidthTrie(0) - -// Lookup reports the Properties of the first rune in b and the number of bytes -// of its UTF-8 encoding. -func Lookup(b []byte) (p Properties, size int) { - v, sz := trie.lookup(b) - return Properties{elem(v), b[sz-1]}, sz -} - -// LookupString reports the Properties of the first rune in s and the number of -// bytes of its UTF-8 encoding. -func LookupString(s string) (p Properties, size int) { - v, sz := trie.lookupString(s) - return Properties{elem(v), s[sz-1]}, sz -} - -// LookupRune reports the Properties of rune r. -func LookupRune(r rune) Properties { - var buf [4]byte - n := utf8.EncodeRune(buf[:], r) - v, _ := trie.lookup(buf[:n]) - last := byte(r) - if r >= utf8.RuneSelf { - last = 0x80 + byte(r&0x3f) - } - return Properties{elem(v), last} -} - -// Properties provides access to width properties of a rune. -type Properties struct { - elem elem - last byte -} - -func (e elem) kind() Kind { - return Kind(e >> typeShift) -} - -// Kind returns the Kind of a rune as defined in Unicode TR #11. -// See https://unicode.org/reports/tr11/ for more details. -func (p Properties) Kind() Kind { - return p.elem.kind() -} - -// Folded returns the folded variant of a rune or 0 if the rune is canonical. -func (p Properties) Folded() rune { - if p.elem&tagNeedsFold != 0 { - buf := inverseData[byte(p.elem)] - buf[buf[0]] ^= p.last - r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) - return r - } - return 0 -} - -// Narrow returns the narrow variant of a rune or 0 if the rune is already -// narrow or doesn't have a narrow variant. -func (p Properties) Narrow() rune { - if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianFullwidth || k == EastAsianWide || k == EastAsianAmbiguous) { - buf := inverseData[byte(p.elem)] - buf[buf[0]] ^= p.last - r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) - return r - } - return 0 -} - -// Wide returns the wide variant of a rune or 0 if the rune is already -// wide or doesn't have a wide variant. 
-func (p Properties) Wide() rune { - if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianHalfwidth || k == EastAsianNarrow) { - buf := inverseData[byte(p.elem)] - buf[buf[0]] ^= p.last - r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) - return r - } - return 0 -} - -// TODO for Properties: -// - Add Fullwidth/Halfwidth or Inverted methods for computing variants -// mapping. -// - Add width information (including information on non-spacing runes). - -// Transformer implements the transform.Transformer interface. -type Transformer struct { - t transform.SpanningTransformer -} - -// Reset implements the transform.Transformer interface. -func (t Transformer) Reset() { t.t.Reset() } - -// Transform implements the transform.Transformer interface. -func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - return t.t.Transform(dst, src, atEOF) -} - -// Span implements the transform.SpanningTransformer interface. -func (t Transformer) Span(src []byte, atEOF bool) (n int, err error) { - return t.t.Span(src, atEOF) -} - -// Bytes returns a new byte slice with the result of applying t to b. -func (t Transformer) Bytes(b []byte) []byte { - b, _, _ = transform.Bytes(t, b) - return b -} - -// String returns a string with the result of applying t to s. -func (t Transformer) String(s string) string { - s, _, _ = transform.String(t, s) - return s -} - -var ( - // Fold is a transform that maps all runes to their canonical width. - // - // Note that the NFKC and NFKD transforms in golang.org/x/text/unicode/norm - // provide a more generic folding mechanism. - Fold Transformer = Transformer{foldTransform{}} - - // Widen is a transform that maps runes to their wide variant, if - // available. - Widen Transformer = Transformer{wideTransform{}} - - // Narrow is a transform that maps runes to their narrow variant, if - // available. - Narrow Transformer = Transformer{narrowTransform{}} -) - -// TODO: Consider the following options: -// - Treat Ambiguous runes that have a halfwidth counterpart as wide, or some -// generalized variant of this. -// - Consider a wide Won character to be the default width (or some generalized -// variant of this). -// - Filter the set of characters that gets converted (the preferred approach is -// to allow applying filters to transforms). diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vendor/golang.org/x/time/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vendor/golang.org/x/time/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index b0b982e9c6e6..8f7c29f156aa 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -80,6 +80,19 @@ func (lim *Limiter) Burst() int { return lim.burst } +// TokensAt returns the number of tokens available at time t. 
+func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, _, tokens := lim.advance(t) // does not mutute lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. +func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + // NewLimiter returns a new Limiter that allows events up to rate r and permits // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { @@ -89,16 +102,16 @@ func NewLimiter(r Limit, b int) *Limiter { } } -// Allow is shorthand for AllowN(time.Now(), 1). +// Allow reports whether an event may happen now. func (lim *Limiter) Allow() bool { return lim.AllowN(time.Now(), 1) } -// AllowN reports whether n events may happen at time now. +// AllowN reports whether n events may happen at time t. // Use this method if you intend to drop / skip events that exceed the rate limit. // Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(now time.Time, n int) bool { - return lim.reserveN(now, n, 0).ok +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok } // A Reservation holds information about events that are permitted by a Limiter to happen after a delay. @@ -125,17 +138,17 @@ func (r *Reservation) Delay() time.Duration { } // InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(1<<63 - 1) +const InfDuration = time.Duration(math.MaxInt64) // DelayFrom returns the duration for which the reservation holder must wait // before taking the reserved action. Zero duration means act immediately. // InfDuration means the limiter cannot grant the tokens requested in this // Reservation within the maximum wait time. -func (r *Reservation) DelayFrom(now time.Time) time.Duration { +func (r *Reservation) DelayFrom(t time.Time) time.Duration { if !r.ok { return InfDuration } - delay := r.timeToAct.Sub(now) + delay := r.timeToAct.Sub(t) if delay < 0 { return 0 } @@ -150,7 +163,7 @@ func (r *Reservation) Cancel() { // CancelAt indicates that the reservation holder will not perform the reserved action // and reverses the effects of this Reservation on the rate limit as much as possible, // considering that other reservations may have already been made. -func (r *Reservation) CancelAt(now time.Time) { +func (r *Reservation) CancelAt(t time.Time) { if !r.ok { return } @@ -158,7 +171,7 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.mu.Lock() defer r.lim.mu.Unlock() - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { return } @@ -170,18 +183,18 @@ func (r *Reservation) CancelAt(now time.Time) { return } // advance time to now - now, _, tokens := r.lim.advance(now) + t, _, tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { tokens = burst } // update state - r.lim.last = now + r.lim.last = t r.lim.tokens = tokens if r.timeToAct == r.lim.lastEvent { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { + if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent } } @@ -196,18 +209,20 @@ func (lim *Limiter) Reserve() *Reservation { // The Limiter takes this Reservation into account when allowing future events. // The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. 
// Usage example: -// r := lim.ReserveN(time.Now(), 1) -// if !r.OK() { -// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? -// return -// } -// time.Sleep(r.Delay()) -// Act() +// +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// // Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. // If you need to respect a deadline or cancel the delay, use Wait instead. // To drop or skip events exceeding rate limit, use Allow instead. -func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) return &r } @@ -221,6 +236,18 @@ func (lim *Limiter) Wait(ctx context.Context) (err error) { // canceled, or the expected wait time exceeds the Context's Deadline. // The burst limit is ignored if the rate limit is Inf. func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. +func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { lim.mu.Lock() burst := lim.burst limit := lim.limit @@ -236,25 +263,25 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { default: } // Determine wait limit - now := time.Now() waitLimit := InfDuration if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) + waitLimit = deadline.Sub(t) } // Reserve - r := lim.reserveN(now, n, waitLimit) + r := lim.reserveN(t, n, waitLimit) if !r.ok { return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) } // Wait if necessary - delay := r.DelayFrom(now) + delay := r.DelayFrom(t) if delay == 0 { return nil } - t := time.NewTimer(delay) - defer t.Stop() + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing select { - case <-t.C: + case <-ch: // We can proceed. return nil case <-ctx.Done(): @@ -273,13 +300,13 @@ func (lim *Limiter) SetLimit(newLimit Limit) { // SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated // or underutilized by those which reserved (using Reserve or Wait) but did not yet act // before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, _, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.limit = newLimit } @@ -290,13 +317,13 @@ func (lim *Limiter) SetBurst(newBurst int) { } // SetBurstAt sets a new burst size for the limiter. 
-func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, _, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.burst = newBurst } @@ -304,7 +331,7 @@ func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. -func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { lim.mu.Lock() defer lim.mu.Unlock() @@ -313,7 +340,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: true, lim: lim, tokens: n, - timeToAct: now, + timeToAct: t, } } else if lim.limit == 0 { var ok bool @@ -325,11 +352,11 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: ok, lim: lim, tokens: lim.burst, - timeToAct: now, + timeToAct: t, } } - now, last, tokens := lim.advance(now) + t, last, tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. tokens -= float64(n) @@ -351,12 +378,12 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio } if ok { r.tokens = n - r.timeToAct = now.Add(waitDuration) + r.timeToAct = t.Add(waitDuration) } // Update state if ok { - lim.last = now + lim.last = t lim.tokens = tokens lim.lastEvent = r.timeToAct } else { @@ -369,20 +396,20 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newT time.Time, newLast time.Time, newTokens float64) { last := lim.last - if now.Before(last) { - last = now + if t.Before(last) { + last = t } // Calculate the new number of tokens, due to time that passed. - elapsed := now.Sub(last) + elapsed := t.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens + return t, last, tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vendor/golang.org/x/tools/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vendor/golang.org/x/tools/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. 
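For readers skimming the golang.org/x/time/rate hunks above, the following is a minimal, hypothetical sketch of how the updated Limiter API is typically driven: Allow for drop-on-limit, WaitN for blocking within a context deadline, and the newly added Tokens/TokensAt accessors. The limiter settings and printed output are illustrative assumptions, not part of the vendored code.

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Refill at 2 tokens per second, allow bursts of up to 5 events.
	lim := rate.NewLimiter(rate.Limit(2), 5)

	// Non-blocking check: drop the event if no token is available right now.
	if lim.Allow() {
		fmt.Println("event allowed immediately")
	}

	// Blocking wait: honors the context deadline and returns an error if the
	// required wait would exceed it.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if err := lim.WaitN(ctx, 2); err != nil {
		fmt.Println("rate limited:", err)
		return
	}

	// New in this version of the package: inspect available tokens without
	// reserving any.
	fmt.Printf("tokens now: %.2f\n", lim.Tokens())
	fmt.Printf("tokens in 1s: %.2f\n", lim.TokensAt(time.Now().Add(time.Second)))
}

Reserve (not shown) is the third usage style documented in the hunk above: it returns a Reservation whose Delay the caller sleeps on before acting.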
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index cec819d64106..2ed25a750248 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -17,32 +17,47 @@ // developer tools, which will then be able to consume both Go 1.7 and // Go 1.8 export data files, so they will work before and after the // Go update. (See discussion at https://golang.org/issue/15651.) -// package gcexportdata // import "golang.org/x/tools/go/gcexportdata" import ( "bufio" "bytes" + "encoding/json" "fmt" "go/token" "go/types" "io" "io/ioutil" + "os/exec" "golang.org/x/tools/go/internal/gcimporter" ) // Find returns the name of an object (.o) or archive (.a) file // containing type information for the specified import path, -// using the workspace layout conventions of go/build. +// using the go command. // If no file was found, an empty filename is returned. // // A relative srcDir is interpreted relative to the current working directory. // // Find also returns the package's resolved (canonical) import path, // reflecting the effects of srcDir and vendoring on importPath. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. func Find(importPath, srcDir string) (filename, path string) { - return gcimporter.FindPkg(importPath, srcDir) + cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) + cmd.Dir = srcDir + out, err := cmd.CombinedOutput() + if err != nil { + return "", "" + } + var data struct { + ImportPath string + Export string + } + json.Unmarshal(out, &data) + return data.Export, data.ImportPath } // NewReader returns a reader for the export data section of an object @@ -101,13 +116,29 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // The indexed export format starts with an 'i'; the older // binary export format starts with a 'c', 'd', or 'v' // (from "version"). Select appropriate importer. - if len(data) > 0 && data[0] == 'i' { - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err - } + if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + + case 'v', 'c', 'd': + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) - return pkg, err + case 'u': + _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) + } + } + return nil, fmt.Errorf("empty export data for %s", path) } // Write writes encoded type information for the specified package to out. diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go index efe221e7e142..37a7247e2686 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/importer.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -23,6 +23,8 @@ import ( // or to control the FileSet or access the imports map populated during // package loading. // +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. 
func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { return importer{fset, imports} } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go index 0a3cdb9a3b81..196cb3f9b41a 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -35,16 +35,18 @@ import ( const debugFormat = false // default: false // Current export format version. Increase with each format change. +// // Note: The latest binary (non-indexed) export format is at version 6. -// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. -// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE -// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMEMTED HERE -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding +// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. +// +// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE +// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE +// 4: type name objects support type aliases, uses aliasTag +// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) +// 2: removed unused bool in ODCL export (compiler only) +// 1: header format change (more regular), export package for _ struct fields +// 0: Go1.7 encoding const exportVersion = 4 // trackAllTypes enables cycle tracking for all types, not just named diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go index 3ab66830d747..e96c39600d16 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -45,7 +45,6 @@ var pkgExts = [...]string{".a", ".o"} // the build.Default build.Context). A relative srcDir is interpreted // relative to the current working directory. // If no file was found, an empty filename is returned. -// func FindPkg(path, srcDir string) (filename, id string) { if path == "" { return @@ -109,7 +108,6 @@ func FindPkg(path, srcDir string) (filename, id string) { // If packages[id] contains the completely imported package, that package // can be used directly, and there is no need to call this function (but // there is also no harm but for extra time used). -// func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { // support for parser error handling defer func() { @@ -133,7 +131,6 @@ func ImportData(packages map[string]*types.Package, filename, id string, data io // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. 
-// func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser var filename, id string @@ -184,8 +181,9 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func defer rc.Close() var hdr string + var size int64 buf := bufio.NewReader(rc) - if hdr, _, err = FindExportData(buf); err != nil { + if hdr, size, err = FindExportData(buf); err != nil { return } @@ -213,10 +211,27 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // The indexed export format starts with an 'i'; the older // binary export format starts with a 'c', 'd', or 'v' // (from "version"). Select appropriate importer. - if len(data) > 0 && data[0] == 'i' { - _, pkg, err = IImportData(fset, packages, data[1:], id) - } else { - _, pkg, err = BImportData(fset, packages, data, id) + if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'v', 'c', 'd': + _, pkg, err := BImportData(fset, packages, data, id) + return pkg, err + + case 'u': + _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) + } } default: @@ -348,8 +363,9 @@ func (p *parser) expectKeyword(keyword string) { // ---------------------------------------------------------------------------- // Qualified and unqualified names -// PackageId = string_lit . +// parsePackageID parses a PackageId: // +// PackageId = string_lit . func (p *parser) parsePackageID() string { id, err := strconv.Unquote(p.expect(scanner.String)) if err != nil { @@ -363,13 +379,16 @@ func (p *parser) parsePackageID() string { return id } -// PackageName = ident . +// parsePackageName parse a PackageName: // +// PackageName = ident . func (p *parser) parsePackageName() string { return p.expect(scanner.Ident) } -// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . +// parseDotIdent parses a dotIdentifier: +// +// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . func (p *parser) parseDotIdent() string { ident := "" if p.tok != scanner.Int { @@ -386,8 +405,9 @@ func (p *parser) parseDotIdent() string { return ident } -// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . +// parseQualifiedName parses a QualifiedName: // +// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . func (p *parser) parseQualifiedName() (id, name string) { p.expect('@') id = p.parsePackageID() @@ -410,7 +430,6 @@ func (p *parser) parseQualifiedName() (id, name string) { // id identifies a package, usually by a canonical package path like // "encoding/json" but possibly by a non-canonical import path like // "./json". -// func (p *parser) getPkg(id, name string) *types.Package { // package unsafe is not in the packages maps - handle explicitly if id == "unsafe" { @@ -446,7 +465,6 @@ func (p *parser) getPkg(id, name string) *types.Package { // parseExportedName is like parseQualifiedName, but // the package id is resolved to an imported *types.Package. 
-// func (p *parser) parseExportedName() (pkg *types.Package, name string) { id, name := p.parseQualifiedName() pkg = p.getPkg(id, "") @@ -456,8 +474,9 @@ func (p *parser) parseExportedName() (pkg *types.Package, name string) { // ---------------------------------------------------------------------------- // Types -// BasicType = identifier . +// parseBasicType parses a BasicType: // +// BasicType = identifier . func (p *parser) parseBasicType() types.Type { id := p.expect(scanner.Ident) obj := types.Universe.Lookup(id) @@ -468,8 +487,9 @@ func (p *parser) parseBasicType() types.Type { return nil } -// ArrayType = "[" int_lit "]" Type . +// parseArrayType parses an ArrayType: // +// ArrayType = "[" int_lit "]" Type . func (p *parser) parseArrayType(parent *types.Package) types.Type { // "[" already consumed and lookahead known not to be "]" lit := p.expect(scanner.Int) @@ -482,8 +502,9 @@ func (p *parser) parseArrayType(parent *types.Package) types.Type { return types.NewArray(elem, n) } -// MapType = "map" "[" Type "]" Type . +// parseMapType parses a MapType: // +// MapType = "map" "[" Type "]" Type . func (p *parser) parseMapType(parent *types.Package) types.Type { p.expectKeyword("map") p.expect('[') @@ -493,7 +514,9 @@ func (p *parser) parseMapType(parent *types.Package) types.Type { return types.NewMap(key, elem) } -// Name = identifier | "?" | QualifiedName . +// parseName parses a Name: +// +// Name = identifier | "?" | QualifiedName . // // For unqualified and anonymous names, the returned package is the parent // package unless parent == nil, in which case the returned package is the @@ -505,7 +528,6 @@ func (p *parser) parseMapType(parent *types.Package) types.Type { // it doesn't exist yet) unless materializePkg is set (which creates an // unnamed package with valid package path). In the latter case, a // subsequent import clause is expected to provide a name for the package. -// func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { pkg = parent if pkg == nil { @@ -539,8 +561,9 @@ func deref(typ types.Type) types.Type { return typ } -// Field = Name Type [ string_lit ] . +// parseField parses a Field: // +// Field = Name Type [ string_lit ] . func (p *parser) parseField(parent *types.Package) (*types.Var, string) { pkg, name := p.parseName(parent, true) @@ -583,9 +606,10 @@ func (p *parser) parseField(parent *types.Package) (*types.Var, string) { return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag } -// StructType = "struct" "{" [ FieldList ] "}" . -// FieldList = Field { ";" Field } . +// parseStructType parses a StructType: // +// StructType = "struct" "{" [ FieldList ] "}" . +// FieldList = Field { ";" Field } . func (p *parser) parseStructType(parent *types.Package) types.Type { var fields []*types.Var var tags []string @@ -610,8 +634,9 @@ func (p *parser) parseStructType(parent *types.Package) types.Type { return types.NewStruct(fields, tags) } -// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . +// parseParameter parses a Parameter: // +// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { _, name := p.parseName(nil, false) // remove gc-specific parameter numbering @@ -635,9 +660,10 @@ func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { return } -// Parameters = "(" [ ParameterList ] ")" . -// ParameterList = { Parameter "," } Parameter . 
+// parseParameters parses a Parameters: // +// Parameters = "(" [ ParameterList ] ")" . +// ParameterList = { Parameter "," } Parameter . func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { p.expect('(') for p.tok != ')' && p.tok != scanner.EOF { @@ -658,9 +684,10 @@ func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { return } -// Signature = Parameters [ Result ] . -// Result = Type | Parameters . +// parseSignature parses a Signature: // +// Signature = Parameters [ Result ] . +// Result = Type | Parameters . func (p *parser) parseSignature(recv *types.Var) *types.Signature { params, isVariadic := p.parseParameters() @@ -677,14 +704,15 @@ func (p *parser) parseSignature(recv *types.Var) *types.Signature { return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) } -// InterfaceType = "interface" "{" [ MethodList ] "}" . -// MethodList = Method { ";" Method } . -// Method = Name Signature . +// parseInterfaceType parses an InterfaceType: +// +// InterfaceType = "interface" "{" [ MethodList ] "}" . +// MethodList = Method { ";" Method } . +// Method = Name Signature . // // The methods of embedded interfaces are always "inlined" // by the compiler and thus embedded interfaces are never // visible in the export data. -// func (p *parser) parseInterfaceType(parent *types.Package) types.Type { var methods []*types.Func @@ -705,8 +733,9 @@ func (p *parser) parseInterfaceType(parent *types.Package) types.Type { return newInterface(methods, nil).Complete() } -// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . +// parseChanType parses a ChanType: // +// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . func (p *parser) parseChanType(parent *types.Package) types.Type { dir := types.SendRecv if p.tok == scanner.Ident { @@ -724,17 +753,18 @@ func (p *parser) parseChanType(parent *types.Package) types.Type { return types.NewChan(dir, elem) } -// Type = -// BasicType | TypeName | ArrayType | SliceType | StructType | -// PointerType | FuncType | InterfaceType | MapType | ChanType | -// "(" Type ")" . +// parseType parses a Type: // -// BasicType = ident . -// TypeName = ExportedName . -// SliceType = "[" "]" Type . -// PointerType = "*" Type . -// FuncType = "func" Signature . +// Type = +// BasicType | TypeName | ArrayType | SliceType | StructType | +// PointerType | FuncType | InterfaceType | MapType | ChanType | +// "(" Type ")" . // +// BasicType = ident . +// TypeName = ExportedName . +// SliceType = "[" "]" Type . +// PointerType = "*" Type . +// FuncType = "func" Signature . func (p *parser) parseType(parent *types.Package) types.Type { switch p.tok { case scanner.Ident: @@ -786,16 +816,18 @@ func (p *parser) parseType(parent *types.Package) types.Type { // ---------------------------------------------------------------------------- // Declarations -// ImportDecl = "import" PackageName PackageId . +// parseImportDecl parses an ImportDecl: // +// ImportDecl = "import" PackageName PackageId . func (p *parser) parseImportDecl() { p.expectKeyword("import") name := p.parsePackageName() p.getPkg(p.parsePackageID(), name) } -// int_lit = [ "+" | "-" ] { "0" ... "9" } . +// parseInt parses an int_lit: // +// int_lit = [ "+" | "-" ] { "0" ... "9" } . func (p *parser) parseInt() string { s := "" switch p.tok { @@ -808,8 +840,9 @@ func (p *parser) parseInt() string { return s + p.expect(scanner.Int) } -// number = int_lit [ "p" int_lit ] . +// parseNumber parses a number: // +// number = int_lit [ "p" int_lit ] . 
func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { // mantissa mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) @@ -844,13 +877,14 @@ func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { return } -// ConstDecl = "const" ExportedName [ Type ] "=" Literal . -// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . -// bool_lit = "true" | "false" . -// complex_lit = "(" float_lit "+" float_lit "i" ")" . -// rune_lit = "(" int_lit "+" int_lit ")" . -// string_lit = `"` { unicode_char } `"` . +// parseConstDecl parses a ConstDecl: // +// ConstDecl = "const" ExportedName [ Type ] "=" Literal . +// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . +// bool_lit = "true" | "false" . +// complex_lit = "(" float_lit "+" float_lit "i" ")" . +// rune_lit = "(" int_lit "+" int_lit ")" . +// string_lit = `"` { unicode_char } `"` . func (p *parser) parseConstDecl() { p.expectKeyword("const") pkg, name := p.parseExportedName() @@ -920,8 +954,9 @@ func (p *parser) parseConstDecl() { pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) } -// TypeDecl = "type" ExportedName Type . +// parseTypeDecl parses a TypeDecl: // +// TypeDecl = "type" ExportedName Type . func (p *parser) parseTypeDecl() { p.expectKeyword("type") pkg, name := p.parseExportedName() @@ -939,8 +974,9 @@ func (p *parser) parseTypeDecl() { } } -// VarDecl = "var" ExportedName Type . +// parseVarDecl parses a VarDecl: // +// VarDecl = "var" ExportedName Type . func (p *parser) parseVarDecl() { p.expectKeyword("var") pkg, name := p.parseExportedName() @@ -948,9 +984,10 @@ func (p *parser) parseVarDecl() { pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) } -// Func = Signature [ Body ] . -// Body = "{" ... "}" . +// parseFunc parses a Func: // +// Func = Signature [ Body ] . +// Body = "{" ... "}" . func (p *parser) parseFunc(recv *types.Var) *types.Signature { sig := p.parseSignature(recv) if p.tok == '{' { @@ -967,9 +1004,10 @@ func (p *parser) parseFunc(recv *types.Var) *types.Signature { return sig } -// MethodDecl = "func" Receiver Name Func . -// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . +// parseMethodDecl parses a MethodDecl: // +// MethodDecl = "func" Receiver Name Func . +// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . func (p *parser) parseMethodDecl() { // "func" already consumed p.expect('(') @@ -992,8 +1030,9 @@ func (p *parser) parseMethodDecl() { base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) } -// FuncDecl = "func" ExportedName Func . +// parseFuncDecl parses a FuncDecl: // +// FuncDecl = "func" ExportedName Func . func (p *parser) parseFuncDecl() { // "func" already consumed pkg, name := p.parseExportedName() @@ -1001,8 +1040,9 @@ func (p *parser) parseFuncDecl() { pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ)) } -// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . +// parseDecl parses a Decl: // +// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . func (p *parser) parseDecl() { if p.tok == scanner.Ident { switch p.lit { @@ -1029,9 +1069,10 @@ func (p *parser) parseDecl() { // ---------------------------------------------------------------------------- // Export -// Export = "PackageClause { Decl } "$$" . -// PackageClause = "package" PackageName [ "safe" ] "\n" . +// parseExport parses an Export: // +// Export = "PackageClause { Decl } "$$" . 
+// PackageClause = "package" PackageName [ "safe" ] "\n" . func (p *parser) parseExport() *types.Package { p.expectKeyword("package") name := p.parsePackageName() diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go index 209553409cb5..9a4ff329e128 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go @@ -251,7 +251,10 @@ func (p *iexporter) stringOff(s string) uint64 { // pushDecl adds n to the declaration work queue, if not already present. func (p *iexporter) pushDecl(obj types.Object) { // Package unsafe is known to the compiler and predeclared. - assert(obj.Pkg() != types.Unsafe) + // Caller should not ask us to do export it. + if obj.Pkg() == types.Unsafe { + panic("cannot export package unsafe") + } if _, ok := p.declIndex[obj]; ok { return diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go index 1a33cd5c6cd3..4caa0f55d9de 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -17,6 +17,7 @@ import ( "go/token" "go/types" "io" + "math/big" "sort" "strings" @@ -53,7 +54,7 @@ const ( ) type ident struct { - pkg string + pkg *types.Package name string } @@ -100,7 +101,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data if !debug { defer func() { if e := recover(); e != nil { - if version > currentVersion { + if bundle { + err = fmt.Errorf("%v", e) + } else if version > currentVersion { err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) } else { err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) @@ -237,6 +240,15 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data pkg.MarkComplete() } + // SetConstraint can't be called if the constraint type is not yet complete. + // When type params are created in the 'P' case of (*importReader).obj(), + // the associated constraint type may not be complete due to recursion. + // Therefore, we defer calling SetConstraint there, and call it here instead + // after all types are complete. + for _, d := range p.later { + typeparams.SetTypeParamConstraint(d.t, d.constraint) + } + for _, typ := range p.interfaceList { typ.Complete() } @@ -244,6 +256,11 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data return pkgs, nil } +type setConstraintArgs struct { + t *typeparams.TypeParam + constraint types.Type +} + type iimporter struct { version int ipath string @@ -260,6 +277,9 @@ type iimporter struct { fake fakeFileSet interfaceList []*types.Interface + // Arguments for calls to SetConstraint that are deferred due to recursive types + later []setConstraintArgs + indent int // for tracing support } @@ -444,7 +464,7 @@ func (r *importReader) obj(name string) { // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. 
- id := ident{r.currPkg.Name(), name} + id := ident{r.currPkg, name} r.p.tparamIndex[id] = t var implicit bool if r.p.version >= iexportVersionGo1_18 { @@ -458,7 +478,11 @@ func (r *importReader) obj(name string) { } typeparams.MarkImplicit(iface) } - typeparams.SetTypeParamConstraint(t, constraint) + // The constraint type may not be complete, if we + // are in the middle of a type recursion involving type + // constraints. So, we defer SetConstraint until we have + // completely set up all types in ImportData. + r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint}) case 'V': typ := r.typ() @@ -489,7 +513,9 @@ func (r *importReader) value() (typ types.Type, val constant.Value) { val = constant.MakeString(r.string()) case types.IsInteger: - val = r.mpint(b) + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) case types.IsFloat: val = r.mpfloat(b) @@ -538,8 +564,8 @@ func intSize(b *types.Basic) (signed bool, maxBytes uint) { return } -func (r *importReader) mpint(b *types.Basic) constant.Value { - signed, maxBytes := intSize(b) +func (r *importReader) mpint(x *big.Int, typ *types.Basic) { + signed, maxBytes := intSize(typ) maxSmall := 256 - maxBytes if signed { @@ -558,7 +584,8 @@ func (r *importReader) mpint(b *types.Basic) constant.Value { v = ^v } } - return constant.MakeInt64(v) + x.SetInt64(v) + return } v := -n @@ -568,47 +595,23 @@ func (r *importReader) mpint(b *types.Basic) constant.Value { if v < 1 || uint(v) > maxBytes { errorf("weird decoding: %v, %v => %v", n, signed, v) } - - buf := make([]byte, v) - io.ReadFull(&r.declReader, buf) - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { - buf[i], buf[j] = buf[j], buf[i] - } - - x := constant.MakeFromBytes(buf) + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) if signed && n&1 != 0 { - x = constant.UnaryOp(token.SUB, x, 0) + x.Neg(x) } - return x } -func (r *importReader) mpfloat(b *types.Basic) constant.Value { - x := r.mpint(b) - if constant.Sign(x) == 0 { - return x - } - - exp := r.int64() - switch { - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - // Ensure that the imported Kind is Float, else this constant may run into - // bitsize limits on overlarge integers. Eventually we can instead adopt - // the approach of CL 288632, but that CL relies on go/constant APIs that - // were introduced in go1.13. - // - // TODO(rFindley): sync the logic here with tip Go once we no longer - // support go1.12. - x = constant.ToFloat(x) - case exp < 0: - d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) +func (r *importReader) mpfloat(typ *types.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) } - return x + return constant.Make(&f) } func (r *importReader) ident() string { @@ -756,7 +759,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { errorf("unexpected type param type") } pkg, name := r.qualifiedIdent() - id := ident{pkg.Name(), name} + id := ident{pkg, name} if t, ok := r.p.tparamIndex[id]; ok { // We're already in the process of importing this typeparam. 
return t diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go new file mode 100644 index 000000000000..286bf445483d --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(go1.18 && goexperiment.unified) +// +build !go1.18 !goexperiment.unified + +package gcimporter + +const unifiedIR = false diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go new file mode 100644 index 000000000000..b5d69ffbe682 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 && goexperiment.unified +// +build go1.18,goexperiment.unified + +package gcimporter + +const unifiedIR = true diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go b/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go new file mode 100644 index 000000000000..8eb20729c2ad --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" +) + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") + return +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go new file mode 100644 index 000000000000..3c1a4375435a --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go @@ -0,0 +1,612 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Derived from go/internal/gcimporter/ureader.go + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import ( + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/internal/pkgbits" +) + +// A pkgReader holds the shared state for reading a unified IR package +// description. +type pkgReader struct { + pkgbits.PkgDecoder + + fake fakeFileSet + + ctxt *types.Context + imports map[string]*types.Package // previously imported packages, indexed by path + + // lazily initialized arrays corresponding to the unified IR + // PosBase, Pkg, and Type sections, respectively. + posBases []string // position bases (i.e., file names) + pkgs []*types.Package + typs []types.Type + + // laterFns holds functions that need to be invoked at the end of + // import reading. + laterFns []func() +} + +// later adds a function to be invoked at the end of import reading. +func (pr *pkgReader) later(fn func()) { + pr.laterFns = append(pr.laterFns, fn) +} + +// See cmd/compile/internal/noder.derivedInfo. 
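+//
+// A derivedInfo refers, by its index in the RelocType section, to a type
+// whose identity depends on the element's type parameters; pkgReader.typIdx
+// therefore caches such types per readerDict (derivedTypes) rather than in
+// the package-wide typs table.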
+type derivedInfo struct { + idx pkgbits.Index + needed bool +} + +// See cmd/compile/internal/noder.typeInfo. +type typeInfo struct { + idx pkgbits.Index + derived bool +} + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + s := string(data) + s = s[:strings.LastIndex(s, "\n$$\n")] + input := pkgbits.NewPkgDecoder(path, s) + pkg = readUnifiedPackage(fset, nil, imports, input) + return +} + +// readUnifiedPackage reads a package description from the given +// unified IR export data decoder. +func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { + pr := pkgReader{ + PkgDecoder: input, + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + + ctxt: ctxt, + imports: imports, + + posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), + pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), + typs: make([]types.Type, input.NumElems(pkgbits.RelocType)), + } + defer pr.fake.setLines() + + r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) + pkg := r.pkg() + r.Bool() // has init + + for i, n := 0, r.Len(); i < n; i++ { + // As if r.obj(), but avoiding the Scope.Lookup call, + // to avoid eager loading of imports. + r.Sync(pkgbits.SyncObject) + assert(!r.Bool()) + r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + assert(r.Len() == 0) + } + + r.Sync(pkgbits.SyncEOF) + + for _, fn := range pr.laterFns { + fn() + } + + pkg.MarkComplete() + return pkg +} + +// A reader holds the state for reading a single unified IR element +// within a package. +type reader struct { + pkgbits.Decoder + + p *pkgReader + + dict *readerDict +} + +// A readerDict holds the state for type parameters that parameterize +// the current unified IR element. +type readerDict struct { + // bounds is a slice of typeInfos corresponding to the underlying + // bounds of the element's type parameters. + bounds []typeInfo + + // tparams is a slice of the constructed TypeParams for the element. + tparams []*types.TypeParam + + // devived is a slice of types derived from tparams, which may be + // instantiated while reading the current element. + derived []derivedInfo + derivedTypes []types.Type // lazily instantiated from derived +} + +func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.NewDecoder(k, idx, marker), + p: pr, + } +} + +// @@@ Positions + +func (r *reader) pos() token.Pos { + r.Sync(pkgbits.SyncPos) + if !r.Bool() { + return token.NoPos + } + + // TODO(mdempsky): Delta encoding. + posBase := r.posBase() + line := r.Uint() + col := r.Uint() + return r.p.fake.pos(posBase, int(line), int(col)) +} + +func (r *reader) posBase() string { + return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) +} + +func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { + if b := pr.posBases[idx]; b != "" { + return b + } + + r := pr.newReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + + // Within types2, position bases have a lot more details (e.g., + // keeping track of where //line directives appeared exactly). + // + // For go/types, we just track the file name. 
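+	// (The returned base is just that file name; reader.pos later combines
+	// it with the line and column it reads to build a token.Pos via fake.pos.)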
+ + filename := r.String() + + if r.Bool() { // file base + // Was: "b = token.NewTrimmedFileBase(filename, true)" + } else { // line base + pos := r.pos() + line := r.Uint() + col := r.Uint() + + // Was: "b = token.NewLineBase(pos, filename, true, line, col)" + _, _, _ = pos, line, col + } + + b := filename + pr.posBases[idx] = b + return b +} + +// @@@ Packages + +func (r *reader) pkg() *types.Package { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { + // TODO(mdempsky): Consider using some non-nil pointer to indicate + // the universe scope, so we don't need to keep re-reading it. + if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +func (r *reader) doPkg() *types.Package { + path := r.String() + switch path { + case "": + path = r.p.PkgPath() + case "builtin": + return nil // universe + case "unsafe": + return types.Unsafe + } + + if pkg := r.p.imports[path]; pkg != nil { + return pkg + } + + name := r.String() + + pkg := types.NewPackage(path, name) + r.p.imports[path] = pkg + + imports := make([]*types.Package, r.Len()) + for i := range imports { + imports[i] = r.pkg() + } + pkg.SetImports(imports) + + return pkg +} + +// @@@ Types + +func (r *reader) typ() types.Type { + return r.p.typIdx(r.typInfo(), r.dict) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { + idx := info.idx + var where *types.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + r := pr.newReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ := r.doTyp() + assert(typ != nil) + + // See comment in pkgReader.typIdx explaining how this happens. 
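+	// (doTyp may have re-entered typIdx for this same index while decoding,
+	// already storing a type at *where; keep that earlier value so the index
+	// continues to resolve to a single types.Type.)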
+ if prev := *where; prev != nil { + return prev + } + + *where = typ + return typ +} + +func (r *reader) doTyp() (res types.Type) { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + errorf("unhandled type tag: %v", tag) + panic("unreachable") + + case pkgbits.TypeBasic: + return types.Typ[r.Len()] + + case pkgbits.TypeNamed: + obj, targs := r.obj() + name := obj.(*types.TypeName) + if len(targs) != 0 { + t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) + return t + } + return name.Type() + + case pkgbits.TypeTypeParam: + return r.dict.tparams[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := types.ChanDir(r.Len()) + return types.NewChan(dir, r.typ()) + case pkgbits.TypeMap: + return types.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types.NewPointer(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil, nil, nil) + case pkgbits.TypeSlice: + return types.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) structType() *types.Struct { + fields := make([]*types.Var, r.Len()) + var tags []string + for i := range fields { + pos := r.pos() + pkg, name := r.selector() + ftyp := r.typ() + tag := r.String() + embedded := r.Bool() + + fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) + if tag != "" { + for len(tags) < i { + tags = append(tags, "") + } + tags = append(tags, tag) + } + } + return types.NewStruct(fields, tags) +} + +func (r *reader) unionType() *types.Union { + terms := make([]*types.Term, r.Len()) + for i := range terms { + terms[i] = types.NewTerm(r.Bool(), r.typ()) + } + return types.NewUnion(terms) +} + +func (r *reader) interfaceType() *types.Interface { + methods := make([]*types.Func, r.Len()) + embeddeds := make([]types.Type, r.Len()) + implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() + + for i := range methods { + pos := r.pos() + pkg, name := r.selector() + mtyp := r.signature(nil, nil, nil) + methods[i] = types.NewFunc(pos, pkg, name, mtyp) + } + + for i := range embeddeds { + embeddeds[i] = r.typ() + } + + iface := types.NewInterfaceType(methods, embeddeds) + if implicit { + iface.MarkImplicit() + } + return iface +} + +func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + variadic := r.Bool() + + return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) +} + +func (r *reader) params() *types.Tuple { + r.Sync(pkgbits.SyncParams) + + params := make([]*types.Var, r.Len()) + for i := range params { + params[i] = r.param() + } + + return types.NewTuple(params...) 
+} + +func (r *reader) param() *types.Var { + r.Sync(pkgbits.SyncParam) + + pos := r.pos() + pkg, name := r.localIdent() + typ := r.typ() + + return types.NewParam(pos, pkg, name, typ) +} + +// @@@ Objects + +func (r *reader) obj() (types.Object, []types.Type) { + r.Sync(pkgbits.SyncObject) + + assert(!r.Bool()) + + pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + obj := pkgScope(pkg).Lookup(name) + + targs := make([]types.Type, r.Len()) + for i := range targs { + targs[i] = r.typ() + } + + return obj, targs +} + +func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { + rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) + + objPkg, objName := rname.qualifiedIdent() + assert(objName != "") + + tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + + if tag == pkgbits.ObjStub { + assert(objPkg == nil || objPkg == types.Unsafe) + return objPkg, objName + } + + if objPkg.Scope().Lookup(objName) == nil { + dict := pr.objDictIdx(idx) + + r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) + r.dict = dict + + declare := func(obj types.Object) { + objPkg.Scope().Insert(obj) + } + + switch tag { + default: + panic("weird") + + case pkgbits.ObjAlias: + pos := r.pos() + typ := r.typ() + declare(types.NewTypeName(pos, objPkg, objName, typ)) + + case pkgbits.ObjConst: + pos := r.pos() + typ := r.typ() + val := r.Value() + declare(types.NewConst(pos, objPkg, objName, typ, val)) + + case pkgbits.ObjFunc: + pos := r.pos() + tparams := r.typeParamNames() + sig := r.signature(nil, nil, tparams) + declare(types.NewFunc(pos, objPkg, objName, sig)) + + case pkgbits.ObjType: + pos := r.pos() + + obj := types.NewTypeName(pos, objPkg, objName, nil) + named := types.NewNamed(obj, nil, nil) + declare(obj) + + named.SetTypeParams(r.typeParamNames()) + + // TODO(mdempsky): Rewrite receiver types to underlying is an + // Interface? The go/types importer does this (I think because + // unit tests expected that), but cmd/compile doesn't care + // about it, so maybe we can avoid worrying about that here. + rhs := r.typ() + r.p.later(func() { + underlying := rhs.Underlying() + named.SetUnderlying(underlying) + }) + + for i, n := 0, r.Len(); i < n; i++ { + named.AddMethod(r.method()) + } + + case pkgbits.ObjVar: + pos := r.pos() + typ := r.typ() + declare(types.NewVar(pos, objPkg, objName, typ)) + } + } + + return objPkg, objName +} + +func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { + r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) + + var dict readerDict + + if implicits := r.Len(); implicits != 0 { + errorf("unexpected object with %v implicit type parameter(s)", implicits) + } + + dict.bounds = make([]typeInfo, r.Len()) + for i := range dict.bounds { + dict.bounds[i] = r.typInfo() + } + + dict.derived = make([]derivedInfo, r.Len()) + dict.derivedTypes = make([]types.Type, len(dict.derived)) + for i := range dict.derived { + dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + } + + // function references follow, but reader doesn't need those + + return &dict +} + +func (r *reader) typeParamNames() []*types.TypeParam { + r.Sync(pkgbits.SyncTypeParamNames) + + // Note: This code assumes it only processes objects without + // implement type parameters. This is currently fine, because + // reader is only used to read in exported declarations, which are + // always package scoped. + + if len(r.dict.bounds) == 0 { + return nil + } + + // Careful: Type parameter lists may have cycles. 
To allow for this, + // we construct the type parameter list in two passes: first we + // create all the TypeNames and TypeParams, then we construct and + // set the bound type. + + r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds)) + for i := range r.dict.bounds { + pos := r.pos() + pkg, name := r.localIdent() + + tname := types.NewTypeName(pos, pkg, name, nil) + r.dict.tparams[i] = types.NewTypeParam(tname, nil) + } + + typs := make([]types.Type, len(r.dict.bounds)) + for i, bound := range r.dict.bounds { + typs[i] = r.p.typIdx(bound, r.dict) + } + + // TODO(mdempsky): This is subtle, elaborate further. + // + // We have to save tparams outside of the closure, because + // typeParamNames() can be called multiple times with the same + // dictionary instance. + // + // Also, this needs to happen later to make sure SetUnderlying has + // been called. + // + // TODO(mdempsky): Is it safe to have a single "later" slice or do + // we need to have multiple passes? See comments on CL 386002 and + // go.dev/issue/52104. + tparams := r.dict.tparams + r.p.later(func() { + for i, typ := range typs { + tparams[i].SetConstraint(typ) + } + }) + + return r.dict.tparams +} + +func (r *reader) method() *types.Func { + r.Sync(pkgbits.SyncMethod) + pos := r.pos() + pkg, name := r.selector() + + rparams := r.typeParamNames() + sig := r.signature(r.param(), rparams, nil) + + _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go. + return types.NewFunc(pos, pkg, name, sig) +} + +func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) } +func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) } +func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) } + +func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) { + r.Sync(marker) + return r.pkg(), r.String() +} + +// pkgScope returns pkg.Scope(). +// If pkg is nil, it returns types.Universe instead. +// +// TODO(mdempsky): Remove after x/tools can depend on Go 1.19. +func pkgScope(pkg *types.Package) *types.Scope { + if pkg != nil { + return pkg.Scope() + } + return types.Universe +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/codes.go b/vendor/golang.org/x/tools/go/internal/pkgbits/codes.go new file mode 100644 index 000000000000..f0cabde96eba --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/codes.go @@ -0,0 +1,77 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A Code is an enum value that can be encoded into bitstreams. +// +// Code types are preferable for enum types, because they allow +// Decoder to detect desyncs. +type Code interface { + // Marker returns the SyncMarker for the Code's dynamic type. + Marker() SyncMarker + + // Value returns the Code's ordinal value. + Value() int +} + +// A CodeVal distinguishes among go/constant.Value encodings. +type CodeVal int + +func (c CodeVal) Marker() SyncMarker { return SyncVal } +func (c CodeVal) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ValBool CodeVal = iota + ValString + ValInt64 + ValBigInt + ValBigRat + ValBigFloat +) + +// A CodeType distinguishes among go/types.Type encodings. 
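+//
+// Like CodeVal, a CodeType is written with Encoder.Code (for example
+// w.Code(TypeSlice)) and read back as CodeType(r.Code(SyncType)), as in
+// the reader's doTyp switch.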
+type CodeType int + +func (c CodeType) Marker() SyncMarker { return SyncType } +func (c CodeType) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + TypeBasic CodeType = iota + TypeNamed + TypePointer + TypeSlice + TypeArray + TypeChan + TypeMap + TypeSignature + TypeStruct + TypeInterface + TypeUnion + TypeTypeParam +) + +// A CodeObj distinguishes among go/types.Object encodings. +type CodeObj int + +func (c CodeObj) Marker() SyncMarker { return SyncCodeObj } +func (c CodeObj) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ObjAlias CodeObj = iota + ObjConst + ObjType + ObjFunc + ObjVar + ObjStub +) diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go new file mode 100644 index 000000000000..2bc793668ec9 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go @@ -0,0 +1,433 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "math/big" + "os" + "runtime" + "strings" +) + +// A PkgDecoder provides methods for decoding a package's Unified IR +// export data. +type PkgDecoder struct { + // version is the file format version. + version uint32 + + // sync indicates whether the file uses sync markers. + sync bool + + // pkgPath is the package path for the package to be decoded. + // + // TODO(mdempsky): Remove; unneeded since CL 391014. + pkgPath string + + // elemData is the full data payload of the encoded package. + // Elements are densely and contiguously packed together. + // + // The last 8 bytes of elemData are the package fingerprint. + elemData string + + // elemEnds stores the byte-offset end positions of element + // bitstreams within elemData. + // + // For example, element I's bitstream data starts at elemEnds[I-1] + // (or 0, if I==0) and ends at elemEnds[I]. + // + // Note: elemEnds is indexed by absolute indices, not + // section-relative indices. + elemEnds []uint32 + + // elemEndsEnds stores the index-offset end positions of relocation + // sections within elemEnds. + // + // For example, section K's end positions start at elemEndsEnds[K-1] + // (or 0, if K==0) and end at elemEndsEnds[K]. + elemEndsEnds [numRelocs]uint32 +} + +// PkgPath returns the package path for the package +// +// TODO(mdempsky): Remove; unneeded since CL 391014. +func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } + +// SyncMarkers reports whether pr uses sync markers. +func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } + +// NewPkgDecoder returns a PkgDecoder initialized to read the Unified +// IR export data from input. pkgPath is the package path for the +// compilation unit that produced the export data. +// +// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. +func NewPkgDecoder(pkgPath, input string) PkgDecoder { + pr := PkgDecoder{ + pkgPath: pkgPath, + } + + // TODO(mdempsky): Implement direct indexing of input string to + // avoid copying the position information. 
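+	// (The reads below mirror PkgEncoder.DumpTo: a uint32 version, a uint32
+	// flags word for version 1 and later, the cumulative per-section element
+	// counts (elemEndsEnds), the cumulative element end offsets (elemEnds),
+	// and the element data, whose final 8 bytes are the package fingerprint.)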
+ + r := strings.NewReader(input) + + assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + + switch pr.version { + default: + panic(fmt.Errorf("unsupported version: %v", pr.version)) + case 0: + // no flags + case 1: + var flags uint32 + assert(binary.Read(r, binary.LittleEndian, &flags) == nil) + pr.sync = flags&flagSyncMarkers != 0 + } + + assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) + + pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) + assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) + + pos, err := r.Seek(0, os.SEEK_CUR) + assert(err == nil) + + pr.elemData = input[pos:] + assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + return pr +} + +// NumElems returns the number of elements in section k. +func (pr *PkgDecoder) NumElems(k RelocKind) int { + count := int(pr.elemEndsEnds[k]) + if k > 0 { + count -= int(pr.elemEndsEnds[k-1]) + } + return count +} + +// TotalElems returns the total number of elements across all sections. +func (pr *PkgDecoder) TotalElems() int { + return len(pr.elemEnds) +} + +// Fingerprint returns the package fingerprint. +func (pr *PkgDecoder) Fingerprint() [8]byte { + var fp [8]byte + copy(fp[:], pr.elemData[len(pr.elemData)-8:]) + return fp +} + +// AbsIdx returns the absolute index for the given (section, index) +// pair. +func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { + absIdx := int(idx) + if k > 0 { + absIdx += int(pr.elemEndsEnds[k-1]) + } + if absIdx >= int(pr.elemEndsEnds[k]) { + errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + } + return absIdx +} + +// DataIdx returns the raw element bitstream for the given (section, +// index) pair. +func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { + absIdx := pr.AbsIdx(k, idx) + + var start uint32 + if absIdx > 0 { + start = pr.elemEnds[absIdx-1] + } + end := pr.elemEnds[absIdx] + + return pr.elemData[start:end] +} + +// StringIdx returns the string value for the given string index. +func (pr *PkgDecoder) StringIdx(idx Index) string { + return pr.DataIdx(RelocString, idx) +} + +// NewDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.NewDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +// NewDecoderRaw returns a Decoder for the given (section, index) pair. +// +// Most callers should use NewDecoder instead. +func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { + r := Decoder{ + common: pr, + k: k, + Idx: idx, + } + + // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. + r.Data = *strings.NewReader(pr.DataIdx(k, idx)) + + r.Sync(SyncRelocs) + r.Relocs = make([]RelocEnt, r.Len()) + for i := range r.Relocs { + r.Sync(SyncReloc) + r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} + } + + return r +} + +// A Decoder provides methods for decoding an individual element's +// bitstream data. +type Decoder struct { + common *PkgDecoder + + Relocs []RelocEnt + Data strings.Reader + + k RelocKind + Idx Index +} + +func (r *Decoder) checkErr(err error) { + if err != nil { + errorf("unexpected decoding error: %w", err) + } +} + +func (r *Decoder) rawUvarint() uint64 { + x, err := binary.ReadUvarint(&r.Data) + r.checkErr(err) + return x +} + +func (r *Decoder) rawVarint() int64 { + ux := r.rawUvarint() + + // Zig-zag decode. 
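+	// (Uvarints 0, 1, 2, 3, 4 decode to 0, -1, 1, -2, 2, so small
+	// magnitudes stay small regardless of sign.)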
+ x := int64(ux >> 1) + if ux&1 != 0 { + x = ^x + } + return x +} + +func (r *Decoder) rawReloc(k RelocKind, idx int) Index { + e := r.Relocs[idx] + assert(e.Kind == k) + return e.Idx +} + +// Sync decodes a sync marker from the element bitstream and asserts +// that it matches the expected marker. +// +// If r.common.sync is false, then Sync is a no-op. +func (r *Decoder) Sync(mWant SyncMarker) { + if !r.common.sync { + return + } + + pos, _ := r.Data.Seek(0, os.SEEK_CUR) // TODO(mdempsky): io.SeekCurrent after #44505 is resolved + mHave := SyncMarker(r.rawUvarint()) + writerPCs := make([]int, r.rawUvarint()) + for i := range writerPCs { + writerPCs[i] = int(r.rawUvarint()) + } + + if mHave == mWant { + return + } + + // There's some tension here between printing: + // + // (1) full file paths that tools can recognize (e.g., so emacs + // hyperlinks the "file:line" text for easy navigation), or + // + // (2) short file paths that are easier for humans to read (e.g., by + // omitting redundant or irrelevant details, so it's easier to + // focus on the useful bits that remain). + // + // The current formatting favors the former, as it seems more + // helpful in practice. But perhaps the formatting could be improved + // to better address both concerns. For example, use relative file + // paths if they would be shorter, or rewrite file paths to contain + // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how + // to reliably expand that again. + + fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) + + fmt.Printf("\nfound %v, written at:\n", mHave) + if len(writerPCs) == 0 { + fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) + } + for _, pc := range writerPCs { + fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) + } + + fmt.Printf("\nexpected %v, reading at:\n", mWant) + var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? + n := runtime.Callers(2, readerPCs[:]) + for _, pc := range fmtFrames(readerPCs[:n]...) { + fmt.Printf("\t%s\n", pc) + } + + // We already printed a stack trace for the reader, so now we can + // simply exit. Printing a second one with panic or base.Fatalf + // would just be noise. + os.Exit(1) +} + +// Bool decodes and returns a bool value from the element bitstream. +func (r *Decoder) Bool() bool { + r.Sync(SyncBool) + x, err := r.Data.ReadByte() + r.checkErr(err) + assert(x < 2) + return x != 0 +} + +// Int64 decodes and returns an int64 value from the element bitstream. +func (r *Decoder) Int64() int64 { + r.Sync(SyncInt64) + return r.rawVarint() +} + +// Int64 decodes and returns a uint64 value from the element bitstream. +func (r *Decoder) Uint64() uint64 { + r.Sync(SyncUint64) + return r.rawUvarint() +} + +// Len decodes and returns a non-negative int value from the element bitstream. +func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } + +// Int decodes and returns an int value from the element bitstream. +func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } + +// Uint decodes and returns a uint value from the element bitstream. +func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } + +// Code decodes a Code value from the element bitstream and returns +// its ordinal value. It's the caller's responsibility to convert the +// result to an appropriate Code type. 
+// +// TODO(mdempsky): Ideally this method would have signature "Code[T +// Code] T" instead, but we don't allow generic methods and the +// compiler can't depend on generics yet anyway. +func (r *Decoder) Code(mark SyncMarker) int { + r.Sync(mark) + return r.Len() +} + +// Reloc decodes a relocation of expected section k from the element +// bitstream and returns an index to the referenced element. +func (r *Decoder) Reloc(k RelocKind) Index { + r.Sync(SyncUseReloc) + return r.rawReloc(k, r.Len()) +} + +// String decodes and returns a string value from the element +// bitstream. +func (r *Decoder) String() string { + r.Sync(SyncString) + return r.common.StringIdx(r.Reloc(RelocString)) +} + +// Strings decodes and returns a variable-length slice of strings from +// the element bitstream. +func (r *Decoder) Strings() []string { + res := make([]string, r.Len()) + for i := range res { + res[i] = r.String() + } + return res +} + +// Value decodes and returns a constant.Value from the element +// bitstream. +func (r *Decoder) Value() constant.Value { + r.Sync(SyncValue) + isComplex := r.Bool() + val := r.scalar() + if isComplex { + val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) + } + return val +} + +func (r *Decoder) scalar() constant.Value { + switch tag := CodeVal(r.Code(SyncVal)); tag { + default: + panic(fmt.Errorf("unexpected scalar tag: %v", tag)) + + case ValBool: + return constant.MakeBool(r.Bool()) + case ValString: + return constant.MakeString(r.String()) + case ValInt64: + return constant.MakeInt64(r.Int64()) + case ValBigInt: + return constant.Make(r.bigInt()) + case ValBigRat: + num := r.bigInt() + denom := r.bigInt() + return constant.Make(new(big.Rat).SetFrac(num, denom)) + case ValBigFloat: + return constant.Make(r.bigFloat()) + } +} + +func (r *Decoder) bigInt() *big.Int { + v := new(big.Int).SetBytes([]byte(r.String())) + if r.Bool() { + v.Neg(v) + } + return v +} + +func (r *Decoder) bigFloat() *big.Float { + v := new(big.Float).SetPrec(512) + assert(v.UnmarshalText([]byte(r.String())) == nil) + return v +} + +// @@@ Helpers + +// TODO(mdempsky): These should probably be removed. I think they're a +// smell that the export data format is not yet quite right. + +// PeekPkgPath returns the package path for the specified package +// index. +func (pr *PkgDecoder) PeekPkgPath(idx Index) string { + r := pr.NewDecoder(RelocPkg, idx, SyncPkgDef) + path := r.String() + if path == "" { + path = pr.pkgPath + } + return path +} + +// PeekObj returns the package path, object name, and CodeObj for the +// specified object index. +func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { + r := pr.NewDecoder(RelocName, idx, SyncObject1) + r.Sync(SyncSym) + r.Sync(SyncPkg) + path := pr.PeekPkgPath(r.Reloc(RelocPkg)) + name := r.String() + assert(name != "") + + tag := CodeObj(r.Code(SyncCodeObj)) + + return path, name, tag +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/doc.go b/vendor/golang.org/x/tools/go/internal/pkgbits/doc.go new file mode 100644 index 000000000000..c8a2796b5e4c --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/doc.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkgbits implements low-level coding abstractions for +// Unified IR's export data format. +// +// At a low-level, a package is a collection of bitstream elements. 
+// Each element has a "kind" and a dense, non-negative index. +// Elements can be randomly accessed given their kind and index. +// +// Individual elements are sequences of variable-length values (e.g., +// integers, booleans, strings, go/constant values, cross-references +// to other elements). Package pkgbits provides APIs for encoding and +// decoding these low-level values, but the details of mapping +// higher-level Go constructs into elements is left to higher-level +// abstractions. +// +// Elements may cross-reference each other with "relocations." For +// example, an element representing a pointer type has a relocation +// referring to the element type. +// +// Go constructs may be composed as a constellation of multiple +// elements. For example, a declared function may have one element to +// describe the object (e.g., its name, type, position), and a +// separate element to describe its function body. This allows readers +// some flexibility in efficiently seeking or re-reading data (e.g., +// inlining requires re-reading the function body for each inlined +// call, without needing to re-read the object-level details). +// +// This is a copy of internal/pkgbits in the Go implementation. +package pkgbits diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go new file mode 100644 index 000000000000..c50c838caaec --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go @@ -0,0 +1,379 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "bytes" + "crypto/md5" + "encoding/binary" + "go/constant" + "io" + "math/big" + "runtime" +) + +// currentVersion is the current version number. +// +// - v0: initial prototype +// +// - v1: adds the flags uint32 word +const currentVersion uint32 = 1 + +// A PkgEncoder provides methods for encoding a package's Unified IR +// export data. +type PkgEncoder struct { + // elems holds the bitstream for previously encoded elements. + elems [numRelocs][]string + + // stringsIdx maps previously encoded strings to their index within + // the RelocString section, to allow deduplication. That is, + // elems[RelocString][stringsIdx[s]] == s (if present). + stringsIdx map[string]Index + + // syncFrames is the number of frames to write at each sync + // marker. A negative value means sync markers are omitted. + syncFrames int +} + +// SyncMarkers reports whether pw uses sync markers. +func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } + +// NewPkgEncoder returns an initialized PkgEncoder. +// +// syncFrames is the number of caller frames that should be serialized +// at Sync points. Serializing additional frames results in larger +// export data files, but can help diagnosing desync errors in +// higher-level Unified IR reader/writer code. If syncFrames is +// negative, then sync markers are omitted entirely. +func NewPkgEncoder(syncFrames int) PkgEncoder { + return PkgEncoder{ + stringsIdx: make(map[string]Index), + syncFrames: syncFrames, + } +} + +// DumpTo writes the package's encoded data to out0 and returns the +// package fingerprint. 
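+//
+// The stream layout is: a uint32 version, a uint32 flags word, the
+// cumulative per-section element counts, the cumulative element end
+// offsets, the raw element data, and finally an 8-byte fingerprint taken
+// from the MD5 hash of everything written before it.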
+func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { + h := md5.New() + out := io.MultiWriter(out0, h) + + writeUint32 := func(x uint32) { + assert(binary.Write(out, binary.LittleEndian, x) == nil) + } + + writeUint32(currentVersion) + + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) + + // Write elemEndsEnds. + var sum uint32 + for _, elems := range &pw.elems { + sum += uint32(len(elems)) + writeUint32(sum) + } + + // Write elemEnds. + sum = 0 + for _, elems := range &pw.elems { + for _, elem := range elems { + sum += uint32(len(elem)) + writeUint32(sum) + } + } + + // Write elemData. + for _, elems := range &pw.elems { + for _, elem := range elems { + _, err := io.WriteString(out, elem) + assert(err == nil) + } + } + + // Write fingerprint. + copy(fingerprint[:], h.Sum(nil)) + _, err := out0.Write(fingerprint[:]) + assert(err == nil) + + return +} + +// StringIdx adds a string value to the strings section, if not +// already present, and returns its index. +func (pw *PkgEncoder) StringIdx(s string) Index { + if idx, ok := pw.stringsIdx[s]; ok { + assert(pw.elems[RelocString][idx] == s) + return idx + } + + idx := Index(len(pw.elems[RelocString])) + pw.elems[RelocString] = append(pw.elems[RelocString], s) + pw.stringsIdx[s] = idx + return idx +} + +// NewEncoder returns an Encoder for a new element within the given +// section, and encodes the given SyncMarker as the start of the +// element bitstream. +func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder { + e := pw.NewEncoderRaw(k) + e.Sync(marker) + return e +} + +// NewEncoderRaw returns an Encoder for a new element within the given +// section. +// +// Most callers should use NewEncoder instead. +func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder { + idx := Index(len(pw.elems[k])) + pw.elems[k] = append(pw.elems[k], "") // placeholder + + return Encoder{ + p: pw, + k: k, + Idx: idx, + } +} + +// An Encoder provides methods for encoding an individual element's +// bitstream data. +type Encoder struct { + p *PkgEncoder + + Relocs []RelocEnt + Data bytes.Buffer // accumulated element bitstream data + + encodingRelocHeader bool + + k RelocKind + Idx Index // index within relocation section +} + +// Flush finalizes the element's bitstream and returns its Index. +func (w *Encoder) Flush() Index { + var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + + // Backup the data so we write the relocations at the front. + var tmp bytes.Buffer + io.Copy(&tmp, &w.Data) + + // TODO(mdempsky): Consider writing these out separately so they're + // easier to strip, along with function bodies, so that we can prune + // down to just the data that's relevant to go/types. + if w.encodingRelocHeader { + panic("encodingRelocHeader already true; recursive flush?") + } + w.encodingRelocHeader = true + w.Sync(SyncRelocs) + w.Len(len(w.Relocs)) + for _, rEnt := range w.Relocs { + w.Sync(SyncReloc) + w.Len(int(rEnt.Kind)) + w.Len(int(rEnt.Idx)) + } + + io.Copy(&sb, &w.Data) + io.Copy(&sb, &tmp) + w.p.elems[w.k][w.Idx] = sb.String() + + return w.Idx +} + +func (w *Encoder) checkErr(err error) { + if err != nil { + errorf("unexpected encoding error: %v", err) + } +} + +func (w *Encoder) rawUvarint(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + _, err := w.Data.Write(buf[:n]) + w.checkErr(err) +} + +func (w *Encoder) rawVarint(x int64) { + // Zig-zag encode. 
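+	// (0, -1, 1, -2, 2 encode to 0, 1, 2, 3, 4; Decoder.rawVarint applies
+	// the inverse transformation.)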
+ ux := uint64(x) << 1 + if x < 0 { + ux = ^ux + } + + w.rawUvarint(ux) +} + +func (w *Encoder) rawReloc(r RelocKind, idx Index) int { + // TODO(mdempsky): Use map for lookup; this takes quadratic time. + for i, rEnt := range w.Relocs { + if rEnt.Kind == r && rEnt.Idx == idx { + return i + } + } + + i := len(w.Relocs) + w.Relocs = append(w.Relocs, RelocEnt{r, idx}) + return i +} + +func (w *Encoder) Sync(m SyncMarker) { + if !w.p.SyncMarkers() { + return + } + + // Writing out stack frame string references requires working + // relocations, but writing out the relocations themselves involves + // sync markers. To prevent infinite recursion, we simply trim the + // stack frame for sync markers within the relocation header. + var frames []string + if !w.encodingRelocHeader && w.p.syncFrames > 0 { + pcs := make([]uintptr, w.p.syncFrames) + n := runtime.Callers(2, pcs) + frames = fmtFrames(pcs[:n]...) + } + + // TODO(mdempsky): Save space by writing out stack frames as a + // linked list so we can share common stack frames. + w.rawUvarint(uint64(m)) + w.rawUvarint(uint64(len(frames))) + for _, frame := range frames { + w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame)))) + } +} + +// Bool encodes and writes a bool value into the element bitstream, +// and then returns the bool value. +// +// For simple, 2-alternative encodings, the idiomatic way to call Bool +// is something like: +// +// if w.Bool(x != 0) { +// // alternative #1 +// } else { +// // alternative #2 +// } +// +// For multi-alternative encodings, use Code instead. +func (w *Encoder) Bool(b bool) bool { + w.Sync(SyncBool) + var x byte + if b { + x = 1 + } + err := w.Data.WriteByte(x) + w.checkErr(err) + return b +} + +// Int64 encodes and writes an int64 value into the element bitstream. +func (w *Encoder) Int64(x int64) { + w.Sync(SyncInt64) + w.rawVarint(x) +} + +// Uint64 encodes and writes a uint64 value into the element bitstream. +func (w *Encoder) Uint64(x uint64) { + w.Sync(SyncUint64) + w.rawUvarint(x) +} + +// Len encodes and writes a non-negative int value into the element bitstream. +func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } + +// Int encodes and writes an int value into the element bitstream. +func (w *Encoder) Int(x int) { w.Int64(int64(x)) } + +// Len encodes and writes a uint value into the element bitstream. +func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } + +// Reloc encodes and writes a relocation for the given (section, +// index) pair into the element bitstream. +// +// Note: Only the index is formally written into the element +// bitstream, so bitstream decoders must know from context which +// section an encoded relocation refers to. +func (w *Encoder) Reloc(r RelocKind, idx Index) { + w.Sync(SyncUseReloc) + w.Len(w.rawReloc(r, idx)) +} + +// Code encodes and writes a Code value into the element bitstream. +func (w *Encoder) Code(c Code) { + w.Sync(c.Marker()) + w.Len(c.Value()) +} + +// String encodes and writes a string value into the element +// bitstream. +// +// Internally, strings are deduplicated by adding them to the strings +// section (if not already present), and then writing a relocation +// into the element bitstream. +func (w *Encoder) String(s string) { + w.Sync(SyncString) + w.Reloc(RelocString, w.p.StringIdx(s)) +} + +// Strings encodes and writes a variable-length slice of strings into +// the element bitstream. 
+func (w *Encoder) Strings(ss []string) { + w.Len(len(ss)) + for _, s := range ss { + w.String(s) + } +} + +// Value encodes and writes a constant.Value into the element +// bitstream. +func (w *Encoder) Value(val constant.Value) { + w.Sync(SyncValue) + if w.Bool(val.Kind() == constant.Complex) { + w.scalar(constant.Real(val)) + w.scalar(constant.Imag(val)) + } else { + w.scalar(val) + } +} + +func (w *Encoder) scalar(val constant.Value) { + switch v := constant.Val(val).(type) { + default: + errorf("unhandled %v (%v)", val, val.Kind()) + case bool: + w.Code(ValBool) + w.Bool(v) + case string: + w.Code(ValString) + w.String(v) + case int64: + w.Code(ValInt64) + w.Int64(v) + case *big.Int: + w.Code(ValBigInt) + w.bigInt(v) + case *big.Rat: + w.Code(ValBigRat) + w.bigInt(v.Num()) + w.bigInt(v.Denom()) + case *big.Float: + w.Code(ValBigFloat) + w.bigFloat(v) + } +} + +func (w *Encoder) bigInt(v *big.Int) { + b := v.Bytes() + w.String(string(b)) // TODO: More efficient encoding. + w.Bool(v.Sign() < 0) +} + +func (w *Encoder) bigFloat(v *big.Float) { + b := v.Append(nil, 'p', -1) + w.String(string(b)) // TODO: More efficient encoding. +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/flags.go b/vendor/golang.org/x/tools/go/internal/pkgbits/flags.go new file mode 100644 index 000000000000..654222745fac --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/flags.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +const ( + flagSyncMarkers = 1 << iota // file format contains sync markers +) diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go1.go new file mode 100644 index 000000000000..5294f6a63edd --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go1.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 +// +build !go1.7 + +// TODO(mdempsky): Remove after #44505 is resolved + +package pkgbits + +import "runtime" + +func walkFrames(pcs []uintptr, visit frameVisitor) { + for _, pc := range pcs { + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + + visit(file, line, fn.Name(), pc-fn.Entry()) + } +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go17.go new file mode 100644 index 000000000000..2324ae7adfe2 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go17.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 +// +build go1.7 + +package pkgbits + +import "runtime" + +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. 
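+//
+// A typical caller is Encoder.Sync, which captures pcs with
+// runtime.Callers(2, pcs) and formats them through fmtFrames.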
+func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go b/vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go new file mode 100644 index 000000000000..7a8f04ab3fc6 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go @@ -0,0 +1,42 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A RelocKind indicates a particular section within a unified IR export. +type RelocKind int + +// An Index represents a bitstream element index within a particular +// section. +type Index int + +// A relocEnt (relocation entry) is an entry in an element's local +// reference table. +// +// TODO(mdempsky): Rename this too. +type RelocEnt struct { + Kind RelocKind + Idx Index +} + +// Reserved indices within the meta relocation section. +const ( + PublicRootIdx Index = 0 + PrivateRootIdx Index = 1 +) + +const ( + RelocString RelocKind = iota + RelocMeta + RelocPosBase + RelocPkg + RelocName + RelocType + RelocObj + RelocObjExt + RelocObjDict + RelocBody + + numRelocs = iota +) diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/support.go b/vendor/golang.org/x/tools/go/internal/pkgbits/support.go new file mode 100644 index 000000000000..ad26d3b28cae --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/support.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import "fmt" + +func assert(b bool) { + if !b { + panic("assertion failed") + } +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Errorf(format, args...)) +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/go/internal/pkgbits/sync.go new file mode 100644 index 000000000000..5bd51ef71700 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/sync.go @@ -0,0 +1,113 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "fmt" + "strings" +) + +// fmtFrames formats a backtrace for reporting reader/writer desyncs. +func fmtFrames(pcs ...uintptr) []string { + res := make([]string, 0, len(pcs)) + walkFrames(pcs, func(file string, line int, name string, offset uintptr) { + // Trim package from function name. It's just redundant noise. + name = strings.TrimPrefix(name, "cmd/compile/internal/noder.") + + res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset)) + }) + return res +} + +type frameVisitor func(file string, line int, name string, offset uintptr) + +// SyncMarker is an enum type that represents markers that may be +// written to export data to ensure the reader and writer stay +// synchronized. +type SyncMarker int + +//go:generate stringer -type=SyncMarker -trimprefix=Sync + +const ( + _ SyncMarker = iota + + // Public markers (known to go/types importers). + + // Low-level coding markers. 
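+	// Most of these correspond to a primitive Encoder/Decoder method
+	// (Bool, Int64, Uint64, String, Value, Reloc, ...), which emits or
+	// checks the marker before its payload.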
+ SyncEOF + SyncBool + SyncInt64 + SyncUint64 + SyncString + SyncValue + SyncVal + SyncRelocs + SyncReloc + SyncUseReloc + + // Higher-level object and type markers. + SyncPublic + SyncPos + SyncPosBase + SyncObject + SyncObject1 + SyncPkg + SyncPkgDef + SyncMethod + SyncType + SyncTypeIdx + SyncTypeParamNames + SyncSignature + SyncParams + SyncParam + SyncCodeObj + SyncSym + SyncLocalIdent + SyncSelector + + // Private markers (only known to cmd/compile). + SyncPrivate + + SyncFuncExt + SyncVarExt + SyncTypeExt + SyncPragma + + SyncExprList + SyncExprs + SyncExpr + SyncExprType + SyncAssign + SyncOp + SyncFuncLit + SyncCompLit + + SyncDecl + SyncFuncBody + SyncOpenScope + SyncCloseScope + SyncCloseAnotherScope + SyncDeclNames + SyncDeclName + + SyncStmts + SyncBlockStmt + SyncIfStmt + SyncForStmt + SyncSwitchStmt + SyncRangeStmt + SyncCaseClause + SyncCommClause + SyncSelectStmt + SyncDecls + SyncLabeledStmt + SyncUseObjLocal + SyncAddLocal + SyncLinkname + SyncStmt1 + SyncStmtsEnd + SyncLabel + SyncOptLabel +) diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/go/internal/pkgbits/syncmarker_string.go new file mode 100644 index 000000000000..4a5b0ca5f2ff --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/syncmarker_string.go @@ -0,0 +1,89 @@ +// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT. + +package pkgbits + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SyncEOF-1] + _ = x[SyncBool-2] + _ = x[SyncInt64-3] + _ = x[SyncUint64-4] + _ = x[SyncString-5] + _ = x[SyncValue-6] + _ = x[SyncVal-7] + _ = x[SyncRelocs-8] + _ = x[SyncReloc-9] + _ = x[SyncUseReloc-10] + _ = x[SyncPublic-11] + _ = x[SyncPos-12] + _ = x[SyncPosBase-13] + _ = x[SyncObject-14] + _ = x[SyncObject1-15] + _ = x[SyncPkg-16] + _ = x[SyncPkgDef-17] + _ = x[SyncMethod-18] + _ = x[SyncType-19] + _ = x[SyncTypeIdx-20] + _ = x[SyncTypeParamNames-21] + _ = x[SyncSignature-22] + _ = x[SyncParams-23] + _ = x[SyncParam-24] + _ = x[SyncCodeObj-25] + _ = x[SyncSym-26] + _ = x[SyncLocalIdent-27] + _ = x[SyncSelector-28] + _ = x[SyncPrivate-29] + _ = x[SyncFuncExt-30] + _ = x[SyncVarExt-31] + _ = x[SyncTypeExt-32] + _ = x[SyncPragma-33] + _ = x[SyncExprList-34] + _ = x[SyncExprs-35] + _ = x[SyncExpr-36] + _ = x[SyncExprType-37] + _ = x[SyncAssign-38] + _ = x[SyncOp-39] + _ = x[SyncFuncLit-40] + _ = x[SyncCompLit-41] + _ = x[SyncDecl-42] + _ = x[SyncFuncBody-43] + _ = x[SyncOpenScope-44] + _ = x[SyncCloseScope-45] + _ = x[SyncCloseAnotherScope-46] + _ = x[SyncDeclNames-47] + _ = x[SyncDeclName-48] + _ = x[SyncStmts-49] + _ = x[SyncBlockStmt-50] + _ = x[SyncIfStmt-51] + _ = x[SyncForStmt-52] + _ = x[SyncSwitchStmt-53] + _ = x[SyncRangeStmt-54] + _ = x[SyncCaseClause-55] + _ = x[SyncCommClause-56] + _ = x[SyncSelectStmt-57] + _ = x[SyncDecls-58] + _ = x[SyncLabeledStmt-59] + _ = x[SyncUseObjLocal-60] + _ = x[SyncAddLocal-61] + _ = x[SyncLinkname-62] + _ = x[SyncStmt1-63] + _ = x[SyncStmtsEnd-64] + _ = x[SyncLabel-65] + _ = x[SyncOptLabel-66] +} + +const _SyncMarker_name = 
"EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" + +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} + +func (i SyncMarker) String() string { + i -= 1 + if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) { + return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]] +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 4bfe28a51ff5..da4ab89fe63f 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -67,7 +67,6 @@ Most tools should pass their command-line arguments (after any flags) uninterpreted to the loader, so that the loader can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. - */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 0e1e7f11fee8..de881562de1d 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -26,7 +26,6 @@ import ( "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/xerrors" ) // debug controls verbose logging. @@ -303,11 +302,12 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries } dirResponse, err := state.createDriverResponse(pattern) - // If there was an error loading the package, or the package is returned - // with errors, try to load the file as an ad-hoc package. + // If there was an error loading the package, or no packages are returned, + // or the package is returned with errors, try to load the file as an + // ad-hoc package. // Usually the error will appear in a returned package, but may not if we're // in module mode and the ad-hoc is located outside a module. - if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && len(dirResponse.Packages[0].Errors) == 1 { var queryErr error if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { @@ -393,6 +393,8 @@ type jsonPackage struct { CompiledGoFiles []string IgnoredGoFiles []string IgnoredOtherFiles []string + EmbedPatterns []string + EmbedFiles []string CFiles []string CgoFiles []string CXXFiles []string @@ -444,7 +446,11 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse // Run "go list" for complete // information on the specified packages. 
- buf, err := state.invokeGo("list", golistargs(state.cfg, words)...) + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...) if err != nil { return nil, err } @@ -565,6 +571,8 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), + EmbedFiles: absJoin(p.Dir, p.EmbedFiles), + EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), forTest: p.ForTest, depsErrors: p.DepsErrors, @@ -805,17 +813,83 @@ func absJoin(dir string, fileses ...[]string) (res []string) { return res } -func golistargs(cfg *Config, words []string) []string { +func jsonFlag(cfg *Config, goVersion int) string { + if goVersion < 19 { + return "-json" + } + var fields []string + added := make(map[string]bool) + addFields := func(fs ...string) { + for _, f := range fs { + if !added[f] { + added[f] = true + fields = append(fields, f) + } + } + } + addFields("Name", "ImportPath", "Error") // These fields are always needed + if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", + "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", + "SwigFiles", "SwigCXXFiles", "SysoFiles") + if cfg.Tests { + addFields("TestGoFiles", "XTestGoFiles") + } + } + if cfg.Mode&NeedTypes != 0 { + // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, + // even when -compiled isn't passed in. + // TODO(#52435): Should we make the test ask for -compiled, or automatically + // request CompiledGoFiles in certain circumstances? + addFields("Dir", "CompiledGoFiles") + } + if cfg.Mode&NeedCompiledGoFiles != 0 { + addFields("Dir", "CompiledGoFiles", "Export") + } + if cfg.Mode&NeedImports != 0 { + // When imports are requested, DepOnly is used to distinguish between packages + // explicitly requested and transitive imports of those packages. + addFields("DepOnly", "Imports", "ImportMap") + if cfg.Tests { + addFields("TestImports", "XTestImports") + } + } + if cfg.Mode&NeedDeps != 0 { + addFields("DepOnly") + } + if usesExportData(cfg) { + // Request Dir in the unlikely case Export is not absolute. + addFields("Dir", "Export") + } + if cfg.Mode&needInternalForTest != 0 { + addFields("ForTest") + } + if cfg.Mode&needInternalDepsErrors != 0 { + addFields("DepsErrors") + } + if cfg.Mode&NeedModule != 0 { + addFields("Module") + } + if cfg.Mode&NeedEmbedFiles != 0 { + addFields("EmbedFiles") + } + if cfg.Mode&NeedEmbedPatterns != 0 { + addFields("EmbedPatterns") + } + return "-json=" + strings.Join(fields, ",") +} + +func golistargs(cfg *Config, words []string, goVersion int) []string { const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo fullargs := []string{ - "-e", "-json", + "-e", jsonFlag(cfg, goVersion), fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), fmt.Sprintf("-test=%t", cfg.Tests), fmt.Sprintf("-export=%t", usesExportData(cfg)), fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), // go list doesn't let you pass -test and -find together, // probably because you'd just get the TestMain. 
- fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), } fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") @@ -879,7 +953,7 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, if !ok { // Catastrophic error: // - context cancellation - return nil, xerrors.Errorf("couldn't run 'go': %w", err) + return nil, fmt.Errorf("couldn't run 'go': %w", err) } // Old go version? diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 7ea37e7eeac3..5c080d21b54a 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -15,7 +15,7 @@ var allModes = []LoadMode{ NeedCompiledGoFiles, NeedImports, NeedDeps, - NeedExportsFile, + NeedExportFile, NeedTypes, NeedSyntax, NeedTypesInfo, @@ -28,7 +28,7 @@ var modeStrings = []string{ "NeedCompiledGoFiles", "NeedImports", "NeedDeps", - "NeedExportsFile", + "NeedExportFile", "NeedTypes", "NeedSyntax", "NeedTypesInfo", diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 1b5424e78f7a..a93dc6add4d8 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -39,9 +39,6 @@ import ( // Load may return more information than requested. type LoadMode int -// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to -// NeedExportFile to make it consistent with the Package field it's adding. - const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota @@ -59,8 +56,8 @@ const ( // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. NeedDeps - // NeedExportsFile adds ExportFile. - NeedExportsFile + // NeedExportFile adds ExportFile. + NeedExportFile // NeedTypes adds Types, Fset, and IllTyped. NeedTypes @@ -74,12 +71,25 @@ const ( // NeedTypesSizes adds TypesSizes. NeedTypesSizes + // needInternalDepsErrors adds the internal deps errors field for use by gopls. + needInternalDepsErrors + + // needInternalForTest adds the internal forTest field. + // Tests must also be set on the context for this field to be populated. + needInternalForTest + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. // Modifies CompiledGoFiles and Types, and has no effect on its own. typecheckCgo // NeedModule adds Module. NeedModule + + // NeedEmbedFiles adds EmbedFiles. + NeedEmbedFiles + + // NeedEmbedPatterns adds EmbedPatterns. + NeedEmbedPatterns ) const ( @@ -102,6 +112,9 @@ const ( // Deprecated: LoadAllSyntax exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadAllSyntax = LoadSyntax | NeedDeps + + // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + NeedExportsFile = NeedExportFile ) // A Config specifies details about how packages should be loaded. @@ -296,6 +309,14 @@ type Package struct { // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. OtherFiles []string + // EmbedFiles lists the absolute file paths of the package's files + // embedded with go:embed. + EmbedFiles []string + + // EmbedPatterns lists the absolute file patterns of the package's + // files embedded with go:embed. 
+ EmbedPatterns []string + // IgnoredFiles lists source files that are not part of the package // using the current build configuration but that might be part of // the package using other build configurations. @@ -389,6 +410,8 @@ func init() { config.(*Config).modFlag = value } packagesinternal.TypecheckCgo = int(typecheckCgo) + packagesinternal.DepsErrors = int(needInternalDepsErrors) + packagesinternal.ForTest = int(needInternalForTest) } // An Error describes a problem with a package's metadata, syntax, or types. @@ -431,6 +454,8 @@ type flatPackage struct { GoFiles []string `json:",omitempty"` CompiledGoFiles []string `json:",omitempty"` OtherFiles []string `json:",omitempty"` + EmbedFiles []string `json:",omitempty"` + EmbedPatterns []string `json:",omitempty"` IgnoredFiles []string `json:",omitempty"` ExportFile string `json:",omitempty"` Imports map[string]string `json:",omitempty"` @@ -454,6 +479,8 @@ func (p *Package) MarshalJSON() ([]byte, error) { GoFiles: p.GoFiles, CompiledGoFiles: p.CompiledGoFiles, OtherFiles: p.OtherFiles, + EmbedFiles: p.EmbedFiles, + EmbedPatterns: p.EmbedPatterns, IgnoredFiles: p.IgnoredFiles, ExportFile: p.ExportFile, } @@ -481,6 +508,8 @@ func (p *Package) UnmarshalJSON(b []byte) error { GoFiles: flat.GoFiles, CompiledGoFiles: flat.CompiledGoFiles, OtherFiles: flat.OtherFiles, + EmbedFiles: flat.EmbedFiles, + EmbedPatterns: flat.EmbedPatterns, ExportFile: flat.ExportFile, } if len(flat.Imports) > 0 { @@ -614,7 +643,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || // ... or if we need types and the exportData is invalid. We fall back to (incompletely) // typechecking packages from source if they fail to compile. - (ld.Mode&NeedTypes|NeedTypesInfo != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" + (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" lpkg := &loaderPackage{ Package: pkg, needtypes: needtypes, @@ -752,13 +781,19 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { ld.pkgs[i].OtherFiles = nil ld.pkgs[i].IgnoredFiles = nil } + if ld.requestedMode&NeedEmbedFiles == 0 { + ld.pkgs[i].EmbedFiles = nil + } + if ld.requestedMode&NeedEmbedPatterns == 0 { + ld.pkgs[i].EmbedPatterns = nil + } if ld.requestedMode&NeedCompiledGoFiles == 0 { ld.pkgs[i].CompiledGoFiles = nil } if ld.requestedMode&NeedImports == 0 { ld.pkgs[i].Imports = nil } - if ld.requestedMode&NeedExportsFile == 0 { + if ld.requestedMode&NeedExportFile == 0 { ld.pkgs[i].ExportFile = "" } if ld.requestedMode&NeedTypes == 0 { @@ -1053,7 +1088,6 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { // // Because files are scanned in parallel, the token.Pos // positions of the resulting ast.Files are not ordered. -// func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { var wg sync.WaitGroup n := len(filenames) @@ -1097,7 +1131,6 @@ func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { // sameFile returns true if x and y have the same basename and denote // the same file. -// func sameFile(x, y string) bool { if x == y { // It could be the case that y doesn't exist. 
@@ -1210,8 +1243,13 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error if err != nil { return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) } + if _, ok := view["go.shape"]; ok { + // Account for the pseudopackage "go.shape" that gets + // created by generic code. + viewLen++ + } if viewLen != len(view) { - log.Fatalf("Unexpected package creation during export data loading") + log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath) } lpkg.Types = tpkg @@ -1222,17 +1260,8 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error // impliedLoadMode returns loadMode with its dependencies. func impliedLoadMode(loadMode LoadMode) LoadMode { - if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 { - // If NeedTypesInfo, go/packages needs to do typechecking itself so it can - // associate type info with the AST. To do so, we need the export data - // for dependencies, which means we need to ask for the direct dependencies. - // NeedImports is used to ask for the direct dependencies. - loadMode |= NeedImports - } - - if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 { - // With NeedDeps we need to load at least direct dependencies. - // NeedImports is used to ask for the direct dependencies. + if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 { + // All these things require knowing the import graph. loadMode |= NeedImports } @@ -1240,5 +1269,5 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { } func usesExportData(cfg *Config) bool { - return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 + return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 8659a0c5da67..67256dc3974c 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -9,7 +9,6 @@ import ( "bytes" "context" "fmt" - exec "golang.org/x/sys/execabs" "io" "os" "regexp" @@ -18,6 +17,8 @@ import ( "sync" "time" + exec "golang.org/x/sys/execabs" + "golang.org/x/tools/internal/event" ) @@ -131,9 +132,16 @@ type Invocation struct { Verb string Args []string BuildFlags []string - ModFlag string - ModFile string - Overlay string + + // If ModFlag is set, the go command is invoked with -mod=ModFlag. + ModFlag string + + // If ModFile is set, the go command is invoked with -modfile=ModFile. + ModFile string + + // If Overlay is set, the go command is invoked with -overlay=Overlay. + Overlay string + // If CleanEnv is set, the invocation will run only with the environment // in Env, not starting with os.Environ. CleanEnv bool @@ -256,8 +264,10 @@ func cmdDebugStr(cmd *exec.Cmd) string { env := make(map[string]string) for _, kv := range cmd.Env { split := strings.SplitN(kv, "=", 2) - k, v := split[0], split[1] - env[k] = v + if len(split) == 2 { + k, v := split[0], split[1] + env[k] = v + } } var args []string diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go index 5e75bd6d8fa1..2d3d408c0bed 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/vendor.go +++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -38,10 +38,10 @@ var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) // with the supplied context.Context and Invocation. 
The Invocation can contain pre-defined fields, // of which only Verb and Args are modified to run the appropriate Go command. // Inspired by setDefaultBuildMod in modload/init.go -func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { +func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) { mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) if err != nil { - return nil, false, err + return false, nil, err } // We check the GOFLAGS to see if there is anything overridden or not. @@ -49,7 +49,7 @@ func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, inv.Args = []string{"GOFLAGS"} stdout, err := r.Run(ctx, inv) if err != nil { - return nil, false, err + return false, nil, err } goflags := string(bytes.TrimSpace(stdout.Bytes())) matches := modFlagRegexp.FindStringSubmatch(goflags) @@ -57,25 +57,27 @@ func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, if len(matches) != 0 { modFlag = matches[1] } - if modFlag != "" { - // Don't override an explicit '-mod=' argument. - return mainMod, modFlag == "vendor", nil + // Don't override an explicit '-mod=' argument. + if modFlag == "vendor" { + return true, mainMod, nil + } else if modFlag != "" { + return false, nil, nil } if mainMod == nil || !go114 { - return mainMod, false, nil + return false, nil, nil } // Check 1.14's automatic vendor mode. if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { // The Go version is at least 1.14, and a vendor directory exists. // Set -mod=vendor by default. - return mainMod, true, nil + return true, mainMod, nil } } - return mainMod, false, nil + return false, nil, nil } -// getMainModuleAnd114 gets the main module's information and whether the +// getMainModuleAnd114 gets one of the main modules' information and whether the // go command in use is 1.14+. This is the information needed to figure out // if vendoring should be enabled. func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index 9702094c59ed..d9950b1f0bef 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -23,6 +23,8 @@ var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} var TypecheckCgo int +var DepsErrors int // must be set as a LoadMode to call GetDepsErrors +var ForTest int // must be set as a LoadMode to call GetForTest var SetModFlag = func(config interface{}, value string) {} var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index 1222764b6a31..25a1426d30ec 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -16,11 +16,10 @@ // Additionally, this package contains common utilities for working with the // new generic constructs, to supplement the standard library APIs. Notably, // the StructuralTerms API computes a minimal representation of the structural -// restrictions on a type parameter. 
In the future, this API may be available -// from go/types. +// restrictions on a type parameter. + // -// See the example/README.md for a more detailed guide on how to update tools -// to support generics. +// An external version of these APIs is available in the +// golang.org/x/exp/typeparams module. package typeparams import ( @@ -77,3 +76,104 @@ func IsTypeParam(t types.Type) bool { _, ok := t.(*TypeParam) return ok } + +// OriginMethod returns the origin method associated with the method fn. +// For methods on a non-generic receiver base type, this is just +// fn. However, for methods with a generic receiver, OriginMethod returns the +// corresponding method in the method set of the origin type. +// +// As a special case, if fn is not a method (has no receiver), OriginMethod +// returns fn. +func OriginMethod(fn *types.Func) *types.Func { + recv := fn.Type().(*types.Signature).Recv() + if recv == nil { + + return fn + } + base := recv.Type() + p, isPtr := base.(*types.Pointer) + if isPtr { + base = p.Elem() + } + named, isNamed := base.(*types.Named) + if !isNamed { + // Receiver is a *types.Interface. + return fn + } + if ForNamed(named).Len() == 0 { + // Receiver base has no type parameters, so we can avoid the lookup below. + return fn + } + orig := NamedTypeOrigin(named) + gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) + return gfn.(*types.Func) +} + +// GenericAssignableTo is a generalization of types.AssignableTo that +// implements the following rule for uninstantiated generic types: +// +// If V and T are generic named types, then V is considered assignable to T if, +// for every possible instantiation of V[A_1, ..., A_N], the instantiation +// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. +// +// If T has structural constraints, they must be satisfied by V. +// +// For example, consider the following type declarations: +// +// type Interface[T any] interface { +// Accept(T) +// } +// +// type Container[T any] struct { +// Element T +// } +// +// func (c Container[T]) Accept(t T) { c.Element = t } +// +// In this case, GenericAssignableTo reports that instantiations of Container +// are assignable to the corresponding instantiation of Interface. +func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { + // If V and T are not both named, or do not have matching non-empty type + // parameter lists, fall back on types.AssignableTo. + + VN, Vnamed := V.(*types.Named) + TN, Tnamed := T.(*types.Named) + if !Vnamed || !Tnamed { + return types.AssignableTo(V, T) + } + + vtparams := ForNamed(VN) + ttparams := ForNamed(TN) + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 { + return types.AssignableTo(V, T) + } + + // V and T have the same (non-zero) number of type params. Instantiate both + // with the type parameters of V. This must always succeed for V, and will + // succeed for T if and only if the type set of each type parameter of V is a + // subset of the type set of the corresponding type parameter of T, meaning + // that every instantiation of V corresponds to a valid instantiation of T. + + // Minor optimization: ensure we share a context across the two + // instantiations below.
+ if ctxt == nil { + ctxt = NewContext() + } + + var targs []types.Type + for i := 0; i < vtparams.Len(); i++ { + targs = append(targs, vtparams.At(i)) + } + + vinst, err := Instantiate(ctxt, V, targs, true) + if err != nil { + panic("type parameters should satisfy their own constraints") + } + + tinst, err := Instantiate(ctxt, T, targs, true) + if err != nil { + return false + } + + return types.AssignableTo(vinst, tinst) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go new file mode 100644 index 000000000000..993135ec90e8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -0,0 +1,122 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. +func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types, + } + + terms, err := _NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) -> empty type set of interface. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. + ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. + return nil + } + } + return ch +} + +// _NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. 
+// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// _NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, _NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, _NormalTerms returns ErrEmptyTypeSet. +// +// _NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func _NormalTerms(typ types.Type) ([]*Term, error) { + switch typ := typ.(type) { + case *TypeParam: + return StructuralTerms(typ) + case *Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*Term{NewTerm(false, typ)}, nil + } +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index 090f142a5f34..9c631b6512de 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -24,20 +24,22 @@ var ErrEmptyTypeSet = errors.New("empty type set") // Structural type restrictions of a type parameter are created via // non-interface types embedded in its constraint interface (directly, or via a // chain of interface embeddings). For example, in the declaration -// type T[P interface{~int; m()}] int +// +// type T[P interface{~int; m()}] int +// // the structural restriction of the type parameter P is ~int. // // With interface embedding and unions, the specification of structural type // restrictions may be arbitrarily complex. 
For example, consider the // following: // -// type A interface{ ~string|~[]byte } +// type A interface{ ~string|~[]byte } // -// type B interface{ int|string } +// type B interface{ int|string } // -// type C interface { ~string|~int } +// type C interface { ~string|~int } // -// type T[P interface{ A|B; C }] int +// type T[P interface{ A|B; C }] int // // In this example, the structural type restriction of P is ~string|int: A|B // expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go index 10857d504c4f..933106a23dd4 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -97,15 +97,6 @@ func (xl termlist) norm() termlist { return rl } -// If the type set represented by xl is specified by a single (non-𝓤) term, -// structuralType returns that type. Otherwise it returns nil. -func (xl termlist) structuralType() types.Type { - if nl := xl.norm(); len(nl) == 1 { - return nl[0].typ // if nl.isAll() then typ is nil, which is ok - } - return nil -} - // union returns the union xl ∪ yl. func (xl termlist) union(yl termlist) termlist { return append(xl, yl...).norm() diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go index 5fd3fc351561..b4788978ff4b 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go @@ -185,6 +185,11 @@ func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil } // this Go version. type Context struct{} +// NewContext returns a placeholder Context instance. +func NewContext() *Context { + return &Context{} +} + // Instantiate is unsupported on this Go version, and panics. func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { unsupported() diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go index 7470aed8c998..114a36b866bc 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go @@ -140,6 +140,11 @@ func GetInstances(info *types.Info) map[*ast.Ident]Instance { // Context is an alias for types.Context. type Context = types.Context +// NewContext calls types.NewContext. +func NewContext() *Context { + return types.NewContext() +} + // Instantiate calls types.Instantiate. func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { return types.Instantiate(ctxt, typ, targs, validate) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index fa2834e2ab85..d38ee3c27cd6 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -1365,4 +1365,162 @@ const ( // return i // } InvalidGo + + // All codes below were added in Go 1.17. + + /* decl */ + + // BadDecl occurs when a declaration has invalid syntax. + BadDecl + + // RepeatedDecl occurs when an identifier occurs more than once on the left + // hand side of a short variable declaration. 
+ // + // Example: + // func _() { + // x, y, y := 1, 2, 3 + // } + RepeatedDecl + + /* unsafe */ + + // InvalidUnsafeAdd occurs when unsafe.Add is called with a + // length argument that is not of integer type. + // + // Example: + // import "unsafe" + // + // var p unsafe.Pointer + // var _ = unsafe.Add(p, float64(1)) + InvalidUnsafeAdd + + // InvalidUnsafeSlice occurs when unsafe.Slice is called with a + // pointer argument that is not of pointer type or a length argument + // that is not of integer type, negative, or out of bounds. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(x, 1) + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(&x, float64(1)) + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(&x, -1) + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(&x, uint64(1) << 63) + InvalidUnsafeSlice + + // All codes below were added in Go 1.18. + + /* features */ + + // UnsupportedFeature occurs when a language feature is used that is not + // supported at this Go version. + UnsupportedFeature + + /* type params */ + + // NotAGenericType occurs when a non-generic type is used where a generic + // type is expected: in type or function instantiation. + // + // Example: + // type T int + // + // var _ T[int] + NotAGenericType + + // WrongTypeArgCount occurs when a type or function is instantiated with an + // incorrect number of type arguments, including when a generic type or + // function is used without instantiation. + // + // Errors involving failed type inference are assigned other error codes. + // + // Example: + // type T[p any] int + // + // var _ T[int, string] + // + // Example: + // func f[T any]() {} + // + // var x = f + WrongTypeArgCount + + // CannotInferTypeArgs occurs when type or function type argument inference + // fails to infer all type arguments. + // + // Example: + // func f[T any]() {} + // + // func _() { + // f() + // } + // + // Example: + // type N[P, Q any] struct{} + // + // var _ N[int] + CannotInferTypeArgs + + // InvalidTypeArg occurs when a type argument does not satisfy its + // corresponding type parameter constraints. + // + // Example: + // type T[P ~int] struct{} + // + // var _ T[string] + InvalidTypeArg // arguments? InferenceFailed + + // InvalidInstanceCycle occurs when an invalid cycle is detected + // within the instantiation graph. + // + // Example: + // func f[T any]() { f[*T]() } + InvalidInstanceCycle + + // InvalidUnion occurs when an embedded union or approximation element is + // not valid. + // + // Example: + // type _ interface { + // ~int | interface{ m() } + // } + InvalidUnion + + // MisplacedConstraintIface occurs when a constraint-type interface is used + // outside of constraint position. + // + // Example: + // type I interface { ~int } + // + // var _ I + MisplacedConstraintIface + + // InvalidMethodTypeParams occurs when methods have type parameters. + // + // It cannot be encountered with an AST parsed using go/parser. + InvalidMethodTypeParams + + // MisplacedTypeParam occurs when a type parameter is used in a place where + // it is not permitted.
+ // + // Example: + // type T[P any] P + // + // Example: + // type T[P any] struct{ *P } + MisplacedTypeParam ) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go index 3e5842a5f0fc..de90e9515ae5 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go @@ -138,11 +138,25 @@ func _() { _ = x[UnusedResults-128] _ = x[InvalidDefer-129] _ = x[InvalidGo-130] + _ = x[BadDecl-131] + _ = x[RepeatedDecl-132] + _ = x[InvalidUnsafeAdd-133] + _ = x[InvalidUnsafeSlice-134] + _ = x[UnsupportedFeature-135] + _ = x[NotAGenericType-136] + _ = x[WrongTypeArgCount-137] + _ = x[CannotInferTypeArgs-138] + _ = x[InvalidTypeArg-139] + _ = x[InvalidInstanceCycle-140] + _ = x[InvalidUnion-141] + _ = x[MisplacedConstraintIface-142] + _ = x[InvalidMethodTypeParams-143] + _ = x[MisplacedTypeParam-144] } -const _ErrorCode_name = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGo" +const _ErrorCode_name = 
"TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParam" -var _ErrorCode_index = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 215, 231, 250, 258, 274, 292, 309, 327, 351, 359, 374, 390, 408, 425, 440, 447, 458, 481, 496, 508, 519, 534, 548, 563, 578, 591, 600, 614, 629, 640, 655, 664, 680, 700, 718, 737, 749, 768, 787, 803, 820, 839, 853, 864, 879, 892, 907, 923, 937, 953, 968, 985, 1003, 1018, 1028, 1038, 1055, 1077, 1091, 1105, 1125, 1143, 1163, 1181, 1204, 1220, 1235, 1248, 1258, 1270, 1281, 1295, 1308, 1319, 1329, 1344, 1355, 1366, 1379, 1395, 1412, 1436, 1453, 1468, 1478, 1487, 1500, 1516, 1532, 1543, 1558, 1574, 1588, 1604, 1618, 1635, 1655, 1668, 1684, 1698, 1715, 1732, 1749, 1764, 1778, 1792, 1803, 1815, 1828, 1845, 1858, 1869, 1882, 1894, 1903} +var _ErrorCode_index = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 215, 231, 250, 258, 274, 292, 309, 327, 351, 359, 374, 390, 408, 425, 440, 447, 458, 481, 496, 508, 519, 534, 548, 563, 578, 591, 600, 614, 629, 640, 655, 664, 680, 700, 718, 737, 749, 768, 787, 803, 820, 839, 853, 864, 879, 892, 907, 923, 937, 953, 968, 985, 1003, 1018, 1028, 1038, 1055, 1077, 1091, 1105, 1125, 1143, 1163, 1181, 1204, 1220, 1235, 1248, 1258, 1270, 1281, 1295, 1308, 1319, 1329, 1344, 1355, 1366, 1379, 1395, 1412, 1436, 1453, 1468, 1478, 1487, 1500, 1516, 1532, 1543, 1558, 1574, 1588, 1604, 1618, 1635, 1655, 1668, 1684, 1698, 1715, 1732, 1749, 
1764, 1778, 1792, 1803, 1815, 1828, 1845, 1858, 1869, 1882, 1894, 1903, 1910, 1922, 1938, 1956, 1974, 1989, 2006, 2025, 2039, 2059, 2071, 2095, 2118, 2136} func (i ErrorCode) String() string { i -= 1 diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 7c77c2fbc038..ce7d4351b220 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -48,3 +48,5 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, } return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true } + +var SetGoVersion = func(conf *types.Config, version string) bool { return false } diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go new file mode 100644 index 000000000000..a42b072a67d3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go @@ -0,0 +1,19 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package typesinternal + +import ( + "go/types" +) + +func init() { + SetGoVersion = func(conf *types.Config, version string) bool { + conf.GoVersion = version + return true + } +} diff --git a/vendor/golang.org/x/xerrors/doc.go b/vendor/golang.org/x/xerrors/doc.go index eef99d9d54d7..2ef99f5a87bf 100644 --- a/vendor/golang.org/x/xerrors/doc.go +++ b/vendor/golang.org/x/xerrors/doc.go @@ -5,7 +5,8 @@ // Package xerrors implements functions to manipulate errors. // // This package is based on the Go 2 proposal for error values: -// https://golang.org/design/29934-error-values +// +// https://golang.org/design/29934-error-values // // These functions were incorporated into the standard library's errors package // in Go 1.13: diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go index 829862ddf6af..27a5d70bd6ec 100644 --- a/vendor/golang.org/x/xerrors/fmt.go +++ b/vendor/golang.org/x/xerrors/fmt.go @@ -33,6 +33,9 @@ const percentBangString = "%!" // It is invalid to include more than one %w verb or to supply it with an // operand that does not implement the error interface. The %w verb is otherwise // a synonym for %v. +// +// Note that as of Go 1.13, the fmt.Errorf function will do error formatting, +// but it will not capture a stack backtrace. func Errorf(format string, a ...interface{}) error { format = formatPlusW(format) // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter. diff --git a/vendor/golang.org/x/xerrors/wrap.go b/vendor/golang.org/x/xerrors/wrap.go index 9a3b510374ec..9842758ca7c4 100644 --- a/vendor/golang.org/x/xerrors/wrap.go +++ b/vendor/golang.org/x/xerrors/wrap.go @@ -35,6 +35,8 @@ func (e noWrapper) FormatError(p Printer) (next error) { // Unwrap returns the result of calling the Unwrap method on err, if err implements // Unwrap. Otherwise, Unwrap returns nil. +// +// Deprecated: As of Go 1.13, use errors.Unwrap instead. func Unwrap(err error) error { u, ok := err.(Wrapper) if !ok { @@ -47,6 +49,8 @@ func Unwrap(err error) error { // // An error is considered to match a target if it is equal to that target or if // it implements a method Is(error) bool such that Is(target) returns true. +// +// Deprecated: As of Go 1.13, use errors.Is instead. 
func Is(err, target error) bool { if target == nil { return err == target @@ -77,6 +81,8 @@ func Is(err, target error) bool { // // The As method should set the target to its value and return true if err // matches the type to which target points. +// +// Deprecated: As of Go 1.13, use errors.As instead. func As(err error, target interface{}) bool { if target == nil { panic("errors: target cannot be nil") diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json index 844ec6a66417..d73517d78ff9 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json @@ -517,7 +517,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -652,7 +652,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -708,7 +708,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -893,7 +893,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", "required": true, "type": "string" @@ -948,7 +948,7 @@ "parameterOrder": [], "parameters": { "filter": { - "description": "Optional. An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter queries: | Query | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | Equivalent to above. | | NAME:howl | Equivalent to above. 
| | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. | | labels.color:red labels.size:big | The project's label `color` has the value `red` and its label `size` has the value `big`.| | lifecycleState:DELETE_REQUESTED | Only show projects that are pending deletion.| If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). In this case an alternate search index is used which provides more consistent results.", + "description": "Optional. An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter queries: | Query | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | Equivalent to above. | | NAME:howl | Equivalent to above. | | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. | | labels.color:red labels.size:big | The project's label `color` has the value `red` or its label `size` has the value `big`. | | lifecycleState:DELETE_REQUESTED | Only show projects that are pending deletion.| If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). In this case an alternate search index is used which provides more consistent results.", "location": "query", "type": "string" }, @@ -1041,7 +1041,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", "required": true, "type": "string" @@ -1096,7 +1096,7 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", + "description": "REQUIRED: The resource for which the policy detail is being requested. 
See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", "required": true, "type": "string" @@ -1171,7 +1171,7 @@ } } }, - "revision": "20220109", + "revision": "20220901", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { @@ -1186,7 +1186,7 @@ "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -1242,7 +1242,7 @@ "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the principals requesting access for a Cloud Platform resource. 
`members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", + "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. 
If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, @@ -1490,7 +1490,7 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", "id": "Empty", "properties": {}, "type": "object" @@ -2163,7 +2163,7 @@ "properties": { "policy": { "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them." + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them." }, "updateMask": { "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`", @@ -2216,7 +2216,7 @@ "id": "TestIamPermissionsRequest", "properties": { "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as `*` or `storage.*`) are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { "type": "string" }, diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index 31e1da9ac9c0..012a50a87098 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -8,35 +8,35 @@ // // For product documentation, see: https://cloud.google.com/resource-manager // -// Creating a client +// # Creating a client // // Usage example: // -// import "google.golang.org/api/cloudresourcemanager/v1" -// ... 
-// ctx := context.Background() -// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx) +// import "google.golang.org/api/cloudresourcemanager/v1" +// ... +// ctx := context.Background() +// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // -// Other authentication options +// # Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // -// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithScopes(cloudresourcemanager.CloudPlatformReadOnlyScope)) +// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithScopes(cloudresourcemanager.CloudPlatformReadOnlyScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // -// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithAPIKey("AIza...")) +// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package cloudresourcemanager // import "google.golang.org/api/cloudresourcemanager/v1" @@ -95,7 +95,7 @@ const ( // NewService creates a new Service. func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { - scopesOption := option.WithScopes( + scopesOption := internaloption.WithDefaultScopes( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only", ) @@ -247,8 +247,8 @@ func (s *Ancestor) MarshalJSON() ([]byte, error) { // "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ // "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy // enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts -// jose@example.com from DATA_READ logging, and aliya@example.com from -// DATA_WRITE logging. +// `jose@example.com` from DATA_READ logging, and `aliya@example.com` +// from DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. @@ -340,24 +340,29 @@ type Binding struct { // (https://cloud.google.com/iam/help/conditions/resource-policies). Condition *Expr `json:"condition,omitempty"` - // Members: Specifies the principals requesting access for a Cloud - // Platform resource. `members` can have the following values: * + // Members: Specifies the principals requesting access for a Google + // Cloud resource. 
`members` can have the following values: * // `allUsers`: A special identifier that represents anyone who is on the // internet; with or without a Google account. * // `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. * - // `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . * - // `serviceAccount:{emailid}`: An email address that represents a - // service account. For example, - // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An - // email address that represents a Google group. For example, - // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An - // email address (plus unique identifier) representing a user that has - // been recently deleted. For example, - // `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered - // user retains the role in the binding. * + // who is authenticated with a Google account or a service account. Does + // not include identities that come from external identity providers + // (IdPs) through identity federation. * `user:{emailid}`: An email + // address that represents a specific Google account. For example, + // `alice@example.com` . * `serviceAccount:{emailid}`: An email address + // that represents a Google service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * + // `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: + // An identifier for a Kubernetes service account + // (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). + // For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. + // * `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. * + // `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a user that has been recently + // deleted. For example, `alice@example.com?uid=123456789012345678901`. + // If the user is recovered, this value reverts to `user:{emailid}` and + // the recovered user retains the role in the binding. * // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address // (plus unique identifier) representing a service account that has been // recently deleted. For example, @@ -496,7 +501,8 @@ func (s *ClearOrgPolicyRequest) MarshalJSON() ([]byte, error) { } // CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation: -// Metadata describing a long running folder operation +// +// Metadata describing a long running folder operation type CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation struct { // DestinationParent: The resource name of the folder or organization we // are either creating the folder under or moving the folder to. @@ -781,8 +787,7 @@ type DeleteTagValueMetadata struct { // duplicated empty messages in your APIs. A typical example is to use // it as the request or the response type of an API method. For // instance: service Foo { rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); } The JSON representation for `Empty` is -// empty JSON object `{}`. +// (google.protobuf.Empty); } type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. 
@@ -2218,7 +2223,7 @@ func (s *SearchOrganizationsResponse) MarshalJSON() ([]byte, error) { type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the // `resource`. The size of the policy is limited to a few 10s of KB. An - // empty policy is a valid policy but certain Cloud Platform services + // empty policy is a valid policy but certain Google Cloud services // (such as Projects) might reject them. Policy *Policy `json:"policy,omitempty"` @@ -2328,7 +2333,7 @@ func (s *Status) MarshalJSON() ([]byte, error) { // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. - // Permissions with wildcards (such as '*' or 'storage.*') are not + // Permissions with wildcards (such as `*` or `storage.*`) are not // allowed. For more information see IAM Overview // (https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` @@ -2518,17 +2523,17 @@ func (c *FoldersClearOrgPolicyCall) Do(opts ...googleapi.CallOption) (*Empty, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -2589,8 +2594,8 @@ type FoldersGetEffectiveOrgPolicyCall struct { // computed `Policy` across multiple resources. Subtrees of Resource // Manager resource hierarchy with 'under:' prefix will not be expanded. // -// - resource: The name of the resource to start computing the effective -// `Policy`. +// - resource: The name of the resource to start computing the effective +// `Policy`. 
func (r *FoldersService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *FoldersGetEffectiveOrgPolicyCall { c := &FoldersGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2665,17 +2670,17 @@ func (c *FoldersGetEffectiveOrgPolicyCall) Do(opts ...googleapi.CallOption) (*Or if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -2812,17 +2817,17 @@ func (c *FoldersGetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -2958,17 +2963,17 @@ func (c *FoldersListAvailableOrgPolicyConstraintsCall) Do(opts ...googleapi.Call if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListAvailableOrgPolicyConstraintsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -3123,17 +3128,17 @@ func (c *FoldersListOrgPoliciesCall) Do(opts ...googleapi.CallOption) (*ListOrgP if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListOrgPoliciesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -3290,17 +3295,17 @@ func (c *FoldersSetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -3429,17 +3434,17 @@ func (c *LiensCreateCall) Do(opts ...googleapi.CallOption) (*Lien, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Lien{ ServerResponse: googleapi.ServerResponse{ @@ -3558,17 +3563,17 @@ func (c *LiensDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { if res.Body != nil 
{ res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -3708,17 +3713,17 @@ func (c *LiensGetCall) Do(opts ...googleapi.CallOption) (*Lien, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Lien{ ServerResponse: googleapi.ServerResponse{ @@ -3878,17 +3883,17 @@ func (c *LiensListCall) Do(opts ...googleapi.CallOption) (*ListLiensResponse, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListLiensResponse{ ServerResponse: googleapi.ServerResponse{ @@ -4055,17 +4060,17 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -4195,17 +4200,17 @@ func (c *OrganizationsClearOrgPolicyCall) Do(opts ...googleapi.CallOption) (*Emp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -4263,10 +4268,10 @@ type OrganizationsGetCall struct { // Get: Fetches an Organization resource identified by the specified // resource name. // -// - name: The resource name of the Organization to fetch. This is the -// organization's relative path in the API, formatted as -// "organizations/[organizationId]". For example, -// "organizations/1234". +// - name: The resource name of the Organization to fetch. This is the +// organization's relative path in the API, formatted as +// "organizations/[organizationId]". For example, +// "organizations/1234". 
func (r *OrganizationsService) Get(name string) *OrganizationsGetCall { c := &OrganizationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4348,17 +4353,17 @@ func (c *OrganizationsGetCall) Do(opts ...googleapi.CallOption) (*Organization, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Organization{ ServerResponse: googleapi.ServerResponse{ @@ -4417,8 +4422,8 @@ type OrganizationsGetEffectiveOrgPolicyCall struct { // computed `Policy` across multiple resources. Subtrees of Resource // Manager resource hierarchy with 'under:' prefix will not be expanded. // -// - resource: The name of the resource to start computing the effective -// `Policy`. +// - resource: The name of the resource to start computing the effective +// `Policy`. func (r *OrganizationsService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *OrganizationsGetEffectiveOrgPolicyCall { c := &OrganizationsGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4493,17 +4498,17 @@ func (c *OrganizationsGetEffectiveOrgPolicyCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -4566,9 +4571,10 @@ type OrganizationsGetIamPolicyCall struct { // `resourcemanager.organizations.getIamPolicy` on the specified // organization // -// - resource: REQUIRED: The resource for which the policy is being -// requested. See the operation documentation for the appropriate -// value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. func (r *OrganizationsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *OrganizationsGetIamPolicyCall { c := &OrganizationsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4643,17 +4649,17 @@ func (c *OrganizationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -4676,7 +4682,7 @@ func (c *OrganizationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested. 
See the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -4790,17 +4796,17 @@ func (c *OrganizationsGetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -4936,17 +4942,17 @@ func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) Do(opts ...googleap if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListAvailableOrgPolicyConstraintsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5101,17 +5107,17 @@ func (c *OrganizationsListOrgPoliciesCall) Do(opts ...googleapi.CallOption) (*Li if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListOrgPoliciesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5262,17 +5268,17 @@ func (c *OrganizationsSearchCall) Do(opts ...googleapi.CallOption) (*SearchOrgan if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SearchOrganizationsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5346,9 +5352,10 @@ type OrganizationsSetIamPolicyCall struct { // `resourcemanager.organizations.setIamPolicy` on the specified // organization // -// - resource: REQUIRED: The resource for which the policy is being -// specified. See the operation documentation for the appropriate -// value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// specified. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. 
func (r *OrganizationsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *OrganizationsSetIamPolicyCall { c := &OrganizationsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5423,17 +5430,17 @@ func (c *OrganizationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -5456,7 +5463,7 @@ func (c *OrganizationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -5568,17 +5575,17 @@ func (c *OrganizationsSetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -5638,9 +5645,10 @@ type OrganizationsTestIamPermissionsCall struct { // organization's resource name, e.g. "organizations/123". There are no // permissions required for making this API call. // -// - resource: REQUIRED: The resource for which the policy detail is -// being requested. See the operation documentation for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy detail is +// being requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. func (r *OrganizationsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *OrganizationsTestIamPermissionsCall { c := &OrganizationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5715,17 +5723,17 @@ func (c *OrganizationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5748,7 +5756,7 @@ func (c *OrganizationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested. 
See the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -5858,17 +5866,17 @@ func (c *ProjectsClearOrgPolicyCall) Do(opts ...googleapi.CallOption) (*Empty, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -6004,17 +6012,17 @@ func (c *ProjectsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -6140,17 +6148,17 @@ func (c *ProjectsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -6287,17 +6295,17 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Project{ ServerResponse: googleapi.ServerResponse{ @@ -6429,17 +6437,17 @@ func (c *ProjectsGetAncestryCall) Do(opts ...googleapi.CallOption) (*GetAncestry if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &GetAncestryResponse{ ServerResponse: googleapi.ServerResponse{ @@ -6500,8 +6508,8 @@ type ProjectsGetEffectiveOrgPolicyCall struct { // computed `Policy` across multiple resources. Subtrees of Resource // Manager resource hierarchy with 'under:' prefix will not be expanded. // -// - resource: The name of the resource to start computing the effective -// `Policy`. +// - resource: The name of the resource to start computing the effective +// `Policy`. 
func (r *ProjectsService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *ProjectsGetEffectiveOrgPolicyCall { c := &ProjectsGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6576,17 +6584,17 @@ func (c *ProjectsGetEffectiveOrgPolicyCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -6650,9 +6658,10 @@ type ProjectsGetIamPolicyCall struct { // structure and identification, see Resource Names // (https://cloud.google.com/apis/design/resource_names). // -// - resource: REQUIRED: The resource for which the policy is being -// requested. See the operation documentation for the appropriate -// value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. func (r *ProjectsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsGetIamPolicyCall { c := &ProjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6727,17 +6736,17 @@ func (c *ProjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -6760,7 +6769,7 @@ func (c *ProjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", // "location": "path", // "required": true, // "type": "string" @@ -6873,17 +6882,17 @@ func (c *ProjectsGetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -6971,7 +6980,7 @@ func (r *ProjectsService) List() *ProjectsListCall { // labels.color:* | The project has the label `color`. | | // labels.color:red | The project's label `color` has the value `red`. 
| // | labels.color:red labels.size:big | The project's label `color` has -// the value `red` and its label `size` has the value `big`.| | +// the value `red` or its label `size` has the value `big`. | | // lifecycleState:DELETE_REQUESTED | Only show projects that are pending // deletion.| If no filter is specified, the call will return projects // for which the user has the `resourcemanager.projects.get` permission. @@ -7076,17 +7085,17 @@ func (c *ProjectsListCall) Do(opts ...googleapi.CallOption) (*ListProjectsRespon if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListProjectsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -7107,7 +7116,7 @@ func (c *ProjectsListCall) Do(opts ...googleapi.CallOption) (*ListProjectsRespon // "parameterOrder": [], // "parameters": { // "filter": { - // "description": "Optional. An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter queries: | Query | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | Equivalent to above. | | NAME:howl | Equivalent to above. | | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. | | labels.color:red labels.size:big | The project's label `color` has the value `red` and its label `size` has the value `big`.| | lifecycleState:DELETE_REQUESTED | Only show projects that are pending deletion.| If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). In this case an alternate search index is used which provides more consistent results.", + // "description": "Optional. An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter queries: | Query | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | Equivalent to above. | | NAME:howl | Equivalent to above. | | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. 
| | labels.color:red labels.size:big | The project's label `color` has the value `red` or its label `size` has the value `big`. | | lifecycleState:DELETE_REQUESTED | Only show projects that are pending deletion.| If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). In this case an alternate search index is used which provides more consistent results.", // "location": "query", // "type": "string" // }, @@ -7247,17 +7256,17 @@ func (c *ProjectsListAvailableOrgPolicyConstraintsCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListAvailableOrgPolicyConstraintsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -7412,17 +7421,17 @@ func (c *ProjectsListOrgPoliciesCall) Do(opts ...googleapi.CallOption) (*ListOrg if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListOrgPoliciesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -7536,9 +7545,10 @@ type ProjectsSetIamPolicyCall struct { // organization inaccessible. Authorization requires the Google IAM // permission `resourcemanager.projects.setIamPolicy` on the project // -// - resource: REQUIRED: The resource for which the policy is being -// specified. See the operation documentation for the appropriate -// value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// specified. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. func (r *ProjectsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsSetIamPolicyCall { c := &ProjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7613,17 +7623,17 @@ func (c *ProjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -7646,7 +7656,7 @@ func (c *ProjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified. 
See the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", // "location": "path", // "required": true, // "type": "string" @@ -7757,17 +7767,17 @@ func (c *ProjectsSetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -7828,9 +7838,10 @@ type ProjectsTestIamPermissionsCall struct { // (https://cloud.google.com/apis/design/resource_names). There are no // permissions required for making this API call. // -// - resource: REQUIRED: The resource for which the policy detail is -// being requested. See the operation documentation for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy detail is +// being requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. func (r *ProjectsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsTestIamPermissionsCall { c := &ProjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7905,17 +7916,17 @@ func (c *ProjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -7938,7 +7949,7 @@ func (c *ProjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test // ], // "parameters": { // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.", + // "description": "REQUIRED: The resource for which the policy detail is being requested. 
See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", // "location": "path", // "required": true, // "type": "string" @@ -8051,17 +8062,17 @@ func (c *ProjectsUndeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -8194,17 +8205,17 @@ func (c *ProjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Project, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Project{ ServerResponse: googleapi.ServerResponse{ diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 0ae0c29b72c9..9a475a243af6 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -135,7 +135,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -239,7 +239,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -305,7 +305,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -498,7 +498,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -549,6 +549,56 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "setLabels": { + "description": "Sets the labels on an Address. 
To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.addresses.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -564,7 +614,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -757,7 +807,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -1113,7 +1163,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -1338,7 +1388,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -1544,72 +1594,22 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview .", - "flatPath": "projects/{project}/global/backendServices", - "httpMethod": "POST", - "id": "compute.backendServices.insert", - "parameterOrder": [ - "project" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/global/backendServices", - "request": { - "$ref": "BackendService" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "description": "Retrieves the list of BackendService resources available to the specified project.", - "flatPath": "projects/{project}/global/backendServices", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/global/backendServices/{resource}/getIamPolicy", "httpMethod": "GET", - "id": "compute.backendServices.list", + "id": "compute.backendServices.getIamPolicy", "parameterOrder": [ - "project" + "project", + "resource" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", "location": "query", - "minimum": "0", "type": "integer" }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -1617,163 +1617,33 @@ "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" - } - }, - "path": "projects/{project}/global/backendServices", - "response": { - "$ref": "BackendServiceList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "description": "Patches the specified BackendService resource with the data included in the request. For more information, see Backend services overview. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/global/backendServices/{backendService}", - "httpMethod": "PATCH", - "id": "compute.backendServices.patch", - "parameterOrder": [ - "project", - "backendService" - ], - "parameters": { - "backendService": { - "description": "Name of the BackendService resource to patch.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/global/backendServices/{backendService}", - "request": { - "$ref": "BackendService" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setEdgeSecurityPolicy": { - "description": "Sets the edge security policy for the specified backend service.", - "flatPath": "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy", - "httpMethod": "POST", - "id": "compute.backendServices.setEdgeSecurityPolicy", - "parameterOrder": [ - "project", - "backendService" - ], - "parameters": { - "backendService": { - "description": "Name of the BackendService resource to which the edge security policy should be set. The name should conform to RFC1035.", - "location": "path", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy", - "request": { - "$ref": "SecurityPolicyReference" - }, + "path": "projects/{project}/global/backendServices/{resource}/getIamPolicy", "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setSecurityPolicy": { - "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", - "flatPath": "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy", + "insert": { + "description": "Creates a BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview .", + "flatPath": "projects/{project}/global/backendServices", "httpMethod": "POST", - "id": "compute.backendServices.setSecurityPolicy", - "parameterOrder": [ - "project", - "backendService" - ], - "parameters": { - "backendService": { - "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", - "location": "path", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy", - "request": { - "$ref": "SecurityPolicyReference" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "update": { - "description": "Updates the specified BackendService resource with the data included in the request. 
For more information, see Backend services overview.", - "flatPath": "projects/{project}/global/backendServices/{backendService}", - "httpMethod": "PUT", - "id": "compute.backendServices.update", + "id": "compute.backendServices.insert", "parameterOrder": [ - "project", - "backendService" + "project" ], "parameters": { - "backendService": { - "description": "Name of the BackendService resource to update.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -1787,7 +1657,7 @@ "type": "string" } }, - "path": "projects/{project}/global/backendServices/{backendService}", + "path": "projects/{project}/global/backendServices", "request": { "$ref": "BackendService" }, @@ -1798,30 +1668,288 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "diskTypes": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of disk types.", - "flatPath": "projects/{project}/aggregated/diskTypes", + }, + "list": { + "description": "Retrieves the list of BackendService resources available to the specified project.", + "flatPath": "projects/{project}/global/backendServices", "httpMethod": "GET", - "id": "compute.diskTypes.aggregatedList", + "id": "compute.backendServices.list", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/global/backendServices", + "response": { + "$ref": "BackendServiceList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patches the specified BackendService resource with the data included in the request. For more information, see Backend services overview. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/global/backendServices/{backendService}", + "httpMethod": "PATCH", + "id": "compute.backendServices.patch", + "parameterOrder": [ + "project", + "backendService" + ], + "parameters": { + "backendService": { + "description": "Name of the BackendService resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/backendServices/{backendService}", + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setEdgeSecurityPolicy": { + "description": "Sets the edge security policy for the specified backend service.", + "flatPath": "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy", + "httpMethod": "POST", + "id": "compute.backendServices.setEdgeSecurityPolicy", + "parameterOrder": [ + "project", + "backendService" + ], + "parameters": { + "backendService": { + "description": "Name of the BackendService resource to which the edge security policy should be set. 
The name should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy", + "request": { + "$ref": "SecurityPolicyReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "flatPath": "projects/{project}/global/backendServices/{resource}/setIamPolicy", + "httpMethod": "POST", + "id": "compute.backendServices.setIamPolicy", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/backendServices/{resource}/setIamPolicy", + "request": { + "$ref": "GlobalSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setSecurityPolicy": { + "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", + "flatPath": "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy", + "httpMethod": "POST", + "id": "compute.backendServices.setSecurityPolicy", + "parameterOrder": [ + "project", + "backendService" + ], + "parameters": { + "backendService": { + "description": "Name of the BackendService resource to which the security policy should be set. 
The name should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy", + "request": { + "$ref": "SecurityPolicyReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "description": "Updates the specified BackendService resource with the data included in the request. For more information, see Backend services overview.", + "flatPath": "projects/{project}/global/backendServices/{backendService}", + "httpMethod": "PUT", + "id": "compute.backendServices.update", + "parameterOrder": [ + "project", + "backendService" + ], + "parameters": { + "backendService": { + "description": "Name of the BackendService resource to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/backendServices/{backendService}", + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "diskTypes": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of disk types.", + "flatPath": "projects/{project}/aggregated/diskTypes", + "httpMethod": "GET", + "id": "compute.diskTypes.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -1917,7 +2045,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -2033,7 +2161,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -2102,7 +2230,7 @@ "type": "string" }, "guestFlush": { - "description": "[Input Only] Whether to attempt an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + "description": "[Input Only] Whether to attempt an application consistent snapshot by informing the OS to prepare for the snapshot process.", "location": "query", "type": "boolean" }, @@ -2334,7 +2462,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -2749,7 +2877,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -3174,7 +3302,7 @@ "id": "compute.firewallPolicies.list", "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -3603,7 +3731,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -3746,7 +3874,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -3939,7 +4067,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -4263,7 +4391,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -4307,6 +4435,43 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "setLabels": { + "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.globalAddresses.setLabels", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/addresses/{resource}/setLabels", + "request": { + "$ref": "GlobalSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -4430,7 +4595,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -4798,7 +4963,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -4854,7 +5019,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -4919,7 +5084,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -5045,7 +5210,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -5196,7 +5361,7 @@ "id": "compute.globalOrganizationOperations.list", "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -5361,7 +5526,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -5462,7 +5627,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -5630,7 +5795,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -5881,7 +6046,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -6132,7 +6297,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -6556,7 +6721,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -6817,7 +6982,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -6911,7 +7076,7 @@ ] }, "createInstances": { - "description": "Creates instances with per-instance configs in this managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. 
You must separately verify the status of the creating or actions with the listmanagedinstances method.", + "description": "Creates instances with per-instance configurations in this managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances", "httpMethod": "POST", "id": "compute.instanceGroupManagers.createInstances", @@ -7052,7 +7217,7 @@ ] }, "deletePerInstanceConfigs": { - "description": "Deletes selected per-instance configs for the managed instance group.", + "description": "Deletes selected per-instance configurations for the managed instance group.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", "httpMethod": "POST", "id": "compute.instanceGroupManagers.deletePerInstanceConfigs", @@ -7187,7 +7352,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
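A hedged sketch of the createInstances call described above, again against the generated Go client and reusing the imports and *compute.Service from the previous sketch; the project, zone, and group names are placeholders, and each per-instance configuration's Name becomes the name of the instance to create:

func createTwoInstances(ctx context.Context, svc *compute.Service) error {
	req := &compute.InstanceGroupManagersCreateInstancesRequest{
		Instances: []*compute.PerInstanceConfig{
			{Name: "vm-a"}, // per-instance configuration name = instance name
			{Name: "vm-b"},
		},
	}
	op, err := svc.InstanceGroupManagers.
		CreateInstances("my-project", "us-central1-a", "my-mig", req).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	// DONE on this operation only means the request was accepted; check the
	// individual instances separately with listManagedInstances.
	fmt.Println("createInstances operation:", op.Name, op.Status)
	return nil
}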
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -7250,7 +7415,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -7308,7 +7473,7 @@ ] }, "listManagedInstances": { - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported.", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listManagedInstances", @@ -7319,7 +7484,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
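A corresponding sketch of listManagedInstances, the call the descriptions above point to for verifying per-instance progress; identifiers are placeholders and the field names follow the generated Go client:

func reportManagedInstanceActions(ctx context.Context, svc *compute.Service) error {
	resp, err := svc.InstanceGroupManagers.
		ListManagedInstances("my-project", "us-central1-a", "my-mig").
		Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, mi := range resp.ManagedInstances {
		// currentAction is NONE once the group has finished acting on the
		// instance; otherwise CREATING, RECREATING, DELETING, and so on.
		fmt.Printf("%s -> %s\n", mi.Instance, mi.CurrentAction)
	}
	return nil
}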
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -7377,7 +7542,7 @@ ] }, "listPerInstanceConfigs": { - "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", + "description": "Lists all of the per-instance configurations defined for the managed instance group. The orderBy query parameter is not supported.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listPerInstanceConfigs", @@ -7388,7 +7553,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -7494,7 +7659,7 @@ ] }, "patchPerInstanceConfigs": { - "description": "Inserts or patches per-instance configs for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", + "description": "Inserts or patches per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", "httpMethod": "POST", "id": "compute.instanceGroupManagers.patchPerInstanceConfigs", @@ -7739,7 +7904,7 @@ ] }, "updatePerInstanceConfigs": { - "description": "Inserts or updates per-instance configs for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", + "description": "Inserts or updates per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", "httpMethod": "POST", "id": "compute.instanceGroupManagers.updatePerInstanceConfigs", @@ -7848,7 +8013,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -8036,7 +8201,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -8088,7 +8253,7 @@ ] }, "listInstances": { - "description": "Lists the instances in the specified instance group. The orderBy query parameter is not supported.", + "description": "Lists the instances in the specified instance group. The orderBy query parameter is not supported. 
The filter query parameter is supported, but only for expressions that use `eq` (equal) or `ne` (not equal) operators.", "flatPath": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", "httpMethod": "POST", "id": "compute.instanceGroups.listInstances", @@ -8099,7 +8264,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
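A short sketch of instanceGroups.listInstances as described above; per that description the filter sticks to the `ne` (regular-expression) form, and the filter string itself, like the project, zone, and group names, is only illustrative:

func listRunningGroupInstances(ctx context.Context, svc *compute.Service) error {
	req := &compute.InstanceGroupsListInstancesRequest{InstanceState: "RUNNING"}
	return svc.InstanceGroups.
		ListInstances("my-project", "us-central1-a", "my-group", req).
		Filter("instance ne .*canary.*"). // only eq/ne regular-expression filters here
		Pages(ctx, func(page *compute.InstanceGroupsListInstances) error {
			for _, item := range page.Items {
				fmt.Println(item.Instance, item.Status)
			}
			return nil
		})
}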
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -8418,7 +8583,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -8659,7 +8824,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -8765,7 +8930,7 @@ ] }, "bulkInsert": { - "description": "Creates multiple instances. Count specifies the number of instances to create.", + "description": "Creates multiple instances. Count specifies the number of instances to create. For more information, see About bulk creation of VMs.", "flatPath": "projects/{project}/zones/{zone}/instances/bulkInsert", "httpMethod": "POST", "id": "compute.instances.bulkInsert", @@ -9370,7 +9535,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -9434,7 +9599,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -9544,7 +9709,7 @@ ] }, "reset": { - "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance.", + "description": "Performs a reset on the instance. This is a hard reset. The VM does not do a graceful shutdown. For more information, see Resetting an instance.", "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/reset", "httpMethod": "POST", "id": "compute.instances.reset", @@ -10087,7 +10252,7 @@ ] }, "setScheduling": { - "description": "Sets an instance's scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. For more information about setting scheduling options for a VM, see Set VM availability policies.", + "description": "Sets an instance's scheduling options. 
You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. For more information about setting scheduling options for a VM, see Set VM host maintenance policy.", "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", "httpMethod": "POST", "id": "compute.instances.setScheduling", @@ -10436,6 +10601,11 @@ "instance" ], "parameters": { + "discardLocalSsd": { + "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "location": "query", + "type": "boolean" + }, "instance": { "description": "Name of the instance resource to stop.", "location": "path", @@ -10483,6 +10653,11 @@ "instance" ], "parameters": { + "discardLocalSsd": { + "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "location": "query", + "type": "boolean" + }, "instance": { "description": "Name of the instance resource to suspend.", "location": "path", @@ -10877,7 +11052,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -11075,7 +11250,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -11176,6 +11351,56 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "setLabels": { + "description": "Sets the labels on an InterconnectAttachment. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.interconnectAttachments.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -11226,7 +11451,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -11276,7 +11501,7 @@ "interconnects": { "methods": { "delete": { - "description": "Deletes the specified interconnect.", + "description": "Deletes the specified Interconnect.", "flatPath": "projects/{project}/global/interconnects/{interconnect}", "httpMethod": "DELETE", "id": "compute.interconnects.delete", @@ -11315,7 +11540,7 @@ ] }, "get": { - "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", + "description": "Returns the specified Interconnect. Get a list of available Interconnects by making a list() request.", "flatPath": "projects/{project}/global/interconnects/{interconnect}", "httpMethod": "GET", "id": "compute.interconnects.get", @@ -11350,7 +11575,7 @@ ] }, "getDiagnostics": { - "description": "Returns the interconnectDiagnostics for the specified interconnect.", + "description": "Returns the interconnectDiagnostics for the specified Interconnect.", "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", "httpMethod": "GET", "id": "compute.interconnects.getDiagnostics", @@ -11385,7 +11610,7 @@ ] }, "insert": { - "description": "Creates a Interconnect in the specified project using the data included in the request.", + "description": "Creates an Interconnect in the specified project using the data included in the request.", "flatPath": "projects/{project}/global/interconnects", "httpMethod": "POST", "id": "compute.interconnects.insert", @@ -11419,7 +11644,7 @@ ] }, "list": { - "description": "Retrieves the list of interconnect available to the specified project.", + "description": "Retrieves the list of Interconnects available to the specified project.", "flatPath": "projects/{project}/global/interconnects", "httpMethod": "GET", "id": "compute.interconnects.list", @@ -11428,7 +11653,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -11474,7 +11699,7 @@ ] }, "patch": { - "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates the specified Interconnect with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "flatPath": "projects/{project}/global/interconnects/{interconnect}", "httpMethod": "PATCH", "id": "compute.interconnects.patch", @@ -11514,6 +11739,43 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "setLabels": { + "description": "Sets the labels on an Interconnect. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.interconnects.setLabels", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/interconnects/{resource}/setLabels", + "request": { + "$ref": "GlobalSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -11758,7 +12020,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -12046,7 +12308,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -12180,7 +12442,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -12284,7 +12546,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -12338,19 +12600,19 @@ } } }, - "networkEndpointGroups": { + "networkAttachments": { "methods": { "aggregatedList": { - "description": "Retrieves the list of network endpoint groups and sorts them by zone.", - "flatPath": "projects/{project}/aggregated/networkEndpointGroups", + "description": "Retrieves the list of all NetworkAttachment resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/networkAttachments", "httpMethod": "GET", - "id": "compute.networkEndpointGroups.aggregatedList", + "id": "compute.networkAttachments.aggregatedList", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -12390,9 +12652,9 @@ "type": "boolean" } }, - "path": "projects/{project}/aggregated/networkEndpointGroups", + "path": "projects/{project}/aggregated/networkAttachments", "response": { - "$ref": "NetworkEndpointGroupAggregatedList" + "$ref": "NetworkAttachmentAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -12400,20 +12662,21 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "attachNetworkEndpoints": { - "description": "Attach a list of network endpoints to the specified network endpoint group.", - "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", - "httpMethod": "POST", - "id": "compute.networkEndpointGroups.attachNetworkEndpoints", + "delete": { + "description": "Deletes the specified NetworkAttachment in the given scope", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + "httpMethod": "DELETE", + "id": "compute.networkAttachments.delete", "parameterOrder": [ "project", - "zone", - "networkEndpointGroup" + "region", + "networkAttachment" ], "parameters": { - "networkEndpointGroup": { - "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", + "networkAttachment": { + "description": "Name of the NetworkAttachment resource to delete.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12424,67 +12687,20 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "zone": { - "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", - "location": "path", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", - "request": { - "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "delete": { - "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", - "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", - "httpMethod": "DELETE", - "id": "compute.networkEndpointGroups.delete", - "parameterOrder": [ - "project", - "zone", - "networkEndpointGroup" - ], - "parameters": { - "networkEndpointGroup": { - "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", - "location": "path", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", + "region": { + "description": "Name of the region of this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", "location": "query", "type": "string" - }, - "zone": { - "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", - "location": "path", - "required": true, - "type": "string" } }, - "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", "response": { "$ref": "Operation" }, @@ -12493,20 +12709,21 @@ "https://www.googleapis.com/auth/compute" ] }, - "detachNetworkEndpoints": { - "description": "Detach a list of network endpoints from the specified network endpoint group.", - "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", - "httpMethod": "POST", - "id": "compute.networkEndpointGroups.detachNetworkEndpoints", + "get": { + "description": "Returns the specified NetworkAttachment resource in the given scope.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + "httpMethod": "GET", + "id": "compute.networkAttachments.get", "parameterOrder": [ "project", - "zone", - "networkEndpointGroup" + "region", + "networkAttachment" ], "parameters": { - "networkEndpointGroup": { - "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", + "networkAttachment": { + "description": "Name of the NetworkAttachment resource to return.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12517,46 +12734,40 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "zone": { - "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + "region": { + "description": "Name of the region of this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", - "request": { - "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" - }, + "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", "response": { - "$ref": "Operation" + "$ref": "NetworkAttachment" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "get": { - "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", - "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", "httpMethod": "GET", - "id": "compute.networkEndpointGroups.get", + "id": "compute.networkAttachments.getIamPolicy", "parameterOrder": [ "project", - "zone", - "networkEndpointGroup" + "region", + "resource" ], "parameters": { - "networkEndpointGroup": { - "description": "The name of the network endpoint group. It should comply with RFC1035.", - "location": "path", - "required": true, - "type": "string" + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" }, "project": { "description": "Project ID for this request.", @@ -12565,16 +12776,24 @@ "required": true, "type": "string" }, - "zone": { - "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + "region": { + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", "response": { - "$ref": "NetworkEndpointGroup" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -12583,13 +12802,13 @@ ] }, "insert": { - "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", - "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", + "description": "Creates a NetworkAttachment in the specified project in the given scope using the parameters that are included in the request.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments", "httpMethod": "POST", - "id": "compute.networkEndpointGroups.insert", + "id": "compute.networkAttachments.insert", "parameterOrder": [ "project", - "zone" + "region" ], "parameters": { "project": { @@ -12599,21 +12818,22 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "zone": { - "description": "The name of the zone where you want to create the network endpoint group. It should comply with RFC1035.", + "region": { + "description": "Name of the region of this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/zones/{zone}/networkEndpointGroups", + "path": "projects/{project}/regions/{region}/networkAttachments", "request": { - "$ref": "NetworkEndpointGroup" + "$ref": "NetworkAttachment" }, "response": { "$ref": "Operation" @@ -12624,17 +12844,17 @@ ] }, "list": { - "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", - "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", + "description": "Lists the NetworkAttachments for a project in the given scope.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments", "httpMethod": "GET", - "id": "compute.networkEndpointGroups.list", + "id": "compute.networkAttachments.list", "parameterOrder": [ "project", - "zone" + "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -12663,21 +12883,22 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region of this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" - }, - "zone": { - "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", - "location": "path", - "required": true, - "type": "string" } }, - "path": "projects/{project}/zones/{zone}/networkEndpointGroups", + "path": "projects/{project}/regions/{region}/networkAttachments", "response": { - "$ref": "NetworkEndpointGroupList" + "$ref": "NetworkAttachmentList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -12685,46 +12906,17 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "listNetworkEndpoints": { - "description": "Lists the network endpoints in the specified network endpoint group.", - "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", "httpMethod": "POST", - "id": "compute.networkEndpointGroups.listNetworkEndpoints", + "id": "compute.networkAttachments.setIamPolicy", "parameterOrder": [ "project", - "zone", - "networkEndpointGroup" + "region", + "resource" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "networkEndpointGroup": { - "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", - "location": "path", - "required": true, - "type": "string" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -12732,39 +12924,41 @@ "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" }, - "zone": { - "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", "request": { - "$ref": "NetworkEndpointGroupsListEndpointsRequest" + "$ref": "RegionSetPolicyRequest" }, "response": { - "$ref": "NetworkEndpointGroupsListNetworkEndpoints" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", - "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", "httpMethod": "POST", - "id": "compute.networkEndpointGroups.testIamPermissions", + "id": "compute.networkAttachments.testIamPermissions", "parameterOrder": [ "project", - "zone", + "region", "resource" ], "parameters": { @@ -12775,22 +12969,22 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "region": { + "description": "The name of the region for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "zone": { - "description": "The name of the zone for this request.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", + "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", "request": { "$ref": "TestPermissionsRequest" }, @@ -12805,62 +12999,81 @@ } } }, - "networks": { + "networkEdgeSecurityServices": { "methods": { - "addPeering": { - "description": "Adds a peering to the specified network.", - "flatPath": "projects/{project}/global/networks/{network}/addPeering", - "httpMethod": "POST", - "id": "compute.networks.addPeering", + "aggregatedList": { + "description": "Retrieves the list of all NetworkEdgeSecurityService resources 
available to the specified project.", + "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", + "httpMethod": "GET", + "id": "compute.networkEdgeSecurityServices.aggregatedList", "parameterOrder": [ - "project", - "network" + "project" ], "parameters": { - "network": { - "description": "Name of the network resource to add peering to.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", "type": "string" }, "project": { - "description": "Project ID for this request.", + "description": "Name of the project scoping this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", - "type": "string" + "type": "boolean" } }, - "path": "projects/{project}/global/networks/{network}/addPeering", - "request": { - "$ref": "NetworksAddPeeringRequest" - }, + "path": "projects/{project}/aggregated/networkEdgeSecurityServices", "response": { - "$ref": "Operation" + "$ref": "NetworkEdgeSecurityServiceAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "delete": { - "description": "Deletes the specified network.", - "flatPath": "projects/{project}/global/networks/{network}", + "description": "Deletes the specified service.", + "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", "httpMethod": "DELETE", - "id": "compute.networks.delete", + "id": "compute.networkEdgeSecurityServices.delete", "parameterOrder": [ "project", - "network" + "region", + "networkEdgeSecurityService" ], "parameters": { - "network": { - "description": "Name of the network to delete.", + "networkEdgeSecurityService": { + "description": "Name of the network edge security service to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -12873,13 +13086,20 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/global/networks/{network}", + "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", "response": { "$ref": "Operation" }, @@ -12889,17 +13109,18 @@ ] }, "get": { - "description": "Returns the specified network. 
Gets a list of available networks by making a list() request.", - "flatPath": "projects/{project}/global/networks/{network}", + "description": "Gets a specified NetworkEdgeSecurityService.", + "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", "httpMethod": "GET", - "id": "compute.networks.get", + "id": "compute.networkEdgeSecurityServices.get", "parameterOrder": [ "project", - "network" + "region", + "networkEdgeSecurityService" ], "parameters": { - "network": { - "description": "Name of the network to return.", + "networkEdgeSecurityService": { + "description": "Name of the network edge security service to get.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -12911,46 +13132,18 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - } - }, - "path": "projects/{project}/global/networks/{network}", - "response": { - "$ref": "Network" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getEffectiveFirewalls": { - "description": "Returns the effective firewalls on a given network.", - "flatPath": "projects/{project}/global/networks/{network}/getEffectiveFirewalls", - "httpMethod": "GET", - "id": "compute.networks.getEffectiveFirewalls", - "parameterOrder": [ - "project", - "network" - ], - "parameters": { - "network": { - "description": "Name of the network for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" }, - "project": { - "description": "Project ID for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/global/networks/{network}/getEffectiveFirewalls", + "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", "response": { - "$ref": "NetworksGetEffectiveFirewallsResponse" + "$ref": "NetworkEdgeSecurityService" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -12959,12 +13152,13 @@ ] }, "insert": { - "description": "Creates a network in the specified project using the data included in the request.", - "flatPath": "projects/{project}/global/networks", + "description": "Creates a new service in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", "httpMethod": "POST", - "id": "compute.networks.insert", + "id": "compute.networkEdgeSecurityServices.insert", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "project": { @@ -12974,15 +13168,27 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "validateOnly": { + "description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/global/networks", + "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", "request": { - "$ref": "Network" + "$ref": "NetworkEdgeSecurityService" }, "response": { "$ref": "Operation" @@ -12992,36 +13198,27 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves the list of networks available to the specified project.", - "flatPath": "projects/{project}/global/networks", - "httpMethod": "GET", - "id": "compute.networks.list", + "patch": { + "description": "Patches the specified policy with the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "httpMethod": "PATCH", + "id": "compute.networkEdgeSecurityServices.patch", "parameterOrder": [ - "project" + "project", + "region", + "networkEdgeSecurityService" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. 
Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", + "networkEdgeSecurityService": { + "description": "Name of the network edge security service to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "paths": { "location": "query", + "repeated": true, "type": "string" }, "project": { @@ -13031,49 +13228,59 @@ "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" + }, + "updateMask": { + "description": "Indicates fields to be updated as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/global/networks", + "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "request": { + "$ref": "NetworkEdgeSecurityService" + }, "response": { - "$ref": "NetworkList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - }, - "listPeeringRoutes": { - "description": "Lists the peering routes exchanged over peering connection.", - "flatPath": "projects/{project}/global/networks/{network}/listPeeringRoutes", + } + } + }, + "networkEndpointGroups": { + "methods": { + "aggregatedList": { + "description": "Retrieves the list of network endpoint groups and sorts them by zone.", + "flatPath": "projects/{project}/aggregated/networkEndpointGroups", "httpMethod": "GET", - "id": "compute.networks.listPeeringRoutes", + "id": "compute.networkEndpointGroups.aggregatedList", "parameterOrder": [ - "project", - "network" + "project" ], "parameters": { - "direction": { - "description": "The direction of the exchanged routes.", - "enum": [ - "INCOMING", - "OUTGOING" - ], - "enumDescriptions": [ - "For routes exported from peer network.", - "For routes exported from local network." - ], + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", "location": "query", - "type": "string" + "type": "boolean" }, "maxResults": { "default": "500", @@ -13083,13 +13290,6 @@ "minimum": "0", "type": "integer" }, - "network": { - "description": "Name of the network for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "orderBy": { "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", @@ -13100,11 +13300,6 @@ "location": "query", "type": "string" }, - "peeringName": { - "description": "The response will show routes exchanged over the given peering connection.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -13112,20 +13307,15 @@ "required": true, "type": "string" }, - "region": { - "description": "The region of the request. The response will include all subnet routes, static routes and dynamic routes in the region.", - "location": "query", - "type": "string" - }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/global/networks/{network}/listPeeringRoutes", + "path": "projects/{project}/aggregated/networkEndpointGroups", "response": { - "$ref": "ExchangedPeeringRoutesList" + "$ref": "NetworkEndpointGroupAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13133,20 +13323,20 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", - "flatPath": "projects/{project}/global/networks/{network}", - "httpMethod": "PATCH", - "id": "compute.networks.patch", + "attachNetworkEndpoints": { + "description": "Attach a list of network endpoints to the specified network endpoint group.", + "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + "httpMethod": "POST", + "id": "compute.networkEndpointGroups.attachNetworkEndpoints", "parameterOrder": [ "project", - "network" + "zone", + "networkEndpointGroup" ], "parameters": { - "network": { - "description": "Name of the network to update.", + "networkEndpointGroup": { + "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13161,11 +13351,17 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "zone": { + "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" } }, - "path": "projects/{project}/global/networks/{network}", + "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", "request": { - "$ref": "Network" + "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" }, "response": { "$ref": "Operation" @@ -13175,20 +13371,20 @@ "https://www.googleapis.com/auth/compute" ] }, - "removePeering": { - "description": "Removes a peering from the specified network.", - "flatPath": "projects/{project}/global/networks/{network}/removePeering", - "httpMethod": "POST", - "id": "compute.networks.removePeering", + "delete": { + "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", + "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + "httpMethod": "DELETE", + "id": "compute.networkEndpointGroups.delete", "parameterOrder": [ "project", - "network" + "zone", + "networkEndpointGroup" ], "parameters": { - "network": { - "description": "Name of the network resource to remove peering from.", + "networkEndpointGroup": { + "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13203,12 +13399,15 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "zone": { + "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" } }, - "path": "projects/{project}/global/networks/{network}/removePeering", - "request": { - "$ref": "NetworksRemovePeeringRequest" - }, + "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", "response": { "$ref": "Operation" }, @@ -13217,20 +13416,20 @@ "https://www.googleapis.com/auth/compute" ] }, - "switchToCustomMode": { - "description": "Switches the network mode from auto subnet mode to custom subnet mode.", - "flatPath": "projects/{project}/global/networks/{network}/switchToCustomMode", + "detachNetworkEndpoints": { + "description": "Detach a list of network endpoints from the specified network endpoint group.", + "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", "httpMethod": "POST", - "id": "compute.networks.switchToCustomMode", + "id": "compute.networkEndpointGroups.detachNetworkEndpoints", "parameterOrder": [ "project", - "network" + "zone", + "networkEndpointGroup" ], "parameters": { - "network": { - "description": "Name of the network to be updated.", + "networkEndpointGroup": { + "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13245,9 +13444,18 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "zone": { + "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" } }, - "path": "projects/{project}/global/networks/{network}/switchToCustomMode", + "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + "request": { + "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" + }, "response": { "$ref": "Operation" }, @@ -13256,20 +13464,20 @@ "https://www.googleapis.com/auth/compute" ] }, - "updatePeering": { - "description": "Updates the specified network peering with the data included in the request. You can only modify the NetworkPeering.export_custom_routes field and the NetworkPeering.import_custom_routes field.", - "flatPath": "projects/{project}/global/networks/{network}/updatePeering", - "httpMethod": "PATCH", - "id": "compute.networks.updatePeering", + "get": { + "description": "Returns the specified network endpoint group. 
Gets a list of available network endpoint groups by making a list() request.", + "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + "httpMethod": "GET", + "id": "compute.networkEndpointGroups.get", "parameterOrder": [ "project", - "network" + "zone", + "networkEndpointGroup" ], "parameters": { - "network": { - "description": "Name of the network resource which the updated peering is belonging to.", + "networkEndpointGroup": { + "description": "The name of the network endpoint group. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13280,46 +13488,33 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "zone": { + "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, "type": "string" } }, - "path": "projects/{project}/global/networks/{network}/updatePeering", - "request": { - "$ref": "NetworksUpdatePeeringRequest" - }, + "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", "response": { - "$ref": "Operation" + "$ref": "NetworkEndpointGroup" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "nodeGroups": { - "methods": { - "addNodes": { - "description": "Adds specified number of nodes to the node group.", - "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", + }, + "insert": { + "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", "httpMethod": "POST", - "id": "compute.nodeGroups.addNodes", + "id": "compute.networkEndpointGroups.insert", "parameterOrder": [ "project", - "zone", - "nodeGroup" + "zone" ], "parameters": { - "nodeGroup": { - "description": "Name of the NodeGroup resource.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -13333,16 +13528,15 @@ "type": "string" }, "zone": { - "description": "The name of the zone for this request.", + "description": "The name of the zone where you want to create the network endpoint group. 
It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", + "path": "projects/{project}/zones/{zone}/networkEndpointGroups", "request": { - "$ref": "NodeGroupsAddNodesRequest" + "$ref": "NetworkEndpointGroup" }, "response": { "$ref": "Operation" @@ -13352,25 +13546,21 @@ "https://www.googleapis.com/auth/compute" ] }, - "aggregatedList": { - "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group.", - "flatPath": "projects/{project}/aggregated/nodeGroups", + "list": { + "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", + "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", "httpMethod": "GET", - "id": "compute.nodeGroups.aggregatedList", + "id": "compute.networkEndpointGroups.list", "parameterOrder": [ - "project" + "project", + "zone" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -13400,11 +13590,17 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" - } - }, - "path": "projects/{project}/aggregated/nodeGroups", - "response": { - "$ref": "NodeGroupAggregatedList" + }, + "zone": { + "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/networkEndpointGroups", + "response": { + "$ref": "NetworkEndpointGroupList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13412,24 +13608,46 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified NodeGroup resource.", - "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", - "httpMethod": "DELETE", - "id": "compute.nodeGroups.delete", + "listNetworkEndpoints": { + "description": "Lists the network endpoints in the specified network endpoint group.", + "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + "httpMethod": "POST", + "id": "compute.networkEndpointGroups.listNetworkEndpoints", "parameterOrder": [ "project", "zone", - "nodeGroup" + "networkEndpointGroup" ], "parameters": { - "nodeGroup": { - "description": "Name of the NodeGroup resource to delete.", + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "networkEndpointGroup": { + "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -13437,46 +13655,42 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", - "type": "string" + "type": "boolean" }, "zone": { - "description": "The name of the zone for this request.", + "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", + "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + "request": { + "$ref": "NetworkEndpointGroupsListEndpointsRequest" + }, "response": { - "$ref": "Operation" + "$ref": "NetworkEndpointGroupsListNetworkEndpoints" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "deleteNodes": { - "description": "Deletes specified nodes from the node group.", - "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", "httpMethod": "POST", - "id": "compute.nodeGroups.deleteNodes", + "id": "compute.networkEndpointGroups.testIamPermissions", "parameterOrder": [ "project", "zone", - "nodeGroup" + "resource" ], "parameters": { - "nodeGroup": { - "description": "Name of the NodeGroup resource whose nodes will be deleted.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -13484,9 +13698,11 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" }, "zone": { @@ -13497,31 +13713,35 @@ "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", + "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", "request": { - "$ref": "NodeGroupsDeleteNodesRequest" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "get": { - "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. 
Use nodeGroups.listNodes instead.", - "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", - "httpMethod": "GET", - "id": "compute.nodeGroups.get", + } + } + }, + "networkFirewallPolicies": { + "methods": { + "addAssociation": { + "description": "Inserts an association for the specified firewall policy.", + "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation", + "httpMethod": "POST", + "id": "compute.networkFirewallPolicies.addAssociation", "parameterOrder": [ "project", - "zone", - "nodeGroup" + "firewallPolicy" ], "parameters": { - "nodeGroup": { - "description": "Name of the node group to return.", + "firewallPolicy": { + "description": "Name of the firewall policy to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -13534,37 +13754,54 @@ "required": true, "type": "string" }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "replaceExistingAssociation": { + "description": "Indicates whether or not to replace it if an association of the attachment already exists. This is false by default, in which case an error will be returned if an association already exists.", + "location": "query", + "type": "boolean" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", + "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation", + "request": { + "$ref": "FirewallPolicyAssociation" + }, "response": { - "$ref": "NodeGroup" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", - "httpMethod": "GET", - "id": "compute.nodeGroups.getIamPolicy", + "addRule": { + "description": "Inserts a rule into a firewall policy.", + "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule", + "httpMethod": "POST", + "id": "compute.networkFirewallPolicies.addRule", "parameterOrder": [ "project", - "zone", - "resource" + "firewallPolicy" ], "parameters": { - "optionsRequestedPolicyVersion": { - "description": "Requested IAM Policy version.", + "firewallPolicy": { + "description": "Name of the firewall policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "maxPriority": { + "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. This field is exclusive with rule.priority.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "minPriority": { + "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. This field is exclusive with rule.priority.", "format": "int32", "location": "query", "type": "integer" @@ -13576,48 +13813,40 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", + "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule", + "request": { + "$ref": "FirewallPolicyRule" + }, "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a NodeGroup resource in the specified project using the data included in the request.", - "flatPath": "projects/{project}/zones/{zone}/nodeGroups", + "cloneRules": { + "description": "Copies rules to the specified firewall policy.", + "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/cloneRules", "httpMethod": "POST", - "id": "compute.nodeGroups.insert", + "id": "compute.networkFirewallPolicies.cloneRules", "parameterOrder": [ "project", - "zone", - "initialNodeCount" + "firewallPolicy" ], "parameters": { - "initialNodeCount": { - "description": "Initial count of nodes in the node group.", - "format": "int32", - "location": "query", + "firewallPolicy": { + "description": "Name of the firewall policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, - "type": "integer" + "type": "string" }, "project": { "description": "Project ID for this request.", @@ -13631,18 +13860,13 @@ "location": "query", "type": "string" }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "sourceFirewallPolicy": { + "description": "The firewall policy from which to copy rules.", + "location": "query", "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeGroups", - "request": { - "$ref": "NodeGroup" - }, + "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/cloneRules", "response": { "$ref": "Operation" }, @@ -13651,37 +13875,21 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group.", - "flatPath": "projects/{project}/zones/{zone}/nodeGroups", - "httpMethod": "GET", - "id": "compute.nodeGroups.list", + "delete": { + "description": "Deletes the specified policy.", + "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}", + "httpMethod": "DELETE", + "id": "compute.networkFirewallPolicies.delete", "parameterOrder": [ "project", - "zone" + "firewallPolicy" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", + "firewallPolicy": { + "description": "Name of the firewall policy to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" }, "project": { @@ -13691,93 +13899,49 @@ "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeGroups", + "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}", "response": { - "$ref": "NodeGroupList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "listNodes": { - "description": "Lists nodes in the node group.", - "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", - "httpMethod": "POST", - "id": "compute.nodeGroups.listNodes", + "get": { + "description": "Returns the specified network firewall policy.", + "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}", + "httpMethod": "GET", + "id": "compute.networkFirewallPolicies.get", "parameterOrder": [ "project", - "zone", - "nodeGroup" + "firewallPolicy" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "nodeGroup": { - "description": "Name of the NodeGroup resource whose nodes you want to list.", + "firewallPolicy": { + "description": "Name of the firewall policy to get.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", + "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}", "response": { - "$ref": "NodeGroupsListNodes" + "$ref": "FirewallPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13785,67 +13949,62 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Updates the specified node group.", - "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", - "httpMethod": "PATCH", - "id": "compute.nodeGroups.patch", + "getAssociation": { + "description": "Gets an association with the specified name.", + "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/getAssociation", + "httpMethod": "GET", + "id": "compute.networkFirewallPolicies.getAssociation", "parameterOrder": [ "project", - "zone", - "nodeGroup" + "firewallPolicy" ], "parameters": { - "nodeGroup": { - "description": "Name of the NodeGroup resource to update.", + "firewallPolicy": { + "description": "Name of the firewall policy to which the queried association belongs.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "name": { + "description": "The name of the association to get from the firewall policy.", "location": "query", "type": "string" }, - "zone": { - "description": "The name of the zone for this request.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", - "request": { - "$ref": "NodeGroup" - }, + "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/getAssociation", "response": { - "$ref": "Operation" + "$ref": "FirewallPolicyAssociation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", - "httpMethod": "POST", - "id": "compute.nodeGroups.setIamPolicy", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/global/firewallPolicies/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.networkFirewallPolicies.getIamPolicy", "parameterOrder": [ "project", - "zone", "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -13856,89 +14015,69 @@ "resource": { "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", - "request": { - "$ref": "ZoneSetPolicyRequest" - }, + "path": "projects/{project}/global/firewallPolicies/{resource}/getIamPolicy", "response": { "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setNodeTemplate": { - "description": "Updates the node template of the node group.", - "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", - "httpMethod": "POST", - "id": "compute.nodeGroups.setNodeTemplate", + "getRule": { + "description": "Gets a rule of the specified priority.", + "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/getRule", + "httpMethod": "GET", + "id": "compute.networkFirewallPolicies.getRule", "parameterOrder": [ "project", - "zone", - "nodeGroup" + "firewallPolicy" ], "parameters": { - "nodeGroup": { - "description": "Name of the NodeGroup resource to update.", + "firewallPolicy": { + "description": "Name of the firewall policy to which the queried rule belongs.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, + "priority": { + "description": "The priority of the rule to get from the firewall policy.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", - "request": { - "$ref": "NodeGroupsSetNodeTemplateRequest" - }, + "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/getRule", "response": { - "$ref": "Operation" + "$ref": "FirewallPolicyRule" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", + "insert": { + "description": "Creates a new policy in the specified project using the data included in the request.", + "flatPath": "projects/{project}/global/firewallPolicies", "httpMethod": "POST", - "id": "compute.nodeGroups.testIamPermissions", + "id": "compute.networkFirewallPolicies.insert", "parameterOrder": [ - "project", - "zone", - "resource" + "project" ], "parameters": { "project": { @@ -13948,57 +14087,38 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", + "path": "projects/{project}/global/firewallPolicies", "request": { - "$ref": "TestPermissionsRequest" + "$ref": "FirewallPolicy" }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - } - } - }, - "nodeTemplates": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of node templates.", - "flatPath": "projects/{project}/aggregated/nodeTemplates", + }, + "list": { + "description": "Lists all the policies that have been configured for the specified project.", + "flatPath": "projects/{project}/global/firewallPolicies", "httpMethod": "GET", - "id": "compute.nodeTemplates.aggregatedList", + "id": "compute.networkFirewallPolicies.list", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", @@ -14030,9 +14150,9 @@ "type": "boolean" } }, - "path": "projects/{project}/aggregated/nodeTemplates", + "path": "projects/{project}/global/firewallPolicies", "response": { - "$ref": "NodeTemplateAggregatedList" + "$ref": "FirewallPolicyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14040,19 +14160,18 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified NodeTemplate resource.", - "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", - "httpMethod": "DELETE", - "id": "compute.nodeTemplates.delete", + "patch": { + "description": "Patches the specified policy with the data included in the request.", + "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}", + "httpMethod": "PATCH", + "id": "compute.networkFirewallPolicies.patch", "parameterOrder": [ "project", - "region", - "nodeTemplate" + "firewallPolicy" ], "parameters": { - "nodeTemplate": { - "description": "Name of the NodeTemplate resource to delete.", + "firewallPolicy": { + "description": "Name of the firewall policy to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -14065,20 +14184,16 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}", + "request": { + "$ref": "FirewallPolicy" + }, "response": { "$ref": "Operation" }, @@ -14087,62 +14202,25 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified node template. 
Gets a list of available node templates by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", - "httpMethod": "GET", - "id": "compute.nodeTemplates.get", + "patchRule": { + "description": "Patches a rule of the specified priority.", + "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/patchRule", + "httpMethod": "POST", + "id": "compute.networkFirewallPolicies.patchRule", "parameterOrder": [ "project", - "region", - "nodeTemplate" + "firewallPolicy" ], "parameters": { - "nodeTemplate": { - "description": "Name of the node template to return.", + "firewallPolicy": { + "description": "Name of the firewall policy to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", - "response": { - "$ref": "NodeTemplate" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", - "httpMethod": "GET", - "id": "compute.nodeTemplates.getIamPolicy", - "parameterOrder": [ - "project", - "region", - "resource" - ], - "parameters": { - "optionsRequestedPolicyVersion": { - "description": "Requested IAM Policy version.", + "priority": { + "description": "The priority of the rule to patch.", "format": "int32", "location": "query", "type": "integer" @@ -14154,52 +14232,50 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", + "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/patchRule", + "request": { + "$ref": "FirewallPolicyRule" + }, "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a NodeTemplate resource in the specified project using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/nodeTemplates", + "removeAssociation": { + "description": "Removes an association for the specified firewall policy.", + "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/removeAssociation", "httpMethod": "POST", - "id": "compute.nodeTemplates.insert", + "id": "compute.networkFirewallPolicies.removeAssociation", "parameterOrder": [ "project", - "region" + "firewallPolicy" ], "parameters": { - "project": { - "description": "Project ID for this request.", + "firewallPolicy": { + "description": "Name of the firewall policy to update.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", + "name": { + "description": "Name for the attachment that will be removed.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, @@ -14209,10 +14285,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/nodeTemplates", - "request": { - "$ref": "NodeTemplate" - }, + "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/removeAssociation", "response": { "$ref": "Operation" }, @@ -14221,39 +14294,29 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves a list of node templates available to the specified project.", - "flatPath": "projects/{project}/regions/{region}/nodeTemplates", - "httpMethod": "GET", - "id": "compute.nodeTemplates.list", + "removeRule": { + "description": "Deletes a rule of the specified priority.", + "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/removeRule", + "httpMethod": "POST", + "id": "compute.networkFirewallPolicies.removeRule", "parameterOrder": [ "project", - "region" + "firewallPolicy" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", + "firewallPolicy": { + "description": "Name of the firewall policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", + "priority": { + "description": "The priority of the rule to remove from the firewall policy.", + "format": "int32", "location": "query", - "minimum": "0", "type": "integer" }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -14261,37 +14324,28 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/regions/{region}/nodeTemplates", + "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/removeRule", "response": { - "$ref": "NodeTemplateList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", + "flatPath": "projects/{project}/global/firewallPolicies/{resource}/setIamPolicy", "httpMethod": "POST", - "id": "compute.nodeTemplates.setIamPolicy", + "id": "compute.networkFirewallPolicies.setIamPolicy", "parameterOrder": [ "project", - "region", "resource" ], "parameters": { @@ -14302,24 +14356,17 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "resource": { "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", + "path": "projects/{project}/global/firewallPolicies/{resource}/setIamPolicy", "request": { - "$ref": "RegionSetPolicyRequest" + "$ref": "GlobalSetPolicyRequest" }, "response": { "$ref": "Policy" @@ -14331,12 +14378,11 @@ }, "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", - "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", + "flatPath": "projects/{project}/global/firewallPolicies/{resource}/testIamPermissions", "httpMethod": "POST", - "id": "compute.nodeTemplates.testIamPermissions", + "id": "compute.networkFirewallPolicies.testIamPermissions", "parameterOrder": [ "project", - "region", "resource" ], "parameters": { @@ -14347,22 +14393,15 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "resource": { "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", + "path": "projects/{project}/global/firewallPolicies/{resource}/testIamPermissions", "request": { "$ref": 
"TestPermissionsRequest" }, @@ -14377,44 +14416,66 @@ } } }, - "nodeTypes": { + "networks": { "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of node types.", - "flatPath": "projects/{project}/aggregated/nodeTypes", - "httpMethod": "GET", - "id": "compute.nodeTypes.aggregatedList", + "addPeering": { + "description": "Adds a peering to the specified network.", + "flatPath": "projects/{project}/global/networks/{network}/addPeering", + "httpMethod": "POST", + "id": "compute.networks.addPeering", "parameterOrder": [ - "project" + "project", + "network" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", + "network": { + "description": "Name of the network resource to add peering to.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. 
You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + } + }, + "path": "projects/{project}/global/networks/{network}/addPeering", + "request": { + "$ref": "NetworksAddPeeringRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "delete": { + "description": "Deletes the specified network.", + "flatPath": "projects/{project}/global/networks/{network}", + "httpMethod": "DELETE", + "id": "compute.networks.delete", + "parameterOrder": [ + "project", + "network" + ], + "parameters": { + "network": { + "description": "Name of the network to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" }, "project": { "description": "Project ID for this request.", @@ -14423,35 +14484,33 @@ "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/aggregated/nodeTypes", + "path": "projects/{project}/global/networks/{network}", "response": { - "$ref": "NodeTypeAggregatedList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "get": { - "description": "Returns the specified node type. Gets a list of available node types by making a list() request.", - "flatPath": "projects/{project}/zones/{zone}/nodeTypes/{nodeType}", + "description": "Returns the specified network. Gets a list of available networks by making a list() request.", + "flatPath": "projects/{project}/global/networks/{network}", "httpMethod": "GET", - "id": "compute.nodeTypes.get", + "id": "compute.networks.get", "parameterOrder": [ "project", - "zone", - "nodeType" + "network" ], "parameters": { - "nodeType": { - "description": "Name of the node type to return.", + "network": { + "description": "Name of the network to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -14463,18 +14522,46 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", + } + }, + "path": "projects/{project}/global/networks/{network}", + "response": { + "$ref": "Network" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getEffectiveFirewalls": { + "description": "Returns the effective firewalls on a given network.", + "flatPath": "projects/{project}/global/networks/{network}/getEffectiveFirewalls", + "httpMethod": "GET", + "id": "compute.networks.getEffectiveFirewalls", + "parameterOrder": [ + "project", + "network" + ], + "parameters": { + "network": { + "description": "Name of the network for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeTypes/{nodeType}", + "path": "projects/{project}/global/networks/{network}/getEffectiveFirewalls", "response": { - "$ref": "NodeType" + "$ref": "NetworksGetEffectiveFirewallsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14482,18 +14569,51 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "insert": { + "description": "Creates a network in the specified project using the data included in the request.", + "flatPath": "projects/{project}/global/networks", + "httpMethod": "POST", + "id": "compute.networks.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/networks", + "request": { + "$ref": "Network" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "list": { - "description": "Retrieves a list of node types available to the specified project.", - "flatPath": "projects/{project}/zones/{zone}/nodeTypes", + "description": "Retrieves the list of networks available to the specified project.", + "flatPath": "projects/{project}/global/networks", "httpMethod": "GET", - "id": "compute.nodeTypes.list", + "id": "compute.networks.list", "parameterOrder": [ - "project", - "zone" + "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -14526,47 +14646,45 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "projects/{project}/zones/{zone}/nodeTypes", + "path": "projects/{project}/global/networks", "response": { - "$ref": "NodeTypeList" + "$ref": "NetworkList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "packetMirrorings": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of packetMirrorings.", - "flatPath": "projects/{project}/aggregated/packetMirrorings", + }, + "listPeeringRoutes": { + "description": "Lists the peering routes exchanged over peering connection.", + "flatPath": "projects/{project}/global/networks/{network}/listPeeringRoutes", "httpMethod": "GET", - "id": "compute.packetMirrorings.aggregatedList", + "id": "compute.networks.listPeeringRoutes", "parameterOrder": [ - "project" + "project", + "network" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "direction": { + "description": "The direction of the exchanged routes.", + "enum": [ + "INCOMING", + "OUTGOING" + ], + "enumDescriptions": [ + "For routes exported from peer network.", + "For routes exported from local network." + ], "location": "query", "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", - "type": "boolean" + "type": "string" }, "maxResults": { "default": "500", @@ -14576,6 +14694,13 @@ "minimum": "0", "type": "integer" }, + "network": { + "description": "Name of the network for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "orderBy": { "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", @@ -14586,6 +14711,11 @@ "location": "query", "type": "string" }, + "peeringName": { + "description": "The response will show routes exchanged over the given peering connection.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -14593,15 +14723,20 @@ "required": true, "type": "string" }, + "region": { + "description": "The region of the request. The response will include all subnet routes, static routes and dynamic routes in the region.", + "location": "query", + "type": "string" + }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/aggregated/packetMirrorings", + "path": "projects/{project}/global/networks/{network}/listPeeringRoutes", "response": { - "$ref": "PacketMirroringAggregatedList" + "$ref": "ExchangedPeeringRoutesList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14609,19 +14744,18 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified PacketMirroring resource.", - "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", - "httpMethod": "DELETE", - "id": "compute.packetMirrorings.delete", + "patch": { + "description": "Patches the specified network with the data included in the request. 
Only the following fields can be modified: routingConfig.routingMode.", + "flatPath": "projects/{project}/global/networks/{network}", + "httpMethod": "PATCH", + "id": "compute.networks.patch", "parameterOrder": [ "project", - "region", - "packetMirroring" + "network" ], "parameters": { - "packetMirroring": { - "description": "Name of the PacketMirroring resource to delete.", + "network": { + "description": "Name of the network to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -14634,20 +14768,16 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", + "path": "projects/{project}/global/networks/{network}", + "request": { + "$ref": "Network" + }, "response": { "$ref": "Operation" }, @@ -14656,19 +14786,18 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified PacketMirroring resource.", - "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", - "httpMethod": "GET", - "id": "compute.packetMirrorings.get", + "removePeering": { + "description": "Removes a peering from the specified network.", + "flatPath": "projects/{project}/global/networks/{network}/removePeering", + "httpMethod": "POST", + "id": "compute.networks.removePeering", "parameterOrder": [ "project", - "region", - "packetMirroring" + "network" ], "parameters": { - "packetMirroring": { - "description": "Name of the PacketMirroring resource to return.", + "network": { + "description": "Name of the network resource to remove peering from.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -14681,45 +14810,45 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", + "path": "projects/{project}/global/networks/{network}/removePeering", + "request": { + "$ref": "NetworksRemovePeeringRequest" + }, "response": { - "$ref": "PacketMirroring" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a PacketMirroring resource in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/packetMirrorings", + "switchToCustomMode": { + "description": "Switches the network mode from auto subnet mode to custom subnet mode.", + "flatPath": "projects/{project}/global/networks/{network}/switchToCustomMode", "httpMethod": "POST", - "id": "compute.packetMirrorings.insert", + "id": "compute.networks.switchToCustomMode", "parameterOrder": [ "project", - "region" + "network" ], "parameters": { - "project": { - "description": "Project ID for this request.", + "network": { + "description": "Name of the network to be updated.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, @@ -14729,10 +14858,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/packetMirrorings", - "request": { - "$ref": "PacketMirroring" - }, + "path": "projects/{project}/global/networks/{network}/switchToCustomMode", "response": { "$ref": "Operation" }, @@ -14741,82 +14867,18 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves a list of PacketMirroring resources available to the specified project and region.", - "flatPath": "projects/{project}/regions/{region}/packetMirrorings", - "httpMethod": "GET", - "id": "compute.packetMirrorings.list", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" - } - }, - "path": "projects/{project}/regions/{region}/packetMirrorings", - "response": { - "$ref": "PacketMirroringList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "description": "Patches the specified PacketMirroring resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", + "updatePeering": { + "description": "Updates the specified network peering with the data included in the request. 
You can only modify the NetworkPeering.export_custom_routes field and the NetworkPeering.import_custom_routes field.", + "flatPath": "projects/{project}/global/networks/{network}/updatePeering", "httpMethod": "PATCH", - "id": "compute.packetMirrorings.patch", + "id": "compute.networks.updatePeering", "parameterOrder": [ "project", - "region", - "packetMirroring" + "network" ], "parameters": { - "packetMirroring": { - "description": "Name of the PacketMirroring resource to patch.", + "network": { + "description": "Name of the network resource which the updated peering is belonging to.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -14829,22 +14891,15 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", + "path": "projects/{project}/global/networks/{network}/updatePeering", "request": { - "$ref": "PacketMirroring" + "$ref": "NetworksUpdatePeeringRequest" }, "response": { "$ref": "Operation" @@ -14853,18 +14908,29 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions", + } + } + }, + "nodeGroups": { + "methods": { + "addNodes": { + "description": "Adds specified number of nodes to the node group.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", "httpMethod": "POST", - "id": "compute.packetMirrorings.testIamPermissions", + "id": "compute.nodeGroups.addNodes", "parameterOrder": [ "project", - "region", - "resource" + "zone", + "nodeGroup" ], "parameters": { + "nodeGroup": { + "description": "Name of the NodeGroup resource.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -14872,47 +14938,68 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "zone": { + "description": "The name of the zone for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions", + "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", "request": { - "$ref": "TestPermissionsRequest" + "$ref": "NodeGroupsAddNodesRequest" }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - } - } - }, - "projects": { - "methods": { - "disableXpnHost": { - "description": "Disable this project as a shared VPC host project.", - "flatPath": "projects/{project}/disableXpnHost", - "httpMethod": "POST", - "id": "compute.projects.disableXpnHost", + }, + "aggregatedList": { + "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group.", + "flatPath": "projects/{project}/aggregated/nodeGroups", + "httpMethod": "GET", + "id": "compute.nodeGroups.aggregatedList", "parameterOrder": [ "project" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -14920,30 +15007,40 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", - "type": "string" + "type": "boolean" } }, - "path": "projects/{project}/disableXpnHost", + "path": "projects/{project}/aggregated/nodeGroups", "response": { - "$ref": "Operation" + "$ref": "NodeGroupAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "disableXpnResource": { - "description": "Disable a service resource (also known as service project) associated with this host project.", - "flatPath": "projects/{project}/disableXpnResource", - "httpMethod": "POST", - "id": "compute.projects.disableXpnResource", + "delete": { + "description": "Deletes the specified NodeGroup resource.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", + "httpMethod": "DELETE", + "id": "compute.nodeGroups.delete", "parameterOrder": [ - "project" + "project", + "zone", + "nodeGroup" ], "parameters": { + "nodeGroup": { + "description": "Name of the NodeGroup resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -14955,12 +15052,16 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "projects/{project}/disableXpnResource", - "request": { - "$ref": "ProjectsDisableXpnResourceRequest" - }, + "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", "response": { "$ref": "Operation" }, @@ -14969,15 +15070,24 @@ "https://www.googleapis.com/auth/compute" ] }, - "enableXpnHost": { - "description": "Enable this project as a shared VPC host project.", - "flatPath": "projects/{project}/enableXpnHost", + "deleteNodes": { + "description": "Deletes specified nodes from the node group.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", "httpMethod": "POST", - "id": "compute.projects.enableXpnHost", + "id": "compute.nodeGroups.deleteNodes", "parameterOrder": [ - "project" + "project", + "zone", + "nodeGroup" ], "parameters": { + "nodeGroup": { + "description": "Name of the NodeGroup resource whose nodes will be deleted.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -14989,9 +15099,19 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "projects/{project}/enableXpnHost", + "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", + "request": { + "$ref": "NodeGroupsDeleteNodesRequest" + }, "response": { "$ref": "Operation" }, @@ -15000,15 +15120,24 @@ "https://www.googleapis.com/auth/compute" ] }, - "enableXpnResource": { - "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", - "flatPath": "projects/{project}/enableXpnResource", - "httpMethod": "POST", - "id": "compute.projects.enableXpnResource", + "get": { + "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. 
Use nodeGroups.listNodes instead.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", + "httpMethod": "GET", + "id": "compute.nodeGroups.get", "parameterOrder": [ - "project" + "project", + "zone", + "nodeGroup" ], "parameters": { + "nodeGroup": { + "description": "Name of the node group to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -15016,44 +15145,66 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" } }, - "path": "projects/{project}/enableXpnResource", - "request": { - "$ref": "ProjectsEnableXpnResourceRequest" - }, + "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", "response": { - "$ref": "Operation" + "$ref": "NodeGroup" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "get": { - "description": "Returns the specified Project resource. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", - "flatPath": "projects/{project}", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", "httpMethod": "GET", - "id": "compute.projects.get", + "id": "compute.nodeGroups.getIamPolicy", "parameterOrder": [ - "project" + "project", + "zone", + "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "projects/{project}", + "path": "projects/{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", "response": { - "$ref": "Project" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15061,43 +15212,68 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getXpnHost": { - "description": "Gets the shared VPC host project that this project links to. May be empty if no link exists.", - "flatPath": "projects/{project}/getXpnHost", - "httpMethod": "GET", - "id": "compute.projects.getXpnHost", + "insert": { + "description": "Creates a NodeGroup resource in the specified project using the data included in the request.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups", + "httpMethod": "POST", + "id": "compute.nodeGroups.insert", "parameterOrder": [ - "project" + "project", + "zone", + "initialNodeCount" ], "parameters": { + "initialNodeCount": { + "description": "Initial count of nodes in the node group.", + "format": "int32", + "location": "query", + "required": true, + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "projects/{project}/getXpnHost", + "path": "projects/{project}/zones/{zone}/nodeGroups", + "request": { + "$ref": "NodeGroup" + }, "response": { - "$ref": "Project" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "getXpnResources": { - "description": "Gets service resources (a.k.a service project) associated with this host project.", - "flatPath": "projects/{project}/getXpnResources", + "list": { + "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups", "httpMethod": "GET", - "id": "compute.projects.getXpnResources", + "id": "compute.nodeGroups.list", "parameterOrder": [ - "project" + "project", + "zone" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -15130,28 +15306,38 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "projects/{project}/getXpnResources", + "path": "projects/{project}/zones/{zone}/nodeGroups", "response": { - "$ref": "ProjectsGetXpnResources" + "$ref": "NodeGroupList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "listXpnHosts": { - "description": "Lists all shared VPC host projects visible to the user in an organization.", - "flatPath": "projects/{project}/listXpnHosts", + "listNodes": { + "description": "Lists nodes in the node group.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", "httpMethod": "POST", - "id": "compute.projects.listXpnHosts", + "id": "compute.nodeGroups.listNodes", "parameterOrder": [ - "project" + "project", + "zone", + "nodeGroup" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -15163,6 +15349,13 @@ "minimum": "0", "type": "integer" }, + "nodeGroup": { + "description": "Name of the NodeGroup resource whose nodes you want to list.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "orderBy": { "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. 
You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", @@ -15184,29 +15377,43 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "projects/{project}/listXpnHosts", - "request": { - "$ref": "ProjectsListXpnHostsRequest" - }, + "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", "response": { - "$ref": "XpnHostList" + "$ref": "NodeGroupsListNodes" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "moveDisk": { - "description": "Moves a persistent disk from one zone to another.", - "flatPath": "projects/{project}/moveDisk", - "httpMethod": "POST", - "id": "compute.projects.moveDisk", + "patch": { + "description": "Updates the specified node group.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", + "httpMethod": "PATCH", + "id": "compute.nodeGroups.patch", "parameterOrder": [ - "project" + "project", + "zone", + "nodeGroup" ], "parameters": { + "nodeGroup": { + "description": "Name of the NodeGroup resource to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -15218,11 +15425,18 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "projects/{project}/moveDisk", + "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", "request": { - "$ref": "DiskMoveRequest" + "$ref": "NodeGroup" }, "response": { "$ref": "Operation" @@ -15232,13 +15446,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "moveInstance": { - "description": "Moves an instance and its attached persistent disks from one zone to another. *Note*: Moving VMs or disks by using this method might cause unexpected behavior. 
For more information, see the [known issue](/compute/docs/troubleshooting/known-issues#moving_vms_or_disks_using_the_moveinstance_api_or_the_causes_unexpected_behavior).", - "flatPath": "projects/{project}/moveInstance", + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", "httpMethod": "POST", - "id": "compute.projects.moveInstance", + "id": "compute.nodeGroups.setIamPolicy", "parameterOrder": [ - "project" + "project", + "zone", + "resource" ], "parameters": { "project": { @@ -15248,33 +15464,51 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" - } - }, - "path": "projects/{project}/moveInstance", - "request": { - "$ref": "InstanceMoveRequest" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", + "request": { + "$ref": "ZoneSetPolicyRequest" }, "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "setCommonInstanceMetadata": { - "description": "Sets metadata common to all instances within the specified project using the data included in the request.", - "flatPath": "projects/{project}/setCommonInstanceMetadata", + "setNodeTemplate": { + "description": "Updates the node template of the node group.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", "httpMethod": "POST", - "id": "compute.projects.setCommonInstanceMetadata", + "id": "compute.nodeGroups.setNodeTemplate", "parameterOrder": [ - "project" + "project", + "zone", + "nodeGroup" ], "parameters": { + "nodeGroup": { + "description": "Name of the NodeGroup resource to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -15286,11 +15520,18 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "projects/{project}/setCommonInstanceMetadata", + "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", "request": { - "$ref": "Metadata" + "$ref": "NodeGroupsSetNodeTemplateRequest" }, "response": { "$ref": "Operation" @@ -15300,13 +15541,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "setDefaultNetworkTier": { - "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", - "flatPath": "projects/{project}/setDefaultNetworkTier", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", "httpMethod": "POST", - "id": "compute.projects.setDefaultNetworkTier", + "id": "compute.nodeGroups.testIamPermissions", "parameterOrder": [ - "project" + "project", + "zone", + "resource" ], "parameters": { "project": { @@ -15316,33 +15559,75 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" } }, - "path": "projects/{project}/setDefaultNetworkTier", + "path": "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", "request": { - "$ref": "ProjectsSetDefaultNetworkTierRequest" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "setUsageExportBucket": { - "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. 
If you provide an empty request body using this method, the usage export feature will be disabled.", - "flatPath": "projects/{project}/setUsageExportBucket", - "httpMethod": "POST", - "id": "compute.projects.setUsageExportBucket", + } + } + }, + "nodeTemplates": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of node templates.", + "flatPath": "projects/{project}/aggregated/nodeTemplates", + "httpMethod": "GET", + "id": "compute.nodeTemplates.aggregatedList", "parameterOrder": [ "project" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -15350,41 +15635,40 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", - "type": "string" + "type": "boolean" } }, - "path": "projects/{project}/setUsageExportBucket", - "request": { - "$ref": "UsageExportLocation" - }, + "path": "projects/{project}/aggregated/nodeTemplates", "response": { - "$ref": "Operation" + "$ref": "NodeTemplateAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" + "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "publicAdvertisedPrefixes": { - "methods": { + }, "delete": { - "description": "Deletes the specified PublicAdvertisedPrefix", - "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + "description": "Deletes the specified NodeTemplate resource.", + "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", "httpMethod": "DELETE", - "id": "compute.publicAdvertisedPrefixes.delete", + "id": "compute.nodeTemplates.delete", "parameterOrder": [ "project", - "publicAdvertisedPrefix" + "region", + "nodeTemplate" ], "parameters": { + "nodeTemplate": { + "description": "Name of the NodeTemplate resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -15392,10 +15676,10 @@ "required": true, "type": "string" }, - "publicAdvertisedPrefix": { - "description": "Name of the PublicAdvertisedPrefix resource to delete.", + "region": { + "description": "The name of the region for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -15405,7 +15689,7 @@ "type": "string" } }, - "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + "path": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", "response": { "$ref": "Operation" }, @@ -15415,15 +15699,23 @@ ] }, "get": { - "description": "Returns the specified PublicAdvertisedPrefix resource.", - "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + "description": "Returns the specified node template. 
Gets a list of available node templates by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", "httpMethod": "GET", - "id": "compute.publicAdvertisedPrefixes.get", + "id": "compute.nodeTemplates.get", "parameterOrder": [ "project", - "publicAdvertisedPrefix" + "region", + "nodeTemplate" ], "parameters": { + "nodeTemplate": { + "description": "Name of the node template to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -15431,17 +15723,66 @@ "required": true, "type": "string" }, - "publicAdvertisedPrefix": { - "description": "Name of the PublicAdvertisedPrefix resource to return.", + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + "response": { + "$ref": "NodeTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.nodeTemplates.getIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + "path": "projects/{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", "response": { - "$ref": "PublicAdvertisedPrefix" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15450,12 +15791,13 @@ ] }, "insert": { - "description": "Creates a PublicAdvertisedPrefix in the specified project using the parameters that are included in the request.", - "flatPath": "projects/{project}/global/publicAdvertisedPrefixes", + "description": "Creates a NodeTemplate resource in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/nodeTemplates", "httpMethod": "POST", - "id": "compute.publicAdvertisedPrefixes.insert", + "id": "compute.nodeTemplates.insert", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "project": { @@ -15465,15 +15807,22 @@ "required": true, "type": "string" }, + "region": { + "description": 
"The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/global/publicAdvertisedPrefixes", + "path": "projects/{project}/regions/{region}/nodeTemplates", "request": { - "$ref": "PublicAdvertisedPrefix" + "$ref": "NodeTemplate" }, "response": { "$ref": "Operation" @@ -15484,16 +15833,17 @@ ] }, "list": { - "description": "Lists the PublicAdvertisedPrefixes for a project.", - "flatPath": "projects/{project}/global/publicAdvertisedPrefixes", + "description": "Retrieves a list of node templates available to the specified project.", + "flatPath": "projects/{project}/regions/{region}/nodeTemplates", "httpMethod": "GET", - "id": "compute.publicAdvertisedPrefixes.list", + "id": "compute.nodeTemplates.list", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -15522,15 +15872,22 @@ "required": true, "type": "string" }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/global/publicAdvertisedPrefixes", + "path": "projects/{project}/regions/{region}/nodeTemplates", "response": { - "$ref": "PublicAdvertisedPrefixList" + "$ref": "NodeTemplateList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15538,14 +15895,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", - "httpMethod": "PATCH", - "id": "compute.publicAdvertisedPrefixes.patch", + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", + "httpMethod": "POST", + "id": "compute.nodeTemplates.setIamPolicy", "parameterOrder": [ "project", - "publicAdvertisedPrefix" + "region", + "resource" ], "parameters": { "project": { @@ -15555,46 +15913,94 @@ "required": true, "type": "string" }, - "publicAdvertisedPrefix": { - "description": "Name of the PublicAdvertisedPrefix resource to patch.", + "region": { + "description": "The name of the region for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" } }, - "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + "path": "projects/{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", "request": { - "$ref": "PublicAdvertisedPrefix" + "$ref": "RegionSetPolicyRequest" }, "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", + "httpMethod": "POST", + "id": "compute.nodeTemplates.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] } } }, - "publicDelegatedPrefixes": { + 
"nodeTypes": { "methods": { "aggregatedList": { - "description": "Lists all PublicDelegatedPrefix resources owned by the specific project across all scopes.", - "flatPath": "projects/{project}/aggregated/publicDelegatedPrefixes", + "description": "Retrieves an aggregated list of node types.", + "flatPath": "projects/{project}/aggregated/nodeTypes", "httpMethod": "GET", - "id": "compute.publicDelegatedPrefixes.aggregatedList", + "id": "compute.nodeTypes.aggregatedList", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -15622,7 +16028,7 @@ "type": "string" }, "project": { - "description": "Name of the project scoping this request.", + "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, @@ -15634,9 +16040,9 @@ "type": "boolean" } }, - "path": "projects/{project}/aggregated/publicDelegatedPrefixes", + "path": "projects/{project}/aggregated/nodeTypes", "response": { - "$ref": "PublicDelegatedPrefixAggregatedList" + "$ref": "NodeTypeAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15644,106 +16050,24 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified PublicDelegatedPrefix in the given region.", - "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", - "httpMethod": "DELETE", - "id": "compute.publicDelegatedPrefixes.delete", - "parameterOrder": [ - "project", - "region", - "publicDelegatedPrefix" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "publicDelegatedPrefix": { - "description": "Name of the PublicDelegatedPrefix resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region of this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, "get": { - "description": "Returns the specified PublicDelegatedPrefix resource in the given region.", - "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", + "description": "Returns the specified node type. Gets a list of available node types by making a list() request.", + "flatPath": "projects/{project}/zones/{zone}/nodeTypes/{nodeType}", "httpMethod": "GET", - "id": "compute.publicDelegatedPrefixes.get", + "id": "compute.nodeTypes.get", "parameterOrder": [ "project", - "region", - "publicDelegatedPrefix" + "zone", + "nodeType" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "publicDelegatedPrefix": { - "description": "Name of the PublicDelegatedPrefix resource to return.", + "nodeType": { + "description": "Name of the node type to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "region": { - "description": "Name of the region of this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", - "response": { - "$ref": "PublicDelegatedPrefix" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a PublicDelegatedPrefix in the specified project in the given region using the parameters that are included in the request.", - "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes", - "httpMethod": "POST", - "id": "compute.publicDelegatedPrefixes.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -15751,43 +16075,36 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region of this request.", + "zone": { + "description": "The name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes", - "request": { - "$ref": "PublicDelegatedPrefix" - }, + "path": "projects/{project}/zones/{zone}/nodeTypes/{nodeType}", "response": { - "$ref": "Operation" + "$ref": "NodeType" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "list": { - "description": "Lists the PublicDelegatedPrefixes for a project in the given region.", - "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes", + "description": "Retrieves a list of node types available to the specified project.", + "flatPath": "projects/{project}/zones/{zone}/nodeTypes", "httpMethod": "GET", - "id": "compute.publicDelegatedPrefixes.list", + "id": "compute.nodeTypes.list", "parameterOrder": [ "project", - "region" + "zone" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -15816,96 +16133,106 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region of this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" - } - }, - "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes", - "response": { - "$ref": "PublicDelegatedPrefixList" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/nodeTypes", + "response": { + "$ref": "NodeTypeList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "patch": { - "description": "Patches the specified PublicDelegatedPrefix resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", - "httpMethod": "PATCH", - "id": "compute.publicDelegatedPrefixes.patch", + } + } + }, + "packetMirrorings": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of packetMirrorings.", + "flatPath": "projects/{project}/aggregated/packetMirrorings", + "httpMethod": "GET", + "id": "compute.packetMirrorings.aggregatedList", "parameterOrder": [ - "project", - "region", - "publicDelegatedPrefix" + "project" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", "type": "string" }, - "publicDelegatedPrefix": { - "description": "Name of the PublicDelegatedPrefix resource to patch.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", "type": "string" }, - "region": { - "description": "Name of the region for this request.", + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", - "type": "string" + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", - "request": { - "$ref": "PublicDelegatedPrefix" - }, + "path": "projects/{project}/aggregated/packetMirrorings", "response": { - "$ref": "Operation" + "$ref": "PacketMirroringAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "regionAutoscalers": { - "methods": { + }, "delete": { - "description": "Deletes the specified autoscaler.", - "flatPath": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", + "description": "Deletes the specified PacketMirroring resource.", + "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", "httpMethod": "DELETE", - "id": "compute.regionAutoscalers.delete", + "id": "compute.packetMirrorings.delete", "parameterOrder": [ "project", "region", - "autoscaler" + "packetMirroring" ], "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to delete.", + "packetMirroring": { + "description": "Name of the PacketMirroring resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -15919,7 +16246,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -15931,7 +16258,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", + "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", "response": { "$ref": "Operation" }, @@ -15941,18 +16268,18 @@ ] }, "get": { - "description": "Returns the specified autoscaler.", - "flatPath": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", + "description": "Returns the specified PacketMirroring resource.", + "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", "httpMethod": "GET", - "id": "compute.regionAutoscalers.get", + "id": "compute.packetMirrorings.get", "parameterOrder": [ "project", "region", - "autoscaler" + "packetMirroring" ], "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to return.", + "packetMirroring": { + "description": "Name of the PacketMirroring resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -15966,16 +16293,16 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", + "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", "response": { - "$ref": "Autoscaler" + "$ref": "PacketMirroring" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15984,10 +16311,10 @@ ] }, "insert": { - "description": "Creates an autoscaler in the specified project using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/autoscalers", + "description": "Creates a PacketMirroring resource 
in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/packetMirrorings", "httpMethod": "POST", - "id": "compute.regionAutoscalers.insert", + "id": "compute.packetMirrorings.insert", "parameterOrder": [ "project", "region" @@ -16001,7 +16328,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -16013,9 +16340,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/autoscalers", + "path": "projects/{project}/regions/{region}/packetMirrorings", "request": { - "$ref": "Autoscaler" + "$ref": "PacketMirroring" }, "response": { "$ref": "Operation" @@ -16026,17 +16353,17 @@ ] }, "list": { - "description": "Retrieves a list of autoscalers contained within the specified region.", - "flatPath": "projects/{project}/regions/{region}/autoscalers", + "description": "Retrieves a list of PacketMirroring resources available to the specified project and region.", + "flatPath": "projects/{project}/regions/{region}/packetMirrorings", "httpMethod": "GET", - "id": "compute.regionAutoscalers.list", + "id": "compute.packetMirrorings.list", "parameterOrder": [ "project", "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -16066,7 +16393,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -16078,9 +16405,9 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/autoscalers", + "path": "projects/{project}/regions/{region}/packetMirrorings", "response": { - "$ref": "RegionAutoscalerList" + "$ref": "PacketMirroringList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16089,19 +16416,21 @@ ] }, "patch": { - "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/regions/{region}/autoscalers", + "description": "Patches the specified PacketMirroring resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", "httpMethod": "PATCH", - "id": "compute.regionAutoscalers.patch", + "id": "compute.packetMirrorings.patch", "parameterOrder": [ "project", - "region" + "region", + "packetMirroring" ], "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to patch.", - "location": "query", + "packetMirroring": { + "description": "Name of the PacketMirroring resource to patch.", + "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" }, "project": { @@ -16112,7 +16441,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -16124,9 +16453,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/autoscalers", + "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", "request": { - "$ref": "Autoscaler" + "$ref": "PacketMirroring" }, "response": { "$ref": "Operation" @@ -16136,22 +16465,17 @@ "https://www.googleapis.com/auth/compute" ] }, - "update": { - "description": "Updates an autoscaler in the specified project using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/autoscalers", - "httpMethod": "PUT", - "id": "compute.regionAutoscalers.update", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions", + "httpMethod": "POST", + "id": "compute.packetMirrorings.testIamPermissions", "parameterOrder": [ "project", - "region" + "region", + "resource" ], "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to update.", - "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16160,52 +16484,46 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/autoscalers", + "path": "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions", "request": { - "$ref": "Autoscaler" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] } } }, - "regionBackendServices": { + "projects": { "methods": { - "delete": { - "description": "Deletes the specified regional BackendService resource.", - "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", - "httpMethod": "DELETE", - "id": "compute.regionBackendServices.delete", + "disableXpnHost": { + "description": "Disable this project as a shared VPC host project.", + "flatPath": "projects/{project}/disableXpnHost", + "httpMethod": "POST", + "id": "compute.projects.disableXpnHost", "parameterOrder": [ - "project", - "region", - "backendService" + "project" ], "parameters": { - "backendService": { - "description": "Name of the BackendService resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16213,20 +16531,13 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/backendServices/{backendService}", + "path": "projects/{project}/disableXpnHost", "response": { "$ref": "Operation" }, @@ -16235,24 +16546,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified regional BackendService resource.", - "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", - "httpMethod": "GET", - "id": "compute.regionBackendServices.get", + "disableXpnResource": { + "description": "Disable a service resource (also known as service project) associated with this host project.", + "flatPath": "projects/{project}/disableXpnResource", + "httpMethod": "POST", + "id": "compute.projects.disableXpnResource", "parameterOrder": [ - "project", - "region", - "backendService" + "project" ], "parameters": { - "backendService": { - "description": "Name of the BackendService resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16260,77 +16562,62 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/backendServices/{backendService}", + "path": "projects/{project}/disableXpnResource", + "request": { + "$ref": "ProjectsDisableXpnResourceRequest" + }, "response": { - "$ref": "BackendService" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "getHealth": { - "description": "Gets the most recent health check results for this regional BackendService.", - "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", + "enableXpnHost": { + "description": "Enable this project as a shared VPC host project.", + "flatPath": "projects/{project}/enableXpnHost", "httpMethod": "POST", - "id": "compute.regionBackendServices.getHealth", + "id": "compute.projects.enableXpnHost", "parameterOrder": [ - "project", - "region", - "backendService" + "project" ], "parameters": { - "backendService": { - "description": "Name of the BackendService resource for which to get health.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { + "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", - "request": { - "$ref": "ResourceGroupReference" - }, + "path": "projects/{project}/enableXpnHost", "response": { - "$ref": "BackendServiceGroupHealth" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a regional BackendService resource in the specified project using the data included in the request. 
For more information, see Backend services overview.", - "flatPath": "projects/{project}/regions/{region}/backendServices", + "enableXpnResource": { + "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", + "flatPath": "projects/{project}/enableXpnResource", "httpMethod": "POST", - "id": "compute.regionBackendServices.insert", + "id": "compute.projects.enableXpnResource", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "project": { @@ -16340,22 +16627,15 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/backendServices", + "path": "projects/{project}/enableXpnResource", "request": { - "$ref": "BackendService" + "$ref": "ProjectsEnableXpnResourceRequest" }, "response": { "$ref": "Operation" @@ -16365,62 +16645,26 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", - "flatPath": "projects/{project}/regions/{region}/backendServices", + "get": { + "description": "Returns the specified Project resource. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", + "flatPath": "projects/{project}", "httpMethod": "GET", - "id": "compute.regionBackendServices.list", + "id": "compute.projects.get", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/backendServices", + "path": "projects/{project}", "response": { - "$ref": "BackendServiceList" + "$ref": "Project" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16428,72 +16672,62 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Updates the specified regional BackendService resource with the data included in the request. 
For more information, see Understanding backend services This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", - "httpMethod": "PATCH", - "id": "compute.regionBackendServices.patch", + "getXpnHost": { + "description": "Gets the shared VPC host project that this project links to. May be empty if no link exists.", + "flatPath": "projects/{project}/getXpnHost", + "httpMethod": "GET", + "id": "compute.projects.getXpnHost", "parameterOrder": [ - "project", - "region", - "backendService" + "project" ], "parameters": { - "backendService": { - "description": "Name of the BackendService resource to patch.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/regions/{region}/backendServices/{backendService}", - "request": { - "$ref": "BackendService" - }, + "path": "projects/{project}/getXpnHost", "response": { - "$ref": "Operation" + "$ref": "Project" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "update": { - "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Backend services overview .", - "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", - "httpMethod": "PUT", - "id": "compute.regionBackendServices.update", + "getXpnResources": { + "description": "Gets service resources (a.k.a service project) associated with this host project.", + "flatPath": "projects/{project}/getXpnResources", + "httpMethod": "GET", + "id": "compute.projects.getXpnResources", "parameterOrder": [ - "project", - "region", - "backendService" + "project" ], "parameters": { - "backendService": { - "description": "Name of the BackendService resource to update.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", "type": "string" }, "project": { @@ -16503,54 +16737,35 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", - "type": "string" + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/backendServices/{backendService}", - "request": { - "$ref": "BackendService" - }, + "path": "projects/{project}/getXpnResources", "response": { - "$ref": "Operation" + "$ref": "ProjectsGetXpnResources" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "regionCommitments": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of commitments by region.", - "flatPath": "projects/{project}/aggregated/commitments", - "httpMethod": "GET", - "id": "compute.regionCommitments.aggregatedList", + }, + "listXpnHosts": { + "description": "Lists all shared VPC host projects visible to the user in an organization.", + "flatPath": "projects/{project}/listXpnHosts", + "httpMethod": "POST", + "id": "compute.projects.listXpnHosts", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", @@ -16582,34 +16797,27 @@ "type": "boolean" } }, - "path": "projects/{project}/aggregated/commitments", + "path": "projects/{project}/listXpnHosts", + "request": { + "$ref": "ProjectsListXpnHostsRequest" + }, "response": { - "$ref": "CommitmentAggregatedList" + "$ref": "XpnHostList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", - "httpMethod": "GET", - "id": "compute.regionCommitments.get", + "moveDisk": { + "description": "Moves a persistent disk from one zone to another.", + "flatPath": "projects/{project}/moveDisk", + "httpMethod": "POST", + "id": "compute.projects.moveDisk", "parameterOrder": [ - "project", - "region", - "commitment" + "project" ], "parameters": { - "commitment": { - "description": "Name of the commitment to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16617,32 +16825,31 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/commitments/{commitment}", + "path": "projects/{project}/moveDisk", + "request": { + "$ref": "DiskMoveRequest" + }, "response": { - "$ref": "Commitment" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a commitment in the specified project using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/commitments", + "moveInstance": { + "description": "Moves an instance and its attached persistent disks from one zone to another. *Note*: Moving VMs or disks by using this method might cause unexpected behavior. 
For more information, see the [known issue](/compute/docs/troubleshooting/known-issues#moving_vms_or_disks_using_the_moveinstance_api_or_the_causes_unexpected_behavior).", + "flatPath": "projects/{project}/moveInstance", "httpMethod": "POST", - "id": "compute.regionCommitments.insert", + "id": "compute.projects.moveInstance", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "project": { @@ -16652,22 +16859,15 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/commitments", + "path": "projects/{project}/moveInstance", "request": { - "$ref": "Commitment" + "$ref": "InstanceMoveRequest" }, "response": { "$ref": "Operation" @@ -16677,39 +16877,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves a list of commitments contained within the specified region.", - "flatPath": "projects/{project}/regions/{region}/commitments", - "httpMethod": "GET", - "id": "compute.regionCommitments.list", + "setCommonInstanceMetadata": { + "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + "flatPath": "projects/{project}/setCommonInstanceMetadata", + "httpMethod": "POST", + "id": "compute.projects.setCommonInstanceMetadata", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16717,52 +16893,67 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/regions/{region}/commitments", + "path": "projects/{project}/setCommonInstanceMetadata", + "request": { + "$ref": "Metadata" + }, "response": { - "$ref": "CommitmentList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "update": { - "description": "Updates the specified commitment with the data included in the request. 
Update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: auto_renew.", - "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", - "httpMethod": "PATCH", - "id": "compute.regionCommitments.update", + "setDefaultNetworkTier": { + "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", + "flatPath": "projects/{project}/setDefaultNetworkTier", + "httpMethod": "POST", + "id": "compute.projects.setDefaultNetworkTier", "parameterOrder": [ - "project", - "region", - "commitment" + "project" ], "parameters": { - "commitment": { - "description": "Name of the commitment for which auto renew is being updated.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "paths": { + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "repeated": true, "type": "string" - }, + } + }, + "path": "projects/{project}/setDefaultNetworkTier", + "request": { + "$ref": "ProjectsSetDefaultNetworkTierRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setUsageExportBucket": { + "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + "flatPath": "projects/{project}/setUsageExportBucket", + "httpMethod": "POST", + "id": "compute.projects.setUsageExportBucket", + "parameterOrder": [ + "project" + ], + "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -16770,59 +16961,80 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "updateMask": { - "description": "update_mask indicates fields to be updated as part of this request.", - "format": "google-fieldmask", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/regions/{region}/commitments/{commitment}", + "path": "projects/{project}/setUsageExportBucket", "request": { - "$ref": "Commitment" + "$ref": "UsageExportLocation" }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" ] } } }, - "regionDiskTypes": { + "publicAdvertisedPrefixes": { "methods": { - "get": { - "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/diskTypes/{diskType}", - "httpMethod": "GET", - "id": "compute.regionDiskTypes.get", + "delete": { + "description": "Deletes the specified PublicAdvertisedPrefix", + "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + "httpMethod": "DELETE", + "id": "compute.publicAdvertisedPrefixes.delete", "parameterOrder": [ "project", - "region", - "diskType" + "publicAdvertisedPrefix" ], "parameters": { - "diskType": { - "description": "Name of the disk type to return.", + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "publicAdvertisedPrefix": { + "description": "Name of the PublicAdvertisedPrefix resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified PublicAdvertisedPrefix resource.", + "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + "httpMethod": "GET", + "id": "compute.publicAdvertisedPrefixes.get", + "parameterOrder": [ + "project", + "publicAdvertisedPrefix" + ], + "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -16830,17 +17042,17 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", + "publicAdvertisedPrefix": { + "description": "Name of the PublicAdvertisedPrefix resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/diskTypes/{diskType}", + "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", "response": { - "$ref": "DiskType" + "$ref": "PublicAdvertisedPrefix" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16848,18 +17060,51 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "insert": { + "description": "Creates a PublicAdvertisedPrefix in the specified project using the parameters that are included in the request.", + "flatPath": "projects/{project}/global/publicAdvertisedPrefixes", + "httpMethod": "POST", + "id": "compute.publicAdvertisedPrefixes.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/publicAdvertisedPrefixes", + "request": { + "$ref": "PublicAdvertisedPrefix" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "list": { - "description": "Retrieves a list of regional disk types available to the specified project.", - "flatPath": "projects/{project}/regions/{region}/diskTypes", + "description": "Lists the PublicAdvertisedPrefixes for a project.", + "flatPath": "projects/{project}/global/publicAdvertisedPrefixes", "httpMethod": "GET", - "id": "compute.regionDiskTypes.list", + "id": "compute.publicAdvertisedPrefixes.list", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -16888,51 +17133,32 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/diskTypes", + "path": "projects/{project}/global/publicAdvertisedPrefixes", "response": { - "$ref": "RegionDiskTypeList" + "$ref": "PublicAdvertisedPrefixList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "regionDisks": { - "methods": { - "addResourcePolicies": { - "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", - "flatPath": "projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies", - "httpMethod": "POST", - "id": "compute.regionDisks.addResourcePolicies", + }, + "patch": { + "description": "Patches the specified Router resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + "httpMethod": "PATCH", + "id": "compute.publicAdvertisedPrefixes.patch", "parameterOrder": [ "project", - "region", - "disk" + "publicAdvertisedPrefix" ], "parameters": { - "disk": { - "description": "The disk name for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16940,10 +17166,10 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", + "publicAdvertisedPrefix": { + "description": "Name of the PublicAdvertisedPrefix resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -16953,9 +17179,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies", + "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", "request": { - "$ref": "RegionDisksAddResourcePoliciesRequest" + "$ref": "PublicAdvertisedPrefix" }, "response": { "$ref": "Operation" @@ -16964,83 +17190,98 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "createSnapshot": { - "description": "Creates a snapshot of a specified persistent disk. For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", - "flatPath": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", - "httpMethod": "POST", - "id": "compute.regionDisks.createSnapshot", + } + } + }, + "publicDelegatedPrefixes": { + "methods": { + "aggregatedList": { + "description": "Lists all PublicDelegatedPrefix resources owned by the specific project across all scopes.", + "flatPath": "projects/{project}/aggregated/publicDelegatedPrefixes", + "httpMethod": "GET", + "id": "compute.publicDelegatedPrefixes.aggregatedList", "parameterOrder": [ - "project", - "region", - "disk" + "project" ], "parameters": { - "disk": { - "description": "Name of the regional persistent disk to snapshot.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", "type": "string" }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", "type": "string" }, - "region": { - "description": "Name of the region for this request.", + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", - "type": "string" + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", - "request": { - "$ref": "Snapshot" - }, + "path": "projects/{project}/aggregated/publicDelegatedPrefixes", "response": { - "$ref": "Operation" + "$ref": "PublicDelegatedPrefixAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "delete": { - "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. 
You must separately delete snapshots.", - "flatPath": "projects/{project}/regions/{region}/disks/{disk}", + "description": "Deletes the specified PublicDelegatedPrefix in the given region.", + "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", "httpMethod": "DELETE", - "id": "compute.regionDisks.delete", + "id": "compute.publicDelegatedPrefixes.delete", "parameterOrder": [ "project", "region", - "disk" + "publicDelegatedPrefix" ], "parameters": { - "disk": { - "description": "Name of the regional persistent disk to delete.", + "project": { + "description": "Project ID for this request.", "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "project": { - "description": "Project ID for this request.", + "publicDelegatedPrefix": { + "description": "Name of the PublicDelegatedPrefix resource to delete.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -17052,7 +17293,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{disk}", + "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", "response": { "$ref": "Operation" }, @@ -17062,41 +17303,41 @@ ] }, "get": { - "description": "Returns a specified regional persistent disk.", - "flatPath": "projects/{project}/regions/{region}/disks/{disk}", + "description": "Returns the specified PublicDelegatedPrefix resource in the given region.", + "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", "httpMethod": "GET", - "id": "compute.regionDisks.get", + "id": "compute.publicDelegatedPrefixes.get", "parameterOrder": [ "project", "region", - "disk" + "publicDelegatedPrefix" ], "parameters": { - "disk": { - "description": "Name of the regional persistent disk to return.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "project": { - "description": "Project ID for this request.", + "publicDelegatedPrefix": { + "description": "Name of the PublicDelegatedPrefix resource to return.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{disk}", + "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", "response": { - "$ref": "Disk" + 
"$ref": "PublicDelegatedPrefix" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17104,23 +17345,16 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - "flatPath": "projects/{project}/regions/{region}/disks/{resource}/getIamPolicy", - "httpMethod": "GET", - "id": "compute.regionDisks.getIamPolicy", + "insert": { + "description": "Creates a PublicDelegatedPrefix in the specified project in the given region using the parameters that are included in the request.", + "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes", + "httpMethod": "POST", + "id": "compute.publicDelegatedPrefixes.insert", "parameterOrder": [ "project", - "region", - "resource" + "region" ], "parameters": { - "optionsRequestedPolicyVersion": { - "description": "Requested IAM Policy version.", - "format": "int32", - "location": "query", - "type": "integer" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -17129,89 +17363,42 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{resource}/getIamPolicy", + "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes", + "request": { + "$ref": "PublicDelegatedPrefix" + }, "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a persistent regional disk in the specified project using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/disks", - "httpMethod": "POST", - "id": "compute.regionDisks.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "sourceImage": { - "description": "Source image to restore onto a disk. This field is optional.", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/disks", - "request": { - "$ref": "Disk" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute" ] }, "list": { - "description": "Retrieves the list of persistent disks contained within the specified region.", - "flatPath": "projects/{project}/regions/{region}/disks", + "description": "Lists the PublicDelegatedPrefixes for a project in the given region.", + "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes", "httpMethod": "GET", - "id": "compute.regionDisks.list", + "id": "compute.publicDelegatedPrefixes.list", "parameterOrder": [ "project", "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -17241,7 +17428,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -17253,9 +17440,9 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/disks", + "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes", "response": { - "$ref": "DiskList" + "$ref": "PublicDelegatedPrefixList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17263,33 +17450,33 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "removeResourcePolicies": { - "description": "Removes resource policies from a regional disk.", - "flatPath": "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies", - "httpMethod": "POST", - "id": "compute.regionDisks.removeResourcePolicies", + "patch": { + "description": "Patches the specified PublicDelegatedPrefix resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", + "httpMethod": "PATCH", + "id": "compute.publicDelegatedPrefixes.patch", "parameterOrder": [ "project", "region", - "disk" + "publicDelegatedPrefix" ], "parameters": { - "disk": { - "description": "The disk name for this request.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "project": { - "description": "Project ID for this request.", + "publicDelegatedPrefix": { + "description": "Name of the PublicDelegatedPrefix resource to patch.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -17301,9 +17488,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies", + "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", "request": { - "$ref": "RegionDisksRemoveResourcePoliciesRequest" + "$ref": "PublicDelegatedPrefix" }, "response": { "$ref": "Operation" @@ -17312,34 +17499,38 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "resize": { - "description": "Resizes the specified regional persistent disk.", - "flatPath": "projects/{project}/regions/{region}/disks/{disk}/resize", - "httpMethod": "POST", - "id": "compute.regionDisks.resize", + } + } + }, + "regionAutoscalers": { + "methods": { + "delete": { + "description": "Deletes the specified autoscaler.", + "flatPath": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", + "httpMethod": "DELETE", + "id": 
"compute.regionAutoscalers.delete", "parameterOrder": [ "project", "region", - "disk" + "autoscaler" ], "parameters": { - "disk": { - "description": "Name of the regional persistent disk.", + "autoscaler": { + "description": "Name of the autoscaler to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, "project": { - "description": "The project ID for this request.", + "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -17351,10 +17542,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{disk}/resize", - "request": { - "$ref": "RegionDisksResizeRequest" - }, + "path": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", "response": { "$ref": "Operation" }, @@ -17363,17 +17551,24 @@ "https://www.googleapis.com/auth/compute" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - "flatPath": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", - "httpMethod": "POST", - "id": "compute.regionDisks.setIamPolicy", + "get": { + "description": "Returns the specified autoscaler.", + "flatPath": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", + "httpMethod": "GET", + "id": "compute.regionAutoscalers.get", "parameterOrder": [ "project", "region", - "resource" + "autoscaler" ], "parameters": { + "autoscaler": { + "description": "Name of the autoscaler to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -17382,41 +17577,31 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", - "request": { - "$ref": "RegionSetPolicyRequest" - }, + "path": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", "response": { - "$ref": "Policy" + "$ref": "Autoscaler" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setLabels": { - "description": "Sets the labels on the target regional disk.", - "flatPath": "projects/{project}/regions/{region}/disks/{resource}/setLabels", + "insert": { + "description": "Creates an autoscaler in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/autoscalers", "httpMethod": "POST", - "id": "compute.regionDisks.setLabels", + "id": 
"compute.regionAutoscalers.insert", "parameterOrder": [ "project", - "region", - "resource" + "region" ], "parameters": { "project": { @@ -17427,7 +17612,7 @@ "type": "string" }, "region": { - "description": "The region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -17437,18 +17622,11 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/disks/{resource}/setLabels", + "path": "projects/{project}/regions/{region}/autoscalers", "request": { - "$ref": "RegionSetLabelsRequest" + "$ref": "Autoscaler" }, "response": { "$ref": "Operation" @@ -17458,17 +17636,39 @@ "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "flatPath": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", - "httpMethod": "POST", - "id": "compute.regionDisks.testIamPermissions", + "list": { + "description": "Retrieves a list of autoscalers contained within the specified region.", + "flatPath": "projects/{project}/regions/{region}/autoscalers", + "httpMethod": "GET", + "id": "compute.regionAutoscalers.list", "parameterOrder": [ "project", - "region", - "resource" + "region" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -17477,52 +17677,42 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "projects/{project}/regions/{region}/autoscalers", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "RegionAutoscalerList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "regionHealthCheckServices": { - "methods": { - "delete": { - "description": "Deletes the specified regional HealthCheckService.", - "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - "httpMethod": "DELETE", - "id": "compute.regionHealthCheckServices.delete", + }, + "patch": { + "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/autoscalers", + "httpMethod": "PATCH", + "id": "compute.regionAutoscalers.patch", "parameterOrder": [ "project", - "region", - "healthCheckService" + "region" ], "parameters": { - "healthCheckService": { - "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", - "location": "path", - "required": true, + "autoscaler": { + "description": "Name of the autoscaler to patch.", + "location": "query", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "type": "string" }, "project": { @@ -17545,7 +17735,10 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "path": "projects/{project}/regions/{region}/autoscalers", + "request": { + "$ref": "Autoscaler" + }, "response": { "$ref": "Operation" }, @@ -17554,58 +17747,22 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified regional HealthCheckService resource.", - "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - "httpMethod": "GET", - "id": "compute.regionHealthCheckServices.get", + "update": { + "description": "Updates an autoscaler in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/autoscalers", + "httpMethod": "PUT", + "id": "compute.regionAutoscalers.update", "parameterOrder": [ "project", - "region", - "healthCheckService" + "region" ], "parameters": { - "healthCheckService": { - "description": "Name of the HealthCheckService to update. 
The name must be 1-63 characters long, and comply with RFC1035.", - "location": "path", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, + "autoscaler": { + "description": "Name of the autoscaler to update.", + "location": "query", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - "response": { - "$ref": "HealthCheckService" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/healthCheckServices", - "httpMethod": "POST", - "id": "compute.regionHealthCheckServices.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -17626,9 +17783,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthCheckServices", + "path": "projects/{project}/regions/{region}/autoscalers", "request": { - "$ref": "HealthCheckService" + "$ref": "Autoscaler" }, "response": { "$ref": "Operation" @@ -17637,38 +17794,27 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "list": { - "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", - "flatPath": "projects/{project}/regions/{region}/healthCheckServices", - "httpMethod": "GET", - "id": "compute.regionHealthCheckServices.list", + } + } + }, + "regionBackendServices": { + "methods": { + "delete": { + "description": "Deletes the specified regional BackendService resource.", + "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", + "httpMethod": "DELETE", + "id": "compute.regionBackendServices.delete", "parameterOrder": [ "project", - "region" + "region", + "backendService" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", + "backendService": { + "description": "Name of the BackendService resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" }, "project": { @@ -17685,36 +17831,36 @@ "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthCheckServices", + "path": "projects/{project}/regions/{region}/backendServices/{backendService}", "response": { - "$ref": "HealthCheckServicesList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "patch": { - "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - "httpMethod": "PATCH", - "id": "compute.regionHealthCheckServices.patch", + "get": { + "description": "Returns the specified regional BackendService resource.", + "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", + "httpMethod": "GET", + "id": "compute.regionBackendServices.get", "parameterOrder": [ "project", "region", - "healthCheckService" + "backendService" ], "parameters": { - "healthCheckService": { - "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", + "backendService": { + "description": "Name of the BackendService resource to return.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -17731,49 +17877,37 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - "request": { - "$ref": "HealthCheckService" - }, + "path": "projects/{project}/regions/{region}/backendServices/{backendService}", "response": { - "$ref": "Operation" + "$ref": "BackendService" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "regionHealthChecks": { - "methods": { - "delete": { - "description": "Deletes the specified HealthCheck resource.", - "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", - "httpMethod": "DELETE", - "id": "compute.regionHealthChecks.delete", + }, + "getHealth": { + "description": "Gets the most recent health check results for this regional BackendService.", + "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", + "httpMethod": "POST", + "id": "compute.regionBackendServices.getHealth", "parameterOrder": [ "project", "region", - "healthCheck" + "backendService" ], "parameters": { - "healthCheck": { - "description": "Name of the HealthCheck resource to delete.", + "backendService": { + "description": "Name of the BackendService resource for which to get health.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, "project": { - "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, @@ -17785,39 +17919,37 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "path": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", + "request": { + "$ref": "ResourceGroupReference" + }, "response": { - "$ref": "Operation" + "$ref": "BackendServiceGroupHealth" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "get": { - "description": "Returns the specified HealthCheck resource. 
Gets a list of available health checks by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", "httpMethod": "GET", - "id": "compute.regionHealthChecks.get", + "id": "compute.regionBackendServices.getIamPolicy", "parameterOrder": [ "project", "region", - "healthCheck" + "resource" ], "parameters": { - "healthCheck": { - "description": "Name of the HealthCheck resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" }, "project": { "description": "Project ID for this request.", @@ -17827,16 +17959,23 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "path": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", "response": { - "$ref": "HealthCheck" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17845,10 +17984,10 @@ ] }, "insert": { - "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/healthChecks", + "description": "Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", + "flatPath": "projects/{project}/regions/{region}/backendServices", "httpMethod": "POST", - "id": "compute.regionHealthChecks.insert", + "id": "compute.regionBackendServices.insert", "parameterOrder": [ "project", "region" @@ -17874,9 +18013,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthChecks", + "path": "projects/{project}/regions/{region}/backendServices", "request": { - "$ref": "HealthCheck" + "$ref": "BackendService" }, "response": { "$ref": "Operation" @@ -17887,17 +18026,17 @@ ] }, "list": { - "description": "Retrieves the list of HealthCheck resources available to the specified project.", - "flatPath": "projects/{project}/regions/{region}/healthChecks", + "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", + "flatPath": "projects/{project}/regions/{region}/backendServices", "httpMethod": "GET", - "id": "compute.regionHealthChecks.list", + "id": "compute.regionBackendServices.list", "parameterOrder": [ "project", "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -17939,9 +18078,9 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/healthChecks", + "path": "projects/{project}/regions/{region}/backendServices", "response": { - "$ref": "HealthCheckList" + "$ref": "BackendServiceList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17950,18 +18089,18 @@ ] }, "patch": { - "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Understanding backend services This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", "httpMethod": "PATCH", - "id": "compute.regionHealthChecks.patch", + "id": "compute.regionBackendServices.patch", "parameterOrder": [ "project", "region", - "healthCheck" + "backendService" ], "parameters": { - "healthCheck": { - "description": "Name of the HealthCheck resource to patch.", + "backendService": { + "description": "Name of the BackendService resource to patch.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -17987,9 +18126,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "path": "projects/{project}/regions/{region}/backendServices/{backendService}", "request": { - "$ref": "HealthCheck" + "$ref": "BackendService" }, "response": { "$ref": "Operation" @@ -17999,24 +18138,17 @@ "https://www.googleapis.com/auth/compute" ] }, - "update": { - "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", - "httpMethod": "PUT", - "id": "compute.regionHealthChecks.update", + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", + "httpMethod": "POST", + "id": "compute.regionBackendServices.setIamPolicy", "parameterOrder": [ "project", "region", - "healthCheck" + "resource" ], "parameters": { - "healthCheck": { - "description": "Name of the HealthCheck resource to update.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -18025,48 +18157,47 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "path": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", "request": { - "$ref": "HealthCheck" + "$ref": "RegionSetPolicyRequest" }, "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "regionInstanceGroupManagers": { - "methods": { - "abandonInstances": { - "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.abandonInstances", + }, + "update": { + "description": "Updates the specified regional BackendService resource with the data included in the request. 
For more information, see Backend services overview .", + "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", + "httpMethod": "PUT", + "id": "compute.regionBackendServices.update", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "backendService" ], "parameters": { - "instanceGroupManager": { - "description": "Name of the managed instance group.", + "backendService": { + "description": "Name of the BackendService resource to update.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -18080,6 +18211,7 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -18089,9 +18221,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + "path": "projects/{project}/regions/{region}/backendServices/{backendService}", "request": { - "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + "$ref": "BackendService" }, "response": { "$ref": "Operation" @@ -18100,22 +18232,46 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "applyUpdatesToInstances": { - "description": "Apply updates to selected instances the managed instance group.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", + } + } + }, + "regionCommitments": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of commitments by region.", + "flatPath": "projects/{project}/aggregated/commitments", + "httpMethod": "GET", + "id": "compute.regionCommitments.aggregatedList", "parameterOrder": [ - "project", - "region", - "instanceGroupManager" + "project" ], "parameters": { - "instanceGroupManager": { - "description": "The name of the managed instance group, should conform to RFC1035.", - "location": "path", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", "type": "string" }, "project": { @@ -18125,39 +18281,37 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request, should conform to RFC1035.", - "location": "path", - "required": true, - "type": "string" + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", - "request": { - "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" - }, + "path": "projects/{project}/aggregated/commitments", "response": { - "$ref": "Operation" + "$ref": "CommitmentAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "createInstances": { - "description": "Creates instances with per-instance configs in this regional managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.createInstances", + "get": { + "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", + "httpMethod": "GET", + "id": "compute.regionCommitments.get", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "commitment" ], "parameters": { - "instanceGroupManager": { - "description": "The name of the managed instance group. It should conform to RFC1035.", + "commitment": { + "description": "Name of the commitment to return.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -18169,46 +18323,33 @@ "type": "string" }, "region": { - "description": "The name of the region where the managed instance group is located. It should conform to RFC1035.", + "description": "Name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", - "request": { - "$ref": "RegionInstanceGroupManagersCreateInstancesRequest" - }, + "path": "projects/{project}/regions/{region}/commitments/{commitment}", "response": { - "$ref": "Operation" + "$ref": "Commitment" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified managed instance group and all of the instances in that group.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - "httpMethod": "DELETE", - "id": "compute.regionInstanceGroupManagers.delete", + "insert": { + "description": "Creates a commitment in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/commitments", + "httpMethod": "POST", + "id": "compute.regionCommitments.insert", "parameterOrder": [ "project", - "region", - "instanceGroupManager" + "region" ], "parameters": { - "instanceGroupManager": { - "description": "Name of the managed instance group to delete.", - "location": "path", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -18217,8 +18358,9 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -18228,7 +18370,10 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "path": "projects/{project}/regions/{region}/commitments", + "request": { + "$ref": "Commitment" + }, "response": { "$ref": "Operation" }, @@ -18237,21 +18382,37 @@ "https://www.googleapis.com/auth/compute" ] }, - "deleteInstances": { - "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. 
You can specify a maximum of 1000 instances with this method per request.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.deleteInstances", + "list": { + "description": "Retrieves a list of commitments contained within the specified region.", + "flatPath": "projects/{project}/regions/{region}/commitments", + "httpMethod": "GET", + "id": "compute.regionCommitments.list", "parameterOrder": [ "project", - "region", - "instanceGroupManager" + "region" ], "parameters": { - "instanceGroupManager": { - "description": "Name of the managed instance group.", - "location": "path", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", "type": "string" }, "project": { @@ -18262,46 +18423,51 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", - "type": "string" + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - "request": { - "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" - }, + "path": "projects/{project}/regions/{region}/commitments", "response": { - "$ref": "Operation" + "$ref": "CommitmentList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "deletePerInstanceConfigs": { - "description": "Deletes selected per-instance configs for the managed instance group.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.deletePerInstanceConfigs", + "update": { + "description": "Updates the specified commitment with the data included in the request. Update is performed only on selected fields included as part of update-mask. 
Only the following fields can be modified: auto_renew.", + "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", + "httpMethod": "PATCH", + "id": "compute.regionCommitments.update", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "commitment" ], "parameters": { - "instanceGroupManager": { - "description": "The name of the managed instance group. It should conform to RFC1035.", + "commitment": { + "description": "Name of the commitment for which auto renew is being updated.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, + "paths": { + "location": "query", + "repeated": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -18310,15 +18476,27 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request, should conform to RFC1035.", + "description": "Name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "updateMask": { + "description": "update_mask indicates fields to be updated as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + "path": "projects/{project}/regions/{region}/commitments/{commitment}", "request": { - "$ref": "RegionInstanceGroupManagerDeleteInstanceConfigReq" + "$ref": "Commitment" }, "response": { "$ref": "Operation" @@ -18327,21 +18505,26 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, + } + } + }, + "regionDiskTypes": { + "methods": { "get": { - "description": "Returns all of the details about the specified managed instance group.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "description": "Returns the specified regional disk type. 
Gets a list of available disk types by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/diskTypes/{diskType}", "httpMethod": "GET", - "id": "compute.regionInstanceGroupManagers.get", + "id": "compute.regionDiskTypes.get", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "diskType" ], "parameters": { - "instanceGroupManager": { - "description": "Name of the managed instance group to return.", + "diskType": { + "description": "Name of the disk type to return.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -18353,15 +18536,16 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "path": "projects/{project}/regions/{region}/diskTypes/{diskType}", "response": { - "$ref": "InstanceGroupManager" + "$ref": "DiskType" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -18369,59 +18553,18 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method. A regional managed instance group can contain up to 2000 instances.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers", - "request": { - "$ref": "InstanceGroupManager" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, "list": { - "description": "Retrieves the list of managed instance groups that are contained within the specified region.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers", + "description": "Retrieves a list of regional disk types available to the specified project.", + "flatPath": "projects/{project}/regions/{region}/diskTypes", "httpMethod": "GET", - "id": "compute.regionInstanceGroupManagers.list", + "id": "compute.regionDiskTypes.list", "parameterOrder": [ "project", "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -18451,8 +18594,9 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -18462,56 +18606,38 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers", + "path": "projects/{project}/regions/{region}/diskTypes", "response": { - "$ref": "RegionInstanceGroupManagerList" + "$ref": "RegionDiskTypeList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "listErrors": { - "description": "Lists all errors thrown by actions on instances for a given regional managed instance group. The filter and orderBy query parameters are not supported.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", - "httpMethod": "GET", - "id": "compute.regionInstanceGroupManagers.listErrors", + } + } + }, + "regionDisks": { + "methods": { + "addResourcePolicies": { + "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", + "flatPath": "projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies", + "httpMethod": "POST", + "id": "compute.regionDisks.addResourcePolicies", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "disk" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "instanceGroupManager": { - "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}.", + "disk": { + "description": "The disk name for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -18520,67 +18646,48 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request. This should conform to RFC1035.", + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", + "path": "projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies", + "request": { + "$ref": "RegionDisksAddResourcePoliciesRequest" + }, "response": { - "$ref": "RegionInstanceGroupManagersListErrorsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + "createSnapshot": { + "description": "Creates a snapshot of a specified persistent disk. For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", + "flatPath": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.listManagedInstances", + "id": "compute.regionDisks.createSnapshot", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "disk" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "instanceGroupManager": { - "description": "The name of the managed instance group.", + "disk": { + "description": "Name of the regional persistent disk to snapshot.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -18589,66 +18696,93 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + "path": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", + "request": { + "$ref": "Snapshot" + }, "response": { - "$ref": "RegionInstanceGroupManagersListInstancesResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "listPerInstanceConfigs": { - "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", + "delete": { + "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + "flatPath": "projects/{project}/regions/{region}/disks/{disk}", + "httpMethod": "DELETE", + "id": "compute.regionDisks.delete", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "disk" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "instanceGroupManager": { - "description": "The name of the managed instance group. It should conform to RFC1035.", + "disk": { + "description": "Name of the regional persistent disk to delete.", "location": "path", "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/disks/{disk}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns a specified regional persistent disk.", + "flatPath": "projects/{project}/regions/{region}/disks/{disk}", + "httpMethod": "GET", + "id": "compute.regionDisks.get", + "parameterOrder": [ + "project", + "region", + "disk" + ], + "parameters": { + "disk": { + "description": "Name of the regional persistent disk to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" }, "project": { "description": "Project ID for this request.", @@ -18658,20 +18792,16 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request, should conform to RFC1035.", + "description": "Name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", - "location": "query", - "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + "path": "projects/{project}/regions/{region}/disks/{disk}", "response": { - "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" + "$ref": "Disk" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -18679,23 +18809,65 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - "httpMethod": "PATCH", - "id": "compute.regionInstanceGroupManagers.patch", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/regions/{region}/disks/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.regionDisks.getIamPolicy", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "resource" ], "parameters": { - "instanceGroupManager": { - "description": "The name of the instance group manager.", + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/disks/{resource}/getIamPolicy", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a persistent regional disk in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/disks", + "httpMethod": "POST", + "id": "compute.regionDisks.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -18704,8 +18876,9 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + 
"description": "Name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -18713,11 +18886,16 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "sourceImage": { + "description": "Source image to restore onto a disk. This field is optional.", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "path": "projects/{project}/regions/{region}/disks", "request": { - "$ref": "InstanceGroupManager" + "$ref": "Disk" }, "response": { "$ref": "Operation" @@ -18727,21 +18905,37 @@ "https://www.googleapis.com/auth/compute" ] }, - "patchPerInstanceConfigs": { - "description": "Inserts or patches per-instance configs for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", + "list": { + "description": "Retrieves the list of persistent disks contained within the specified region.", + "flatPath": "projects/{project}/regions/{region}/disks", + "httpMethod": "GET", + "id": "compute.regionDisks.list", "parameterOrder": [ "project", - "region", - "instanceGroupManager" + "region" ], "parameters": { - "instanceGroupManager": { - "description": "The name of the managed instance group. It should conform to RFC1035.", - "location": "path", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", "type": "string" }, "project": { @@ -18752,43 +18946,43 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request, should conform to RFC1035.", + "description": "Name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", - "type": "string" + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", - "request": { - "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" - }, + "path": "projects/{project}/regions/{region}/disks", "response": { - "$ref": "Operation" + "$ref": "DiskList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "recreateInstances": { - "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "removeResourcePolicies": { + "description": "Removes resource policies from a regional disk.", + "flatPath": "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.recreateInstances", + "id": "compute.regionDisks.removeResourcePolicies", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "disk" ], "parameters": { - "instanceGroupManager": { - "description": "Name of the managed instance group.", + "disk": { + "description": "The disk name for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -18800,8 +18994,9 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -18811,9 +19006,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "path": "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies", "request": { - "$ref": "RegionInstanceGroupManagersRecreateRequest" + "$ref": "RegionDisksRemoveResourcePoliciesRequest" }, "response": { "$ref": "Operation" @@ -18824,33 +19019,34 @@ ] }, "resize": { - "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances. The resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method. 
If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", + "description": "Resizes the specified regional persistent disk.", + "flatPath": "projects/{project}/regions/{region}/disks/{disk}/resize", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.resize", + "id": "compute.regionDisks.resize", "parameterOrder": [ "project", "region", - "instanceGroupManager", - "size" + "disk" ], "parameters": { - "instanceGroupManager": { - "description": "Name of the managed instance group.", + "disk": { + "description": "Name of the regional persistent disk.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, "project": { - "description": "Project ID for this request.", + "description": "The project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -18858,17 +19054,12 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "size": { - "description": "Number of instances that should exist in this instance group manager.", - "format": "int32", - "location": "query", - "minimum": "0", - "required": true, - "type": "integer" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", + "path": "projects/{project}/regions/{region}/disks/{disk}/resize", + "request": { + "$ref": "RegionDisksResizeRequest" + }, "response": { "$ref": "Operation" }, @@ -18877,23 +19068,62 @@ "https://www.googleapis.com/auth/compute" ] }, - "setInstanceTemplate": { - "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "flatPath": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", + "id": "compute.regionDisks.setIamPolicy", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "resource" ], "parameters": { - "instanceGroupManager": { - "description": "The name of the managed instance group.", + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", + "request": { + "$ref": "RegionSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setLabels": { + "description": "Sets the labels on the target regional disk.", + "flatPath": "projects/{project}/regions/{region}/disks/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.regionDisks.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -18902,8 +19132,9 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -18911,11 +19142,18 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + "path": "projects/{project}/regions/{region}/disks/{resource}/setLabels", "request": { - "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + "$ref": "RegionSetLabelsRequest" }, "response": { "$ref": "Operation" @@ -18925,23 +19163,17 @@ "https://www.googleapis.com/auth/compute" ] }, - "setTargetPools": { - "description": "Modifies the target pools to which all new instances in this group are assigned. 
Existing instances in the group are not affected.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.setTargetPools", + "id": "compute.regionDisks.testIamPermissions", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "resource" ], "parameters": { - "instanceGroupManager": { - "description": "Name of the managed instance group.", - "location": "path", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -18950,42 +19182,50 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + "path": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", "request": { - "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "updatePerInstanceConfigs": { - "description": "Inserts or updates per-instance configs for the managed instance group. 
perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", - "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", - "httpMethod": "POST", - "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", + } + } + }, + "regionHealthCheckServices": { + "methods": { + "delete": { + "description": "Deletes the specified regional HealthCheckService.", + "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "httpMethod": "DELETE", + "id": "compute.regionHealthCheckServices.delete", "parameterOrder": [ "project", "region", - "instanceGroupManager" + "healthCheckService" ], "parameters": { - "instanceGroupManager": { - "description": "The name of the managed instance group. It should conform to RFC1035.", + "healthCheckService": { + "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", "required": true, "type": "string" @@ -18998,8 +19238,9 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request, should conform to RFC1035.", + "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -19009,10 +19250,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", - "request": { - "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" - }, + "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", "response": { "$ref": "Operation" }, @@ -19020,24 +19258,20 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "regionInstanceGroups": { - "methods": { + }, "get": { - "description": "Returns the specified instance group resource.", - "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", + "description": "Returns the specified regional HealthCheckService resource.", + "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", "httpMethod": "GET", - "id": "compute.regionInstanceGroups.get", + "id": "compute.regionHealthCheckServices.get", "parameterOrder": [ "project", "region", - "instanceGroup" + "healthCheckService" ], "parameters": { - "instanceGroup": { - "description": "Name of the instance group resource to return.", + "healthCheckService": { + "description": "Name of the HealthCheckService to update. 
The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", "required": true, "type": "string" @@ -19052,13 +19286,14 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", + "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", "response": { - "$ref": "InstanceGroup" + "$ref": "HealthCheckService" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19066,39 +19301,16 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "list": { - "description": "Retrieves the list of instance group resources contained within the specified region.", - "flatPath": "projects/{project}/regions/{region}/instanceGroups", - "httpMethod": "GET", - "id": "compute.regionInstanceGroups.list", + "insert": { + "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/healthCheckServices", + "httpMethod": "POST", + "id": "compute.regionHealthCheckServices.insert", "parameterOrder": [ "project", "region" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. 
You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -19109,47 +19321,43 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroups", + "path": "projects/{project}/regions/{region}/healthCheckServices", + "request": { + "$ref": "HealthCheckService" + }, "response": { - "$ref": "RegionInstanceGroupList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "listInstances": { - "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", - "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - "httpMethod": "POST", - "id": "compute.regionInstanceGroups.listInstances", + "list": { + "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", + "flatPath": "projects/{project}/regions/{region}/healthCheckServices", + "httpMethod": "GET", + "id": "compute.regionHealthCheckServices.list", "parameterOrder": [ "project", - "region", - "instanceGroup" + "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, - "instanceGroup": { - "description": "Name of the regional instance group for which we want to list the instances.", - "location": "path", - "required": true, - "type": "string" - }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -19178,6 +19386,7 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -19187,12 +19396,9 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - "request": { - "$ref": "RegionInstanceGroupsListInstancesRequest" - }, + "path": "projects/{project}/regions/{region}/healthCheckServices", "response": { - "$ref": "RegionInstanceGroupsListInstances" + "$ref": "HealthCheckServicesList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19200,19 +19406,19 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setNamedPorts": { - "description": "Sets the named ports for the specified regional instance group.", - "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - "httpMethod": "POST", - "id": "compute.regionInstanceGroups.setNamedPorts", + "patch": { + "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "httpMethod": "PATCH", + "id": "compute.regionHealthCheckServices.patch", "parameterOrder": [ "project", "region", - "instanceGroup" + "healthCheckService" ], "parameters": { - "instanceGroup": { - "description": "The name of the regional instance group where the named ports are updated.", + "healthCheckService": { + "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", "required": true, "type": "string" @@ -19227,6 +19433,7 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -19236,9 +19443,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", "request": { - "$ref": "RegionInstanceGroupsSetNamedPortsRequest" + "$ref": "HealthCheckService" }, "response": { "$ref": "Operation" @@ -19250,68 +19457,23 @@ } } }, - "regionInstances": { + "regionHealthChecks": { "methods": { - "bulkInsert": { - "description": "Creates multiple instances in a given region. 
Count specifies the number of instances to create.", - "flatPath": "projects/{project}/regions/{region}/instances/bulkInsert", - "httpMethod": "POST", - "id": "compute.regionInstances.bulkInsert", + "delete": { + "description": "Deletes the specified HealthCheck resource.", + "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "httpMethod": "DELETE", + "id": "compute.regionHealthChecks.delete", "parameterOrder": [ "project", - "region" + "region", + "healthCheck" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/instances/bulkInsert", - "request": { - "$ref": "BulkInsertInstanceResource" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "regionNetworkEndpointGroups": { - "methods": { - "delete": { - "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", - "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", - "httpMethod": "DELETE", - "id": "compute.regionNetworkEndpointGroups.delete", - "parameterOrder": [ - "project", - "region", - "networkEndpointGroup" - ], - "parameters": { - "networkEndpointGroup": { - "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + "healthCheck": { + "description": "Name of the HealthCheck resource to delete.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -19323,8 +19485,9 @@ "type": "string" }, "region": { - "description": "The name of the region where the network endpoint group is located. 
It should comply with RFC1035.", + "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -19334,7 +19497,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", "response": { "$ref": "Operation" }, @@ -19344,19 +19507,20 @@ ] }, "get": { - "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", "httpMethod": "GET", - "id": "compute.regionNetworkEndpointGroups.get", + "id": "compute.regionHealthChecks.get", "parameterOrder": [ "project", "region", - "networkEndpointGroup" + "healthCheck" ], "parameters": { - "networkEndpointGroup": { - "description": "The name of the network endpoint group. It should comply with RFC1035.", + "healthCheck": { + "description": "Name of the HealthCheck resource to return.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -19368,15 +19532,16 @@ "type": "string" }, "region": { - "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", "response": { - "$ref": "NetworkEndpointGroup" + "$ref": "HealthCheck" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19385,10 +19550,10 @@ ] }, "insert": { - "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", - "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", + "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/healthChecks", "httpMethod": "POST", - "id": "compute.regionNetworkEndpointGroups.insert", + "id": "compute.regionHealthChecks.insert", "parameterOrder": [ "project", "region" @@ -19402,8 +19567,9 @@ "type": "string" }, "region": { - "description": "The name of the region where you want to create the network endpoint group. 
It should comply with RFC1035.", + "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -19413,9 +19579,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups", + "path": "projects/{project}/regions/{region}/healthChecks", "request": { - "$ref": "NetworkEndpointGroup" + "$ref": "HealthCheck" }, "response": { "$ref": "Operation" @@ -19426,17 +19592,17 @@ ] }, "list": { - "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", - "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", + "description": "Retrieves the list of HealthCheck resources available to the specified project.", + "flatPath": "projects/{project}/regions/{region}/healthChecks", "httpMethod": "GET", - "id": "compute.regionNetworkEndpointGroups.list", + "id": "compute.regionHealthChecks.list", "parameterOrder": [ "project", "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -19466,8 +19632,9 @@ "type": "string" }, "region": { - "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -19477,33 +19644,29 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups", + "path": "projects/{project}/regions/{region}/healthChecks", "response": { - "$ref": "NetworkEndpointGroupList" + "$ref": "HealthCheckList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "regionNotificationEndpoints": { - "methods": { - "delete": { - "description": "Deletes the specified NotificationEndpoint in the given region", - "flatPath": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", - "httpMethod": "DELETE", - "id": "compute.regionNotificationEndpoints.delete", + }, + "patch": { + "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "httpMethod": "PATCH", + "id": "compute.regionHealthChecks.patch", "parameterOrder": [ "project", "region", - "notificationEndpoint" + "healthCheck" ], "parameters": { - "notificationEndpoint": { - "description": "Name of the NotificationEndpoint resource to delete.", + "healthCheck": { + "description": "Name of the HealthCheck resource to patch.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -19529,7 +19692,10 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "request": { + "$ref": "HealthCheck" + }, "response": { "$ref": "Operation" }, @@ -19538,59 +19704,24 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified NotificationEndpoint resource in the given region.", - "flatPath": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", - "httpMethod": "GET", - "id": "compute.regionNotificationEndpoints.get", + "update": { + "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + "httpMethod": "PUT", + "id": "compute.regionHealthChecks.update", "parameterOrder": [ "project", "region", - "notificationEndpoint" + "healthCheck" ], "parameters": { - "notificationEndpoint": { - "description": "Name of the NotificationEndpoint resource to return.", + "healthCheck": { + "description": "Name of the HealthCheck resource to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", - "response": { - "$ref": "NotificationEndpoint" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Create a NotificationEndpoint in the specified project in the given region using the parameters that are included in the request.", - "flatPath": "projects/{project}/regions/{region}/notificationEndpoints", - "httpMethod": "POST", - "id": "compute.regionNotificationEndpoints.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -19611,9 +19742,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/notificationEndpoints", + "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", "request": { - "$ref": "NotificationEndpoint" + "$ref": "HealthCheck" }, "response": { "$ref": "Operation" @@ -19622,38 +19753,26 @@ 
"https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "list": { - "description": "Lists the NotificationEndpoints for a project in the given region.", - "flatPath": "projects/{project}/regions/{region}/notificationEndpoints", - "httpMethod": "GET", - "id": "compute.regionNotificationEndpoints.list", + } + } + }, + "regionInstanceGroupManagers": { + "methods": { + "abandonInstances": { + "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.abandonInstances", "parameterOrder": [ "project", - "region" + "region", + "instanceGroupManager" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", + "instanceGroupManager": { + "description": "Name of the managed instance group.", + "location": "path", + "required": true, "type": "string" }, "project": { @@ -19666,45 +19785,41 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/regions/{region}/notificationEndpoints", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + "request": { + "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + }, "response": { - "$ref": "NotificationEndpointList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - } - } - }, - "regionOperations": { - "methods": { - "delete": { - "description": "Deletes the specified region-specific Operations resource.", - "flatPath": "projects/{project}/regions/{region}/operations/{operation}", - "httpMethod": "DELETE", - "id": "compute.regionOperations.delete", + }, + "applyUpdatesToInstances": { + "description": "Apply updates to selected instances the managed instance group.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", "parameterOrder": [ "project", "region", - "operation" + "instanceGroupManager" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to delete.", + "instanceGroupManager": { + "description": "The name of the managed instance group, should conform to RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -19716,34 +19831,38 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request, should conform to RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/operations/{operation}", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + "request": { + "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" + }, + "response": { + "$ref": "Operation" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Retrieves the specified region-specific Operations resource.", - "flatPath": "projects/{project}/regions/{region}/operations/{operation}", - "httpMethod": "GET", - "id": "compute.regionOperations.get", + "createInstances": { + "description": "Creates instances with per-instance configurations in this regional managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. 
You must separately verify the status of the creating or actions with the listmanagedinstances method.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.createInstances", "parameterOrder": [ "project", "region", - "operation" + "instanceGroupManager" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to return.", + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -19755,54 +19874,44 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "The name of the region where the managed instance group is located. It should conform to RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/regions/{region}/operations/{operation}", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", + "request": { + "$ref": "RegionInstanceGroupManagersCreateInstancesRequest" + }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves a list of Operation resources contained within the specified region.", - "flatPath": "projects/{project}/regions/{region}/operations", - "httpMethod": "GET", - "id": "compute.regionOperations.list", + "delete": { + "description": "Deletes the specified managed instance group and all of the instances in that group.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "httpMethod": "DELETE", + "id": "compute.regionInstanceGroupManagers.delete", "parameterOrder": [ "project", - "region" + "region", + "instanceGroupManager" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", + "instanceGroupManager": { + "description": "Name of the managed instance group to delete.", + "location": "path", + "required": true, "type": "string" }, "project": { @@ -19813,43 +19922,40 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/regions/{region}/operations", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", "response": { - "$ref": "OperationList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "wait": { - "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress. This method is called on a best-effort basis. Specifically: - In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. - If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`. ", - "flatPath": "projects/{project}/regions/{region}/operations/{operation}/wait", + "deleteInstances": { + "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", "httpMethod": "POST", - "id": "compute.regionOperations.wait", + "id": "compute.regionInstanceGroupManagers.deleteInstances", "parameterOrder": [ "project", "region", - "operation" + "instanceGroupManager" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to return.", + "instanceGroupManager": { + "description": "Name of the managed instance group.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -19861,66 +19967,64 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/regions/{region}/operations/{operation}/wait", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + "request": { + "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" + }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - } - } - }, - "regionSslCertificates": { - "methods": { - "delete": { - "description": "Deletes the specified SslCertificate resource in the region.", - "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", - "httpMethod": "DELETE", - "id": "compute.regionSslCertificates.delete", + }, + "deletePerInstanceConfigs": { + "description": "Deletes selected per-instance configurations for the managed instance group.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.deletePerInstanceConfigs", "parameterOrder": [ "project", "region", - "sslCertificate" + "instanceGroupManager" ], "parameters": { - "project": { - "description": "Project ID for this request.", + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "sslCertificate": { - "description": "Name of the SslCertificate resource to delete.", + "region": { + "description": "Name of the region scoping this request, should conform to RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + "request": { + "$ref": "RegionInstanceGroupManagerDeleteInstanceConfigReq" + }, "response": { "$ref": "Operation" }, @@ -19930,16 +20034,22 @@ ] }, "get": { - "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + "description": "Returns all of the details about the specified managed instance group.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", "httpMethod": "GET", - "id": "compute.regionSslCertificates.get", + "id": "compute.regionInstanceGroupManagers.get", "parameterOrder": [ "project", "region", - "sslCertificate" + "instanceGroupManager" ], "parameters": { + "instanceGroupManager": { + "description": "Name of the managed instance group to return.", + "location": "path", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -19950,21 +20060,13 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "sslCertificate": { - "description": "Name of the SslCertificate resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", "response": { - "$ref": "SslCertificate" + "$ref": "InstanceGroupManager" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19973,10 +20075,10 @@ ] }, "insert": { - "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", - "flatPath": "projects/{project}/regions/{region}/sslCertificates", + "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method. 
A regional managed instance group can contain up to 2000 instances.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers", "httpMethod": "POST", - "id": "compute.regionSslCertificates.insert", + "id": "compute.regionInstanceGroupManagers.insert", "parameterOrder": [ "project", "region" @@ -19992,7 +20094,6 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -20002,9 +20103,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/sslCertificates", + "path": "projects/{project}/regions/{region}/instanceGroupManagers", "request": { - "$ref": "SslCertificate" + "$ref": "InstanceGroupManager" }, "response": { "$ref": "Operation" @@ -20015,17 +20116,17 @@ ] }, "list": { - "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", - "flatPath": "projects/{project}/regions/{region}/sslCertificates", + "description": "Retrieves the list of managed instance groups that are contained within the specified region.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers", "httpMethod": "GET", - "id": "compute.regionSslCertificates.list", + "id": "compute.regionInstanceGroupManagers.list", "parameterOrder": [ "project", "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -20057,7 +20158,6 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -20067,78 +20167,56 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/sslCertificates", + "path": "projects/{project}/regions/{region}/instanceGroupManagers", "response": { - "$ref": "SslCertificateList" + "$ref": "RegionInstanceGroupManagerList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "regionTargetHttpProxies": { - "methods": { - "delete": { - "description": "Deletes the specified TargetHttpProxy resource.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", - "httpMethod": "DELETE", - "id": "compute.regionTargetHttpProxies.delete", + }, + "listErrors": { + "description": "Lists all errors thrown by actions on instances for a given regional managed instance group. The filter and orderBy query parameters are not supported.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", + "httpMethod": "GET", + "id": "compute.regionInstanceGroupManagers.listErrors", "parameterOrder": [ "project", "region", - "targetHttpProxy" + "instanceGroupManager" ], "parameters": { - "project": { - "description": "Project ID for this request.", + "filter": { + "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "instanceGroupManager": { + "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" }, - "requestId": { - "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", - "httpMethod": "GET", - "id": "compute.regionTargetHttpProxies.get", - "parameterOrder": [ - "project", - "region", - "targetHttpProxy" - ], - "parameters": { + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -20147,23 +20225,20 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region scoping this request. This should conform to RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", "response": { - "$ref": "TargetHttpProxy" + "$ref": "RegionInstanceGroupManagersListErrorsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -20171,16 +20246,46 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", + "listManagedInstances": { + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", - "id": "compute.regionTargetHttpProxies.insert", + "id": "compute.regionInstanceGroupManagers.listManagedInstances", "parameterOrder": [ "project", - "region" + "region", + "instanceGroupManager" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "instanceGroupManager": { + "description": "The name of the managed instance group.", + "location": "path", + "required": true, + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -20191,43 +20296,47 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", - "type": "string" + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies", - "request": { - "$ref": "TargetHttpProxy" - }, + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "response": { - "$ref": "Operation" + "$ref": "RegionInstanceGroupManagersListInstancesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "list": { - "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", - "httpMethod": "GET", - "id": "compute.regionTargetHttpProxies.list", + "listPerInstanceConfigs": { + "description": "Lists all of the per-instance configurations defined for the managed instance group. The orderBy query parameter is not supported.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", "parameterOrder": [ "project", - "region" + "region", + "instanceGroupManager" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -20254,9 +20363,8 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region scoping this request, should conform to RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -20266,9 +20374,9 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", "response": { - "$ref": "TargetHttpProxyList" + "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -20276,17 +20384,23 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setUrlMap": { - "description": "Changes the URL map for TargetHttpProxy.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", - "httpMethod": "POST", - "id": "compute.regionTargetHttpProxies.setUrlMap", + "patch": { + "description": "Updates a managed instance group using the information that you specify in the request. 
This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + "httpMethod": "PATCH", + "id": "compute.regionInstanceGroupManagers.patch", "parameterOrder": [ "project", "region", - "targetHttpProxy" + "instanceGroupManager" ], "parameters": { + "instanceGroupManager": { + "description": "The name of the instance group manager.", + "location": "path", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -20297,7 +20411,6 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -20305,18 +20418,11 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy to set a URL map for.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", "request": { - "$ref": "UrlMapReference" + "$ref": "InstanceGroupManager" }, "response": { "$ref": "Operation" @@ -20325,22 +20431,24 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "regionTargetHttpsProxies": { - "methods": { - "delete": { - "description": "Deletes the specified TargetHttpsProxy resource.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", - "httpMethod": "DELETE", - "id": "compute.regionTargetHttpsProxies.delete", + }, + "patchPerInstanceConfigs": { + "description": "Inserts or patches per-instance configurations for the managed instance group. 
perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", "parameterOrder": [ "project", "region", - "targetHttpsProxy" + "instanceGroupManager" ], "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -20349,9 +20457,8 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region scoping this request, should conform to RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -20359,16 +20466,12 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", + "request": { + "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" + }, "response": { "$ref": "Operation" }, @@ -20377,59 +20480,23 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", - "httpMethod": "GET", - "id": "compute.regionTargetHttpsProxies.get", + "recreateInstances": { + "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. 
You can specify a maximum of 1000 instances with this method per request.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.recreateInstances", "parameterOrder": [ "project", "region", - "targetHttpsProxy" + "instanceGroupManager" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", + "instanceGroupManager": { + "description": "Name of the managed instance group.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", - "response": { - "$ref": "TargetHttpsProxy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", - "httpMethod": "POST", - "id": "compute.regionTargetHttpsProxies.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -20440,7 +20507,6 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -20450,9 +20516,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", "request": { - "$ref": "TargetHttpsProxy" + "$ref": "RegionInstanceGroupManagersRecreateRequest" }, "response": { "$ref": "Operation" @@ -20462,37 +20528,22 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", - "httpMethod": "GET", - "id": "compute.regionTargetHttpsProxies.list", + "resize": { + "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances. The resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method. 
If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.resize", "parameterOrder": [ "project", - "region" + "region", + "instanceGroupManager", + "size" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", + "instanceGroupManager": { + "description": "Name of the managed instance group.", + "location": "path", + "required": true, "type": "string" }, "project": { @@ -20505,37 +20556,49 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" + }, + "size": { + "description": "Number of instances that should exist in this instance group manager.", + "format": "int32", + "location": "query", + "minimum": "0", + "required": true, + "type": "integer" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", "response": { - "$ref": "TargetHttpsProxyList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "setSslCertificates": { - "description": "Replaces SslCertificates for TargetHttpsProxy.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "setInstanceTemplate": { + "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", "httpMethod": "POST", - "id": "compute.regionTargetHttpsProxies.setSslCertificates", + "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", "parameterOrder": [ "project", "region", - "targetHttpsProxy" + "instanceGroupManager" ], "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group.", + "location": "path", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -20546,7 +20609,6 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -20554,18 +20616,11 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", "request": { - "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" + "$ref": "RegionInstanceGroupManagersSetTemplateRequest" }, "response": { "$ref": "Operation" @@ -20575,17 +20630,23 @@ "https://www.googleapis.com/auth/compute" ] }, - "setUrlMap": { - "description": "Changes the URL map for TargetHttpsProxy.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "setTargetPools": { + "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", "httpMethod": "POST", - "id": "compute.regionTargetHttpsProxies.setUrlMap", + "id": "compute.regionInstanceGroupManagers.setTargetPools", "parameterOrder": [ "project", "region", - "targetHttpsProxy" + "instanceGroupManager" ], "parameters": { + "instanceGroupManager": { + "description": "Name of the managed instance group.", + "location": "path", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -20596,7 +20657,6 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -20604,18 +20664,11 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy to set a URL map for.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", "request": { - "$ref": "UrlMapReference" + "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" }, "response": { "$ref": "Operation" @@ -20624,22 +20677,24 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "regionUrlMaps": { - "methods": { - "delete": { - "description": "Deletes the specified UrlMap resource.", - "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", - "httpMethod": "DELETE", - "id": "compute.regionUrlMaps.delete", + }, + "updatePerInstanceConfigs": { + "description": "Inserts or updates per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", "parameterOrder": [ "project", "region", - "urlMap" + "instanceGroupManager" ], "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -20648,26 +20703,21 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region scoping this request, should conform to RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "requestId": { - "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "urlMap": { - "description": "Name of the UrlMap resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + "request": { + "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" + }, "response": { "$ref": "Operation" }, @@ -20675,60 +20725,28 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, + } + } + }, + "regionInstanceGroups": { + "methods": { "get": { - "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "description": "Returns the specified instance group resource.", + "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", "httpMethod": "GET", - "id": "compute.regionUrlMaps.get", + "id": "compute.regionInstanceGroups.get", "parameterOrder": [ "project", "region", - "urlMap" + "instanceGroup" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", + "instanceGroup": { + "description": "Name of the instance group resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "urlMap": { - "description": "Name of the UrlMap resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}", - "response": { - "$ref": "UrlMap" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a UrlMap resource in the specified project using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/urlMaps", - "httpMethod": "POST", - "id": "compute.regionUrlMaps.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -20739,40 +20757,32 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "requestId": { - "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/regions/{region}/urlMaps", - "request": { - "$ref": "UrlMap" - }, + "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", "response": { - "$ref": "Operation" + "$ref": "InstanceGroup" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + 
"https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "list": { - "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region.", - "flatPath": "projects/{project}/regions/{region}/urlMaps", + "description": "Retrieves the list of instance group resources contained within the specified region.", + "flatPath": "projects/{project}/regions/{region}/instanceGroups", "httpMethod": "GET", - "id": "compute.regionUrlMaps.list", + "id": "compute.regionInstanceGroups.list", "parameterOrder": [ "project", "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -20804,7 +20814,6 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -20814,9 +20823,9 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/urlMaps", + "path": "projects/{project}/regions/{region}/instanceGroups", "response": { - "$ref": "UrlMapList" + "$ref": "RegionInstanceGroupList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -20824,17 +20833,46 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", - "httpMethod": "PATCH", - "id": "compute.regionUrlMaps.patch", + "listInstances": { + "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", + "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + "httpMethod": "POST", + "id": "compute.regionInstanceGroups.listInstances", "parameterOrder": [ "project", "region", - "urlMap" + "instanceGroup" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "instanceGroup": { + "description": "Name of the regional instance group for which we want to list the instances.", + "location": "path", + "required": true, + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -20845,46 +20883,45 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", - "type": "string" - }, - "urlMap": { - "description": "Name of the UrlMap resource to patch.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", "request": { - "$ref": "UrlMap" + "$ref": "RegionInstanceGroupsListInstancesRequest" }, "response": { - "$ref": "Operation" + "$ref": "RegionInstanceGroupsListInstances" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "update": { - "description": "Updates the specified UrlMap resource with the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", - "httpMethod": "PUT", - "id": "compute.regionUrlMaps.update", + "setNamedPorts": { + "description": "Sets the named ports for the specified regional instance group.", + "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + "httpMethod": "POST", + "id": "compute.regionInstanceGroups.setNamedPorts", "parameterOrder": [ "project", "region", - "urlMap" + "instanceGroup" ], "parameters": { + "instanceGroup": { + "description": "The name of the regional instance group where the named ports are updated.", + "location": "path", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -20895,26 +20932,18 @@ "region": { "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "requestId": { - "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "urlMap": { - "description": "Name of the UrlMap resource to update.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", "request": { - "$ref": "UrlMap" + "$ref": "RegionInstanceGroupsSetNamedPortsRequest" }, "response": { "$ref": "Operation" @@ -20923,16 +20952,19 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "validate": { - "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. 
Calling this method does NOT create the UrlMap.", - "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}/validate", + } + } + }, + "regionInstances": { + "methods": { + "bulkInsert": { + "description": "Creates multiple instances in a given region. Count specifies the number of instances to create.", + "flatPath": "projects/{project}/regions/{region}/instances/bulkInsert", "httpMethod": "POST", - "id": "compute.regionUrlMaps.validate", + "id": "compute.regionInstances.bulkInsert", "parameterOrder": [ "project", - "region", - "urlMap" + "region" ], "parameters": { "project": { @@ -20943,26 +20975,24 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "urlMap": { - "description": "Name of the UrlMap resource to be validated as.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}/validate", + "path": "projects/{project}/regions/{region}/instances/bulkInsert", "request": { - "$ref": "RegionUrlMapsValidateRequest" + "$ref": "BulkInsertInstanceResource" }, "response": { - "$ref": "UrlMapsValidateResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -20971,18 +21001,25 @@ } } }, - "regions": { + "regionNetworkEndpointGroups": { "methods": { - "get": { - "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", - "flatPath": "projects/{project}/regions/{region}", - "httpMethod": "GET", - "id": "compute.regions.get", + "delete": { + "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "httpMethod": "DELETE", + "id": "compute.regionNetworkEndpointGroups.delete", "parameterOrder": [ "project", - "region" + "region", + "networkEndpointGroup" ], "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group to delete. 
It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -20991,55 +21028,77 @@ "type": "string" }, "region": { - "description": "Name of the region resource to return.", + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/regions/{region}", + "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", "response": { - "$ref": "Region" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves the list of region resources available to the specified project. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `items.quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", - "flatPath": "projects/{project}/regions", + "get": { + "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", "httpMethod": "GET", - "id": "compute.regions.list", + "id": "compute.regionNetworkEndpointGroups.get", "parameterOrder": [ - "project" + "project", + "region", + "networkEndpointGroup" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", + "networkEndpointGroup": { + "description": "The name of the network endpoint group. It should comply with RFC1035.", + "location": "path", + "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", + "region": { + "description": "The name of the region where the network endpoint group is located. 
It should comply with RFC1035.", + "location": "path", + "required": true, "type": "string" - }, + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "response": { + "$ref": "NetworkEndpointGroup" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", + "httpMethod": "POST", + "id": "compute.regionNetworkEndpointGroups.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -21047,45 +21106,45 @@ "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "region": { + "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/regions", + "path": "projects/{project}/regions/{region}/networkEndpointGroups", + "request": { + "$ref": "NetworkEndpointGroup" + }, "response": { - "$ref": "RegionList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - } - } - }, - "reservations": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of reservations.", - "flatPath": "projects/{project}/aggregated/reservations", + }, + "list": { + "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", "httpMethod": "GET", - "id": "compute.reservations.aggregatedList", + "id": "compute.regionNetworkEndpointGroups.list", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -21111,61 +21170,79 @@ "required": true, "type": "string" }, + "region": { + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/aggregated/reservations", + "path": "projects/{project}/regions/{region}/networkEndpointGroups", "response": { - "$ref": "ReservationAggregatedList" + "$ref": "NetworkEndpointGroupList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "delete": { - "description": "Deletes the specified reservation.", - "flatPath": "projects/{project}/zones/{zone}/reservations/{reservation}", - "httpMethod": "DELETE", - "id": "compute.reservations.delete", + } + } + }, + "regionNetworkFirewallPolicies": { + "methods": { + "addAssociation": { + "description": "Inserts an association for the specified network firewall policy.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/addAssociation", + "httpMethod": "POST", + "id": "compute.regionNetworkFirewallPolicies.addAssociation", "parameterOrder": [ "project", - "zone", - "reservation" + "region", + "firewallPolicy" ], "parameters": { - "project": { - "description": "Project ID for this request.", + "firewallPolicy": { + "description": "Name of the firewall policy to update.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "reservation": { - "description": "Name of the reservation to delete.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "zone": { - "description": "Name of the zone for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "replaceExistingAssociation": { + "description": "Indicates whether or not to replace it if an association already exists. This is false by default, in which case an error will be returned if an association already exists.", + "location": "query", + "type": "boolean" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/zones/{zone}/reservations/{reservation}", + "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/addAssociation", + "request": { + "$ref": "FirewallPolicyAssociation" + }, "response": { "$ref": "Operation" }, @@ -21174,65 +21251,85 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Retrieves information about the specified reservation.", - "flatPath": "projects/{project}/zones/{zone}/reservations/{reservation}", - "httpMethod": "GET", - "id": "compute.reservations.get", + "addRule": { + "description": "Inserts a rule into a network firewall policy.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/addRule", + "httpMethod": "POST", + "id": "compute.regionNetworkFirewallPolicies.addRule", "parameterOrder": [ "project", - "zone", - "reservation" + "region", + "firewallPolicy" ], "parameters": { - "project": { - "description": "Project ID for this request.", + "firewallPolicy": { + "description": "Name of the firewall policy to update.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "reservation": { - "description": "Name of the reservation to retrieve.", + "maxPriority": { + "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. 
This field is exclusive with rule.priority.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "minPriority": { + "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. This field is exclusive with rule.priority.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "zone": { - "description": "Name of the zone for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/zones/{zone}/reservations/{reservation}", + "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/addRule", + "request": { + "$ref": "FirewallPolicyRule" + }, "response": { - "$ref": "Reservation" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - "flatPath": "projects/{project}/zones/{zone}/reservations/{resource}/getIamPolicy", - "httpMethod": "GET", - "id": "compute.reservations.getIamPolicy", + "cloneRules": { + "description": "Copies rules to the specified network firewall policy.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/cloneRules", + "httpMethod": "POST", + "id": "compute.regionNetworkFirewallPolicies.cloneRules", "parameterOrder": [ "project", - "zone", - "resource" + "region", + "firewallPolicy" ], "parameters": { - "optionsRequestedPolicyVersion": { - "description": "Requested IAM Policy version.", - "format": "int32", - "location": "query", - "type": "integer" + "firewallPolicy": { + "description": "Name of the firewall policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" }, "project": { "description": "Project ID for this request.", @@ -21241,41 +21338,51 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "sourceFirewallPolicy": { + "description": "The firewall policy from which to copy rules.", + "location": "query", "type": "string" } }, - "path": "projects/{project}/zones/{zone}/reservations/{resource}/getIamPolicy", + "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/cloneRules", "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a new reservation. 
For more information, read Reserving zonal resources.", - "flatPath": "projects/{project}/zones/{zone}/reservations", - "httpMethod": "POST", - "id": "compute.reservations.insert", + "delete": { + "description": "Deletes the specified network firewall policy.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}", + "httpMethod": "DELETE", + "id": "compute.regionNetworkFirewallPolicies.delete", "parameterOrder": [ "project", - "zone" + "region", + "firewallPolicy" ], "parameters": { + "firewallPolicy": { + "description": "Name of the firewall policy to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -21283,23 +21390,20 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "zone": { - "description": "Name of the zone for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/zones/{zone}/reservations", - "request": { - "$ref": "Reservation" - }, + "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}", "response": { "$ref": "Operation" }, @@ -21308,37 +21412,22 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "A list of all the reservations that have been configured for the specified project in specified zone.", - "flatPath": "projects/{project}/zones/{zone}/reservations", + "get": { + "description": "Returns the specified network firewall policy.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}", "httpMethod": "GET", - "id": "compute.reservations.list", + "id": "compute.regionNetworkFirewallPolicies.get", "parameterOrder": [ "project", - "zone" + "region", + "firewallPolicy" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", + "firewallPolicy": { + "description": "Name of the firewall policy to get.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" }, "project": { @@ -21348,22 +21437,17 @@ "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" - }, - "zone": { - "description": "Name of the zone for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/reservations", + "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}", "response": { - "$ref": "ReservationList" + "$ref": "FirewallPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -21371,112 +21455,113 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "resize": { - "description": "Resizes the reservation (applicable to standalone reservations only). For more information, read Modifying reservations.", - "flatPath": "projects/{project}/zones/{zone}/reservations/{reservation}/resize", - "httpMethod": "POST", - "id": "compute.reservations.resize", + "getAssociation": { + "description": "Gets an association with the specified name.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/getAssociation", + "httpMethod": "GET", + "id": "compute.regionNetworkFirewallPolicies.getAssociation", "parameterOrder": [ "project", - "zone", - "reservation" + "region", + "firewallPolicy" ], "parameters": { - "project": { - "description": "Project ID for this request.", + "firewallPolicy": { + "description": "Name of the firewall policy to which the queried association belongs.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "name": { + "description": "The name of the association to get from the firewall policy.", "location": "query", "type": "string" }, - "reservation": { - "description": "Name of the reservation to update.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "zone": { - "description": "Name of the zone for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/reservations/{reservation}/resize", - "request": { - "$ref": "ReservationsResizeRequest" - }, + "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/getAssociation", "response": { - "$ref": "Operation" + "$ref": "FirewallPolicyAssociation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - "flatPath": "projects/{project}/zones/{zone}/reservations/{resource}/setIamPolicy", - "httpMethod": "POST", - "id": "compute.reservations.setIamPolicy", + "getEffectiveFirewalls": { + "description": "Returns the effective firewalls on a given network.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/getEffectiveFirewalls", + "httpMethod": "GET", + "id": "compute.regionNetworkFirewallPolicies.getEffectiveFirewalls", "parameterOrder": [ "project", - "zone", - "resource" + "region", + "network" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "network": { + "description": "Network reference", + "location": "query", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "zone": { - "description": "The name of the zone for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/reservations/{resource}/setIamPolicy", - "request": { - "$ref": "ZoneSetPolicyRequest" - }, + "path": "projects/{project}/regions/{region}/firewallPolicies/getEffectiveFirewalls", "response": { - "$ref": "Policy" + "$ref": "RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + 
"https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "flatPath": "projects/{project}/zones/{zone}/reservations/{resource}/testIamPermissions", - "httpMethod": "POST", - "id": "compute.reservations.testIamPermissions", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.regionNetworkFirewallPolicies.getIamPolicy", "parameterOrder": [ "project", - "zone", + "region", "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -21484,27 +21569,24 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "region": { + "description": "The name of the region for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "zone": { - "description": "The name of the zone for this request.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/reservations/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "projects/{project}/regions/{region}/firewallPolicies/{resource}/getIamPolicy", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -21512,22 +21594,30 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "update": { - "description": "Update share settings of the reservation.", - "flatPath": "projects/{project}/zones/{zone}/reservations/{reservation}", - "httpMethod": "PATCH", - "id": "compute.reservations.update", + "getRule": { + "description": "Gets a rule of the specified priority.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/getRule", + "httpMethod": "GET", + "id": "compute.regionNetworkFirewallPolicies.getRule", "parameterOrder": [ "project", - "zone", - "reservation" + "region", + "firewallPolicy" ], "parameters": { - "paths": { - "location": "query", - "repeated": true, + "firewallPolicy": { + "description": "Name of the firewall policy to which the queried rule belongs.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" }, + "priority": { + "description": "The priority of the rule to get from the firewall policy.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -21535,35 +21625,57 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "reservation": { - "description": "Name of the reservation to update.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "updateMask": { - "description": "Update_mask indicates fields to be updated as part of this request.", - "format": "google-fieldmask", - "location": "query", + } + }, + "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/getRule", + "response": { + "$ref": "FirewallPolicyRule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a new network firewall policy in the specified project and region.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies", + "httpMethod": "POST", + "id": "compute.regionNetworkFirewallPolicies.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, "type": "string" }, - "zone": { - "description": "Name of the zone for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/zones/{zone}/reservations/{reservation}", + "path": "projects/{project}/regions/{region}/firewallPolicies", "request": { - "$ref": "Reservation" + "$ref": "FirewallPolicy" }, "response": { "$ref": "Operation" @@ -21572,30 +21684,22 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "resourcePolicies": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of resource policies.", - "flatPath": "projects/{project}/aggregated/resourcePolicies", + }, + "list": { + "description": "Lists all the network firewall policies that have been configured for the specified project in the given region.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies", "httpMethod": "GET", - "id": "compute.resourcePolicies.aggregatedList", + "id": "compute.regionNetworkFirewallPolicies.list", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -21621,15 +21725,22 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/aggregated/resourcePolicies", + "path": "projects/{project}/regions/{region}/firewallPolicies", "response": { - "$ref": "ResourcePolicyAggregatedList" + "$ref": "FirewallPolicyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -21637,17 +21748,24 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified resource policy.", - "flatPath": "projects/{project}/regions/{region}/resourcePolicies/{resourcePolicy}", - "httpMethod": "DELETE", - "id": "compute.resourcePolicies.delete", + "patch": { + "description": "Patches the specified network firewall policy.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}", + "httpMethod": "PATCH", + "id": "compute.regionNetworkFirewallPolicies.patch", "parameterOrder": [ "project", "region", - "resourcePolicy" + "firewallPolicy" ], "parameters": { + "firewallPolicy": { + "description": "Name of the firewall policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -21656,7 +21774,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -21666,16 +21784,12 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "resourcePolicy": { - "description": "Name of the resource policy to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/resourcePolicies/{resourcePolicy}", + "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}", + "request": { + "$ref": "FirewallPolicy" + }, "response": { "$ref": "Operation" }, @@ -21684,62 +21798,26 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Retrieves all information of the specified resource policy.", - "flatPath": "projects/{project}/regions/{region}/resourcePolicies/{resourcePolicy}", - "httpMethod": "GET", - "id": "compute.resourcePolicies.get", + "patchRule": { + "description": "Patches a rule of the specified priority.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchRule", + "httpMethod": "POST", + "id": "compute.regionNetworkFirewallPolicies.patchRule", "parameterOrder": [ "project", "region", - "resourcePolicy" + "firewallPolicy" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "resourcePolicy": { - "description": "Name of the resource policy to retrieve.", + "firewallPolicy": { + "description": "Name of the firewall policy to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/resourcePolicies/{resourcePolicy}", - "response": { - "$ref": "ResourcePolicy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - "flatPath": "projects/{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy", - "httpMethod": "GET", - "id": "compute.resourcePolicies.getIamPolicy", - "parameterOrder": [ - "project", - "region", - "resource" - ], - "parameters": { - "optionsRequestedPolicyVersion": { - "description": "Requested IAM Policy version.", + }, + "priority": { + "description": "The priority of the rule to patch.", "format": "int32", "location": "query", "type": "integer" @@ -21752,40 +21830,53 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy", + "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchRule", + "request": { + "$ref": "FirewallPolicyRule" + }, "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a new resource policy.", - "flatPath": "projects/{project}/regions/{region}/resourcePolicies", + "removeAssociation": { + "description": "Removes an association for the specified network firewall policy.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/removeAssociation", "httpMethod": "POST", - "id": "compute.resourcePolicies.insert", + "id": "compute.regionNetworkFirewallPolicies.removeAssociation", "parameterOrder": [ "project", - "region" + "region", + "firewallPolicy" ], "parameters": { + "firewallPolicy": { + "description": "Name of the firewall policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "name": { + "description": "Name for the association that will be removed.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -21794,7 +21885,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -21806,10 +21897,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/resourcePolicies", - "request": { - "$ref": "ResourcePolicy" - }, + "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/removeAssociation", "response": { "$ref": "Operation" }, @@ -21818,39 +21906,30 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "A list all the resource policies that have been configured for the specified project in specified region.", - "flatPath": "projects/{project}/regions/{region}/resourcePolicies", - "httpMethod": "GET", - "id": "compute.resourcePolicies.list", + "removeRule": { + "description": "Deletes a rule of the specified priority.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/removeRule", + "httpMethod": "POST", + "id": "compute.regionNetworkFirewallPolicies.removeRule", "parameterOrder": [ "project", - "region" + "region", + "firewallPolicy" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", + "firewallPolicy": { + "description": "Name of the firewall policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", + "priority": { + "description": "The priority of the rule to remove from the firewall policy.", + "format": "int32", "location": "query", - "minimum": "0", "type": "integer" }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -21859,33 +21938,32 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" + "type": "string" } }, - "path": "projects/{project}/regions/{region}/resourcePolicies", + "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/removeRule", "response": { - "$ref": "ResourcePolicyList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - "flatPath": "projects/{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{resource}/setIamPolicy", "httpMethod": "POST", - "id": "compute.resourcePolicies.setIamPolicy", + "id": "compute.regionNetworkFirewallPolicies.setIamPolicy", "parameterOrder": [ "project", "region", @@ -21914,7 +21992,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy", + "path": "projects/{project}/regions/{region}/firewallPolicies/{resource}/setIamPolicy", "request": { "$ref": "RegionSetPolicyRequest" }, @@ -21928,9 +22006,9 @@ }, "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", - "flatPath": "projects/{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{resource}/testIamPermissions", "httpMethod": "POST", - "id": "compute.resourcePolicies.testIamPermissions", + "id": "compute.regionNetworkFirewallPolicies.testIamPermissions", "parameterOrder": [ "project", "region", @@ -21959,7 +22037,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions", + "path": "projects/{project}/regions/{region}/firewallPolicies/{resource}/testIamPermissions", "request": { "$ref": "TestPermissionsRequest" }, @@ -21974,79 +22052,26 @@ } } }, - "routers": { + "regionNotificationEndpoints": { "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of routers.", - "flatPath": "projects/{project}/aggregated/routers", - "httpMethod": "GET", - "id": "compute.routers.aggregatedList", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", - "location": "query", - "type": "boolean" - } - }, - "path": "projects/{project}/aggregated/routers", - "response": { - "$ref": "RouterAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, "delete": { - "description": "Deletes the specified Router resource.", - "flatPath": "projects/{project}/regions/{region}/routers/{router}", + "description": "Deletes the specified NotificationEndpoint in the given region", + "flatPath": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", "httpMethod": "DELETE", - "id": "compute.routers.delete", + "id": "compute.regionNotificationEndpoints.delete", "parameterOrder": [ "project", "region", - "router" + "notificationEndpoint" ], "parameters": { + "notificationEndpoint": { + "description": "Name of the NotificationEndpoint resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -22055,7 +22080,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -22065,16 +22090,9 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "router": { - "description": "Name of the Router resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/routers/{router}", + "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", "response": { "$ref": "Operation" }, @@ -22084,16 +22102,23 @@ ] }, "get": { - "description": "Returns the specified Router resource. 
Gets a list of available routers by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/routers/{router}", + "description": "Returns the specified NotificationEndpoint resource in the given region.", + "flatPath": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", "httpMethod": "GET", - "id": "compute.routers.get", + "id": "compute.regionNotificationEndpoints.get", "parameterOrder": [ "project", "region", - "router" + "notificationEndpoint" ], "parameters": { + "notificationEndpoint": { + "description": "Name of the NotificationEndpoint resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -22102,23 +22127,16 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "router": { - "description": "Name of the Router resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/routers/{router}", + "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", "response": { - "$ref": "Router" + "$ref": "NotificationEndpoint" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22126,19 +22144,60 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getNatMappingInfo": { - "description": "Retrieves runtime Nat mapping information of VM endpoints.", - "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo", - "httpMethod": "GET", - "id": "compute.routers.getNatMappingInfo", + "insert": { + "description": "Create a NotificationEndpoint in the specified project in the given region using the parameters that are included in the request.", + "flatPath": "projects/{project}/regions/{region}/notificationEndpoints", + "httpMethod": "POST", + "id": "compute.regionNotificationEndpoints.insert", "parameterOrder": [ "project", - "region", - "router" + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/notificationEndpoints", + "request": { + "$ref": "NotificationEndpoint" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Lists the NotificationEndpoints for a project in the given region.", + "flatPath": "projects/{project}/regions/{region}/notificationEndpoints", + "httpMethod": "GET", + "id": "compute.regionNotificationEndpoints.list", + "parameterOrder": [ + "project", + "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -22168,7 +22227,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -22178,36 +22237,40 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" - }, - "router": { - "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo", + "path": "projects/{project}/regions/{region}/notificationEndpoints", "response": { - "$ref": "VmEndpointNatMappingsList" + "$ref": "NotificationEndpointList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "getRouterStatus": { - "description": "Retrieves runtime information of the specified router.", - "flatPath": "projects/{project}/regions/{region}/routers/{router}/getRouterStatus", - "httpMethod": "GET", - "id": "compute.routers.getRouterStatus", + } + } + }, + "regionOperations": { + "methods": { + "delete": { + "description": "Deletes the specified region-specific Operations resource.", + "flatPath": "projects/{project}/regions/{region}/operations/{operation}", + "httpMethod": "DELETE", + "id": "compute.regionOperations.delete", "parameterOrder": [ "project", "region", - "router" + "operation" ], "parameters": { + "operation": { + "description": "Name of the Operations resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -22221,35 +22284,32 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "router": { - "description": "Name of the Router resource to query.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/routers/{router}/getRouterStatus", - 
"response": { - "$ref": "RouterStatusResponse" - }, + "path": "projects/{project}/regions/{region}/operations/{operation}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a Router resource in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/routers", - "httpMethod": "POST", - "id": "compute.routers.insert", + "get": { + "description": "Retrieves the specified region-specific Operations resource.", + "flatPath": "projects/{project}/regions/{region}/operations/{operation}", + "httpMethod": "GET", + "id": "compute.regionOperations.get", "parameterOrder": [ "project", - "region" + "region", + "operation" ], "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -22263,37 +22323,30 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/regions/{region}/routers", - "request": { - "$ref": "Router" - }, + "path": "projects/{project}/regions/{region}/operations/{operation}", "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "list": { - "description": "Retrieves a list of Router resources available to the specified project.", - "flatPath": "projects/{project}/regions/{region}/routers", + "description": "Retrieves a list of Operation resources contained within the specified region.", + "flatPath": "projects/{project}/regions/{region}/operations", "httpMethod": "GET", - "id": "compute.routers.list", + "id": "compute.regionOperations.list", "parameterOrder": [ "project", "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -22335,9 +22388,9 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/routers", + "path": "projects/{project}/regions/{region}/operations", "response": { - "$ref": "RouterList" + "$ref": "OperationList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22345,67 +22398,24 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/regions/{region}/routers/{router}", - "httpMethod": "PATCH", - "id": "compute.routers.patch", + "wait": { + "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress. This method is called on a best-effort basis. Specifically: - In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. - If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`. ", + "flatPath": "projects/{project}/regions/{region}/operations/{operation}/wait", + "httpMethod": "POST", + "id": "compute.regionOperations.wait", "parameterOrder": [ "project", "region", - "router" + "operation" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "router": { - "description": "Name of the Router resource to patch.", + "operation": { + "description": "Name of the Operations resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/routers/{router}", - "request": { - "$ref": "Router" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "preview": { - "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", - "flatPath": "projects/{project}/regions/{region}/routers/{router}/preview", - "httpMethod": "POST", - "id": "compute.routers.preview", - "parameterOrder": [ - "project", - "region", - "router" - ], - "parameters": { + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -22419,37 +22429,31 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "router": { - "description": "Name of the Router resource to query.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/routers/{router}/preview", - "request": { - "$ref": "Router" - }, + "path": "projects/{project}/regions/{region}/operations/{operation}/wait", "response": { - "$ref": "RoutersPreviewResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "update": { - "description": "Updates the specified Router resource with the data included in the request. 
This method conforms to PUT semantics, which requests that the state of the target resource be created or replaced with the state defined by the representation enclosed in the request message payload.", - "flatPath": "projects/{project}/regions/{region}/routers/{router}", - "httpMethod": "PUT", - "id": "compute.routers.update", + } + } + }, + "regionSecurityPolicies": { + "methods": { + "delete": { + "description": "Deletes the specified policy.", + "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + "httpMethod": "DELETE", + "id": "compute.regionSecurityPolicies.delete", "parameterOrder": [ "project", "region", - "router" + "securityPolicy" ], "parameters": { "project": { @@ -22460,7 +22464,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -22471,18 +22475,15 @@ "location": "query", "type": "string" }, - "router": { - "description": "Name of the Router resource to update.", + "securityPolicy": { + "description": "Name of the security policy to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/routers/{router}", - "request": { - "$ref": "Router" - }, + "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", "response": { "$ref": "Operation" }, @@ -22490,19 +22491,16 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "routes": { - "methods": { - "delete": { - "description": "Deletes the specified Route resource.", - "flatPath": "projects/{project}/global/routes/{route}", - "httpMethod": "DELETE", - "id": "compute.routes.delete", + }, + "get": { + "description": "List all of the ordered rules present in a single specified policy.", + "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + "httpMethod": "GET", + "id": "compute.regionSecurityPolicies.get", "parameterOrder": [ "project", - "route" + "region", + "securityPolicy" ], "parameters": { "project": { @@ -22512,56 +22510,24 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "route": { - "description": "Name of the Route resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/global/routes/{route}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request.", - "flatPath": "projects/{project}/global/routes/{route}", - "httpMethod": "GET", - "id": "compute.routes.get", - "parameterOrder": [ - "project", - "route" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "route": { - "description": "Name of the Route resource to return.", + "securityPolicy": { + "description": "Name of the security policy to get.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/routes/{route}", + "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", "response": { - "$ref": "Route" + "$ref": "SecurityPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22570,12 +22536,13 @@ ] }, "insert": { - "description": "Creates a Route resource in the specified project using the data included in the request.", - "flatPath": "projects/{project}/global/routes", + "description": "Creates a new policy in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/securityPolicies", "httpMethod": "POST", - "id": "compute.routes.insert", + "id": "compute.regionSecurityPolicies.insert", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "project": { @@ -22585,15 +22552,27 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "validateOnly": { + "description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/global/routes", + "path": "projects/{project}/regions/{region}/securityPolicies", "request": { - "$ref": "Route" + "$ref": "SecurityPolicy" }, "response": { "$ref": "Operation" @@ -22604,16 +22583,17 @@ ] }, "list": { - "description": "Retrieves the list of Route resources available to the specified project.", - "flatPath": "projects/{project}/global/routes", + "description": "List all the policies that have been configured for the specified project and region.", + "flatPath": "projects/{project}/regions/{region}/securityPolicies", "httpMethod": "GET", - "id": "compute.routes.list", + "id": "compute.regionSecurityPolicies.list", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -22642,33 +22622,37 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/global/routes", + "path": "projects/{project}/regions/{region}/securityPolicies", "response": { - "$ref": "RouteList" + "$ref": "SecurityPolicyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "securityPolicies": { - "methods": { - "addRule": { - "description": "Inserts a rule into a security policy.", - "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}/addRule", - "httpMethod": "POST", - "id": "compute.securityPolicies.addRule", + }, + "patch": { + "description": "Patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", + "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + "httpMethod": "PATCH", + "id": "compute.regionSecurityPolicies.patch", "parameterOrder": [ "project", + "region", "securityPolicy" ], "parameters": { @@ -22679,6 +22663,18 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, "securityPolicy": { "description": "Name of the security policy to update.", "location": "path", @@ -22687,9 +22683,9 @@ "type": "string" } }, - "path": "projects/{project}/global/securityPolicies/{securityPolicy}/addRule", + "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", "request": { - "$ref": "SecurityPolicyRule" + "$ref": "SecurityPolicy" }, "response": { "$ref": "Operation" @@ -22698,15 +22694,20 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, + } + } + }, + "regionSslCertificates": { + "methods": { "delete": { - "description": "Deletes the specified policy.", - "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}", + "description": "Deletes the specified SslCertificate resource in the region.", + "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", "httpMethod": "DELETE", - "id": "compute.securityPolicies.delete", + "id": "compute.regionSslCertificates.delete", "parameterOrder": [ "project", - "securityPolicy" + "region", + "sslCertificate" ], "parameters": { "project": { @@ -22716,20 +22717,27 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "securityPolicy": { - "description": "Name of the security policy to delete.", + "sslCertificate": { + "description": "Name of the SslCertificate resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/securityPolicies/{securityPolicy}", + "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", "response": { "$ref": "Operation" }, @@ -22739,13 +22747,14 @@ ] }, "get": { - "description": "List all of the ordered rules present in a single specified policy.", - "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}", + "description": "Returns the specified SslCertificate resource in the specified region. 
Get a list of available SSL certificates by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", "httpMethod": "GET", - "id": "compute.securityPolicies.get", + "id": "compute.regionSslCertificates.get", "parameterOrder": [ "project", - "securityPolicy" + "region", + "sslCertificate" ], "parameters": { "project": { @@ -22755,58 +22764,24 @@ "required": true, "type": "string" }, - "securityPolicy": { - "description": "Name of the security policy to get.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/global/securityPolicies/{securityPolicy}", - "response": { - "$ref": "SecurityPolicy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getRule": { - "description": "Gets a rule at the specified priority.", - "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}/getRule", - "httpMethod": "GET", - "id": "compute.securityPolicies.getRule", - "parameterOrder": [ - "project", - "securityPolicy" - ], - "parameters": { - "priority": { - "description": "The priority of the rule to get from the security policy.", - "format": "int32", - "location": "query", - "type": "integer" - }, - "project": { - "description": "Project ID for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "securityPolicy": { - "description": "Name of the security policy to which the queried rule belongs.", + "sslCertificate": { + "description": "Name of the SslCertificate resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/securityPolicies/{securityPolicy}/getRule", + "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", "response": { - "$ref": "SecurityPolicyRule" + "$ref": "SslCertificate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22815,12 +22790,13 @@ ] }, "insert": { - "description": "Creates a new policy in the specified project using the data included in the request.", - "flatPath": "projects/{project}/global/securityPolicies", + "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", + "flatPath": "projects/{project}/regions/{region}/sslCertificates", "httpMethod": "POST", - "id": "compute.securityPolicies.insert", + "id": "compute.regionSslCertificates.insert", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "project": { @@ -22830,15 +22806,22 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/global/securityPolicies", + "path": "projects/{project}/regions/{region}/sslCertificates", "request": { - "$ref": "SecurityPolicy" + "$ref": "SslCertificate" }, "response": { "$ref": "Operation" @@ -22849,16 +22832,17 @@ ] }, "list": { - "description": "List all the policies that have been configured for the specified project.", - "flatPath": "projects/{project}/global/securityPolicies", + "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", + "flatPath": "projects/{project}/regions/{region}/sslCertificates", "httpMethod": "GET", - "id": "compute.securityPolicies.list", + "id": "compute.regionSslCertificates.list", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -22887,85 +22871,88 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/global/securityPolicies", + "path": "projects/{project}/regions/{region}/sslCertificates", "response": { - "$ref": "SecurityPolicyList" + "$ref": "SslCertificateList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "listPreconfiguredExpressionSets": { - "description": "Gets the current list of preconfigured Web Application Firewall (WAF) expressions.", - "flatPath": "projects/{project}/global/securityPolicies/listPreconfiguredExpressionSets", - "httpMethod": "GET", - "id": "compute.securityPolicies.listPreconfiguredExpressionSets", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", + } + } + }, + "regionSslPolicies": { + "methods": { + "delete": { + "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", + "httpMethod": "DELETE", + "id": "compute.regionSslPolicies.delete", + "parameterOrder": [ + "project", + "region", + "sslPolicy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "project": { - "description": "Project ID for this request.", + "sslPolicy": { + "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" } }, - "path": "projects/{project}/global/securityPolicies/listPreconfiguredExpressionSets", + "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", "response": { - "$ref": "SecurityPoliciesListPreconfiguredExpressionSetsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "patch": { - "description": "Patches the specified policy with the data included in the request. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", - "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}", - "httpMethod": "PATCH", - "id": "compute.securityPolicies.patch", + "get": { + "description": "Lists all of the ordered rules present in a single specified policy.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", + "httpMethod": "GET", + "id": "compute.regionSslPolicies.get", "parameterOrder": [ "project", - "securityPolicy" + "region", + "sslPolicy" ], "parameters": { "project": { @@ -22975,47 +22962,40 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" }, - "securityPolicy": { - "description": "Name of the security policy to update.", + "sslPolicy": { + "description": "Name of the SSL policy to update. 
The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/securityPolicies/{securityPolicy}", - "request": { - "$ref": "SecurityPolicy" - }, + "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", "response": { - "$ref": "Operation" + "$ref": "SslPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "patchRule": { - "description": "Patches a rule at the specified priority.", - "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}/patchRule", + "insert": { + "description": "Creates a new policy in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies", "httpMethod": "POST", - "id": "compute.securityPolicies.patchRule", + "id": "compute.regionSslPolicies.insert", "parameterOrder": [ "project", - "securityPolicy" + "region" ], "parameters": { - "priority": { - "description": "The priority of the rule to patch.", - "format": "int32", - "location": "query", - "type": "integer" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -23023,17 +23003,22 @@ "required": true, "type": "string" }, - "securityPolicy": { - "description": "Name of the security policy to update.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/global/securityPolicies/{securityPolicy}/patchRule", + "path": "projects/{project}/regions/{region}/sslPolicies", "request": { - "$ref": "SecurityPolicyRule" + "$ref": "SslPolicy" }, "response": { "$ref": "Operation" @@ -23043,22 +23028,39 @@ "https://www.googleapis.com/auth/compute" ] }, - "removeRule": { - "description": "Deletes a rule at the specified priority.", - "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}/removeRule", - "httpMethod": "POST", - "id": "compute.securityPolicies.removeRule", + "list": { + "description": "Lists all the SSL policies that have been configured for the specified project and region.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies", + "httpMethod": "GET", + "id": "compute.regionSslPolicies.list", "parameterOrder": [ "project", - "securityPolicy" + "region" ], "parameters": { - "priority": { - "description": "The priority of the rule to remove from the security policy.", - "format": "int32", + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", "location": "query", + "minimum": "0", "type": "integer" }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -23066,46 +23068,44 @@ "required": true, "type": "string" }, - "securityPolicy": { - "description": "Name of the security policy to update.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/global/securityPolicies/{securityPolicy}/removeRule", + "path": "projects/{project}/regions/{region}/sslPolicies", "response": { - "$ref": "Operation" + "$ref": "SslPoliciesList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "serviceAttachments": { - "methods": { - "aggregatedList": { - "description": "Retrieves the list of all ServiceAttachment resources, regional and global, available to the specified project.", - "flatPath": "projects/{project}/aggregated/serviceAttachments", + }, + "listAvailableFeatures": { + "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures", "httpMethod": "GET", - "id": "compute.serviceAttachments.aggregatedList", + "id": "compute.regionSslPolicies.listAvailableFeatures", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -23125,21 +23125,28 @@ "type": "string" }, "project": { - "description": "Name of the project scoping this request.", + "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/aggregated/serviceAttachments", + "path": "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures", "response": { - "$ref": "ServiceAttachmentAggregatedList" + "$ref": "SslPoliciesListAvailableFeaturesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -23147,15 +23154,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified ServiceAttachment in the given scope", - "flatPath": "projects/{project}/regions/{region}/serviceAttachments/{serviceAttachment}", - "httpMethod": "DELETE", - "id": "compute.serviceAttachments.delete", + "patch": { + "description": "Patches the specified SSL policy with the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", + "httpMethod": "PATCH", + "id": "compute.regionSslPolicies.patch", "parameterOrder": [ "project", "region", - "serviceAttachment" + "sslPolicy" ], "parameters": { "project": { @@ -23166,7 +23173,7 @@ "type": "string" }, "region": { - "description": "Name of the region of this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -23177,15 +23184,17 @@ "location": "query", "type": "string" }, - "serviceAttachment": { - "description": "Name of the ServiceAttachment resource to delete.", + "sslPolicy": { + "description": "Name of the SSL policy to update. 
The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/serviceAttachments/{serviceAttachment}", + "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", + "request": { + "$ref": "SslPolicy" + }, "response": { "$ref": "Operation" }, @@ -23193,16 +23202,20 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "get": { - "description": "Returns the specified ServiceAttachment resource in the given scope.", - "flatPath": "projects/{project}/regions/{region}/serviceAttachments/{serviceAttachment}", - "httpMethod": "GET", - "id": "compute.serviceAttachments.get", + } + } + }, + "regionTargetHttpProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetHttpProxy resource.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "httpMethod": "DELETE", + "id": "compute.regionTargetHttpProxies.delete", "parameterOrder": [ "project", "region", - "serviceAttachment" + "targetHttpProxy" ], "parameters": { "project": { @@ -23213,47 +23226,45 @@ "type": "string" }, "region": { - "description": "Name of the region of this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "serviceAttachment": { - "description": "Name of the ServiceAttachment resource to return.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/serviceAttachments/{serviceAttachment}", + "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "response": { - "$ref": "ServiceAttachment" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - "flatPath": "projects/{project}/regions/{region}/serviceAttachments/{resource}/getIamPolicy", + "get": { + "description": "Returns the specified TargetHttpProxy resource in the specified region. 
Gets a list of available target HTTP proxies by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "httpMethod": "GET", - "id": "compute.serviceAttachments.getIamPolicy", + "id": "compute.regionTargetHttpProxies.get", "parameterOrder": [ "project", "region", - "resource" + "targetHttpProxy" ], "parameters": { - "optionsRequestedPolicyVersion": { - "description": "Requested IAM Policy version.", - "format": "int32", - "location": "query", - "type": "integer" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -23262,23 +23273,23 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/serviceAttachments/{resource}/getIamPolicy", + "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "response": { - "$ref": "Policy" + "$ref": "TargetHttpProxy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -23287,10 +23298,10 @@ ] }, "insert": { - "description": "Creates a ServiceAttachment in the specified project in the given scope using the parameters that are included in the request.", - "flatPath": "projects/{project}/regions/{region}/serviceAttachments", + "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", "httpMethod": "POST", - "id": "compute.serviceAttachments.insert", + "id": "compute.regionTargetHttpProxies.insert", "parameterOrder": [ "project", "region" @@ -23304,7 +23315,7 @@ "type": "string" }, "region": { - "description": "Name of the region of this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -23316,9 +23327,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/serviceAttachments", + "path": "projects/{project}/regions/{region}/targetHttpProxies", "request": { - "$ref": "ServiceAttachment" + "$ref": "TargetHttpProxy" }, "response": { "$ref": "Operation" @@ -23329,17 +23340,17 @@ ] }, "list": { - "description": "Lists the ServiceAttachments for a project in the given scope.", - "flatPath": "projects/{project}/regions/{region}/serviceAttachments", + "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", "httpMethod": "GET", - "id": "compute.serviceAttachments.list", + "id": "compute.regionTargetHttpProxies.list", "parameterOrder": [ "project", "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -23369,7 +23380,7 @@ "type": "string" }, "region": { - "description": "Name of the region of this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -23381,9 +23392,9 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/serviceAttachments", + "path": "projects/{project}/regions/{region}/targetHttpProxies", "response": { - "$ref": "ServiceAttachmentList" + "$ref": "TargetHttpProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -23391,15 +23402,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified ServiceAttachment resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/regions/{region}/serviceAttachments/{serviceAttachment}", - "httpMethod": "PATCH", - "id": "compute.serviceAttachments.patch", + "setUrlMap": { + "description": "Changes the URL map for TargetHttpProxy.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "httpMethod": "POST", + "id": "compute.regionTargetHttpProxies.setUrlMap", "parameterOrder": [ "project", "region", - "serviceAttachment" + "targetHttpProxy" ], "parameters": { "project": { @@ -23410,8 +23421,9 @@ "type": "string" }, "region": { - "description": "The region scoping this request and should conform to RFC1035.", + "description": "Name of the region scoping this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -23420,16 +23432,17 @@ "location": "query", "type": "string" }, - "serviceAttachment": { - "description": "The resource id of the ServiceAttachment to patch. It should conform to RFC1035 resource name or be a string form on an unsigned long number.", + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy to set a URL map for.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/serviceAttachments/{serviceAttachment}", + "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", "request": { - "$ref": "ServiceAttachment" + "$ref": "UrlMapReference" }, "response": { "$ref": "Operation" @@ -23438,16 +23451,20 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", - "flatPath": "projects/{project}/regions/{region}/serviceAttachments/{resource}/setIamPolicy", - "httpMethod": "POST", - "id": "compute.serviceAttachments.setIamPolicy", + } + } + }, + "regionTargetHttpsProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetHttpsProxy resource.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "DELETE", + "id": "compute.regionTargetHttpsProxies.delete", "parameterOrder": [ "project", "region", - "resource" + "targetHttpsProxy" ], "parameters": { "project": { @@ -23458,41 +23475,43 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/serviceAttachments/{resource}/setIamPolicy", - "request": { - "$ref": "RegionSetPolicyRequest" - }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "flatPath": "projects/{project}/regions/{region}/serviceAttachments/{resource}/testIamPermissions", - "httpMethod": "POST", - "id": "compute.serviceAttachments.testIamPermissions", + "get": { + "description": "Returns the specified TargetHttpsProxy resource in the specified region. 
Gets a list of available target HTTP proxies by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "GET", + "id": "compute.regionTargetHttpsProxies.get", "parameterOrder": [ "project", "region", - "resource" + "targetHttpsProxy" ], "parameters": { "project": { @@ -23503,45 +23522,38 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/serviceAttachments/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "TargetHttpsProxy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "snapshots": { - "methods": { - "delete": { - "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot. For more information, see Deleting snapshots.", - "flatPath": "projects/{project}/global/snapshots/{snapshot}", - "httpMethod": "DELETE", - "id": "compute.snapshots.delete", + }, + "insert": { + "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", + "httpMethod": "POST", + "id": "compute.regionTargetHttpsProxies.insert", "parameterOrder": [ "project", - "snapshot" + "region" ], "parameters": { "project": { @@ -23551,20 +23563,23 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "snapshot": { - "description": "Name of the Snapshot resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/global/snapshots/{snapshot}", + "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "request": { + "$ref": "TargetHttpsProxy" + }, "response": { "$ref": "Operation" }, @@ -23573,16 +23588,39 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request.", - "flatPath": "projects/{project}/global/snapshots/{snapshot}", + "list": { + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", "httpMethod": "GET", - "id": "compute.snapshots.get", + "id": "compute.regionTargetHttpsProxies.list", "parameterOrder": [ "project", - "snapshot" + "region" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -23590,17 +23628,22 @@ "required": true, "type": "string" }, - "snapshot": { - "description": "Name of the Snapshot resource to return.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/global/snapshots/{snapshot}", + "path": "projects/{project}/regions/{region}/targetHttpsProxies", "response": { - "$ref": "Snapshot" + "$ref": "TargetHttpsProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -23608,22 +23651,17 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - "flatPath": "projects/{project}/global/snapshots/{resource}/getIamPolicy", - "httpMethod": "GET", - "id": "compute.snapshots.getIamPolicy", + "patch": { + "description": "Patches the specified regional TargetHttpsProxy resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "PATCH", + "id": "compute.regionTargetHttpsProxies.patch", "parameterOrder": [ "project", - "resource" + "region", + "targetHttpsProxy" ], "parameters": { - "optionsRequestedPolicyVersion": { - "description": "Requested IAM Policy version.", - "format": "int32", - "location": "query", - "type": "integer" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -23631,31 +23669,47 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "region": { + "description": "Name of the region for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/snapshots/{resource}/getIamPolicy", + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "request": { + "$ref": "TargetHttpsProxy" + }, "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a snapshot in the specified project using the data included in the request. 
For regular snapshot creation, consider using this method instead of disks.createSnapshot, as this method supports more features, such as creating snapshots in a project different from the source disk project.", - "flatPath": "projects/{project}/global/snapshots", + "setSslCertificates": { + "description": "Replaces SslCertificates for TargetHttpsProxy.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", "httpMethod": "POST", - "id": "compute.snapshots.insert", + "id": "compute.regionTargetHttpsProxies.setSslCertificates", "parameterOrder": [ - "project" + "project", + "region", + "targetHttpsProxy" ], "parameters": { "project": { @@ -23665,15 +23719,29 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "projects/{project}/global/snapshots", + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", "request": { - "$ref": "Snapshot" + "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" }, "response": { "$ref": "Operation" @@ -23683,69 +23751,69 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves the list of Snapshot resources contained within the specified project.", - "flatPath": "projects/{project}/global/snapshots", - "httpMethod": "GET", - "id": "compute.snapshots.list", + "setUrlMap": { + "description": "Changes the URL map for TargetHttpsProxy.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "httpMethod": "POST", + "id": "compute.regionTargetHttpsProxies.setUrlMap", "parameterOrder": [ - "project" + "project", + "region", + "targetHttpsProxy" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "project": { - "description": "Project ID for this request.", + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy to set a URL map for.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" } }, - "path": "projects/{project}/global/snapshots", + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "request": { + "$ref": "UrlMapReference" + }, "response": { - "$ref": "SnapshotList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - "flatPath": "projects/{project}/global/snapshots/{resource}/setIamPolicy", - "httpMethod": "POST", - "id": "compute.snapshots.setIamPolicy", + } + } + }, + "regionTargetTcpProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetTcpProxy resource.", + "flatPath": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + "httpMethod": "DELETE", + "id": "compute.regionTargetTcpProxies.delete", "parameterOrder": [ "project", - "resource" + "region", + "targetTcpProxy" ], "parameters": { "project": { @@ -23755,34 +23823,44 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/snapshots/{resource}/setIamPolicy", - "request": { - "$ref": "GlobalSetPolicyRequest" - }, + "path": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "setLabels": { - "description": "Sets the labels on a snapshot. To learn more about labels, read the Labeling Resources documentation.", - "flatPath": "projects/{project}/global/snapshots/{resource}/setLabels", - "httpMethod": "POST", - "id": "compute.snapshots.setLabels", + "get": { + "description": "Returns the specified TargetTcpProxy resource.", + "flatPath": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + "httpMethod": "GET", + "id": "compute.regionTargetTcpProxies.get", "parameterOrder": [ "project", - "resource" + "region", + "targetTcpProxy" ], "parameters": { "project": { @@ -23792,34 +23870,39 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/snapshots/{resource}/setLabels", - "request": { - "$ref": "GlobalSetLabelsRequest" - }, + "path": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", "response": { - "$ref": "Operation" + "$ref": "TargetTcpProxy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "flatPath": "projects/{project}/global/snapshots/{resource}/testIamPermissions", + "insert": { + "description": "Creates a TargetTcpProxy resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/targetTcpProxies", "httpMethod": "POST", - "id": "compute.snapshots.testIamPermissions", + "id": "compute.regionTargetTcpProxies.insert", "parameterOrder": [ "project", - "resource" + "region" ], "parameters": { "project": { @@ -23829,50 +23912,46 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional 
request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/global/snapshots/{resource}/testIamPermissions", + "path": "projects/{project}/regions/{region}/targetTcpProxies", "request": { - "$ref": "TestPermissionsRequest" + "$ref": "TargetTcpProxy" }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - } - } - }, - "sslCertificates": { - "methods": { - "aggregatedList": { - "description": "Retrieves the list of all SslCertificate resources, regional and global, available to the specified project.", - "flatPath": "projects/{project}/aggregated/sslCertificates", + }, + "list": { + "description": "Retrieves a list of TargetTcpProxy resources available to the specified project in a given region.", + "flatPath": "projects/{project}/regions/{region}/targetTcpProxies", "httpMethod": "GET", - "id": "compute.sslCertificates.aggregatedList", + "id": "compute.regionTargetTcpProxies.list", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", @@ -23892,36 +23971,48 @@ "type": "string" }, "project": { - "description": "Name of the project scoping this request.", + "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/aggregated/sslCertificates", + "path": "projects/{project}/regions/{region}/targetTcpProxies", "response": { - "$ref": "SslCertificateAggregatedList" + "$ref": "TargetTcpProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, + } + } + }, + "regionUrlMaps": { + "methods": { "delete": { - "description": "Deletes the specified SslCertificate resource.", - "flatPath": "projects/{project}/global/sslCertificates/{sslCertificate}", + "description": "Deletes the specified UrlMap resource.", + "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", "httpMethod": "DELETE", - "id": "compute.sslCertificates.delete", + "id": "compute.regionUrlMaps.delete", "parameterOrder": [ "project", - "sslCertificate" + "region", + "urlMap" ], "parameters": { "project": { @@ -23931,20 +24022,27 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", "location": "query", "type": "string" }, - "sslCertificate": { - "description": "Name of the SslCertificate resource to delete.", + "urlMap": { + "description": "Name of the UrlMap resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/sslCertificates/{sslCertificate}", + "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}", "response": { "$ref": "Operation" }, @@ -23954,13 +24052,14 @@ ] }, "get": { - "description": "Returns the specified SslCertificate resource. 
Gets a list of available SSL certificates by making a list() request.", - "flatPath": "projects/{project}/global/sslCertificates/{sslCertificate}", + "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", "httpMethod": "GET", - "id": "compute.sslCertificates.get", + "id": "compute.regionUrlMaps.get", "parameterOrder": [ "project", - "sslCertificate" + "region", + "urlMap" ], "parameters": { "project": { @@ -23970,17 +24069,24 @@ "required": true, "type": "string" }, - "sslCertificate": { - "description": "Name of the SslCertificate resource to return.", + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/sslCertificates/{sslCertificate}", + "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}", "response": { - "$ref": "SslCertificate" + "$ref": "UrlMap" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -23989,12 +24095,13 @@ ] }, "insert": { - "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", - "flatPath": "projects/{project}/global/sslCertificates", + "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/urlMaps", "httpMethod": "POST", - "id": "compute.sslCertificates.insert", + "id": "compute.regionUrlMaps.insert", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "project": { @@ -24004,15 +24111,22 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", "location": "query", "type": "string" } }, - "path": "projects/{project}/global/sslCertificates", + "path": "projects/{project}/regions/{region}/urlMaps", "request": { - "$ref": "SslCertificate" + "$ref": "UrlMap" }, "response": { "$ref": "Operation" @@ -24023,16 +24137,17 @@ ] }, "list": { - "description": "Retrieves the list of SslCertificate resources available to the specified project.", - "flatPath": "projects/{project}/global/sslCertificates", + "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region.", + "flatPath": "projects/{project}/regions/{region}/urlMaps", "httpMethod": "GET", - "id": "compute.sslCertificates.list", + "id": "compute.regionUrlMaps.list", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -24061,34 +24176,38 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/global/sslCertificates", + "path": "projects/{project}/regions/{region}/urlMaps", "response": { - "$ref": "SslCertificateList" + "$ref": "UrlMapList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "sslPolicies": { - "methods": { - "delete": { - "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", - "flatPath": "projects/{project}/global/sslPolicies/{sslPolicy}", - "httpMethod": "DELETE", - "id": "compute.sslPolicies.delete", + }, + "patch": { + "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "httpMethod": "PATCH", + "id": "compute.regionUrlMaps.patch", "parameterOrder": [ "project", - "sslPolicy" + "region", + "urlMap" ], "parameters": { "project": { @@ -24098,19 +24217,30 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", "location": "query", "type": "string" }, - "sslPolicy": { - "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", + "urlMap": { + "description": "Name of the UrlMap resource to patch.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/sslPolicies/{sslPolicy}", + "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "request": { + "$ref": "UrlMap" + }, "response": { "$ref": "Operation" }, @@ -24119,14 +24249,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Lists all of the ordered rules present in a single specified policy.", - "flatPath": "projects/{project}/global/sslPolicies/{sslPolicy}", - "httpMethod": "GET", - "id": "compute.sslPolicies.get", + "update": { + "description": "Updates the specified UrlMap resource with the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "httpMethod": "PUT", + "id": "compute.regionUrlMaps.update", "parameterOrder": [ "project", - "sslPolicy" + "region", + "urlMap" ], "parameters": { "project": { @@ -24136,30 +24267,47 @@ "required": true, "type": "string" }, - "sslPolicy": { - "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to update.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/sslPolicies/{sslPolicy}", + "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "request": { + "$ref": "UrlMap" + }, "response": { - "$ref": "SslPolicy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request.", - "flatPath": "projects/{project}/global/sslPolicies", + "validate": { + "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. 
Calling this method does NOT create the UrlMap.", + "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}/validate", "httpMethod": "POST", - "id": "compute.sslPolicies.insert", + "id": "compute.regionUrlMaps.validate", "parameterOrder": [ - "project" + "project", + "region", + "urlMap" ], "parameters": { "project": { @@ -24169,35 +24317,83 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to be validated as.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" } }, - "path": "projects/{project}/global/sslPolicies", + "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}/validate", "request": { - "$ref": "SslPolicy" + "$ref": "RegionUrlMapsValidateRequest" }, "response": { - "$ref": "Operation" + "$ref": "UrlMapsValidateResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + } + } + }, + "regions": { + "methods": { + "get": { + "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. 
For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", + "flatPath": "projects/{project}/regions/{region}", + "httpMethod": "GET", + "id": "compute.regions.get", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}", + "response": { + "$ref": "Region" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] }, "list": { - "description": "Lists all the SSL policies that have been configured for the specified project.", - "flatPath": "projects/{project}/global/sslPolicies", + "description": "Retrieves the list of region resources available to the specified project. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `items.quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", + "flatPath": "projects/{project}/regions", "httpMethod": "GET", - "id": "compute.sslPolicies.list", + "id": "compute.regions.list", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -24232,30 +24428,39 @@ "type": "boolean" } }, - "path": "projects/{project}/global/sslPolicies", + "path": "projects/{project}/regions", "response": { - "$ref": "SslPoliciesList" + "$ref": "RegionList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "listAvailableFeatures": { - "description": "Lists all features that can be specified in the SSL policy when using custom profile.", - "flatPath": "projects/{project}/global/sslPolicies/listAvailableFeatures", + } + } + }, + "reservations": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of reservations.", + "flatPath": "projects/{project}/aggregated/reservations", "httpMethod": "GET", - "id": "compute.sslPolicies.listAvailableFeatures", + "id": "compute.reservations.aggregatedList", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -24287,9 +24492,9 @@ "type": "boolean" } }, - "path": "projects/{project}/global/sslPolicies/listAvailableFeatures", + "path": "projects/{project}/aggregated/reservations", "response": { - "$ref": "SslPoliciesListAvailableFeaturesResponse" + "$ref": "ReservationAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -24297,14 +24502,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified SSL policy with the data included in the request.", - "flatPath": "projects/{project}/global/sslPolicies/{sslPolicy}", - "httpMethod": "PATCH", - "id": "compute.sslPolicies.patch", + "delete": { + "description": "Deletes the specified reservation.", + "flatPath": "projects/{project}/zones/{zone}/reservations/{reservation}", + "httpMethod": "DELETE", + "id": "compute.reservations.delete", "parameterOrder": [ "project", - "sslPolicy" + "zone", + "reservation" ], "parameters": { "project": { @@ -24319,17 +24525,22 @@ "location": "query", "type": "string" }, - "sslPolicy": { - "description": "Name of the SSL policy to update. 
The name must be 1-63 characters long, and comply with RFC1035.", + "reservation": { + "description": "Name of the reservation to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/global/sslPolicies/{sslPolicy}", - "request": { - "$ref": "SslPolicy" - }, + "path": "projects/{project}/zones/{zone}/reservations/{reservation}", "response": { "$ref": "Operation" }, @@ -24337,48 +24548,18 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "subnetworks": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of subnetworks.", - "flatPath": "projects/{project}/aggregated/subnetworks", + }, + "get": { + "description": "Retrieves information about the specified reservation.", + "flatPath": "projects/{project}/zones/{zone}/reservations/{reservation}", "httpMethod": "GET", - "id": "compute.subnetworks.aggregatedList", + "id": "compute.reservations.get", "parameterOrder": [ - "project" + "project", + "zone", + "reservation" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -24386,15 +24567,24 @@ "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" + "reservation": { + "description": "Name of the reservation to retrieve.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "projects/{project}/aggregated/subnetworks", + "path": "projects/{project}/zones/{zone}/reservations/{reservation}", "response": { - "$ref": "SubnetworkAggregatedList" + "$ref": "Reservation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -24402,17 +24592,23 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified subnetwork.", - "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", - "httpMethod": "DELETE", - "id": "compute.subnetworks.delete", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/zones/{zone}/reservations/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.reservations.getIamPolicy", "parameterOrder": [ "project", - "region", - "subnetwork" + "zone", + "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -24420,44 +24616,39 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "subnetwork": { - "description": "Name of the Subnetwork resource to delete.", + "zone": { + "description": "The name of the zone for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", + "path": "projects/{project}/zones/{zone}/reservations/{resource}/getIamPolicy", "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "expandIpCidrRange": { - "description": "Expands the IP CIDR range of the subnetwork to a specified value.", - "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", + "insert": { + "description": "Creates a new reservation. For more information, read Reserving zonal resources.", + "flatPath": "projects/{project}/zones/{zone}/reservations", "httpMethod": "POST", - "id": "compute.subnetworks.expandIpCidrRange", + "id": "compute.reservations.insert", "parameterOrder": [ "project", - "region", - "subnetwork" + "zone" ], "parameters": { "project": { @@ -24467,29 +24658,22 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "subnetwork": { - "description": "Name of the Subnetwork resource to update.", + "zone": { + "description": "Name of the zone for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", + "path": "projects/{project}/zones/{zone}/reservations", "request": { - "$ref": "SubnetworksExpandIpCidrRangeRequest" + "$ref": "Reservation" }, "response": { "$ref": "Operation" @@ -24499,17 +24683,39 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified subnetwork. Gets a list of available subnetworks list() request.", - "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", + "list": { + "description": "A list of all the reservations that have been configured for the specified project in specified zone.", + "flatPath": "projects/{project}/zones/{zone}/reservations", "httpMethod": "GET", - "id": "compute.subnetworks.get", + "id": "compute.reservations.list", "parameterOrder": [ "project", - "region", - "subnetwork" + "zone" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -24517,24 +24723,22 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" }, - "subnetwork": { - "description": "Name of the Subnetwork resource to return.", + "zone": { + "description": "Name of the zone for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", + "path": "projects/{project}/zones/{zone}/reservations", "response": { - "$ref": "Subnetwork" + "$ref": "ReservationList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -24542,23 +24746,17 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - "flatPath": "projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", - "httpMethod": "GET", - "id": "compute.subnetworks.getIamPolicy", + "resize": { + "description": "Resizes the reservation (applicable to standalone reservations only). For more information, read Modifying reservations.", + "flatPath": "projects/{project}/zones/{zone}/reservations/{reservation}/resize", + "httpMethod": "POST", + "id": "compute.reservations.resize", "parameterOrder": [ "project", - "region", - "resource" + "zone", + "reservation" ], "parameters": { - "optionsRequestedPolicyVersion": { - "description": "Requested IAM Policy version.", - "format": "int32", - "location": "query", - "type": "integer" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -24566,39 +24764,47 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "reservation": { + "description": "Name of the reservation to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "zone": { + "description": "Name of the zone for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", + "path": "projects/{project}/zones/{zone}/reservations/{reservation}/resize", + "request": { + "$ref": "ReservationsResizeRequest" + }, "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a subnetwork in the specified project using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/subnetworks", + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "flatPath": "projects/{project}/zones/{zone}/reservations/{resource}/setIamPolicy", "httpMethod": "POST", - "id": "compute.subnetworks.insert", + "id": "compute.reservations.setIamPolicy", "parameterOrder": [ "project", - "region" + "zone", + "resource" ], "parameters": { "project": { @@ -24608,62 +24814,93 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/subnetworks", + "path": "projects/{project}/zones/{zone}/reservations/{resource}/setIamPolicy", "request": { - "$ref": "Subnetwork" + "$ref": "ZoneSetPolicyRequest" }, "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves a list of subnetworks available to the specified project.", - "flatPath": "projects/{project}/regions/{region}/subnetworks", - "httpMethod": "GET", - "id": "compute.subnetworks.list", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/zones/{zone}/reservations/{resource}/testIamPermissions", + "httpMethod": "POST", + "id": "compute.reservations.testIamPermissions", "parameterOrder": [ "project", - "region" + "zone", + "resource" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/reservations/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "update": { + "description": "Update share settings of the reservation.", + "flatPath": "projects/{project}/zones/{zone}/reservations/{reservation}", + "httpMethod": "PATCH", + "id": "compute.reservations.update", + "parameterOrder": [ + "project", + "zone", + "reservation" + ], + "parameters": { + "paths": { "location": "query", + "repeated": true, "type": "string" }, "project": { @@ -24673,43 +24910,67 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "reservation": { + "description": "Name of the reservation to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "updateMask": { + "description": "Update_mask indicates fields to be updated as part of this request.", + "format": "google-fieldmask", "location": "query", - "type": "boolean" + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "projects/{project}/regions/{region}/subnetworks", + "path": "projects/{project}/zones/{zone}/reservations/{reservation}", + "request": { + "$ref": "Reservation" + }, "response": { - "$ref": "SubnetworkList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - }, - "listUsable": { - "description": "Retrieves an aggregated list of all usable subnetworks in the project.", - "flatPath": "projects/{project}/aggregated/subnetworks/listUsable", + } + } + }, + "resourcePolicies": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of resource policies.", + "flatPath": "projects/{project}/aggregated/resourcePolicies", "httpMethod": "GET", - "id": "compute.subnetworks.listUsable", + "id": "compute.resourcePolicies.aggregatedList", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. 
Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -24741,9 +25002,9 @@ "type": "boolean" } }, - "path": "projects/{project}/aggregated/subnetworks/listUsable", + "path": "projects/{project}/aggregated/resourcePolicies", "response": { - "$ref": "UsableSubnetworksAggregatedList" + "$ref": "ResourcePolicyAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -24751,23 +25012,17 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can be updated with a patch request as indicated in the field descriptions. You must specify the current fingerprint of the subnetwork resource being patched.", - "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", - "httpMethod": "PATCH", - "id": "compute.subnetworks.patch", + "delete": { + "description": "Deletes the specified resource policy.", + "flatPath": "projects/{project}/regions/{region}/resourcePolicies/{resourcePolicy}", + "httpMethod": "DELETE", + "id": "compute.resourcePolicies.delete", "parameterOrder": [ "project", "region", - "subnetwork" + "resourcePolicy" ], "parameters": { - "drainTimeoutSeconds": { - "description": "The drain timeout specifies the upper bound in seconds on the amount of time allowed to drain connections from the current ACTIVE subnetwork to the current BACKUP subnetwork. The drain timeout is only applicable when the following conditions are true: - the subnetwork being patched has purpose = INTERNAL_HTTPS_LOAD_BALANCER - the subnetwork being patched has role = BACKUP - the patch request is setting the role to ACTIVE. Note that after this patch operation the roles of the ACTIVE and BACKUP subnetworks will be swapped.", - "format": "int32", - "location": "query", - "type": "integer" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -24776,7 +25031,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -24787,18 +25042,15 @@ "location": "query", "type": "string" }, - "subnetwork": { - "description": "Name of the Subnetwork resource to patch.", + "resourcePolicy": { + "description": "Name of the resource policy to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", - "request": { - "$ref": "Subnetwork" - }, + "path": "projects/{project}/regions/{region}/resourcePolicies/{resourcePolicy}", "response": { "$ref": "Operation" }, @@ -24807,15 +25059,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", - "flatPath": "projects/{project}/regions/{region}/subnetworks/{resource}/setIamPolicy", - "httpMethod": "POST", - "id": "compute.subnetworks.setIamPolicy", + "get": { + "description": "Retrieves all information of the specified resource policy.", + "flatPath": "projects/{project}/regions/{region}/resourcePolicies/{resourcePolicy}", + "httpMethod": "GET", + "id": "compute.resourcePolicies.get", "parameterOrder": [ "project", "region", - "resource" + "resourcePolicy" ], "parameters": { "project": { @@ -24826,43 +25078,47 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "resourcePolicy": { + "description": "Name of the resource policy to retrieve.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/subnetworks/{resource}/setIamPolicy", - "request": { - "$ref": "RegionSetPolicyRequest" - }, + "path": "projects/{project}/regions/{region}/resourcePolicies/{resourcePolicy}", "response": { - "$ref": "Policy" + "$ref": "ResourcePolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setPrivateIpGoogleAccess": { - "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", - "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", - "httpMethod": "POST", - "id": "compute.subnetworks.setPrivateIpGoogleAccess", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.resourcePolicies.getIamPolicy", "parameterOrder": [ "project", "region", - "subnetwork" + "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -24871,134 +25127,38 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "subnetwork": { - "description": "Name of the Subnetwork resource.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", - "request": { - "$ref": "SubnetworksSetPrivateIpGoogleAccessRequest" - }, + "path": "projects/{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy", "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "flatPath": "projects/{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", - "httpMethod": "POST", - "id": "compute.subnetworks.testIamPermissions", - "parameterOrder": [ - "project", - "region", - "resource" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "targetGrpcProxies": { - "methods": { - "delete": { - "description": "Deletes the specified TargetGrpcProxy in the given scope", - "flatPath": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", - "httpMethod": "DELETE", - "id": "compute.targetGrpcProxies.delete", - "parameterOrder": [ - "project", - "targetGrpcProxy" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetGrpcProxy": { - "description": "Name of the TargetGrpcProxy resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] }, - "get": { - "description": "Returns the specified TargetGrpcProxy resource in the given scope.", - "flatPath": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", - "httpMethod": "GET", - "id": "compute.targetGrpcProxies.get", + "insert": { + "description": "Creates a new resource policy.", + "flatPath": "projects/{project}/regions/{region}/resourcePolicies", + "httpMethod": "POST", + "id": "compute.resourcePolicies.insert", "parameterOrder": [ "project", - "targetGrpcProxy" + "region" ], "parameters": { "project": { @@ -25008,37 +25168,10 @@ "required": true, "type": "string" }, - "targetGrpcProxy": { - "description": "Name of the TargetGrpcProxy resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", - "response": { - "$ref": "TargetGrpcProxy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a TargetGrpcProxy in the specified project in the given scope using the parameters that are included in the request.", - "flatPath": "projects/{project}/global/targetGrpcProxies", - "httpMethod": "POST", - "id": "compute.targetGrpcProxies.insert", - "parameterOrder": [ - "project" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", + "region": { + "description": "Name of the region for this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -25048,9 +25181,9 @@ "type": "string" } }, - "path": "projects/{project}/global/targetGrpcProxies", + "path": "projects/{project}/regions/{region}/resourcePolicies", "request": { - "$ref": "TargetGrpcProxy" + "$ref": "ResourcePolicy" }, "response": { "$ref": "Operation" @@ -25061,16 +25194,17 @@ ] }, "list": { - "description": "Lists the TargetGrpcProxies for a project in the given scope.", - "flatPath": "projects/{project}/global/targetGrpcProxies", + "description": "A list all the resource policies that have been configured for the specified project in specified region.", + "flatPath": "projects/{project}/regions/{region}/resourcePolicies", "httpMethod": "GET", - "id": "compute.targetGrpcProxies.list", + "id": "compute.resourcePolicies.list", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -25099,15 +25233,22 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/global/targetGrpcProxies", + "path": "projects/{project}/regions/{region}/resourcePolicies", "response": { - "$ref": "TargetGrpcProxyList" + "$ref": "ResourcePolicyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -25115,14 +25256,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified TargetGrpcProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", - "httpMethod": "PATCH", - "id": "compute.targetGrpcProxies.patch", + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "flatPath": "projects/{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy", + "httpMethod": "POST", + "id": "compute.resourcePolicies.setIamPolicy", "parameterOrder": [ "project", - "targetGrpcProxy" + "region", + "resource" ], "parameters": { "project": { @@ -25132,46 +25274,94 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" }, - "targetGrpcProxy": { - "description": "Name of the TargetGrpcProxy resource to patch.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", + "path": "projects/{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy", "request": { - "$ref": "TargetGrpcProxy" + "$ref": "RegionSetPolicyRequest" }, "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions", + "httpMethod": "POST", + "id": "compute.resourcePolicies.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] } } }, - "targetHttpProxies": { + "routers": { "methods": { "aggregatedList": { - "description": "Retrieves the list of all TargetHttpProxy resources, regional and global, available to the specified project.", - "flatPath": "projects/{project}/aggregated/targetHttpProxies", + "description": "Retrieves an aggregated list of routers.", + "flatPath": "projects/{project}/aggregated/routers", "httpMethod": "GET", - "id": "compute.targetHttpProxies.aggregatedList", + "id": "compute.routers.aggregatedList", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -25199,7 +25389,7 @@ "type": "string" }, "project": { - "description": "Name of the project scoping this request.", + "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, @@ -25211,9 +25401,9 @@ "type": "boolean" } }, - "path": "projects/{project}/aggregated/targetHttpProxies", + "path": "projects/{project}/aggregated/routers", "response": { - "$ref": "TargetHttpProxyAggregatedList" + "$ref": "RouterAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -25222,13 +25412,14 @@ ] }, "delete": { - "description": "Deletes the specified TargetHttpProxy resource.", - "flatPath": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", + "description": "Deletes the specified Router resource.", + "flatPath": "projects/{project}/regions/{region}/routers/{router}", "httpMethod": "DELETE", - "id": "compute.targetHttpProxies.delete", + "id": "compute.routers.delete", "parameterOrder": [ "project", - "targetHttpProxy" + "region", + "router" ], "parameters": { "project": { @@ -25238,20 +25429,27 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to delete.", + "router": { + "description": "Name of the Router resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", + "path": "projects/{project}/regions/{region}/routers/{router}", "response": { "$ref": "Operation" }, @@ -25261,13 +25459,14 @@ ] }, "get": { - "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request.", - "flatPath": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", + "description": "Returns the specified Router resource. 
Gets a list of available routers by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/routers/{router}", "httpMethod": "GET", - "id": "compute.targetHttpProxies.get", + "id": "compute.routers.get", "parameterOrder": [ "project", - "targetHttpProxy" + "region", + "router" ], "parameters": { "project": { @@ -25277,69 +25476,44 @@ "required": true, "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to return.", + "region": { + "description": "Name of the region for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - } - }, - "path": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", - "response": { - "$ref": "TargetHttpProxy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", - "flatPath": "projects/{project}/global/targetHttpProxies", - "httpMethod": "POST", - "id": "compute.targetHttpProxies.insert", - "parameterOrder": [ - "project" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", + }, + "router": { + "description": "Name of the Router resource to return.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/global/targetHttpProxies", - "request": { - "$ref": "TargetHttpProxy" - }, + "path": "projects/{project}/regions/{region}/routers/{router}", "response": { - "$ref": "Operation" + "$ref": "Router" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "list": { - "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", - "flatPath": "projects/{project}/global/targetHttpProxies", + "getNatMappingInfo": { + "description": "Retrieves runtime Nat mapping information of VM endpoints.", + "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo", "httpMethod": "GET", - "id": "compute.targetHttpProxies.list", + "id": "compute.routers.getNatMappingInfo", "parameterOrder": [ - "project" + "project", + "region", + "router" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -25368,15 +25542,29 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "router": { + "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "projects/{project}/global/targetHttpProxies", + "path": "projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo", "response": { - "$ref": "TargetHttpProxyList" + "$ref": "VmEndpointNatMappingsList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -25384,14 +25572,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified TargetHttpProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", - "httpMethod": "PATCH", - "id": "compute.targetHttpProxies.patch", + "getRouterStatus": { + "description": "Retrieves runtime information of the specified router.", + "flatPath": "projects/{project}/regions/{region}/routers/{router}/getRouterStatus", + "httpMethod": "GET", + "id": "compute.routers.getRouterStatus", "parameterOrder": [ "project", - "targetHttpProxy" + "region", + "router" ], "parameters": { "project": { @@ -25401,39 +25590,39 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to patch.", + "router": { + "description": "Name of the Router resource to query.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", - "request": { - "$ref": "TargetHttpProxy" - }, + "path": "projects/{project}/regions/{region}/routers/{router}/getRouterStatus", "response": { - "$ref": "Operation" + "$ref": "RouterStatusResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setUrlMap": { - "description": "Changes the URL map for TargetHttpProxy.", - "flatPath": "projects/{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "insert": { + "description": "Creates a Router resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/routers", "httpMethod": "POST", - "id": "compute.targetHttpProxies.setUrlMap", + "id": "compute.routers.insert", "parameterOrder": [ "project", - "targetHttpProxy" + "region" ], "parameters": { "project": { @@ -25443,22 +25632,22 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy to set a URL map for.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "path": "projects/{project}/regions/{region}/routers", "request": { - "$ref": "UrlMapReference" + "$ref": "Router" }, "response": { "$ref": "Operation" @@ -25467,30 +25656,22 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "targetHttpsProxies": { - "methods": { - "aggregatedList": { - "description": "Retrieves the list of all TargetHttpsProxy resources, regional and global, available to the specified project.", - "flatPath": "projects/{project}/aggregated/targetHttpsProxies", + }, + "list": { + "description": "Retrieves a list of Router resources available to the specified project.", + "flatPath": "projects/{project}/regions/{region}/routers", "httpMethod": "GET", - "id": "compute.targetHttpsProxies.aggregatedList", + "id": "compute.routers.list", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -25510,21 +25691,28 @@ "type": "string" }, "project": { - "description": "Name of the project scoping this request.", + "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/aggregated/targetHttpsProxies", + "path": "projects/{project}/regions/{region}/routers", "response": { - "$ref": "TargetHttpsProxyAggregatedList" + "$ref": "RouterList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -25532,14 +25720,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified TargetHttpsProxy resource.", - "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", - "httpMethod": "DELETE", - "id": "compute.targetHttpsProxies.delete", + "patch": { + "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/routers/{router}", + "httpMethod": "PATCH", + "id": "compute.routers.patch", "parameterOrder": [ "project", - "targetHttpsProxy" + "region", + "router" ], "parameters": { "project": { @@ -25549,20 +25738,30 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to delete.", + "router": { + "description": "Name of the Router resource to patch.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "path": "projects/{project}/regions/{region}/routers/{router}", + "request": { + "$ref": "Router" + }, "response": { "$ref": "Operation" }, @@ -25571,14 +25770,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request.", - "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", - "httpMethod": "GET", - "id": "compute.targetHttpsProxies.get", + "preview": { + "description": "Preview fields auto-generated during router create and update operations. 
Calling this method does NOT create or update the router.", + "flatPath": "projects/{project}/regions/{region}/routers/{router}/preview", + "httpMethod": "POST", + "id": "compute.routers.preview", "parameterOrder": [ "project", - "targetHttpsProxy" + "region", + "router" ], "parameters": { "project": { @@ -25588,17 +25788,27 @@ "required": true, "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to return.", + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "router": { + "description": "Name of the Router resource to query.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "path": "projects/{project}/regions/{region}/routers/{router}/preview", + "request": { + "$ref": "Router" + }, "response": { - "$ref": "TargetHttpsProxy" + "$ref": "RoutersPreviewResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -25606,13 +25816,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", - "flatPath": "projects/{project}/global/targetHttpsProxies", - "httpMethod": "POST", - "id": "compute.targetHttpsProxies.insert", + "update": { + "description": "Updates the specified Router resource with the data included in the request. This method conforms to PUT semantics, which requests that the state of the target resource be created or replaced with the state defined by the representation enclosed in the request message payload.", + "flatPath": "projects/{project}/regions/{region}/routers/{router}", + "httpMethod": "PUT", + "id": "compute.routers.update", "parameterOrder": [ - "project" + "project", + "region", + "router" ], "parameters": { "project": { @@ -25622,87 +25834,50 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/global/targetHttpsProxies", - "request": { - "$ref": "TargetHttpsProxy" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", - "flatPath": "projects/{project}/global/targetHttpsProxies", - "httpMethod": "GET", - "id": "compute.targetHttpsProxies.list", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "project": { - "description": "Project ID for this request.", + "router": { + "description": "Name of the Router resource to update.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" } }, - "path": "projects/{project}/global/targetHttpsProxies", + "path": "projects/{project}/regions/{region}/routers/{router}", + "request": { + "$ref": "Router" + }, "response": { - "$ref": "TargetHttpsProxyList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - }, - "patch": { - "description": "Patches the specified TargetHttpsProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", - "httpMethod": "PATCH", - "id": "compute.targetHttpsProxies.patch", + } + } + }, + "routes": { + "methods": { + "delete": { + "description": "Deletes the specified Route resource.", + "flatPath": "projects/{project}/global/routes/{route}", + "httpMethod": "DELETE", + "id": "compute.routes.delete", "parameterOrder": [ "project", - "targetHttpsProxy" + "route" ], "parameters": { "project": { @@ -25717,18 +25892,15 @@ "location": "query", "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to patch.", + "route": { + "description": "Name of the Route resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", - "request": { - "$ref": "TargetHttpsProxy" - }, + "path": "projects/{project}/global/routes/{route}", "response": { "$ref": "Operation" }, @@ -25737,14 +25909,14 @@ "https://www.googleapis.com/auth/compute" ] }, - "setQuicOverride": { - "description": "Sets the QUIC override policy for TargetHttpsProxy.", - "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride", - "httpMethod": "POST", - "id": "compute.targetHttpsProxies.setQuicOverride", + "get": { + "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request.", + "flatPath": "projects/{project}/global/routes/{route}", + "httpMethod": "GET", + "id": "compute.routes.get", "parameterOrder": [ "project", - "targetHttpsProxy" + "route" ], "parameters": { "project": { @@ -25754,38 +25926,31 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to set the QUIC override policy for. The name should conform to RFC1035.", + "route": { + "description": "Name of the Route resource to return.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride", - "request": { - "$ref": "TargetHttpsProxiesSetQuicOverrideRequest" - }, + "path": "projects/{project}/global/routes/{route}", "response": { - "$ref": "Operation" + "$ref": "Route" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setSslCertificates": { - "description": "Replaces SslCertificates for TargetHttpsProxy.", - "flatPath": "projects/{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "insert": { + "description": "Creates a Route resource in the specified project using the data included in the request.", + "flatPath": "projects/{project}/global/routes", "httpMethod": "POST", - "id": "compute.targetHttpsProxies.setSslCertificates", + "id": "compute.routes.insert", "parameterOrder": [ - "project", - "targetHttpsProxy" + "project" ], "parameters": { "project": { @@ -25799,18 +25964,11 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "path": "projects/{project}/global/routes", "request": { - "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" + "$ref": "Route" }, "response": { "$ref": "Operation" @@ -25820,55 +25978,73 @@ "https://www.googleapis.com/auth/compute" ] }, - "setSslPolicy": { - "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. 
They do not affect the connection between the load balancer and the backends.", - "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy", - "httpMethod": "POST", - "id": "compute.targetHttpsProxies.setSslPolicy", + "list": { + "description": "Retrieves the list of Route resources available to the specified project.", + "flatPath": "projects/{project}/global/routes", + "httpMethod": "GET", + "id": "compute.routes.list", "parameterOrder": [ - "project", - "targetHttpsProxy" + "project" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy", - "request": { - "$ref": "SslPolicyReference" - }, + "path": "projects/{project}/global/routes", "response": { - "$ref": "Operation" + "$ref": "RouteList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "setUrlMap": { - "description": "Changes the URL map for TargetHttpsProxy.", - "flatPath": "projects/{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + } + } + }, + "securityPolicies": { + "methods": { + "addRule": { + "description": "Inserts a rule into a security policy.", + "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}/addRule", "httpMethod": "POST", - "id": "compute.targetHttpsProxies.setUrlMap", + "id": "compute.securityPolicies.addRule", "parameterOrder": [ "project", - "targetHttpsProxy" + "securityPolicy" ], "parameters": { "project": { @@ -25878,22 +26054,22 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", + "securityPolicy": { + "description": "Name of the security policy to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" + }, + "validateOnly": { + "description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "path": "projects/{project}/global/securityPolicies/{securityPolicy}/addRule", "request": { - "$ref": "UrlMapReference" + "$ref": "SecurityPolicyRule" }, "response": { "$ref": "Operation" @@ -25902,22 +26078,18 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "targetInstances": { - "methods": { + }, "aggregatedList": { - "description": "Retrieves an aggregated list of target instances.", - "flatPath": "projects/{project}/aggregated/targetInstances", + "description": "Retrieves the list of all SecurityPolicy resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/securityPolicies", "httpMethod": "GET", - "id": "compute.targetInstances.aggregatedList", + "id": "compute.securityPolicies.aggregatedList", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -25945,7 +26117,7 @@ "type": "string" }, "project": { - "description": "Project ID for this request.", + "description": "Name of the project scoping this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, @@ -25957,9 +26129,9 @@ "type": "boolean" } }, - "path": "projects/{project}/aggregated/targetInstances", + "path": "projects/{project}/aggregated/securityPolicies", "response": { - "$ref": "TargetInstanceAggregatedList" + "$ref": "SecurityPoliciesAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -25968,14 +26140,13 @@ ] }, "delete": { - "description": "Deletes the specified TargetInstance resource.", - "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}", + "description": "Deletes the specified policy.", + "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}", "httpMethod": "DELETE", - "id": "compute.targetInstances.delete", + "id": "compute.securityPolicies.delete", "parameterOrder": [ "project", - "zone", - "targetInstance" + "securityPolicy" ], "parameters": { "project": { @@ -25990,22 +26161,15 @@ "location": "query", "type": "string" }, - "targetInstance": { - "description": "Name of the TargetInstance resource to delete.", + "securityPolicy": { + "description": "Name of the security policy to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" - }, - "zone": { - "description": "Name of the zone scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}", + "path": "projects/{project}/global/securityPolicies/{securityPolicy}", "response": { "$ref": "Operation" }, @@ -26015,14 +26179,13 @@ ] }, "get": { - "description": "Returns the specified TargetInstance resource. 
Gets a list of available target instances by making a list() request.", - "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}", + "description": "List all of the ordered rules present in a single specified policy.", + "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}", "httpMethod": "GET", - "id": "compute.targetInstances.get", + "id": "compute.securityPolicies.get", "parameterOrder": [ "project", - "zone", - "targetInstance" + "securityPolicy" ], "parameters": { "project": { @@ -26032,24 +26195,58 @@ "required": true, "type": "string" }, - "targetInstance": { - "description": "Name of the TargetInstance resource to return.", + "securityPolicy": { + "description": "Name of the security policy to get.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" + } + }, + "path": "projects/{project}/global/securityPolicies/{securityPolicy}", + "response": { + "$ref": "SecurityPolicy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getRule": { + "description": "Gets a rule at the specified priority.", + "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}/getRule", + "httpMethod": "GET", + "id": "compute.securityPolicies.getRule", + "parameterOrder": [ + "project", + "securityPolicy" + ], + "parameters": { + "priority": { + "description": "The priority of the rule to get from the security policy.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" }, - "zone": { - "description": "Name of the zone scoping this request.", + "securityPolicy": { + "description": "Name of the security policy to which the queried rule belongs.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}", + "path": "projects/{project}/global/securityPolicies/{securityPolicy}/getRule", "response": { - "$ref": "TargetInstance" + "$ref": "SecurityPolicyRule" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -26058,13 +26255,12 @@ ] }, "insert": { - "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", - "flatPath": "projects/{project}/zones/{zone}/targetInstances", + "description": "Creates a new policy in the specified project using the data included in the request.", + "flatPath": "projects/{project}/global/securityPolicies", "httpMethod": "POST", - "id": "compute.targetInstances.insert", + "id": "compute.securityPolicies.insert", "parameterOrder": [ - "project", - "zone" + "project" ], "parameters": { "project": { @@ -26079,17 +26275,15 @@ "location": "query", "type": "string" }, - "zone": { - "description": "Name of the zone scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" + "validateOnly": { + "description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" } }, - "path": 
"projects/{project}/zones/{zone}/targetInstances", + "path": "projects/{project}/global/securityPolicies", "request": { - "$ref": "TargetInstance" + "$ref": "SecurityPolicy" }, "response": { "$ref": "Operation" @@ -26100,17 +26294,16 @@ ] }, "list": { - "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", - "flatPath": "projects/{project}/zones/{zone}/targetInstances", + "description": "List all the policies that have been configured for the specified project.", + "flatPath": "projects/{project}/global/securityPolicies", "httpMethod": "GET", - "id": "compute.targetInstances.list", + "id": "compute.securityPolicies.list", "parameterOrder": [ - "project", - "zone" + "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -26143,88 +26336,81 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" - }, - "zone": { - "description": "Name of the zone scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "projects/{project}/zones/{zone}/targetInstances", + "path": "projects/{project}/global/securityPolicies", "response": { - "$ref": "TargetInstanceList" + "$ref": "SecurityPolicyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "targetPools": { - "methods": { - "addHealthCheck": { - "description": "Adds health check URLs to a target pool.", - "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", - "httpMethod": "POST", - "id": "compute.targetPools.addHealthCheck", + }, + "listPreconfiguredExpressionSets": { + "description": "Gets the current list of preconfigured Web Application Firewall (WAF) expressions.", + "flatPath": "projects/{project}/global/securityPolicies/listPreconfiguredExpressionSets", + "httpMethod": "GET", + "id": "compute.securityPolicies.listPreconfiguredExpressionSets", "parameterOrder": [ - "project", - "region", - "targetPool" + "project" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", "location": "query", "type": "string" }, - "targetPool": { - "description": "Name of the target pool to add a health check to.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", - "request": { - "$ref": "TargetPoolsAddHealthCheckRequest" - }, + "path": "projects/{project}/global/securityPolicies/listPreconfiguredExpressionSets", "response": { - "$ref": "Operation" + "$ref": "SecurityPoliciesListPreconfiguredExpressionSetsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "addInstance": { - "description": "Adds an instance to a target pool.", - "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/addInstance", - "httpMethod": "POST", - "id": "compute.targetPools.addInstance", + "patch": { + "description": "Patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", + "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}", + "httpMethod": "PATCH", + "id": "compute.securityPolicies.patch", "parameterOrder": [ "project", - "region", - "targetPool" + "securityPolicy" ], "parameters": { "project": { @@ -26234,29 +26420,22 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "targetPool": { - "description": "Name of the TargetPool resource to add instances to.", + "securityPolicy": { + "description": "Name of the security policy to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/addInstance", + "path": "projects/{project}/global/securityPolicies/{securityPolicy}", "request": { - "$ref": "TargetPoolsAddInstanceRequest" + "$ref": "SecurityPolicy" }, "response": { "$ref": "Operation" @@ -26266,23 +26445,152 @@ "https://www.googleapis.com/auth/compute" ] }, - "aggregatedList": { - "description": "Retrieves an aggregated list of target pools.", - "flatPath": "projects/{project}/aggregated/targetPools", - "httpMethod": "GET", - "id": "compute.targetPools.aggregatedList", + "patchRule": { + "description": "Patches a rule at the specified priority.", + "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}/patchRule", + "httpMethod": "POST", + "id": "compute.securityPolicies.patchRule", "parameterOrder": [ - "project" + "project", + "securityPolicy" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "priority": { + "description": "The priority of the rule to patch.", + "format": "int32", "location": "query", - "type": "string" + "type": "integer" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "securityPolicy": { + "description": "Name of the security policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "validateOnly": { + "description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/global/securityPolicies/{securityPolicy}/patchRule", + "request": { + "$ref": "SecurityPolicyRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "removeRule": { + "description": "Deletes a rule at the specified priority.", + "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}/removeRule", + "httpMethod": "POST", + "id": "compute.securityPolicies.removeRule", + "parameterOrder": [ + "project", + "securityPolicy" + ], + "parameters": { + "priority": { + "description": "The priority of the rule to remove from the security policy.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "securityPolicy": { + "description": "Name of the security policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/securityPolicies/{securityPolicy}/removeRule", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setLabels": { + "description": "Sets the labels on a security policy. 
To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/securityPolicies/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.securityPolicies.setLabels", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/securityPolicies/{resource}/setLabels", + "request": { + "$ref": "GlobalSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "serviceAttachments": { + "methods": { + "aggregatedList": { + "description": "Retrieves the list of all ServiceAttachment resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/serviceAttachments", + "httpMethod": "GET", + "id": "compute.serviceAttachments.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", "type": "boolean" }, "maxResults": { @@ -26304,7 +26612,7 @@ "type": "string" }, "project": { - "description": "Project ID for this request.", + "description": "Name of the project scoping this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, @@ -26316,9 +26624,9 @@ "type": "boolean" } }, - "path": "projects/{project}/aggregated/targetPools", + "path": "projects/{project}/aggregated/serviceAttachments", "response": { - "$ref": "TargetPoolAggregatedList" + "$ref": "ServiceAttachmentAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -26327,14 +26635,14 @@ ] }, "delete": { - "description": "Deletes the specified target pool.", - "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}", + "description": "Deletes the specified ServiceAttachment in the given scope", + "flatPath": "projects/{project}/regions/{region}/serviceAttachments/{serviceAttachment}", "httpMethod": "DELETE", - "id": "compute.targetPools.delete", + "id": "compute.serviceAttachments.delete", "parameterOrder": [ "project", "region", - "targetPool" + "serviceAttachment" ], "parameters": { "project": { @@ -26345,7 +26653,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -26356,15 +26664,15 @@ "location": "query", "type": "string" }, - "targetPool": { - "description": "Name of the TargetPool resource to delete.", + "serviceAttachment": { + "description": "Name of the ServiceAttachment resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetPools/{targetPool}", + "path": "projects/{project}/regions/{region}/serviceAttachments/{serviceAttachment}", "response": { "$ref": "Operation" }, @@ -26374,14 +26682,14 @@ ] }, "get": { - "description": "Returns the specified target pool. 
Gets a list of available target pools by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}", + "description": "Returns the specified ServiceAttachment resource in the given scope.", + "flatPath": "projects/{project}/regions/{region}/serviceAttachments/{serviceAttachment}", "httpMethod": "GET", - "id": "compute.targetPools.get", + "id": "compute.serviceAttachments.get", "parameterOrder": [ "project", "region", - "targetPool" + "serviceAttachment" ], "parameters": { "project": { @@ -26392,23 +26700,23 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "targetPool": { - "description": "Name of the TargetPool resource to return.", + "serviceAttachment": { + "description": "Name of the ServiceAttachment resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetPools/{targetPool}", + "path": "projects/{project}/regions/{region}/serviceAttachments/{serviceAttachment}", "response": { - "$ref": "TargetPool" + "$ref": "ServiceAttachment" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -26416,17 +26724,23 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getHealth": { - "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", - "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/getHealth", - "httpMethod": "POST", - "id": "compute.targetPools.getHealth", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/regions/{region}/serviceAttachments/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.serviceAttachments.getIamPolicy", "parameterOrder": [ "project", "region", - "targetPool" + "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -26435,26 +26749,23 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "targetPool": { - "description": "Name of the TargetPool resource to which the queried instance belongs.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/getHealth", - "request": { - "$ref": "InstanceReference" - }, + "path": "projects/{project}/regions/{region}/serviceAttachments/{resource}/getIamPolicy", "response": { - "$ref": "TargetPoolInstanceHealth" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -26463,10 +26774,10 @@ ] }, "insert": { - "description": "Creates a target pool in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/targetPools", + "description": "Creates a ServiceAttachment in the specified project in the given scope using the parameters that are included in the request.", + "flatPath": "projects/{project}/regions/{region}/serviceAttachments", "httpMethod": "POST", - "id": "compute.targetPools.insert", + "id": "compute.serviceAttachments.insert", "parameterOrder": [ "project", "region" @@ -26480,7 +26791,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -26492,9 +26803,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetPools", + "path": "projects/{project}/regions/{region}/serviceAttachments", "request": { - "$ref": "TargetPool" + "$ref": "ServiceAttachment" }, "response": { "$ref": "Operation" @@ -26505,17 +26816,17 @@ ] }, "list": { - "description": "Retrieves a list of target pools available to the specified project and region.", - "flatPath": "projects/{project}/regions/{region}/targetPools", + "description": "Lists the ServiceAttachments for a project in the given scope.", + "flatPath": "projects/{project}/regions/{region}/serviceAttachments", "httpMethod": "GET", - "id": "compute.targetPools.list", + "id": "compute.serviceAttachments.list", "parameterOrder": [ "project", "region" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -26545,7 +26856,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -26557,9 +26868,9 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/targetPools", + "path": "projects/{project}/regions/{region}/serviceAttachments", "response": { - "$ref": "TargetPoolList" + "$ref": "ServiceAttachmentList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -26567,15 +26878,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "removeHealthCheck": { - "description": "Removes health check URL from a target pool.", - "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", - "httpMethod": "POST", - "id": "compute.targetPools.removeHealthCheck", + "patch": { + "description": "Patches the specified ServiceAttachment resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/serviceAttachments/{serviceAttachment}", + "httpMethod": "PATCH", + "id": "compute.serviceAttachments.patch", "parameterOrder": [ "project", "region", - "targetPool" + "serviceAttachment" ], "parameters": { "project": { @@ -26586,9 +26897,8 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "The region scoping this request and should conform to RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -26597,17 +26907,16 @@ "location": "query", "type": "string" }, - "targetPool": { - "description": "Name of the target pool to remove health checks from.", + "serviceAttachment": { + "description": "The resource id of the ServiceAttachment to patch. It should conform to RFC1035 resource name or be a string form on an unsigned long number.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + "path": "projects/{project}/regions/{region}/serviceAttachments/{serviceAttachment}", "request": { - "$ref": "TargetPoolsRemoveHealthCheckRequest" + "$ref": "ServiceAttachment" }, "response": { "$ref": "Operation" @@ -26617,15 +26926,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "removeInstance": { - "description": "Removes instance URL from a target pool.", - "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "flatPath": "projects/{project}/regions/{region}/serviceAttachments/{resource}/setIamPolicy", "httpMethod": "POST", - "id": "compute.targetPools.removeInstance", + "id": "compute.serviceAttachments.setIamPolicy", "parameterOrder": [ "project", "region", - "targetPool" + "resource" ], "parameters": { "project": { @@ -26636,54 +26945,43 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetPool": { - "description": "Name of the TargetPool resource to remove instances from.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + "path": "projects/{project}/regions/{region}/serviceAttachments/{resource}/setIamPolicy", "request": { - "$ref": "TargetPoolsRemoveInstanceRequest" + "$ref": "RegionSetPolicyRequest" }, "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "setBackup": { - "description": "Changes a backup target pool's configurations.", - "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setBackup", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/serviceAttachments/{resource}/testIamPermissions", "httpMethod": "POST", - "id": "compute.targetPools.setBackup", + "id": "compute.serviceAttachments.testIamPermissions", "parameterOrder": [ "project", "region", - "targetPool" + "resource" ], "parameters": { - "failoverRatio": { - "description": "New failoverRatio value for the target pool.", - "format": "float", - "location": "query", - "type": "number" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -26692,49 +26990,45 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetPool": { - "description": "Name of the TargetPool resource to set a backup pool for.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setBackup", + "path": "projects/{project}/regions/{region}/serviceAttachments/{resource}/testIamPermissions", "request": { - "$ref": "TargetReference" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] } } }, - "targetSslProxies": { + "snapshots": { "methods": { "delete": { - "description": "Deletes the specified TargetSslProxy resource.", - "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}", + "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot. For more information, see Deleting snapshots.", + "flatPath": "projects/{project}/global/snapshots/{snapshot}", "httpMethod": "DELETE", - "id": "compute.targetSslProxies.delete", + "id": "compute.snapshots.delete", "parameterOrder": [ "project", - "targetSslProxy" + "snapshot" ], "parameters": { "project": { @@ -26749,15 +27043,15 @@ "location": "query", "type": "string" }, - "targetSslProxy": { - "description": "Name of the TargetSslProxy resource to delete.", + "snapshot": { + "description": "Name of the Snapshot resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}", + "path": "projects/{project}/global/snapshots/{snapshot}", "response": { "$ref": "Operation" }, @@ -26767,13 +27061,13 @@ ] }, "get": { - "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request.", - "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}", + "description": "Returns the specified Snapshot resource. 
Gets a list of available snapshots by making a list() request.", + "flatPath": "projects/{project}/global/snapshots/{snapshot}", "httpMethod": "GET", - "id": "compute.targetSslProxies.get", + "id": "compute.snapshots.get", "parameterOrder": [ "project", - "targetSslProxy" + "snapshot" ], "parameters": { "project": { @@ -26783,17 +27077,58 @@ "required": true, "type": "string" }, - "targetSslProxy": { - "description": "Name of the TargetSslProxy resource to return.", + "snapshot": { + "description": "Name of the Snapshot resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}", + "path": "projects/{project}/global/snapshots/{snapshot}", "response": { - "$ref": "TargetSslProxy" + "$ref": "Snapshot" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/global/snapshots/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.snapshots.getIamPolicy", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/snapshots/{resource}/getIamPolicy", + "response": { + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -26802,10 +27137,10 @@ ] }, "insert": { - "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", - "flatPath": "projects/{project}/global/targetSslProxies", + "description": "Creates a snapshot in the specified project using the data included in the request. 
For regular snapshot creation, consider using this method instead of disks.createSnapshot, as this method supports more features, such as creating snapshots in a project different from the source disk project.", + "flatPath": "projects/{project}/global/snapshots", "httpMethod": "POST", - "id": "compute.targetSslProxies.insert", + "id": "compute.snapshots.insert", "parameterOrder": [ "project" ], @@ -26823,9 +27158,9 @@ "type": "string" } }, - "path": "projects/{project}/global/targetSslProxies", + "path": "projects/{project}/global/snapshots", "request": { - "$ref": "TargetSslProxy" + "$ref": "Snapshot" }, "response": { "$ref": "Operation" @@ -26836,16 +27171,16 @@ ] }, "list": { - "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", - "flatPath": "projects/{project}/global/targetSslProxies", + "description": "Retrieves the list of Snapshot resources contained within the specified project.", + "flatPath": "projects/{project}/global/snapshots", "httpMethod": "GET", - "id": "compute.targetSslProxies.list", + "id": "compute.snapshots.list", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -26880,9 +27215,9 @@ "type": "boolean" } }, - "path": "projects/{project}/global/targetSslProxies", + "path": "projects/{project}/global/snapshots", "response": { - "$ref": "TargetSslProxyList" + "$ref": "SnapshotList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -26890,14 +27225,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setBackendService": { - "description": "Changes the BackendService for TargetSslProxy.", - "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setBackendService", + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "flatPath": "projects/{project}/global/snapshots/{resource}/setIamPolicy", "httpMethod": "POST", - "id": "compute.targetSslProxies.setBackendService", + "id": "compute.snapshots.setIamPolicy", "parameterOrder": [ "project", - "targetSslProxy" + "resource" ], "parameters": { "project": { @@ -26907,39 +27242,34 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetSslProxy": { - "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setBackendService", + "path": "projects/{project}/global/snapshots/{resource}/setIamPolicy", "request": { - "$ref": "TargetSslProxiesSetBackendServiceRequest" + "$ref": "GlobalSetPolicyRequest" }, "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "setProxyHeader": { - "description": "Changes the ProxyHeaderType for TargetSslProxy.", - "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader", + "setLabels": { + "description": "Sets the labels on a snapshot. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/snapshots/{resource}/setLabels", "httpMethod": "POST", - "id": "compute.targetSslProxies.setProxyHeader", + "id": "compute.snapshots.setLabels", "parameterOrder": [ "project", - "targetSslProxy" + "resource" ], "parameters": { "project": { @@ -26949,22 +27279,17 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetSslProxy": { - "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader", + "path": "projects/{project}/global/snapshots/{resource}/setLabels", "request": { - "$ref": "TargetSslProxiesSetProxyHeaderRequest" + "$ref": "GlobalSetLabelsRequest" }, "response": { "$ref": "Operation" @@ -26974,14 +27299,14 @@ "https://www.googleapis.com/auth/compute" ] }, - "setSslCertificates": { - "description": "Changes SslCertificates for TargetSslProxy.", - "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/global/snapshots/{resource}/testIamPermissions", "httpMethod": "POST", - "id": "compute.targetSslProxies.setSslCertificates", + "id": "compute.snapshots.testIamPermissions", "parameterOrder": [ "project", - "targetSslProxy" + "resource" ], "parameters": { "project": { @@ -26991,84 +27316,99 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetSslProxy": { - "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates", + "path": "projects/{project}/global/snapshots/{resource}/testIamPermissions", "request": { - "$ref": "TargetSslProxiesSetSslCertificatesRequest" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "setSslPolicy": { - "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. 
They do not affect the connection between the load balancer and the backends.", - "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", - "httpMethod": "POST", - "id": "compute.targetSslProxies.setSslPolicy", + } + } + }, + "sslCertificates": { + "methods": { + "aggregatedList": { + "description": "Retrieves the list of all SslCertificate resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/sslCertificates", + "httpMethod": "GET", + "id": "compute.sslCertificates.aggregatedList", "parameterOrder": [ - "project", - "targetSslProxy" + "project" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, - "targetSslProxy": { - "description": "Name of the TargetSslProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", - "request": { - "$ref": "SslPolicyReference" - }, + "path": "projects/{project}/aggregated/sslCertificates", "response": { - "$ref": "Operation" + "$ref": "SslCertificateAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "targetTcpProxies": { - "methods": { + }, "delete": { - "description": "Deletes the specified TargetTcpProxy resource.", - "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", + "description": "Deletes the specified SslCertificate resource.", + "flatPath": "projects/{project}/global/sslCertificates/{sslCertificate}", "httpMethod": "DELETE", - "id": "compute.targetTcpProxies.delete", + "id": "compute.sslCertificates.delete", "parameterOrder": [ "project", - "targetTcpProxy" + "sslCertificate" ], "parameters": { "project": { @@ -27083,15 +27423,15 @@ "location": "query", "type": "string" }, - "targetTcpProxy": { - "description": "Name of the TargetTcpProxy resource to delete.", + "sslCertificate": { + "description": "Name of the SslCertificate resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", + "path": "projects/{project}/global/sslCertificates/{sslCertificate}", "response": { "$ref": "Operation" }, @@ -27101,13 +27441,13 @@ ] }, "get": { - "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request.", - "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", + "description": "Returns the specified SslCertificate resource. 
Gets a list of available SSL certificates by making a list() request.", + "flatPath": "projects/{project}/global/sslCertificates/{sslCertificate}", "httpMethod": "GET", - "id": "compute.targetTcpProxies.get", + "id": "compute.sslCertificates.get", "parameterOrder": [ "project", - "targetTcpProxy" + "sslCertificate" ], "parameters": { "project": { @@ -27117,17 +27457,17 @@ "required": true, "type": "string" }, - "targetTcpProxy": { - "description": "Name of the TargetTcpProxy resource to return.", + "sslCertificate": { + "description": "Name of the SslCertificate resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", + "path": "projects/{project}/global/sslCertificates/{sslCertificate}", "response": { - "$ref": "TargetTcpProxy" + "$ref": "SslCertificate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -27136,10 +27476,10 @@ ] }, "insert": { - "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request.", - "flatPath": "projects/{project}/global/targetTcpProxies", + "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", + "flatPath": "projects/{project}/global/sslCertificates", "httpMethod": "POST", - "id": "compute.targetTcpProxies.insert", + "id": "compute.sslCertificates.insert", "parameterOrder": [ "project" ], @@ -27157,9 +27497,9 @@ "type": "string" } }, - "path": "projects/{project}/global/targetTcpProxies", + "path": "projects/{project}/global/sslCertificates", "request": { - "$ref": "TargetTcpProxy" + "$ref": "SslCertificate" }, "response": { "$ref": "Operation" @@ -27170,16 +27510,16 @@ ] }, "list": { - "description": "Retrieves the list of TargetTcpProxy resources available to the specified project.", - "flatPath": "projects/{project}/global/targetTcpProxies", + "description": "Retrieves the list of SslCertificate resources available to the specified project.", + "flatPath": "projects/{project}/global/sslCertificates", "httpMethod": "GET", - "id": "compute.targetTcpProxies.list", + "id": "compute.sslCertificates.list", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -27214,115 +27554,31 @@ "type": "boolean" } }, - "path": "projects/{project}/global/targetTcpProxies", + "path": "projects/{project}/global/sslCertificates", "response": { - "$ref": "TargetTcpProxyList" + "$ref": "SslCertificateList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "setBackendService": { - "description": "Changes the BackendService for TargetTcpProxy.", - "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService", - "httpMethod": "POST", - "id": "compute.targetTcpProxies.setBackendService", - "parameterOrder": [ - "project", - "targetTcpProxy" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetTcpProxy": { - "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService", - "request": { - "$ref": "TargetTcpProxiesSetBackendServiceRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setProxyHeader": { - "description": "Changes the ProxyHeaderType for TargetTcpProxy.", - "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader", - "httpMethod": "POST", - "id": "compute.targetTcpProxies.setProxyHeader", - "parameterOrder": [ - "project", - "targetTcpProxy" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetTcpProxy": { - "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader", - "request": { - "$ref": "TargetTcpProxiesSetProxyHeaderRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] } } }, - "targetVpnGateways": { + "sslPolicies": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of target VPN gateways.", - "flatPath": "projects/{project}/aggregated/targetVpnGateways", + "description": "Retrieves the list of all SslPolicy resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/sslPolicies", "httpMethod": "GET", - "id": "compute.targetVpnGateways.aggregatedList", + "id": "compute.sslPolicies.aggregatedList", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -27350,7 +27606,7 @@ "type": "string" }, "project": { - "description": "Project ID for this request.", + "description": "Name of the project scoping this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, @@ -27362,9 +27618,9 @@ "type": "boolean" } }, - "path": "projects/{project}/aggregated/targetVpnGateways", + "path": "projects/{project}/aggregated/sslPolicies", "response": { - "$ref": "TargetVpnGatewayAggregatedList" + "$ref": "SslPoliciesAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -27373,14 +27629,13 @@ ] }, "delete": { - "description": "Deletes the specified target VPN gateway.", - "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + "flatPath": "projects/{project}/global/sslPolicies/{sslPolicy}", "httpMethod": "DELETE", - "id": "compute.targetVpnGateways.delete", + "id": "compute.sslPolicies.delete", "parameterOrder": [ "project", - "region", - "targetVpnGateway" + "sslPolicy" ], "parameters": { "project": { @@ -27390,27 +27645,19 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "targetVpnGateway": { - "description": "Name of the target VPN gateway to delete.", + "sslPolicy": { + "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "path": "projects/{project}/global/sslPolicies/{sslPolicy}", "response": { "$ref": "Operation" }, @@ -27420,14 +27667,13 @@ ] }, "get": { - "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "description": "Lists all of the ordered rules present in a single specified policy.", + "flatPath": "projects/{project}/global/sslPolicies/{sslPolicy}", "httpMethod": "GET", - "id": "compute.targetVpnGateways.get", + "id": "compute.sslPolicies.get", "parameterOrder": [ "project", - "region", - "targetVpnGateway" + "sslPolicy" ], "parameters": { "project": { @@ -27437,24 +27683,16 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "targetVpnGateway": { - "description": "Name of the target VPN gateway to return.", + "sslPolicy": { + "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "path": "projects/{project}/global/sslPolicies/{sslPolicy}", "response": { - "$ref": "TargetVpnGateway" + "$ref": "SslPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -27463,13 +27701,12 @@ ] }, "insert": { - "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/targetVpnGateways", + "description": "Returns the specified SSL policy resource. 
Gets a list of available SSL policies by making a list() request.", + "flatPath": "projects/{project}/global/sslPolicies", "httpMethod": "POST", - "id": "compute.targetVpnGateways.insert", + "id": "compute.sslPolicies.insert", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "project": { @@ -27479,22 +27716,15 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetVpnGateways", + "path": "projects/{project}/global/sslPolicies", "request": { - "$ref": "TargetVpnGateway" + "$ref": "SslPolicy" }, "response": { "$ref": "Operation" @@ -27505,17 +27735,16 @@ ] }, "list": { - "description": "Retrieves a list of target VPN gateways available to the specified project and region.", - "flatPath": "projects/{project}/regions/{region}/targetVpnGateways", + "description": "Lists all the SSL policies that have been configured for the specified project.", + "flatPath": "projects/{project}/global/sslPolicies", "httpMethod": "GET", - "id": "compute.targetVpnGateways.list", + "id": "compute.sslPolicies.list", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -27544,52 +27773,36 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/targetVpnGateways", + "path": "projects/{project}/global/sslPolicies", "response": { - "$ref": "TargetVpnGatewayList" + "$ref": "SslPoliciesList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "urlMaps": { - "methods": { - "aggregatedList": { - "description": "Retrieves the list of all UrlMap resources, regional and global, available to the specified project.", - "flatPath": "projects/{project}/aggregated/urlMaps", + }, + "listAvailableFeatures": { + "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + "flatPath": "projects/{project}/global/sslPolicies/listAvailableFeatures", "httpMethod": "GET", - "id": "compute.urlMaps.aggregatedList", + "id": "compute.sslPolicies.listAvailableFeatures", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -27609,7 +27822,7 @@ "type": "string" }, "project": { - "description": "Name of the project scoping this request.", + "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, @@ -27621,83 +27834,9 @@ "type": "boolean" } }, - "path": "projects/{project}/aggregated/urlMaps", - "response": { - "$ref": "UrlMapsAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "description": "Deletes the specified UrlMap resource.", - "flatPath": "projects/{project}/global/urlMaps/{urlMap}", - "httpMethod": "DELETE", - "id": "compute.urlMaps.delete", - "parameterOrder": [ - "project", - "urlMap" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "urlMap": { - "description": "Name of the UrlMap resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/global/urlMaps/{urlMap}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", - "flatPath": "projects/{project}/global/urlMaps/{urlMap}", - "httpMethod": "GET", - "id": "compute.urlMaps.get", - "parameterOrder": [ - "project", - "urlMap" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "urlMap": { - "description": "Name of the UrlMap resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/global/urlMaps/{urlMap}", + "path": "projects/{project}/global/sslPolicies/listAvailableFeatures", "response": { - "$ref": "UrlMap" + "$ref": "SslPoliciesListAvailableFeaturesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -27705,48 +27844,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a UrlMap resource in the specified project using the data included in the request.", - "flatPath": "projects/{project}/global/urlMaps", - "httpMethod": "POST", - "id": "compute.urlMaps.insert", - "parameterOrder": [ - "project" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/global/urlMaps", - "request": { - "$ref": "UrlMap" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "invalidateCache": { - "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap. For more information, see [Invalidating cached content](/cdn/docs/invalidating-cached-content).", - "flatPath": "projects/{project}/global/urlMaps/{urlMap}/invalidateCache", - "httpMethod": "POST", - "id": "compute.urlMaps.invalidateCache", + "patch": { + "description": "Patches the specified SSL policy with the data included in the request.", + "flatPath": "projects/{project}/global/sslPolicies/{sslPolicy}", + "httpMethod": "PATCH", + "id": "compute.sslPolicies.patch", "parameterOrder": [ "project", - "urlMap" + "sslPolicy" ], "parameters": { "project": { @@ -27761,17 +27866,16 @@ "location": "query", "type": "string" }, - "urlMap": { - "description": "Name of the UrlMap scoping this request.", + "sslPolicy": { + "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/urlMaps/{urlMap}/invalidateCache", + "path": "projects/{project}/global/sslPolicies/{sslPolicy}", "request": { - "$ref": "CacheInvalidationRule" + "$ref": "SslPolicy" }, "response": { "$ref": "Operation" @@ -27780,21 +27884,30 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "list": { - "description": "Retrieves the list of UrlMap resources available to the specified project.", - "flatPath": "projects/{project}/global/urlMaps", + } + } + }, + "subnetworks": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of subnetworks.", + "flatPath": "projects/{project}/aggregated/subnetworks", "httpMethod": "GET", - "id": "compute.urlMaps.list", + "id": "compute.subnetworks.aggregatedList", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. 
Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -27826,9 +27939,9 @@ "type": "boolean" } }, - "path": "projects/{project}/global/urlMaps", + "path": "projects/{project}/aggregated/subnetworks", "response": { - "$ref": "UrlMapList" + "$ref": "SubnetworkAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -27836,14 +27949,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/global/urlMaps/{urlMap}", - "httpMethod": "PATCH", - "id": "compute.urlMaps.patch", + "delete": { + "description": "Deletes the specified subnetwork.", + "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", + "httpMethod": "DELETE", + "id": "compute.subnetworks.delete", "parameterOrder": [ "project", - "urlMap" + "region", + "subnetwork" ], "parameters": { "project": { @@ -27853,23 +27967,27 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "urlMap": { - "description": "Name of the UrlMap resource to patch.", + "subnetwork": { + "description": "Name of the Subnetwork resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/urlMaps/{urlMap}", - "request": { - "$ref": "UrlMap" - }, + "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", "response": { "$ref": "Operation" }, @@ -27878,14 +27996,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "update": { - "description": "Updates the specified UrlMap resource with the data included in the request.", - "flatPath": "projects/{project}/global/urlMaps/{urlMap}", - "httpMethod": "PUT", - "id": "compute.urlMaps.update", + "expandIpCidrRange": { + "description": "Expands the IP CIDR range of the subnetwork to a specified value.", + "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", + "httpMethod": "POST", + "id": "compute.subnetworks.expandIpCidrRange", "parameterOrder": [ "project", - "urlMap" + "region", + "subnetwork" ], "parameters": { "project": { @@ -27895,22 +28014,29 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "urlMap": { - "description": "Name of the UrlMap resource to update.", + "subnetwork": { + "description": "Name of the Subnetwork resource to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/urlMaps/{urlMap}", + "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", "request": { - "$ref": "UrlMap" + "$ref": "SubnetworksExpandIpCidrRangeRequest" }, "response": { "$ref": "Operation" @@ -27920,14 +28046,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "validate": { - "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", - "flatPath": "projects/{project}/global/urlMaps/{urlMap}/validate", - "httpMethod": "POST", - "id": "compute.urlMaps.validate", + "get": { + "description": "Returns the specified subnetwork. 
Gets a list of available subnetworks list() request.", + "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", + "httpMethod": "GET", + "id": "compute.subnetworks.get", "parameterOrder": [ "project", - "urlMap" + "region", + "subnetwork" ], "parameters": { "project": { @@ -27937,67 +28064,48 @@ "required": true, "type": "string" }, - "urlMap": { - "description": "Name of the UrlMap resource to be validated as.", + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "subnetwork": { + "description": "Name of the Subnetwork resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/global/urlMaps/{urlMap}/validate", - "request": { - "$ref": "UrlMapsValidateRequest" - }, + "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", "response": { - "$ref": "UrlMapsValidateResponse" + "$ref": "Subnetwork" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "vpnGateways": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of VPN gateways.", - "flatPath": "projects/{project}/aggregated/vpnGateways", + }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", "httpMethod": "GET", - "id": "compute.vpnGateways.aggregatedList", + "id": "compute.subnetworks.getIamPolicy", "parameterOrder": [ - "project" + "project", + "region", + "resource" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", "location": "query", - "minimum": "0", "type": "integer" }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -28005,15 +28113,24 @@ "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", - "location": "query", - "type": "boolean" + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "projects/{project}/aggregated/vpnGateways", + "path": "projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", "response": { - "$ref": "VpnGatewayAggregatedList" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -28021,15 +28138,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified VPN gateway.", - "flatPath": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}", - "httpMethod": "DELETE", - "id": "compute.vpnGateways.delete", + "insert": { + "description": "Creates a subnetwork in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/subnetworks", + "httpMethod": "POST", + "id": "compute.subnetworks.insert", "parameterOrder": [ "project", - "region", - "vpnGateway" + "region" ], "parameters": { "project": { @@ -28040,7 +28156,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -28050,16 +28166,12 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "vpnGateway": { - "description": "Name of the VPN gateway to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}", + "path": "projects/{project}/regions/{region}/subnetworks", + "request": { + "$ref": "Subnetwork" + }, "response": { "$ref": "Operation" }, @@ -28068,17 +28180,39 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified VPN gateway. Gets a list of available VPN gateways by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}", + "list": { + "description": "Retrieves a list of subnetworks available to the specified project.", + "flatPath": "projects/{project}/regions/{region}/subnetworks", "httpMethod": "GET", - "id": "compute.vpnGateways.get", + "id": "compute.subnetworks.list", "parameterOrder": [ "project", - "region", - "vpnGateway" + "region" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -28087,23 +28221,21 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "vpnGateway": { - "description": "Name of the VPN gateway to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}", + "path": "projects/{project}/regions/{region}/subnetworks", "response": { - "$ref": "VpnGateway" + "$ref": "SubnetworkList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -28111,42 +28243,54 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getStatus": { - "description": "Returns the status for the specified VPN gateway.", - "flatPath": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}/getStatus", + "listUsable": { + "description": "Retrieves an aggregated list of all usable subnetworks in the project.", + "flatPath": "projects/{project}/aggregated/subnetworks/listUsable", "httpMethod": "GET", - "id": "compute.vpnGateways.getStatus", + "id": "compute.subnetworks.listUsable", "parameterOrder": [ - "project", - "region", - "vpnGateway" + "project" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", "type": "string" }, - "vpnGateway": { - "description": "Name of the VPN gateway to return.", + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}/getStatus", + "path": "projects/{project}/aggregated/subnetworks/listUsable", "response": { - "$ref": "VpnGatewaysGetStatusResponse" + "$ref": "UsableSubnetworksAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -28154,16 +28298,23 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a VPN gateway in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/vpnGateways", - "httpMethod": "POST", - "id": "compute.vpnGateways.insert", + "patch": { + "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can be updated with a patch request as indicated in the field descriptions. You must specify the current fingerprint of the subnetwork resource being patched.", + "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", + "httpMethod": "PATCH", + "id": "compute.subnetworks.patch", "parameterOrder": [ "project", - "region" + "region", + "subnetwork" ], "parameters": { + "drainTimeoutSeconds": { + "description": "The drain timeout specifies the upper bound in seconds on the amount of time allowed to drain connections from the current ACTIVE subnetwork to the current BACKUP subnetwork. The drain timeout is only applicable when the following conditions are true: - the subnetwork being patched has purpose = INTERNAL_HTTPS_LOAD_BALANCER - the subnetwork being patched has role = BACKUP - the patch request is setting the role to ACTIVE. Note that after this patch operation the roles of the ACTIVE and BACKUP subnetworks will be swapped.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -28172,7 +28323,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -28182,11 +28333,18 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "subnetwork": { + "description": "Name of the Subnetwork resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "projects/{project}/regions/{region}/vpnGateways", + "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", "request": { - "$ref": "VpnGateway" + "$ref": "Subnetwork" }, "response": { "$ref": "Operation" @@ -28196,39 +28354,17 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves a list of VPN gateways available to the specified project and region.", - "flatPath": "projects/{project}/regions/{region}/vpnGateways", - "httpMethod": "GET", - "id": "compute.vpnGateways.list", + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "flatPath": "projects/{project}/regions/{region}/subnetworks/{resource}/setIamPolicy", + "httpMethod": "POST", + "id": "compute.subnetworks.setIamPolicy", "parameterOrder": [ "project", - "region" + "region", + "resource" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -28237,37 +28373,41 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "projects/{project}/regions/{region}/vpnGateways", + "path": "projects/{project}/regions/{region}/subnetworks/{resource}/setIamPolicy", + "request": { + "$ref": "RegionSetPolicyRequest" + }, "response": { - "$ref": "VpnGatewayList" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "setLabels": { - "description": "Sets the labels on a VpnGateway. 
To learn more about labels, read the Labeling Resources documentation.", - "flatPath": "projects/{project}/regions/{region}/vpnGateways/{resource}/setLabels", + "setPrivateIpGoogleAccess": { + "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", + "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", "httpMethod": "POST", - "id": "compute.vpnGateways.setLabels", + "id": "compute.subnetworks.setPrivateIpGoogleAccess", "parameterOrder": [ "project", "region", - "resource" + "subnetwork" ], "parameters": { "project": { @@ -28278,7 +28418,7 @@ "type": "string" }, "region": { - "description": "The region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -28289,17 +28429,17 @@ "location": "query", "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "subnetwork": { + "description": "Name of the Subnetwork resource.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/vpnGateways/{resource}/setLabels", + "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", "request": { - "$ref": "RegionSetLabelsRequest" + "$ref": "SubnetworksSetPrivateIpGoogleAccessRequest" }, "response": { "$ref": "Operation" @@ -28311,9 +28451,9 @@ }, "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", - "flatPath": "projects/{project}/regions/{region}/vpnGateways/{resource}/testIamPermissions", + "flatPath": "projects/{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", "httpMethod": "POST", - "id": "compute.vpnGateways.testIamPermissions", + "id": "compute.subnetworks.testIamPermissions", "parameterOrder": [ "project", "region", @@ -28342,7 +28482,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/vpnGateways/{resource}/testIamPermissions", + "path": "projects/{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", "request": { "$ref": "TestPermissionsRequest" }, @@ -28357,77 +28497,16 @@ } } }, - "vpnTunnels": { + "targetGrpcProxies": { "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of VPN tunnels.", - "flatPath": "projects/{project}/aggregated/vpnTunnels", - "httpMethod": "GET", - "id": "compute.vpnTunnels.aggregatedList", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - "location": "query", - "type": "string" - }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", - "location": "query", - "type": "boolean" - } - }, - "path": "projects/{project}/aggregated/vpnTunnels", - "response": { - "$ref": "VpnTunnelAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, "delete": { - "description": "Deletes the specified VpnTunnel resource.", - "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "description": "Deletes the specified TargetGrpcProxy in the given scope", + "flatPath": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", "httpMethod": "DELETE", - "id": "compute.vpnTunnels.delete", + "id": "compute.targetGrpcProxies.delete", "parameterOrder": [ "project", - "region", - "vpnTunnel" + "targetGrpcProxy" ], "parameters": { "project": { @@ -28437,27 +28516,20 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "vpnTunnel": { - "description": "Name of the VpnTunnel resource to delete.", + "targetGrpcProxy": { + "description": "Name of the TargetGrpcProxy resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "path": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", "response": { "$ref": "Operation" }, @@ -28467,14 +28539,13 @@ ] }, "get": { - "description": "Returns the specified VpnTunnel resource. 
Gets a list of available VPN tunnels by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "description": "Returns the specified TargetGrpcProxy resource in the given scope.", + "flatPath": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", "httpMethod": "GET", - "id": "compute.vpnTunnels.get", + "id": "compute.targetGrpcProxies.get", "parameterOrder": [ "project", - "region", - "vpnTunnel" + "targetGrpcProxy" ], "parameters": { "project": { @@ -28484,24 +28555,17 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "vpnTunnel": { - "description": "Name of the VpnTunnel resource to return.", + "targetGrpcProxy": { + "description": "Name of the TargetGrpcProxy resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "path": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", "response": { - "$ref": "VpnTunnel" + "$ref": "TargetGrpcProxy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -28510,13 +28574,12 @@ ] }, "insert": { - "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/vpnTunnels", + "description": "Creates a TargetGrpcProxy in the specified project in the given scope using the parameters that are included in the request.", + "flatPath": "projects/{project}/global/targetGrpcProxies", "httpMethod": "POST", - "id": "compute.vpnTunnels.insert", + "id": "compute.targetGrpcProxies.insert", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "project": { @@ -28526,22 +28589,15 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/vpnTunnels", + "path": "projects/{project}/global/targetGrpcProxies", "request": { - "$ref": "VpnTunnel" + "$ref": "TargetGrpcProxy" }, "response": { "$ref": "Operation" @@ -28552,17 +28608,16 @@ ] }, "list": { - "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", - "flatPath": "projects/{project}/regions/{region}/vpnTunnels", + "description": "Lists the TargetGrpcProxies for a project in the given scope.", + "flatPath": "projects/{project}/global/targetGrpcProxies", "httpMethod": "GET", - "id": "compute.vpnTunnels.list", + "id": "compute.targetGrpcProxies.list", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -28591,51 +28646,138 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "returnPartialSuccess": { "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/vpnTunnels", + "path": "projects/{project}/global/targetGrpcProxies", "response": { - "$ref": "VpnTunnelList" + "$ref": "TargetGrpcProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "zoneOperations": { - "methods": { - "delete": { - "description": "Deletes the specified zone-specific Operations resource.", - "flatPath": "projects/{project}/zones/{zone}/operations/{operation}", - "httpMethod": "DELETE", - "id": "compute.zoneOperations.delete", + }, + "patch": { + "description": "Patches the specified TargetGrpcProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", + "httpMethod": "PATCH", + "id": "compute.targetGrpcProxies.patch", "parameterOrder": [ "project", - "zone", - "operation" + "targetGrpcProxy" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to delete.", + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetGrpcProxy": { + "description": "Name of the TargetGrpcProxy resource to patch.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" + } + }, + "path": "projects/{project}/global/targetGrpcProxies/{targetGrpcProxy}", + "request": { + "$ref": "TargetGrpcProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "targetHttpProxies": { + "methods": { + "aggregatedList": { + "description": "Retrieves the list of all TargetHttpProxy resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/targetHttpProxies", + "httpMethod": "GET", + "id": "compute.targetHttpProxies.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/targetHttpProxies", + "response": { + "$ref": "TargetHttpProxyAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified TargetHttpProxy resource.", + "flatPath": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", + "httpMethod": "DELETE", + "id": "compute.targetHttpProxies.delete", + "parameterOrder": [ + "project", + "targetHttpProxy" + ], + "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -28643,38 +28785,72 @@ "required": true, "type": "string" }, - "zone": { - "description": "Name of the zone for this request.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/operations/{operation}", + "path": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", + "response": { + "$ref": "Operation" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, "get": { - "description": "Retrieves the specified zone-specific Operations resource.", - "flatPath": "projects/{project}/zones/{zone}/operations/{operation}", + "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request.", + "flatPath": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", "httpMethod": "GET", - "id": "compute.zoneOperations.get", + "id": "compute.targetHttpProxies.get", "parameterOrder": [ "project", - "zone", - "operation" + "targetHttpProxy" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to return.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", + "response": { + "$ref": "TargetHttpProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", + "flatPath": "projects/{project}/global/targetHttpProxies", + "httpMethod": "POST", + "id": "compute.targetHttpProxies.insert", + "parameterOrder": [ + "project" + ], + "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -28682,36 +28858,35 @@ "required": true, "type": "string" }, - "zone": { - "description": "Name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "projects/{project}/zones/{zone}/operations/{operation}", + "path": "projects/{project}/global/targetHttpProxies", + "request": { + "$ref": "TargetHttpProxy" + }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "list": { - "description": "Retrieves a list of Operation resources contained within the specified zone.", - "flatPath": "projects/{project}/zones/{zone}/operations", + "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", + "flatPath": "projects/{project}/global/targetHttpProxies", "httpMethod": "GET", - "id": "compute.zoneOperations.list", + "id": "compute.targetHttpProxies.list", "parameterOrder": [ - "project", - "zone" + "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -28744,18 +28919,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" - }, - "zone": { - "description": "Name of the zone for request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "projects/{project}/zones/{zone}/operations", + "path": "projects/{project}/global/targetHttpProxies", "response": { - "$ref": "OperationList" + "$ref": "TargetHttpProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -28763,24 +28931,58 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "wait": { - "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method waits for no more than the 2 minutes and then returns the current state of the operation, which might be `DONE` or still in progress. This method is called on a best-effort basis. Specifically: - In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. - If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`. ", - "flatPath": "projects/{project}/zones/{zone}/operations/{operation}/wait", - "httpMethod": "POST", - "id": "compute.zoneOperations.wait", + "patch": { + "description": "Patches the specified TargetHttpProxy resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", + "httpMethod": "PATCH", + "id": "compute.targetHttpProxies.patch", "parameterOrder": [ "project", - "zone", - "operation" + "targetHttpProxy" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to return.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", + "request": { + "$ref": "TargetHttpProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setUrlMap": { + "description": "Changes the URL map for TargetHttpProxy.", + "flatPath": "projects/{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "httpMethod": "POST", + "id": "compute.targetHttpProxies.setUrlMap", + "parameterOrder": [ + "project", + "targetHttpProxy" + ], + "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -28788,36 +28990,142 @@ "required": true, "type": "string" }, - "zone": { - "description": "Name of the zone for this request.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy to set a URL map for.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/operations/{operation}/wait", + "path": "projects/{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "request": { + "$ref": "UrlMapReference" + }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } }, - "zones": { + "targetHttpsProxies": { "methods": { + "aggregatedList": { + "description": "Retrieves the list of all TargetHttpsProxy resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/targetHttpsProxies", + "httpMethod": "GET", + "id": "compute.targetHttpsProxies.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/targetHttpsProxies", + "response": { + "$ref": "TargetHttpsProxyAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified TargetHttpsProxy resource.", + "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "DELETE", + "id": "compute.targetHttpsProxies.delete", + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "get": { - "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request.", - "flatPath": "projects/{project}/zones/{zone}", + "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request.", + "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", "httpMethod": "GET", - "id": "compute.zones.get", + "id": "compute.targetHttpsProxies.get", "parameterOrder": [ "project", - "zone" + "targetHttpsProxy" ], "parameters": { "project": { @@ -28827,17 +29135,17 @@ "required": true, "type": "string" }, - "zone": { - "description": "Name of the zone resource to return.", + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}", + "path": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", "response": { - "$ref": "Zone" + "$ref": "TargetHttpsProxy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -28845,17 +29153,51 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "insert": { + "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", + "flatPath": "projects/{project}/global/targetHttpsProxies", + "httpMethod": "POST", + "id": "compute.targetHttpsProxies.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/targetHttpsProxies", + "request": { + "$ref": "TargetHttpsProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "list": { - "description": "Retrieves the list of Zone resources available to the specified project.", - "flatPath": "projects/{project}/zones", + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", + "flatPath": "projects/{project}/global/targetHttpsProxies", "httpMethod": "GET", - "id": "compute.zones.list", + "id": "compute.targetHttpsProxies.list", "parameterOrder": [ "project" ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", "location": "query", "type": "string" }, @@ -28890,58 +29232,3505 @@ "type": "boolean" } }, - "path": "projects/{project}/zones", + "path": "projects/{project}/global/targetHttpsProxies", "response": { - "$ref": "ZoneList" + "$ref": "TargetHttpsProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - } - }, - "revision": "20220215", - "rootUrl": "https://compute.googleapis.com/", - "schemas": { - "AcceleratorConfig": { - "description": "A specification of the type and number of accelerator cards attached to the instance.", - "id": "AcceleratorConfig", - "properties": { - "acceleratorCount": { - "description": "The number of the guest accelerator cards exposed to this instance.", - "format": "int32", - "type": "integer" - }, - "acceleratorType": { - "description": "Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types.", - "type": "string" - } - }, - "type": "object" - }, - "AcceleratorType": { - "description": "Represents an Accelerator Type resource. Google Cloud Platform provides graphics processing units (accelerators) that you can add to VM instances to improve or accelerate performance when working with intensive workloads. For more information, read GPUs on Compute Engine.", - "id": "AcceleratorType", - "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this accelerator type." - }, - "description": { - "description": "[Output Only] An optional textual description of the resource.", - "type": "string" }, - "id": { - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", - "format": "uint64", - "type": "string" + "patch": { + "description": "Patches the specified TargetHttpsProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "PATCH", + "id": "compute.targetHttpsProxies.patch", + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "request": { + "$ref": "TargetHttpsProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setCertificateMap": { + "description": "Changes the Certificate Map for TargetHttpsProxy.", + "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}/setCertificateMap", + "httpMethod": "POST", + "id": "compute.targetHttpsProxies.setCertificateMap", + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource whose CertificateMap is to be set. 
The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}/setCertificateMap", + "request": { + "$ref": "TargetHttpsProxiesSetCertificateMapRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setQuicOverride": { + "description": "Sets the QUIC override policy for TargetHttpsProxy.", + "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride", + "httpMethod": "POST", + "id": "compute.targetHttpsProxies.setQuicOverride", + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to set the QUIC override policy for. The name should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride", + "request": { + "$ref": "TargetHttpsProxiesSetQuicOverrideRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setSslCertificates": { + "description": "Replaces SslCertificates for TargetHttpsProxy.", + "flatPath": "projects/{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "httpMethod": "POST", + "id": "compute.targetHttpsProxies.setSslCertificates", + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "request": { + "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setSslPolicy": { + "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy", + "httpMethod": "POST", + "id": "compute.targetHttpsProxies.setSslPolicy", + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource whose SSL policy is to be set. 
The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy", + "request": { + "$ref": "SslPolicyReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setUrlMap": { + "description": "Changes the URL map for TargetHttpsProxy.", + "flatPath": "projects/{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "httpMethod": "POST", + "id": "compute.targetHttpsProxies.setUrlMap", + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "request": { + "$ref": "UrlMapReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "targetInstances": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of target instances.", + "flatPath": "projects/{project}/aggregated/targetInstances", + "httpMethod": "GET", + "id": "compute.targetInstances.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/targetInstances", + "response": { + "$ref": "TargetInstanceAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified TargetInstance resource.", + "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}", + "httpMethod": "DELETE", + "id": "compute.targetInstances.delete", + "parameterOrder": [ + "project", + "zone", + "targetInstance" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetInstance": { + "description": "Name of the TargetInstance resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetInstance resource. 
Gets a list of available target instances by making a list() request.", + "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}", + "httpMethod": "GET", + "id": "compute.targetInstances.get", + "parameterOrder": [ + "project", + "zone", + "targetInstance" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "targetInstance": { + "description": "Name of the TargetInstance resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}", + "response": { + "$ref": "TargetInstance" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", + "flatPath": "projects/{project}/zones/{zone}/targetInstances", + "httpMethod": "POST", + "id": "compute.targetInstances.insert", + "parameterOrder": [ + "project", + "zone" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "Name of the zone scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/targetInstances", + "request": { + "$ref": "TargetInstance" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", + "flatPath": "projects/{project}/zones/{zone}/targetInstances", + "httpMethod": "GET", + "id": "compute.targetInstances.list", + "parameterOrder": [ + "project", + "zone" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + }, + "zone": { + "description": "Name of the zone scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/targetInstances", + "response": { + "$ref": "TargetInstanceList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "targetPools": { + "methods": { + "addHealthCheck": { + "description": "Adds health check URLs to a target pool.", + "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", + "httpMethod": "POST", + "id": "compute.targetPools.addHealthCheck", + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetPool": { + "description": "Name of the target pool to add a health check to.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", + "request": { + "$ref": "TargetPoolsAddHealthCheckRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "addInstance": { + "description": "Adds an instance to a target pool.", + "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/addInstance", + "httpMethod": "POST", + "id": "compute.targetPools.addInstance", + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetPool": { + "description": "Name of the TargetPool resource to add instances to.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/addInstance", + "request": { + "$ref": "TargetPoolsAddInstanceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "aggregatedList": { + "description": "Retrieves an aggregated list of target pools.", + "flatPath": "projects/{project}/aggregated/targetPools", + "httpMethod": "GET", + "id": "compute.targetPools.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/targetPools", + "response": { + "$ref": "TargetPoolAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified target pool.", + "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}", + "httpMethod": "DELETE", + "id": "compute.targetPools.delete", + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetPool": { + "description": "Name of the TargetPool resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetPools/{targetPool}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified target pool. 
Gets a list of available target pools by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}", + "httpMethod": "GET", + "id": "compute.targetPools.get", + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetPool": { + "description": "Name of the TargetPool resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetPools/{targetPool}", + "response": { + "$ref": "TargetPool" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getHealth": { + "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", + "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/getHealth", + "httpMethod": "POST", + "id": "compute.targetPools.getHealth", + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetPool": { + "description": "Name of the TargetPool resource to which the queried instance belongs.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/getHealth", + "request": { + "$ref": "InstanceReference" + }, + "response": { + "$ref": "TargetPoolInstanceHealth" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a target pool in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/targetPools", + "httpMethod": "POST", + "id": "compute.targetPools.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional 
request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetPools", + "request": { + "$ref": "TargetPool" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of target pools available to the specified project and region.", + "flatPath": "projects/{project}/regions/{region}/targetPools", + "httpMethod": "GET", + "id": "compute.targetPools.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/targetPools", + "response": { + "$ref": "TargetPoolList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "removeHealthCheck": { + "description": "Removes health check URL from a target pool.", + "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + "httpMethod": "POST", + "id": "compute.targetPools.removeHealthCheck", + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetPool": { + "description": "Name of the target pool to remove health checks from.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + "request": { + "$ref": "TargetPoolsRemoveHealthCheckRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "removeInstance": { + "description": "Removes instance URL from a target pool.", + "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + "httpMethod": "POST", + "id": "compute.targetPools.removeInstance", + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetPool": { + "description": "Name of the TargetPool resource to remove instances from.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + "request": { + "$ref": "TargetPoolsRemoveInstanceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setBackup": { + "description": "Changes a backup target pool's configurations.", + "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setBackup", + "httpMethod": "POST", + "id": "compute.targetPools.setBackup", + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "parameters": { + "failoverRatio": { + "description": "New failoverRatio value for the target pool.", + "format": "float", + "location": "query", + "type": "number" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetPool": { + "description": "Name of the TargetPool resource to set a backup pool for.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setBackup", + "request": { + "$ref": "TargetReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "targetSslProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetSslProxy resource.", + "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}", + "httpMethod": "DELETE", + "id": "compute.targetSslProxies.delete", + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetSslProxy": { + "description": "Name of the TargetSslProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetSslProxy resource. 
Gets a list of available target SSL proxies by making a list() request.", + "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}", + "httpMethod": "GET", + "id": "compute.targetSslProxies.get", + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "targetSslProxy": { + "description": "Name of the TargetSslProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}", + "response": { + "$ref": "TargetSslProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", + "flatPath": "projects/{project}/global/targetSslProxies", + "httpMethod": "POST", + "id": "compute.targetSslProxies.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/targetSslProxies", + "request": { + "$ref": "TargetSslProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", + "flatPath": "projects/{project}/global/targetSslProxies", + "httpMethod": "GET", + "id": "compute.targetSslProxies.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/global/targetSslProxies", + "response": { + "$ref": "TargetSslProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setBackendService": { + "description": "Changes the BackendService for TargetSslProxy.", + "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setBackendService", + "httpMethod": "POST", + "id": "compute.targetSslProxies.setBackendService", + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetSslProxy": { + "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setBackendService", + "request": { + "$ref": "TargetSslProxiesSetBackendServiceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setCertificateMap": { + "description": "Changes the Certificate Map for TargetSslProxy.", + "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setCertificateMap", + "httpMethod": "POST", + "id": "compute.targetSslProxies.setCertificateMap", + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetSslProxy": { + "description": "Name of the TargetSslProxy resource whose CertificateMap is to be set. The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setCertificateMap", + "request": { + "$ref": "TargetSslProxiesSetCertificateMapRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setProxyHeader": { + "description": "Changes the ProxyHeaderType for TargetSslProxy.", + "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader", + "httpMethod": "POST", + "id": "compute.targetSslProxies.setProxyHeader", + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetSslProxy": { + "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader", + "request": { + "$ref": "TargetSslProxiesSetProxyHeaderRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setSslCertificates": { + "description": "Changes SslCertificates for TargetSslProxy.", + "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates", + "httpMethod": "POST", + "id": "compute.targetSslProxies.setSslCertificates", + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetSslProxy": { + "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates", + "request": { + "$ref": "TargetSslProxiesSetSslCertificatesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setSslPolicy": { + "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", + "httpMethod": "POST", + "id": "compute.targetSslProxies.setSslPolicy", + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetSslProxy": { + "description": "Name of the TargetSslProxy resource whose SSL policy is to be set. 
The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", + "request": { + "$ref": "SslPolicyReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "targetTcpProxies": { + "methods": { + "aggregatedList": { + "description": "Retrieves the list of all TargetTcpProxy resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/targetTcpProxies", + "httpMethod": "GET", + "id": "compute.targetTcpProxies.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/targetTcpProxies", + "response": { + "$ref": "TargetTcpProxyAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified TargetTcpProxy resource.", + "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", + "httpMethod": "DELETE", + "id": "compute.targetTcpProxies.delete", + "parameterOrder": [ + "project", + "targetTcpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request.", + "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", + "httpMethod": "GET", + "id": "compute.targetTcpProxies.get", + "parameterOrder": [ + "project", + "targetTcpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", + "response": { + "$ref": "TargetTcpProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request.", + "flatPath": "projects/{project}/global/targetTcpProxies", + "httpMethod": "POST", + "id": "compute.targetTcpProxies.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/targetTcpProxies", + "request": { + "$ref": "TargetTcpProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of TargetTcpProxy resources available to the specified project.", + "flatPath": "projects/{project}/global/targetTcpProxies", + "httpMethod": "GET", + "id": "compute.targetTcpProxies.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/global/targetTcpProxies", + "response": { + "$ref": "TargetTcpProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setBackendService": { + "description": "Changes the BackendService for TargetTcpProxy.", + "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService", + "httpMethod": "POST", + "id": "compute.targetTcpProxies.setBackendService", + "parameterOrder": [ + "project", + "targetTcpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService", + "request": { + "$ref": "TargetTcpProxiesSetBackendServiceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setProxyHeader": { + "description": "Changes the ProxyHeaderType for TargetTcpProxy.", + "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader", + "httpMethod": "POST", + "id": "compute.targetTcpProxies.setProxyHeader", + "parameterOrder": [ + "project", + "targetTcpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader", + "request": { + "$ref": "TargetTcpProxiesSetProxyHeaderRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "targetVpnGateways": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of target VPN gateways.", + "flatPath": "projects/{project}/aggregated/targetVpnGateways", + "httpMethod": "GET", + "id": "compute.targetVpnGateways.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/targetVpnGateways", + "response": { + "$ref": "TargetVpnGatewayAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified target VPN gateway.", + "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "httpMethod": "DELETE", + "id": "compute.targetVpnGateways.delete", + "parameterOrder": [ + "project", + "region", + "targetVpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetVpnGateway": { + "description": "Name of the target VPN gateway to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified target VPN gateway. 
Gets a list of available target VPN gateways by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "httpMethod": "GET", + "id": "compute.targetVpnGateways.get", + "parameterOrder": [ + "project", + "region", + "targetVpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetVpnGateway": { + "description": "Name of the target VPN gateway to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "response": { + "$ref": "TargetVpnGateway" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/targetVpnGateways", + "httpMethod": "POST", + "id": "compute.targetVpnGateways.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetVpnGateways", + "request": { + "$ref": "TargetVpnGateway" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of target VPN gateways available to the specified project and region.", + "flatPath": "projects/{project}/regions/{region}/targetVpnGateways", + "httpMethod": "GET", + "id": "compute.targetVpnGateways.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/targetVpnGateways", + "response": { + "$ref": "TargetVpnGatewayList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setLabels": { + "description": "Sets the labels on a TargetVpnGateway. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.targetVpnGateways.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "urlMaps": { + "methods": { + "aggregatedList": { + "description": "Retrieves the list of all UrlMap resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/urlMaps", + "httpMethod": "GET", + "id": "compute.urlMaps.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/urlMaps", + "response": { + "$ref": "UrlMapsAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified UrlMap resource.", + "flatPath": "projects/{project}/global/urlMaps/{urlMap}", + "httpMethod": "DELETE", + "id": "compute.urlMaps.delete", + "parameterOrder": [ + "project", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/urlMaps/{urlMap}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", + "flatPath": "projects/{project}/global/urlMaps/{urlMap}", + "httpMethod": "GET", + "id": "compute.urlMaps.get", + "parameterOrder": [ + "project", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/urlMaps/{urlMap}", + "response": { + "$ref": "UrlMap" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + "flatPath": "projects/{project}/global/urlMaps", + "httpMethod": "POST", + "id": "compute.urlMaps.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/urlMaps", + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "invalidateCache": { + "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap. For more information, see [Invalidating cached content](/cdn/docs/invalidating-cached-content).", + "flatPath": "projects/{project}/global/urlMaps/{urlMap}/invalidateCache", + "httpMethod": "POST", + "id": "compute.urlMaps.invalidateCache", + "parameterOrder": [ + "project", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/urlMaps/{urlMap}/invalidateCache", + "request": { + "$ref": "CacheInvalidationRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of UrlMap resources available to the specified project.", + "flatPath": "projects/{project}/global/urlMaps", + "httpMethod": "GET", + "id": "compute.urlMaps.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/global/urlMaps", + "response": { + "$ref": "UrlMapList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/global/urlMaps/{urlMap}", + "httpMethod": "PATCH", + "id": "compute.urlMaps.patch", + "parameterOrder": [ + "project", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/urlMaps/{urlMap}", + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "description": "Updates the specified UrlMap resource with the data included in the request.", + "flatPath": "projects/{project}/global/urlMaps/{urlMap}", + "httpMethod": "PUT", + "id": "compute.urlMaps.update", + "parameterOrder": [ + "project", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/urlMaps/{urlMap}", + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "validate": { + "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", + "flatPath": "projects/{project}/global/urlMaps/{urlMap}/validate", + "httpMethod": "POST", + "id": "compute.urlMaps.validate", + "parameterOrder": [ + "project", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to be validated as.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/urlMaps/{urlMap}/validate", + "request": { + "$ref": "UrlMapsValidateRequest" + }, + "response": { + "$ref": "UrlMapsValidateResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "vpnGateways": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of VPN gateways.", + "flatPath": "projects/{project}/aggregated/vpnGateways", + "httpMethod": "GET", + "id": "compute.vpnGateways.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/vpnGateways", + "response": { + "$ref": "VpnGatewayAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified VPN gateway.", + "flatPath": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}", + "httpMethod": "DELETE", + "id": "compute.vpnGateways.delete", + "parameterOrder": [ + "project", + "region", + "vpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "vpnGateway": { + "description": "Name of the VPN gateway to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified VPN gateway. 
Gets a list of available VPN gateways by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}", + "httpMethod": "GET", + "id": "compute.vpnGateways.get", + "parameterOrder": [ + "project", + "region", + "vpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "vpnGateway": { + "description": "Name of the VPN gateway to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}", + "response": { + "$ref": "VpnGateway" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getStatus": { + "description": "Returns the status for the specified VPN gateway.", + "flatPath": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}/getStatus", + "httpMethod": "GET", + "id": "compute.vpnGateways.getStatus", + "parameterOrder": [ + "project", + "region", + "vpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "vpnGateway": { + "description": "Name of the VPN gateway to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}/getStatus", + "response": { + "$ref": "VpnGatewaysGetStatusResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a VPN gateway in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/vpnGateways", + "httpMethod": "POST", + "id": "compute.vpnGateways.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnGateways", + "request": { + "$ref": "VpnGateway" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of VPN gateways available to the specified project and region.", + "flatPath": "projects/{project}/regions/{region}/vpnGateways", + "httpMethod": "GET", + "id": "compute.vpnGateways.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/vpnGateways", + "response": { + "$ref": "VpnGatewayList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setLabels": { + "description": "Sets the labels on a VpnGateway. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/vpnGateways/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.vpnGateways.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnGateways/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/vpnGateways/{resource}/testIamPermissions", + "httpMethod": "POST", + "id": "compute.vpnGateways.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnGateways/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "vpnTunnels": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of VPN tunnels.", + "flatPath": "projects/{project}/aggregated/vpnTunnels", + "httpMethod": "GET", + "id": "compute.vpnTunnels.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/vpnTunnels", + "response": { + "$ref": "VpnTunnelAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified VpnTunnel resource.", + "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "httpMethod": "DELETE", + "id": "compute.vpnTunnels.delete", + "parameterOrder": [ + "project", + "region", + "vpnTunnel" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "vpnTunnel": { + "description": "Name of the VpnTunnel resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified VpnTunnel resource. 
Gets a list of available VPN tunnels by making a list() request.", + "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "httpMethod": "GET", + "id": "compute.vpnTunnels.get", + "parameterOrder": [ + "project", + "region", + "vpnTunnel" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "vpnTunnel": { + "description": "Name of the VpnTunnel resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "response": { + "$ref": "VpnTunnel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/vpnTunnels", + "httpMethod": "POST", + "id": "compute.vpnTunnels.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnTunnels", + "request": { + "$ref": "VpnTunnel" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", + "flatPath": "projects/{project}/regions/{region}/vpnTunnels", + "httpMethod": "GET", + "id": "compute.vpnTunnels.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/vpnTunnels", + "response": { + "$ref": "VpnTunnelList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setLabels": { + "description": "Sets the labels on a VpnTunnel. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.vpnTunnels.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "zoneOperations": { + "methods": { + "delete": { + "description": "Deletes the specified zone-specific Operations resource.", + "flatPath": "projects/{project}/zones/{zone}/operations/{operation}", + "httpMethod": "DELETE", + "id": "compute.zoneOperations.delete", + "parameterOrder": [ + "project", + "zone", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/operations/{operation}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Retrieves the specified zone-specific Operations resource.", + "flatPath": "projects/{project}/zones/{zone}/operations/{operation}", + "httpMethod": "GET", + "id": "compute.zoneOperations.get", + "parameterOrder": [ + "project", + "zone", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/operations/{operation}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "description": "Retrieves a list of Operation resources contained within the specified zone.", + "flatPath": "projects/{project}/zones/{zone}/operations", + "httpMethod": "GET", + "id": "compute.zoneOperations.list", + "parameterOrder": [ + "project", + "zone" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the 
response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + }, + "zone": { + "description": "Name of the zone for request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/operations", + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "wait": { + "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method waits for no more than the 2 minutes and then returns the current state of the operation, which might be `DONE` or still in progress. This method is called on a best-effort basis. Specifically: - In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. - If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`. ", + "flatPath": "projects/{project}/zones/{zone}/operations/{operation}/wait", + "httpMethod": "POST", + "id": "compute.zoneOperations.wait", + "parameterOrder": [ + "project", + "zone", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/operations/{operation}/wait", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "zones": { + "methods": { + "get": { + "description": "Returns the specified Zone resource. 
Gets a list of available zones by making a list() request.", + "flatPath": "projects/{project}/zones/{zone}", + "httpMethod": "GET", + "id": "compute.zones.get", + "parameterOrder": [ + "project", + "zone" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}", + "response": { + "$ref": "Zone" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "description": "Retrieves the list of Zone resources available to the specified project.", + "flatPath": "projects/{project}/zones", + "httpMethod": "GET", + "id": "compute.zones.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/zones", + "response": { + "$ref": "ZoneList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + } + }, + "revision": "20221126", + "rootUrl": "https://compute.googleapis.com/", + "schemas": { + "AcceleratorConfig": { + "description": "A specification of the type and number of accelerator cards attached to the instance.", + "id": "AcceleratorConfig", + "properties": { + "acceleratorCount": { + "description": "The number of the guest accelerator cards exposed to this instance.", + "format": "int32", + "type": "integer" + }, + "acceleratorType": { + "description": "Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types.", + "type": "string" + } + }, + "type": "object" + }, + "AcceleratorType": { + "description": "Represents an Accelerator Type resource. Google Cloud Platform provides graphics processing units (accelerators) that you can add to VM instances to improve or accelerate performance when working with intensive workloads. 
For more information, read GPUs on Compute Engine.", + "id": "AcceleratorType", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this accelerator type." + }, + "description": { + "description": "[Output Only] An optional textual description of the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" }, "kind": { "default": "compute#acceleratorType", @@ -29018,10 +32807,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -29045,10 +32836,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -29134,10 +32927,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -29161,10 +32956,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -29232,10 +33029,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -29259,10 +33058,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -29421,6 +33222,18 @@ ], "type": "string" }, + "ipv6EndpointType": { + "description": "The endpoint type of this address, which should be VM or NETLB. This is used for deciding which type of endpoint this address can be used after the external IPv6 address reservation.", + "enum": [ + "NETLB", + "VM" + ], + "enumDescriptions": [ + "Reserved IPv6 address can be used on network load balancer.", + "Reserved IPv6 address can be used on VM." + ], + "type": "string" + }, "kind": { "default": "compute#address", "description": "[Output Only] Type of the resource. Always compute#address for addresses.", @@ -29462,22 +33275,24 @@ "type": "integer" }, "purpose": { - "description": "The purpose of this resource, which can be one of the following values: - GCE_ENDPOINT for addresses that are used by VM instances, alias IP ranges, load balancers, and similar resources. - DNS_RESOLVER for a DNS resolver address in a subnetwork for a Cloud DNS inbound forwarder IP addresses (regional internal IP address in a subnet of a VPC network) - VPC_PEERING for global internal IP addresses used for private services access allocated ranges. - NAT_AUTO for the regional external IP addresses used by Cloud NAT when allocating addresses using automatic NAT IP address allocation. 
- IPSEC_INTERCONNECT for addresses created from a private IP range that are reserved for a VLAN attachment in an *IPsec-encrypted Cloud Interconnect* configuration. These addresses are regional resources. Not currently available publicly. - `SHARED_LOADBALANCER_VIP` for an internal IP address that is assigned to multiple internal forwarding rules. - `PRIVATE_SERVICE_CONNECT` for a private network address that is used to configure Private Service Connect. Only global internal addresses can use this purpose. ", + "description": "The purpose of this resource, which can be one of the following values: - GCE_ENDPOINT for addresses that are used by VM instances, alias IP ranges, load balancers, and similar resources. - DNS_RESOLVER for a DNS resolver address in a subnetwork for a Cloud DNS inbound forwarder IP addresses (regional internal IP address in a subnet of a VPC network) - VPC_PEERING for global internal IP addresses used for private services access allocated ranges. - NAT_AUTO for the regional external IP addresses used by Cloud NAT when allocating addresses using automatic NAT IP address allocation. - IPSEC_INTERCONNECT for addresses created from a private IP range that are reserved for a VLAN attachment in an *HA VPN over Cloud Interconnect* configuration. These addresses are regional resources. - `SHARED_LOADBALANCER_VIP` for an internal IP address that is assigned to multiple internal forwarding rules. - `PRIVATE_SERVICE_CONNECT` for a private network address that is used to configure Private Service Connect. Only global internal addresses can use this purpose. ", "enum": [ "DNS_RESOLVER", "GCE_ENDPOINT", "IPSEC_INTERCONNECT", "NAT_AUTO", "PRIVATE_SERVICE_CONNECT", + "SERVERLESS", "SHARED_LOADBALANCER_VIP", "VPC_PEERING" ], "enumDescriptions": [ "DNS resolver address in the subnetwork.", "VM internal/alias IP, Internal LB service IP, etc.", - "A regional internal IP address range reserved for the VLAN attachment that is used in IPsec-encrypted Cloud Interconnect. This regional internal IP address range must not overlap with any IP address range of subnet/route in the VPC network and its peering networks. After the VLAN attachment is created with the reserved IP address range, when creating a new VPN gateway, its interface IP address is allocated from the associated VLAN attachment’s IP address range.", + "A regional internal IP address range reserved for the VLAN attachment that is used in HA VPN over Cloud Interconnect. This regional internal IP address range must not overlap with any IP address range of subnet/route in the VPC network and its peering networks. After the VLAN attachment is created with the reserved IP address range, when creating a new VPN gateway, its interface IP address is allocated from the associated VLAN attachment’s IP address range.", "External IP automatically reserved for Cloud NAT.", "A private network IP address that can be used to configure Private Service Connect. This purpose can be specified only for GLOBAL addresses of Type INTERNAL", + "A regional internal IP address range reserved for Serverless.", "A private network IP address that can be shared by multiple Internal Load Balancer forwarding rules.", "IP range for peer networks." 
], @@ -29568,10 +33383,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -29595,10 +33412,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -29684,10 +33503,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -29711,10 +33532,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -29782,10 +33605,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -29809,10 +33634,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -29872,6 +33699,11 @@ "description": "The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.", "format": "int32", "type": "integer" + }, + "visibleCoreCount": { + "description": "The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.", + "format": "int32", + "type": "integer" } }, "type": "object" @@ -29948,9 +33780,14 @@ "type": "object" }, "AllocationSpecificSKUReservation": { - "description": "This reservation type allows to pre allocate specific instance configuration. Next ID: 5", + "description": "This reservation type allows to pre allocate specific instance configuration. Next ID: 6", "id": "AllocationSpecificSKUReservation", "properties": { + "assuredCount": { + "description": "[Output Only] Indicates how many instances are actually usable currently.", + "format": "int64", + "type": "string" + }, "count": { "description": "Specifies the number of resources that are allocated.", "format": "int64", @@ -29972,6 +33809,20 @@ "description": "An instance-attached disk resource.", "id": "AttachedDisk", "properties": { + "architecture": { + "description": "[Output Only] The architecture of the attached disk. Valid values are ARM64 or X86_64.", + "enum": [ + "ARCHITECTURE_UNSPECIFIED", + "ARM64", + "X86_64" + ], + "enumDescriptions": [ + "Default value indicating Architecture is not set.", + "Machines with architecture ARM64", + "Machines with architecture X86_64" + ], + "type": "string" + }, "autoDelete": { "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance).", "type": "boolean" @@ -29993,6 +33844,10 @@ "format": "int64", "type": "string" }, + "forceAttach": { + "description": "[Input Only] Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error.", + "type": "boolean" + }, "guestOsFeatures": { "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", "items": { @@ -30010,7 +33865,7 @@ "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. 
Use initialization parameters to create boot disks or local SSDs attached to the new instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both." }, "interface": { - "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.", + "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. For most machine types, the default is SCSI. Local SSDs can use either NVME or SCSI. In certain configurations, persistent disks can use NVMe. For more information, see About persistent disks.", "enum": [ "NVME", "SCSI" @@ -30069,9 +33924,23 @@ "type": "object" }, "AttachedDiskInitializeParams": { - "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both.", + "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This field is persisted and returned for instanceTemplate and not returned in the context of instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both.", "id": "AttachedDiskInitializeParams", "properties": { + "architecture": { + "description": "The architecture of the attached disk. Valid values are arm64 or x86_64.", + "enum": [ + "ARCHITECTURE_UNSPECIFIED", + "ARM64", + "X86_64" + ], + "enumDescriptions": [ + "Default value indicating Architecture is not set.", + "Machines with architecture ARM64", + "Machines with architecture X86_64" + ], + "type": "string" + }, "description": { "description": "An optional description. Provide this property when creating the disk.", "type": "string" @@ -30086,7 +33955,7 @@ "type": "string" }, "diskType": { - "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL.", + "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you specify this field when creating a VM, you can provide either the full or partial URL. 
For example, the following values are valid: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType If you specify this field when creating or updating an instance template or all-instances configuration, specify the type of the disk, not the URL. For example: pd-standard.", "type": "string" }, "labels": { @@ -30122,6 +33991,13 @@ "format": "int64", "type": "string" }, + "resourceManagerTags": { + "additionalProperties": { + "type": "string" + }, + "description": "Resource manager tags to be bound to the disk. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT \u0026 PATCH) when empty.", + "type": "object" + }, "resourcePolicies": { "description": "Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name.", "items": { @@ -30135,7 +34011,7 @@ }, "sourceImageEncryptionKey": { "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. Instance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys." + "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. InstanceTemplate and InstancePropertiesPatch do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys." }, "sourceSnapshot": { "description": "The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceImage or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: global/snapshots/my-backup If the source snapshot is deleted later, this field will not be set.", @@ -30371,10 +34247,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -30398,10 +34276,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -30487,10 +34367,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -30514,10 +34396,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -30643,10 +34527,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -30670,10 +34556,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -30923,7 +34811,7 @@ "type": "string" }, "capacityScaler": { - "description": "A multiplier applied to the backend's target capacity of its balancing mode. The default value is 1, which means the group serves up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. The valid ranges are 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. You cannot configure a setting of 0 when there is only one backend attached to the backend service.", + "description": "A multiplier applied to the backend's target capacity of its balancing mode. The default value is 1, which means the group serves up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. The valid ranges are 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. You cannot configure a setting of 0 when there is only one backend attached to the backend service. Not available with backends that don't support using a balancingMode. This includes backends such as global internet NEGs, regional serverless NEGs, and PSC NEGs.", "format": "float", "type": "number" }, @@ -30970,7 +34858,7 @@ "type": "number" }, "maxUtilization": { - "description": "Optional parameter to define a target capacity for the UTILIZATIONbalancing mode. The valid range is [0.0, 1.0]. For usage guidelines, see Utilization balancing mode.", + "description": "Optional parameter to define a target capacity for the UTILIZATION balancing mode. The valid range is [0.0, 1.0]. For usage guidelines, see Utilization balancing mode.", "format": "float", "type": "number" } @@ -30989,6 +34877,18 @@ "$ref": "BackendBucketCdnPolicy", "description": "Cloud CDN configuration for this BackendBucket." }, + "compressionMode": { + "description": "Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.", + "enum": [ + "AUTOMATIC", + "DISABLED" + ], + "enumDescriptions": [ + "Automatically uses the best compression based on the Accept-Encoding header sent by the client.", + "Disables compression. Existing compressed responses cached by Cloud CDN will not be served to clients." + ], + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -31138,7 +35038,7 @@ "type": "array" }, "queryStringWhitelist": { - "description": "Names of query string parameters to include in cache keys. All other parameters will be excluded. 
'\u0026' and '=' will be percent encoded and not treated as delimiters.", + "description": "Names of query string parameters to include in cache keys. Default parameters are always included. '\u0026' and '=' will be percent encoded and not treated as delimiters.", "items": { "type": "string" }, @@ -31206,10 +35106,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -31233,10 +35135,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -31285,7 +35189,7 @@ "id": "BackendService", "properties": { "affinityCookieTtlSec": { - "description": "Lifetime of cookies in seconds. This setting is applicable to external and internal HTTP(S) load balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is one day (86,400). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "Lifetime of cookies in seconds. This setting is applicable to external and internal HTTP(S) load balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -31303,6 +35207,18 @@ "circuitBreakers": { "$ref": "CircuitBreakers" }, + "compressionMode": { + "description": "Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.", + "enum": [ + "AUTOMATIC", + "DISABLED" + ], + "enumDescriptions": [ + "Automatically uses the best compression based on the Accept-Encoding header sent by the client.", + "Disables compression. Existing compressed responses cached by Cloud CDN will not be served to clients." 
+ ], + "type": "string" + }, "connectionDraining": { "$ref": "ConnectionDraining" }, @@ -31394,6 +35310,13 @@ ], "type": "string" }, + "localityLbPolicies": { + "description": "A list of locality load balancing policies to be used in order of preference. Either the policy or the customPolicy field should be set. Overrides any value set in the localityLbPolicy field. localityLbPolicies is only supported when the BackendService is referenced by a URL Map that is referenced by a target gRPC proxy that has the validateForProxyless field set to true.", + "items": { + "$ref": "BackendServiceLocalityLoadBalancingPolicyConfig" + }, + "type": "array" + }, "localityLbPolicy": { "description": "The load balancing algorithm used within the scope of the locality. The possible values are: - ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. - LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. - RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. - RANDOM: The load balancer selects a random healthy host. - ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. - MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, see https://ai.google/research/pubs/pub44824 This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. If sessionAffinity is not NONE, and this field is not set to MAGLEV or RING_HASH, session affinity settings will not take effect. Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "enum": [ @@ -31435,7 +35358,7 @@ }, "outlierDetection": { "$ref": "OutlierDetection", - "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." + "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, HTTP2, or GRPC, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. 
" }, "port": { "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port.", @@ -31486,6 +35409,13 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "serviceBindings": { + "description": "URLs of networkservices.ServiceBinding resources. Can only be set if load balancing scheme is INTERNAL_SELF_MANAGED. If set, lists of backends and health checks must be both empty.", + "items": { + "type": "string" + }, + "type": "array" + }, "sessionAffinity": { "description": "Type of session affinity to use. The default is NONE. Only NONE and HEADER_FIELD are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. For more details, see: [Session Affinity](https://cloud.google.com/load-balancing/docs/backend-service#session_affinity).", "enum": [ @@ -31514,7 +35444,7 @@ "$ref": "Subsetting" }, "timeoutSec": { - "description": "Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. Instead, use maxStreamDuration.", + "description": "The backend service timeout has a different meaning depending on the type of load balancer. For more information see, Backend service settings. The default is 30 seconds. The full range of timeout values allowed goes from 1 through 2,147,483,647 seconds. This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. Instead, use maxStreamDuration.", "format": "int32", "type": "integer" } @@ -31571,10 +35501,353 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "BackendServiceCdnPolicy": { + "description": "Message containing Cloud CDN configuration for a backend service.", + "id": "BackendServiceCdnPolicy", + "properties": { + "bypassCacheOnRequestHeaders": { + "description": "Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.", + "items": { + "$ref": "BackendServiceCdnPolicyBypassCacheOnRequestHeader" + }, + "type": "array" + }, + "cacheKeyPolicy": { + "$ref": "CacheKeyPolicy", + "description": "The CacheKeyPolicy for this CdnPolicy." + }, + "cacheMode": { + "description": "Specifies the cache setting for all responses from this backend. 
The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached.", + "enum": [ + "CACHE_ALL_STATIC", + "FORCE_CACHE_ALL", + "INVALID_CACHE_MODE", + "USE_ORIGIN_HEADERS" + ], + "enumDescriptions": [ + "Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached.", + "Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content.", + "", + "Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server." + ], + "type": "string" + }, + "clientTtl": { + "description": "Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a \"public\" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a \"public\" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year).", + "format": "int32", + "type": "integer" + }, + "defaultTtl": { + "description": "Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of \"0\" means \"always revalidate\". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", + "format": "int32", + "type": "integer" + }, + "maxTtl": { + "description": "Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. 
Setting a TTL of \"0\" means \"always revalidate\". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", + "format": "int32", + "type": "integer" + }, + "negativeCaching": { + "description": "Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and override any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy.", + "type": "boolean" + }, + "negativeCachingPolicy": { + "description": "Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.", + "items": { + "$ref": "BackendServiceCdnPolicyNegativeCachingPolicy" + }, + "type": "array" + }, + "requestCoalescing": { + "description": "If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.", + "type": "boolean" + }, + "serveWhileStale": { + "description": "Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default \"max-stale\" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale.", + "format": "int32", + "type": "integer" + }, + "signedUrlCacheMaxAgeSec": { + "description": "Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a \"Cache-Control: public, max-age=[TTL]\" header, regardless of any existing Cache-Control header. 
The actual headers served in responses will not be altered.", + "format": "int64", + "type": "string" + }, + "signedUrlKeyNames": { + "description": "[Output Only] Names of the keys for signing request URLs.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "BackendServiceCdnPolicyBypassCacheOnRequestHeader": { + "description": "Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting.", + "id": "BackendServiceCdnPolicyBypassCacheOnRequestHeader", + "properties": { + "headerName": { + "description": "The header field name to match on when bypassing cache. Values are case-insensitive.", + "type": "string" + } + }, + "type": "object" + }, + "BackendServiceCdnPolicyNegativeCachingPolicy": { + "description": "Specify CDN TTLs for response error codes.", + "id": "BackendServiceCdnPolicyNegativeCachingPolicy", + "properties": { + "code": { + "description": "The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 are can be specified as values, and you cannot specify a status code more than once.", + "format": "int32", + "type": "integer" + }, + "ttl": { + "description": "The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "BackendServiceConnectionTrackingPolicy": { + "description": "Connection Tracking configuration for this BackendService.", + "id": "BackendServiceConnectionTrackingPolicy", + "properties": { + "connectionPersistenceOnUnhealthyBackends": { + "description": "Specifies connection persistence when backends are unhealthy. The default value is DEFAULT_FOR_PROTOCOL. If set to DEFAULT_FOR_PROTOCOL, the existing connections persist on unhealthy backends only for connection-oriented protocols (TCP and SCTP) and only if the Tracking Mode is PER_CONNECTION (default tracking mode) or the Session Affinity is configured for 5-tuple. They do not persist for UDP. If set to NEVER_PERSIST, after a backend becomes unhealthy, the existing connections on the unhealthy backend are never persisted on the unhealthy backend. They are always diverted to newly selected healthy backends (unless all backends are unhealthy). If set to ALWAYS_PERSIST, existing connections always persist on unhealthy backends regardless of protocol and session affinity. It is generally not recommended to use this mode overriding the default. For more details, see [Connection Persistence for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#connection-persistence) and [Connection Persistence for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#connection-persistence).", + "enum": [ + "ALWAYS_PERSIST", + "DEFAULT_FOR_PROTOCOL", + "NEVER_PERSIST" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "enableStrongAffinity": { + "description": "Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly.", + "type": "boolean" + }, + "idleTimeoutSec": { + "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). 
For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly.", + "format": "int32", + "type": "integer" + }, + "trackingMode": { + "description": "Specifies the key used for connection tracking. There are two options: - PER_CONNECTION: This is the default mode. The Connection Tracking is performed as per the Connection Key (default Hash Method) for the specific protocol. - PER_SESSION: The Connection Tracking is performed as per the configured Session Affinity. It matches the configured Session Affinity. For more details, see [Tracking Mode for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#tracking-mode) and [Tracking Mode for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#tracking-mode).", + "enum": [ + "INVALID_TRACKING_MODE", + "PER_CONNECTION", + "PER_SESSION" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "BackendServiceFailoverPolicy": { + "description": "For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", + "id": "BackendServiceFailoverPolicy", + "properties": { + "disableConnectionDrainOnFailover": { + "description": "This can be set to true only if the protocol is TCP. The default is false.", + "type": "boolean" + }, + "dropTrafficIfUnhealthy": { + "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.", + "type": "boolean" + }, + "failoverRatio": { + "description": "The value of the field must be in the range [0, 1]. If the value is 0, the load balancer performs a failover when the number of healthy primary VMs equals zero. For all other values, the load balancer performs a failover when the total number of healthy primary VMs is less than this ratio. 
For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview).", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, + "BackendServiceGroupHealth": { + "id": "BackendServiceGroupHealth", + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata defined as annotations on the network endpoint group.", + "type": "object" + }, + "healthStatus": { + "description": "Health state of the backend instances or endpoints in requested instance or network endpoint group, determined based on configured health checks.", + "items": { + "$ref": "HealthStatus" + }, + "type": "array" + }, + "kind": { + "default": "compute#backendServiceGroupHealth", + "description": "[Output Only] Type of resource. Always compute#backendServiceGroupHealth for the health of backend services.", + "type": "string" + } + }, + "type": "object" + }, + "BackendServiceIAP": { + "description": "Identity-Aware Proxy", + "id": "BackendServiceIAP", + "properties": { + "enabled": { + "description": "Whether the serving infrastructure will authenticate and authorize all incoming requests. If true, the oauth2ClientId and oauth2ClientSecret fields must be non-empty.", + "type": "boolean" + }, + "oauth2ClientId": { + "description": "OAuth2 client ID to use for the authentication flow.", + "type": "string" + }, + "oauth2ClientSecret": { + "description": "OAuth2 client secret to use for the authentication flow. For security reasons, this value cannot be retrieved via the API. Instead, the SHA-256 hash of the value is returned in the oauth2ClientSecretSha256 field. @InputOnly", + "type": "string" + }, + "oauth2ClientSecretSha256": { + "description": "[Output Only] SHA256 hash value for the field oauth2_client_secret above.", + "type": "string" + } + }, + "type": "object" + }, + "BackendServiceList": { + "description": "Contains a list of BackendService resources.", + "id": "BackendServiceList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of BackendService resources.", + "items": { + "$ref": "BackendService" + }, + "type": "array" + }, + "kind": { + "default": "compute#backendServiceList", + "description": "[Output Only] Type of resource. Always compute#backendServiceList for lists of backend services.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -31598,10 +35871,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -31645,345 +35920,69 @@ }, "type": "object" }, - "BackendServiceCdnPolicy": { - "description": "Message containing Cloud CDN configuration for a backend service.", - "id": "BackendServiceCdnPolicy", + "BackendServiceLocalityLoadBalancingPolicyConfig": { + "description": "Container for either a built-in LB policy supported by gRPC or Envoy or a custom one implemented by the end user.", + "id": "BackendServiceLocalityLoadBalancingPolicyConfig", "properties": { - "bypassCacheOnRequestHeaders": { - "description": "Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.", - "items": { - "$ref": "BackendServiceCdnPolicyBypassCacheOnRequestHeader" - }, - "type": "array" - }, - "cacheKeyPolicy": { - "$ref": "CacheKeyPolicy", - "description": "The CacheKeyPolicy for this CdnPolicy." - }, - "cacheMode": { - "description": "Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). 
Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached.", - "enum": [ - "CACHE_ALL_STATIC", - "FORCE_CACHE_ALL", - "INVALID_CACHE_MODE", - "USE_ORIGIN_HEADERS" - ], - "enumDescriptions": [ - "Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached.", - "Cache all content, ignoring any \"private\", \"no-store\" or \"no-cache\" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content.", - "", - "Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server." - ], - "type": "string" + "customPolicy": { + "$ref": "BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy" }, - "clientTtl": { - "description": "Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a \"public\" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a \"public\" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year).", - "format": "int32", - "type": "integer" - }, - "defaultTtl": { - "description": "Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of \"0\" means \"always revalidate\". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", - "format": "int32", - "type": "integer" - }, - "maxTtl": { - "description": "Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of \"0\" means \"always revalidate\". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", - "format": "int32", - "type": "integer" - }, - "negativeCaching": { - "description": "Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. 
When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and override any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy.", - "type": "boolean" - }, - "negativeCachingPolicy": { - "description": "Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists.", - "items": { - "$ref": "BackendServiceCdnPolicyNegativeCachingPolicy" - }, - "type": "array" - }, - "requestCoalescing": { - "description": "If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.", - "type": "boolean" - }, - "serveWhileStale": { - "description": "Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default \"max-stale\" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale.", - "format": "int32", - "type": "integer" - }, - "signedUrlCacheMaxAgeSec": { - "description": "Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a \"Cache-Control: public, max-age=[TTL]\" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.", - "format": "int64", - "type": "string" - }, - "signedUrlKeyNames": { - "description": "[Output Only] Names of the keys for signing request URLs.", - "items": { - "type": "string" - }, - "type": "array" + "policy": { + "$ref": "BackendServiceLocalityLoadBalancingPolicyConfigPolicy" } }, "type": "object" }, - "BackendServiceCdnPolicyBypassCacheOnRequestHeader": { - "description": "Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. 
The presence of such a header overrides the cache_mode setting.", - "id": "BackendServiceCdnPolicyBypassCacheOnRequestHeader", + "BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy": { + "description": "The configuration for a custom policy implemented by the user and deployed with the client.", + "id": "BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy", "properties": { - "headerName": { - "description": "The header field name to match on when bypassing cache. Values are case-insensitive.", + "data": { + "description": "An optional, arbitrary JSON object with configuration data, understood by a locally installed custom policy implementation.", "type": "string" - } - }, - "type": "object" - }, - "BackendServiceCdnPolicyNegativeCachingPolicy": { - "description": "Specify CDN TTLs for response error codes.", - "id": "BackendServiceCdnPolicyNegativeCachingPolicy", - "properties": { - "code": { - "description": "The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 are can be specified as values, and you cannot specify a status code more than once.", - "format": "int32", - "type": "integer" }, - "ttl": { - "description": "The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.", - "format": "int32", - "type": "integer" + "name": { + "description": "Identifies the custom policy. The value should match the type the custom implementation is registered with on the gRPC clients. It should follow protocol buffer message naming conventions and include the full path (e.g. myorg.CustomLbPolicy). The maximum length is 256 characters. Note that specifying the same custom policy more than once for a backend is not a valid configuration and will be rejected.", + "type": "string" } }, "type": "object" }, - "BackendServiceConnectionTrackingPolicy": { - "description": "Connection Tracking configuration for this BackendService.", - "id": "BackendServiceConnectionTrackingPolicy", + "BackendServiceLocalityLoadBalancingPolicyConfigPolicy": { + "description": "The configuration for a built-in load balancing policy.", + "id": "BackendServiceLocalityLoadBalancingPolicyConfigPolicy", "properties": { - "connectionPersistenceOnUnhealthyBackends": { - "description": "Specifies connection persistence when backends are unhealthy. The default value is DEFAULT_FOR_PROTOCOL. If set to DEFAULT_FOR_PROTOCOL, the existing connections persist on unhealthy backends only for connection-oriented protocols (TCP and SCTP) and only if the Tracking Mode is PER_CONNECTION (default tracking mode) or the Session Affinity is configured for 5-tuple. They do not persist for UDP. If set to NEVER_PERSIST, after a backend becomes unhealthy, the existing connections on the unhealthy backend are never persisted on the unhealthy backend. They are always diverted to newly selected healthy backends (unless all backends are unhealthy). If set to ALWAYS_PERSIST, existing connections always persist on unhealthy backends regardless of protocol and session affinity. It is generally not recommended to use this mode overriding the default. 
For more details, see [Connection Persistence for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#connection-persistence) and [Connection Persistence for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#connection-persistence).", - "enum": [ - "ALWAYS_PERSIST", - "DEFAULT_FOR_PROTOCOL", - "NEVER_PERSIST" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, - "idleTimeoutSec": { - "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly.", - "format": "int32", - "type": "integer" - }, - "trackingMode": { - "description": "Specifies the key used for connection tracking. There are two options: - PER_CONNECTION: This is the default mode. The Connection Tracking is performed as per the Connection Key (default Hash Method) for the specific protocol. - PER_SESSION: The Connection Tracking is performed as per the configured Session Affinity. It matches the configured Session Affinity. For more details, see [Tracking Mode for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#tracking-mode) and [Tracking Mode for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#tracking-mode).", + "name": { + "description": "The name of a locality load balancer policy to be used. The value should be one of the predefined ones as supported by localityLbPolicy, although at the moment only ROUND_ROBIN is supported. This field should only be populated when the customPolicy field is not used. Note that specifying the same policy more than once for a backend is not a valid configuration and will be rejected.", "enum": [ - "INVALID_TRACKING_MODE", - "PER_CONNECTION", - "PER_SESSION" + "INVALID_LB_POLICY", + "LEAST_REQUEST", + "MAGLEV", + "ORIGINAL_DESTINATION", + "RANDOM", + "RING_HASH", + "ROUND_ROBIN" ], "enumDescriptions": [ "", - "", - "" + "An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests.", + "This algorithm implements consistent hashing to backends. Maglev can be used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, see https://ai.google/research/pubs/pub44824", + "Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer.", + "The load balancer selects a random healthy host.", + "The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests.", + "This is a simple policy in which each healthy backend is selected in round robin order. This is the default." 
], "type": "string" } }, "type": "object" }, - "BackendServiceFailoverPolicy": { - "description": "For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", - "id": "BackendServiceFailoverPolicy", - "properties": { - "disableConnectionDrainOnFailover": { - "description": "This can be set to true only if the protocol is TCP. The default is false.", - "type": "boolean" - }, - "dropTrafficIfUnhealthy": { - "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.", - "type": "boolean" - }, - "failoverRatio": { - "description": "The value of the field must be in the range [0, 1]. If the value is 0, the load balancer performs a failover when the number of healthy primary VMs equals zero. For all other values, the load balancer performs a failover when the total number of healthy primary VMs is less than this ratio. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview).", - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "BackendServiceGroupHealth": { - "id": "BackendServiceGroupHealth", - "properties": { - "annotations": { - "additionalProperties": { - "type": "string" - }, - "description": "Metadata defined as annotations on the network endpoint group.", - "type": "object" - }, - "healthStatus": { - "description": "Health state of the backend instances or endpoints in requested instance or network endpoint group, determined based on configured health checks.", - "items": { - "$ref": "HealthStatus" - }, - "type": "array" - }, - "kind": { - "default": "compute#backendServiceGroupHealth", - "description": "[Output Only] Type of resource. Always compute#backendServiceGroupHealth for the health of backend services.", - "type": "string" - } - }, - "type": "object" - }, - "BackendServiceIAP": { - "description": "Identity-Aware Proxy", - "id": "BackendServiceIAP", - "properties": { - "enabled": { - "description": "Whether the serving infrastructure will authenticate and authorize all incoming requests. 
If true, the oauth2ClientId and oauth2ClientSecret fields must be non-empty.", - "type": "boolean" - }, - "oauth2ClientId": { - "description": "OAuth2 client ID to use for the authentication flow.", - "type": "string" - }, - "oauth2ClientSecret": { - "description": "OAuth2 client secret to use for the authentication flow. For security reasons, this value cannot be retrieved via the API. Instead, the SHA-256 hash of the value is returned in the oauth2ClientSecretSha256 field. @InputOnly", - "type": "string" - }, - "oauth2ClientSecretSha256": { - "description": "[Output Only] SHA256 hash value for the field oauth2_client_secret above.", - "type": "string" - } - }, - "type": "object" - }, - "BackendServiceList": { - "description": "Contains a list of BackendService resources.", - "id": "BackendServiceList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of BackendService resources.", - "items": { - "$ref": "BackendService" - }, - "type": "array" - }, - "kind": { - "default": "compute#backendServiceList", - "description": "[Output Only] Type of resource. Always compute#backendServiceList for lists of backend services.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "LARGE_DEPLOYMENT_WARNING", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "PARTIAL_SUCCESS", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "Warning about failed cleanup of transient changes made by a failed operation.", - "A link to a deprecated resource was created.", - "When deploying and at least one of the resources has a type marked as deprecated", - "The user created a boot disk that is larger than image size.", - "When deploying and at least one of the resources has a type marked as experimental", - "Warning that is present in an external api call", - "Warning that value of a field has been overridden. 
Deprecated unused field.", - "The operation involved use of an injected kernel, which is deprecated.", - "When deploying a deployment with a exceedingly large number of resources", - "A resource depends on a missing type", - "The route's nextHopIp address is not assigned to an instance on the network.", - "The route's next hop instance cannot ip forward.", - "The route's nextHopInstance URL refers to an instance that does not exist.", - "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", - "The route's next hop instance does not have a status of RUNNING.", - "Error which is not critical. We decided to continue the process despite the mentioned error.", - "No results are present on a particular list page.", - "Success is reported, but some results may be missing due to errors", - "The user attempted to use a resource that requires a TOS they have not accepted.", - "Warning that a resource is in use.", - "One or more of the resources set to auto-delete could not be deleted because they were in use.", - "When a resource schema validation is ignored.", - "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", - "When undeclared properties in the schema are present", - "A given scope cannot be reached." - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, "BackendServiceLogConfig": { "description": "The available logging options for the load balancer traffic served by this backend service.", "id": "BackendServiceLogConfig", "properties": { "enable": { - "description": "This field denotes whether to enable logging for the load balancer traffic served by this backend service.", + "description": "Denotes whether to enable logging for the load balancer traffic served by this backend service. The default value is false.", "type": "boolean" }, "sampleRate": { @@ -32027,10 +36026,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -32054,10 +36055,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -32357,7 +36360,7 @@ "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", + "description": "Specifies the principals requesting access for a Google Cloud resource. 
`members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", "items": { "type": "string" }, @@ -32385,7 +36388,7 @@ }, "locationPolicy": { "$ref": "LocationPolicy", - "description": "Policy for chosing target zone." + "description": "Policy for chosing target zone. For more information, see Create VMs in bulk ." }, "minCount": { "description": "The minimum number of instances to create. If no min_count is specified then count is used as the default value. If min_count instances cannot be created, then no instances will be created and instances already created will be deleted.", @@ -32486,12 +36489,12 @@ "id": "CircuitBreakers", "properties": { "maxConnections": { - "description": "Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "The maximum number of connections to the backend service. If not specified, there is no limit. 
Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, "maxPendingRequests": { - "description": "Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "The maximum number of pending requests allowed to the backend service. If not specified, there is no limit. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -32501,12 +36504,12 @@ "type": "integer" }, "maxRequestsPerConnection": { - "description": "Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "Maximum requests for a single connection to the backend service. This parameter is respected by both the HTTP/1.1 and HTTP/2 implementations. If not specified, there is no limit. Setting this parameter to 1 will effectively disable keep alive. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, "maxRetries": { - "description": "Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "The maximum number of parallel retries allowed to the backend cluster. If not specified, the default is 1. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" } @@ -32561,6 +36564,13 @@ "$ref": "LicenseResourceCommitment", "description": "The license specification required as part of a license commitment." }, + "mergeSourceCommitments": { + "description": "List of source commitments to be merged into a new commitment.", + "items": { + "type": "string" + }, + "type": "array" + }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -32602,6 +36612,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "splitSourceCommitment": { + "description": "Source commitment to be splitted into a new commitment.", + "type": "string" + }, "startTimestamp": { "description": "[Output Only] Commitment start time in RFC3339 text format.", "type": "string" @@ -32610,6 +36624,7 @@ "description": "[Output Only] Status of the commitment with regards to eventual expiration (each commitment has an end date defined). 
One of the following values: NOT_YET_ACTIVE, ACTIVE, EXPIRED.", "enum": [ "ACTIVE", + "CANCELLED", "CREATING", "EXPIRED", "NOT_YET_ACTIVE" @@ -32618,6 +36633,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32638,6 +36654,7 @@ "GENERAL_PURPOSE_N2D", "GENERAL_PURPOSE_T2D", "MEMORY_OPTIMIZED", + "MEMORY_OPTIMIZED_M3", "TYPE_UNSPECIFIED" ], "enumDescriptions": [ @@ -32650,6 +36667,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32706,10 +36724,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -32733,10 +36753,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -32822,10 +36844,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -32849,10 +36873,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -32920,10 +36946,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -32947,10 +36975,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -33159,7 +37189,7 @@ "type": "array" }, "allowOriginRegexes": { - "description": "Specifies a regular expression that matches allowed origins. For more information about the regular expression syntax, see Syntax. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes.", + "description": "Specifies a regular expression that matches allowed origins. For more information about the regular expression syntax, see Syntax. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. Regular expressions can only be used when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED.", "items": { "type": "string" }, @@ -33274,6 +37304,20 @@ "description": "Represents a Persistent Disk resource. Google Compute Engine has two Disk resources: * [Zonal](/compute/docs/reference/rest/v1/disks) * [Regional](/compute/docs/reference/rest/v1/regionDisks) Persistent disks are required for running your VM instances. Create both boot and non-boot (data) persistent disks. For more information, read Persistent Disks. For more storage options, read Storage options. The disks resource represents a zonal persistent disk. For more information, read Zonal persistent disks. The regionDisks resource represents a regional persistent disk. 
For more information, read Regional resources.", "id": "Disk", "properties": { + "architecture": { + "description": "The architecture of the disk. Valid values are ARM64 or X86_64.", + "enum": [ + "ARCHITECTURE_UNSPECIFIED", + "ARM64", + "X86_64" + ], + "enumDescriptions": [ + "Default value indicating Architecture is not set.", + "Machines with architecture ARM64", + "Machines with architecture X86_64" + ], + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -33356,6 +37400,10 @@ "description": "Internal use only.", "type": "string" }, + "params": { + "$ref": "DiskParams", + "description": "Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload." + }, "physicalBlockSizeBytes": { "description": "Physical block size of the persistent disk, in bytes. If not present in a request, a default value is used. The currently supported size is 4096, other sizes may be added in the future. If an unsupported value is requested, the error message will list the supported values for the caller's project.", "format": "int64", @@ -33392,101 +37440,599 @@ "description": "[Output Only] Server-defined fully-qualified URL for this resource.", "type": "string" }, - "sizeGb": { - "description": "Size, in GB, of the persistent disk. You can specify this field when creating a persistent disk using the sourceImage, sourceSnapshot, or sourceDisk parameter, or specify it alone to create an empty persistent disk. If you specify this field along with a source, the value of sizeGb must not be less than the size of the source. Acceptable values are 1 to 65536, inclusive.", - "format": "int64", + "sizeGb": { + "description": "Size, in GB, of the persistent disk. You can specify this field when creating a persistent disk using the sourceImage, sourceSnapshot, or sourceDisk parameter, or specify it alone to create an empty persistent disk. If you specify this field along with a source, the value of sizeGb must not be less than the size of the source. Acceptable values are 1 to 65536, inclusive.", + "format": "int64", + "type": "string" + }, + "sourceDisk": { + "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", + "type": "string" + }, + "sourceDiskId": { + "description": "[Output Only] The unique ID of the disk used to create this disk. This value identifies the exact disk that was used to create this persistent disk. For example, if you created the persistent disk from a disk that was later deleted and recreated under the same name, the source disk ID would identify the exact version of the disk that was used.", + "type": "string" + }, + "sourceImage": { + "description": "The source image used to create this disk. If the source image is deleted, this field will not be set. To create a disk with one of the public operating system images, specify the image by its family name. 
For example, specify family/debian-9 to use the latest Debian 9 image: projects/debian-cloud/global/images/family/debian-9 Alternatively, use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD To create a disk with a custom image that you created, specify the image name in the following format: global/images/my-custom-image You can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name: global/images/family/my-image-family ", + "type": "string" + }, + "sourceImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key." + }, + "sourceImageId": { + "description": "[Output Only] The ID value of the image used to create this disk. This value identifies the exact image that was used to create this persistent disk. For example, if you created the persistent disk from an image that was later deleted and recreated under the same name, the source image ID would identify the exact version of the image that was used.", + "type": "string" + }, + "sourceSnapshot": { + "description": "The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project /global/snapshots/snapshot - projects/project/global/snapshots/snapshot - global/snapshots/snapshot ", + "type": "string" + }, + "sourceSnapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key." + }, + "sourceSnapshotId": { + "description": "[Output Only] The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used.", + "type": "string" + }, + "sourceStorageObject": { + "description": "The full Google Cloud Storage URI where the disk image is stored. This file must be a gzip-compressed tarball whose name ends in .tar.gz or virtual machine disk whose name ends in vmdk. Valid URIs may start with gs:// or https://storage.googleapis.com/. This flag is not optimized for creating multiple disks from a source storage object. To create many disks from a source storage object, use gcloud compute images import instead.", + "type": "string" + }, + "status": { + "description": "[Output Only] The status of disk creation. - CREATING: Disk is provisioning. - RESTORING: Source data is being copied into the disk. - FAILED: Disk creation failed. - READY: Disk is ready for use. - DELETING: Disk is deleting. ", + "enum": [ + "CREATING", + "DELETING", + "FAILED", + "READY", + "RESTORING" + ], + "enumDescriptions": [ + "Disk is provisioning", + "Disk is deleting.", + "Disk creation failed.", + "Disk is ready for use.", + "Source data is being copied into the disk." + ], + "type": "string" + }, + "type": { + "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. 
For example: projects/project /zones/zone/diskTypes/pd-ssd . See Persistent disk types.", + "type": "string" + }, + "users": { + "description": "[Output Only] Links to the users of the disk (attached instances) in form: projects/project/zones/zone/instances/instance", + "items": { + "type": "string" + }, + "type": "array" + }, + "zone": { + "description": "[Output Only] URL of the zone where the disk resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + } + }, + "type": "object" + }, + "DiskAggregatedList": { + "id": "DiskAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "DisksScopedList", + "description": "[Output Only] Name of the scope containing this set of disks." + }, + "description": "A list of DisksScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#diskAggregatedList", + "description": "[Output Only] Type of resource. Always compute#diskAggregatedList for aggregated lists of persistent disks.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "DiskInstantiationConfig": { + "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", + "id": "DiskInstantiationConfig", + "properties": { + "autoDelete": { + "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance).", + "type": "boolean" + }, + "customImage": { + "description": "The custom source image to be used to restore this disk when instantiating this instance template.", + "type": "string" + }, + "deviceName": { + "description": "Specifies the device name of the disk to which the configurations apply to.", + "type": "string" + }, + "instantiateFrom": { + "description": "Specifies whether to include the disk and what image to use. Possible values are: - source-image: to use the same image that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - source-image-family: to use the same image family that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - custom-image: to use a user-provided image url for disk creation. Applicable to the boot disk and additional read-write disks. - attach-read-only: to attach a read-only disk. Applicable to read-only disks. - do-not-include: to exclude a disk from the template. Applicable to additional read-write disks, local SSDs, and read-only disks. ", + "enum": [ + "ATTACH_READ_ONLY", + "BLANK", + "CUSTOM_IMAGE", + "DEFAULT", + "DO_NOT_INCLUDE", + "SOURCE_IMAGE", + "SOURCE_IMAGE_FAMILY" + ], + "enumDescriptions": [ + "Attach the existing disk in read-only mode. The request will fail if the disk was attached in read-write mode on the source instance. Applicable to: read-only disks.", + "Create a blank disk. The disk will be created unformatted. Applicable to: additional read-write disks, local SSDs.", + "Use the custom image specified in the custom_image field. Applicable to: boot disk, additional read-write disks.", + "Use the default instantiation option for the corresponding type of disk. For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes.", + "Do not include the disk in the instance template. Applicable to: additional read-write disks, local SSDs, read-only disks.", + "Use the same source image used for creation of the source instance's corresponding disk. The request will fail if the source VM's disk was created from a snapshot. Applicable to: boot disk, additional read-write disks.", + "Use the same source image family used for creation of the source instance's corresponding disk. The request will fail if the source image of the source disk does not belong to any image family. 
Applicable to: boot disk, additional read-write disks." + ], + "type": "string" + } + }, + "type": "object" + }, + "DiskList": { + "description": "A list of Disk resources.", + "id": "DiskList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of Disk resources.", + "items": { + "$ref": "Disk" + }, + "type": "array" + }, + "kind": { + "default": "compute#diskList", + "description": "[Output Only] Type of resource. Always compute#diskList for lists of disks.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "DiskMoveRequest": { + "id": "DiskMoveRequest", + "properties": { + "destinationZone": { + "description": "The URL of the destination zone to move the disk. This can be a full or partial URL. For example, the following are all valid URLs to a zone: - https://www.googleapis.com/compute/v1/projects/project/zones/zone - projects/project/zones/zone - zones/zone ", + "type": "string" + }, + "targetDisk": { + "description": "The URL of the target disk to move. This can be a full or partial URL. 
For example, the following are all valid URLs to a disk: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk ", + "type": "string" + } + }, + "type": "object" + }, + "DiskParams": { + "description": "Additional disk params.", + "id": "DiskParams", + "properties": { + "resourceManagerTags": { + "additionalProperties": { + "type": "string" + }, + "description": "Resource manager tags to be bound to the disk. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT \u0026 PATCH) when empty.", + "type": "object" + } + }, + "type": "object" + }, + "DiskType": { + "description": "Represents a Disk Type resource. Google Compute Engine has two Disk Type resources: * [Regional](/compute/docs/reference/rest/v1/regionDiskTypes) * [Zonal](/compute/docs/reference/rest/v1/diskTypes) You can choose from a variety of disk types based on your needs. For more information, read Storage options. The diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks. The regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks.", + "id": "DiskType", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "defaultDiskSizeGb": { + "description": "[Output Only] Server-defined default disk size in GB.", + "format": "int64", + "type": "string" + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this disk type." + }, + "description": { + "description": "[Output Only] An optional description of this resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", "type": "string" }, - "sourceDisk": { - "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", + "kind": { + "default": "compute#diskType", + "description": "[Output Only] Type of the resource. Always compute#diskType for disk types.", "type": "string" }, - "sourceDiskId": { - "description": "[Output Only] The unique ID of the disk used to create this disk. This value identifies the exact disk that was used to create this persistent disk. For example, if you created the persistent disk from a disk that was later deleted and recreated under the same name, the source disk ID would identify the exact version of the disk that was used.", + "name": { + "description": "[Output Only] Name of the resource.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "sourceImage": { - "description": "The source image used to create this disk. If the source image is deleted, this field will not be set. To create a disk with one of the public operating system images, specify the image by its family name. 
For example, specify family/debian-9 to use the latest Debian 9 image: projects/debian-cloud/global/images/family/debian-9 Alternatively, use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD To create a disk with a custom image that you created, specify the image name in the following format: global/images/my-custom-image You can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name: global/images/family/my-image-family ", + "region": { + "description": "[Output Only] URL of the region where the disk type resides. Only applicable for regional resources. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" }, - "sourceImageEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key." - }, - "sourceImageId": { - "description": "[Output Only] The ID value of the image used to create this disk. This value identifies the exact image that was used to create this persistent disk. For example, if you created the persistent disk from an image that was later deleted and recreated under the same name, the source image ID would identify the exact version of the image that was used.", + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "sourceSnapshot": { - "description": "The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project /global/snapshots/snapshot - projects/project/global/snapshots/snapshot - global/snapshots/snapshot ", + "validDiskSize": { + "description": "[Output Only] An optional textual description of the valid disk size, such as \"10GB-10TB\".", "type": "string" }, - "sourceSnapshotEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key." - }, - "sourceSnapshotId": { - "description": "[Output Only] The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used.", + "zone": { + "description": "[Output Only] URL of the zone where the disk type resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + } + }, + "type": "object" + }, + "DiskTypeAggregatedList": { + "id": "DiskTypeAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, - "sourceStorageObject": { - "description": "The full Google Cloud Storage URI where the disk image is stored. This file must be a gzip-compressed tarball whose name ends in .tar.gz or virtual machine disk whose name ends in vmdk. Valid URIs may start with gs:// or https://storage.googleapis.com/. 
This flag is not optimized for creating multiple disks from a source storage object. To create many disks from a source storage object, use gcloud compute images import instead.", + "items": { + "additionalProperties": { + "$ref": "DiskTypesScopedList", + "description": "[Output Only] Name of the scope containing this set of disk types." + }, + "description": "A list of DiskTypesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#diskTypeAggregatedList", + "description": "[Output Only] Type of resource. Always compute#diskTypeAggregatedList.", "type": "string" }, - "status": { - "description": "[Output Only] The status of disk creation. - CREATING: Disk is provisioning. - RESTORING: Source data is being copied into the disk. - FAILED: Disk creation failed. - READY: Disk is ready for use. - DELETING: Disk is deleting. ", - "enum": [ - "CREATING", - "DELETING", - "FAILED", - "READY", - "RESTORING" - ], - "enumDescriptions": [ - "Disk is provisioning", - "Disk is deleting.", - "Disk creation failed.", - "Disk is ready for use.", - "Source data is being copied into the disk." - ], + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, - "type": { - "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. For example: projects/project /zones/zone/diskTypes/pd-ssd . See Persistent disk types.", + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "users": { - "description": "[Output Only] Links to the users of the disk (attached instances) in form: projects/project/zones/zone/instances/instance", + "unreachables": { + "description": "[Output Only] Unreachable resources.", "items": { "type": "string" }, "type": "array" }, - "zone": { - "description": "[Output Only] URL of the zone where the disk resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", - "type": "string" + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "DiskAggregatedList": { - "id": "DiskAggregatedList", + "DiskTypeList": { + "description": "Contains a list of disk types.", + "id": "DiskTypeList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "DisksScopedList", - "description": "[Output Only] Name of the scope containing this set of disks." + "description": "A list of DiskType resources.", + "items": { + "$ref": "DiskType" }, - "description": "A list of DisksScopedList resources.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#diskAggregatedList", - "description": "[Output Only] Type of resource. Always compute#diskAggregatedList for aggregated lists of persistent disks.", + "default": "compute#diskTypeList", + "description": "[Output Only] Type of resource. Always compute#diskTypeList for disk types.", "type": "string" }, "nextPageToken": { @@ -33497,15 +38043,249 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "unreachables": { - "description": "[Output Only] Unreachable resources.", + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "DiskTypesScopedList": { + "id": "DiskTypesScopedList", + "properties": { + "diskTypes": { + "description": "[Output Only] A list of disk types contained in this scope.", + "items": { + "$ref": "DiskType" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning which replaces the list of disk types when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "DisksAddResourcePoliciesRequest": { + "id": "DisksAddResourcePoliciesRequest", + "properties": { + "resourcePolicies": { + "description": "Full or relative path to the resource policy to be added to this disk. You can only specify one resource policy.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "DisksRemoveResourcePoliciesRequest": { + "id": "DisksRemoveResourcePoliciesRequest", + "properties": { + "resourcePolicies": { + "description": "Resource policies to be removed from this disk.", "items": { "type": "string" }, "type": "array" + } + }, + "type": "object" + }, + "DisksResizeRequest": { + "id": "DisksResizeRequest", + "properties": { + "sizeGb": { + "description": "The new size of the persistent disk, which is specified in GB.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "DisksScopedList": { + "id": "DisksScopedList", + "properties": { + "disks": { + "description": "[Output Only] A list of disks contained in this scope.", + "items": { + "$ref": "Disk" + }, + "type": "array" }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "[Output Only] Informational warning which replaces the list of disks when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -33518,10 +38298,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -33545,10 +38327,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -33592,65 +38376,152 @@ }, "type": "object" }, - "DiskInstantiationConfig": { - "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", - "id": "DiskInstantiationConfig", + "DisplayDevice": { + "description": "A set of Display Device options", + "id": "DisplayDevice", + "properties": { + "enableDisplay": { + "description": "Defines whether the instance has Display enabled.", + "type": "boolean" + } + }, + "type": "object" + }, + "DistributionPolicy": { + "id": "DistributionPolicy", + "properties": { + "targetShape": { + "description": "The distribution shape to which the group converges either proactively or on resize events (depending on the value set in updatePolicy.instanceRedistributionType).", + "enum": [ + "ANY", + "BALANCED", + "EVEN" + ], + "enumDescriptions": [ + "The group picks zones for creating VM instances to fulfill the requested number of VMs within present resource constraints and to maximize utilization of unused zonal reservations. Recommended for batch workloads that do not require high availability.", + "The group prioritizes acquisition of resources, scheduling VMs in zones where resources are available while distributing VMs as evenly as possible across selected zones to minimize the impact of zonal failure. Recommended for highly available serving workloads.", + "The group schedules VM instance creation and deletion to achieve and maintain an even number of managed instances across the selected zones. The distribution is even when the number of managed instances does not differ by more than 1 between any two zones. Recommended for highly available serving workloads." + ], + "type": "string" + }, + "zones": { + "description": "Zones where the regional managed instance group will create and manage its instances.", + "items": { + "$ref": "DistributionPolicyZoneConfiguration" + }, + "type": "array" + } + }, + "type": "object" + }, + "DistributionPolicyZoneConfiguration": { + "id": "DistributionPolicyZoneConfiguration", + "properties": { + "zone": { + "annotations": { + "required": [ + "compute.regionInstanceGroupManagers.insert" + ] + }, + "description": "The URL of the zone. The zone must exist in the region where the managed instance group is located.", + "type": "string" + } + }, + "type": "object" + }, + "Duration": { + "description": "A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like \"day\" or \"month\". 
Range is approximately 10,000 years.", + "id": "Duration", + "properties": { + "nanos": { + "description": "Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive.", + "format": "int32", + "type": "integer" + }, + "seconds": { + "description": "Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "ErrorInfo": { + "description": "Describes the cause of the error with structured details. Example of an error when contacting the \"pubsub.googleapis.com\" API when it is not enabled: { \"reason\": \"API_DISABLED\" \"domain\": \"googleapis.com\" \"metadata\": { \"resource\": \"projects/123\", \"service\": \"pubsub.googleapis.com\" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { \"reason\": \"STOCKOUT\" \"domain\": \"spanner.googleapis.com\", \"metadata\": { \"availableRegions\": \"us-central1,us-east2\" } }", + "id": "ErrorInfo", + "properties": { + "domain": { + "description": "The logical grouping to which the \"reason\" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: \"pubsub.googleapis.com\". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is \"googleapis.com\".", + "type": "string" + }, + "metadatas": { + "additionalProperties": { + "type": "string" + }, + "description": "Additional structured details about this error. Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than {\"instanceLimit\": \"100/request\"}, should be returned as, {\"instanceLimitPerRequest\": \"100\"}, if the client exceeds the number of instances that can be created in a single (batch) request.", + "type": "object" + }, + "reason": { + "description": "The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE.", + "type": "string" + } + }, + "type": "object" + }, + "ExchangedPeeringRoute": { + "id": "ExchangedPeeringRoute", "properties": { - "autoDelete": { - "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance).", + "destRange": { + "description": "The destination range of the route.", + "type": "string" + }, + "imported": { + "description": "True if the peering route has been imported from a peer. 
The actual import happens if the field networkPeering.importCustomRoutes is true for this network, and networkPeering.exportCustomRoutes is true for the peer network, and the import does not result in a route conflict.", "type": "boolean" }, - "customImage": { - "description": "The custom source image to be used to restore this disk when instantiating this instance template.", + "nextHopRegion": { + "description": "The region of peering route next hop, only applies to dynamic routes.", "type": "string" }, - "deviceName": { - "description": "Specifies the device name of the disk to which the configurations apply to.", - "type": "string" + "priority": { + "description": "The priority of the peering route.", + "format": "uint32", + "type": "integer" }, - "instantiateFrom": { - "description": "Specifies whether to include the disk and what image to use. Possible values are: - source-image: to use the same image that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - source-image-family: to use the same image family that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - custom-image: to use a user-provided image url for disk creation. Applicable to the boot disk and additional read-write disks. - attach-read-only: to attach a read-only disk. Applicable to read-only disks. - do-not-include: to exclude a disk from the template. Applicable to additional read-write disks, local SSDs, and read-only disks. ", + "type": { + "description": "The type of the peering route.", "enum": [ - "ATTACH_READ_ONLY", - "BLANK", - "CUSTOM_IMAGE", - "DEFAULT", - "DO_NOT_INCLUDE", - "SOURCE_IMAGE", - "SOURCE_IMAGE_FAMILY" + "DYNAMIC_PEERING_ROUTE", + "STATIC_PEERING_ROUTE", + "SUBNET_PEERING_ROUTE" ], "enumDescriptions": [ - "Attach the existing disk in read-only mode. The request will fail if the disk was attached in read-write mode on the source instance. Applicable to: read-only disks.", - "Create a blank disk. The disk will be created unformatted. Applicable to: additional read-write disks, local SSDs.", - "Use the custom image specified in the custom_image field. Applicable to: boot disk, additional read-write disks.", - "Use the default instantiation option for the corresponding type of disk. For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes.", - "Do not include the disk in the instance template. Applicable to: additional read-write disks, local SSDs, read-only disks.", - "Use the same source image used for creation of the source instance's corresponding disk. The request will fail if the source VM's disk was created from a snapshot. Applicable to: boot disk, additional read-write disks.", - "Use the same source image family used for creation of the source instance's corresponding disk. The request will fail if the source image of the source disk does not belong to any image family. Applicable to: boot disk, additional read-write disks." + "For routes exported from local network.", + "The peering route.", + "The peering route corresponding to subnetwork range." 
], "type": "string" } }, "type": "object" }, - "DiskList": { - "description": "A list of Disk resources.", - "id": "DiskList", + "ExchangedPeeringRoutesList": { + "id": "ExchangedPeeringRoutesList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Disk resources.", + "description": "A list of ExchangedPeeringRoute resources.", "items": { - "$ref": "Disk" + "$ref": "ExchangedPeeringRoute" }, "type": "array" }, "kind": { - "default": "compute#diskList", - "description": "[Output Only] Type of resource. Always compute#diskList for lists of disks.", + "default": "compute#exchangedPeeringRoutesList", + "description": "[Output Only] Type of resource. Always compute#exchangedPeeringRoutesList for exchanged peering routes lists.", "type": "string" }, "nextPageToken": { @@ -33675,10 +38546,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -33702,10 +38575,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -33749,39 +38624,39 @@ }, "type": "object" }, - "DiskMoveRequest": { - "id": "DiskMoveRequest", + "Expr": { + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. 
See the service documentation for additional information.", + "id": "Expr", "properties": { - "destinationZone": { - "description": "The URL of the destination zone to move the disk. This can be a full or partial URL. For example, the following are all valid URLs to a zone: - https://www.googleapis.com/compute/v1/projects/project/zones/zone - projects/project/zones/zone - zones/zone ", + "description": { + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, - "targetDisk": { - "description": "The URL of the target disk to move. This can be a full or partial URL. For example, the following are all valid URLs to a disk: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk ", + "expression": { + "description": "Textual representation of an expression in Common Expression Language syntax.", + "type": "string" + }, + "location": { + "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + "type": "string" + }, + "title": { + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "type": "string" } }, "type": "object" }, - "DiskType": { - "description": "Represents a Disk Type resource. Google Compute Engine has two Disk Type resources: * [Regional](/compute/docs/reference/rest/v1/regionDiskTypes) * [Zonal](/compute/docs/reference/rest/v1/diskTypes) You can choose from a variety of disk types based on your needs. For more information, read Storage options. The diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks. The regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks.", - "id": "DiskType", + "ExternalVpnGateway": { + "description": "Represents an external VPN gateway. External VPN gateway is the on-premises VPN gateway(s) or another cloud provider's VPN gateway that connects to your Google Cloud VPN gateway. To create a highly available VPN from Google Cloud Platform to your VPN gateway or another cloud provider's VPN gateway, you must create an external VPN gateway resource with information about the other gateway. For more information about using external VPN gateways, see Creating an HA VPN gateway and tunnel pair to a peer VPN.", + "id": "ExternalVpnGateway", "properties": { "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "defaultDiskSizeGb": { - "description": "[Output Only] Server-defined default disk size in GB.", - "format": "int64", - "type": "string" - }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this disk type." - }, "description": { - "description": "[Output Only] An optional description of this resource.", + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, "id": { @@ -33789,53 +38664,98 @@ "format": "uint64", "type": "string" }, + "interfaces": { + "description": "A list of interfaces for this external VPN gateway. 
If your peer-side gateway is an on-premises gateway and non-AWS cloud providers' gateway, at most two interfaces can be provided for an external VPN gateway. If your peer side is an AWS virtual private gateway, four interfaces should be provided for an external VPN gateway.", + "items": { + "$ref": "ExternalVpnGatewayInterface" + }, + "type": "array" + }, "kind": { - "default": "compute#diskType", - "description": "[Output Only] Type of the resource. Always compute#diskType for disk types.", + "default": "compute#externalVpnGateway", + "description": "[Output Only] Type of the resource. Always compute#externalVpnGateway for externalVpnGateways.", + "type": "string" + }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this ExternalVpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an ExternalVpnGateway.", + "format": "byte", "type": "string" }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "type": "object" + }, "name": { - "description": "[Output Only] Name of the resource.", + "annotations": { + "required": [ + "compute.externalVpnGateways.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "region": { - "description": "[Output Only] URL of the region where the disk type resides. Only applicable for regional resources. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "redundancyType": { + "description": "Indicates the user-supplied redundancy type of this external VPN gateway.", + "enum": [ + "FOUR_IPS_REDUNDANCY", + "SINGLE_IP_INTERNALLY_REDUNDANT", + "TWO_IPS_REDUNDANCY" + ], + "enumDescriptions": [ + "The external VPN gateway has four public IP addresses; at the time of writing this API, the AWS virtual private gateway is an example which has four public IP addresses for high availability connections; there should be two VPN connections in the AWS virtual private gateway , each AWS VPN connection has two public IP addresses; please make sure to put two public IP addresses from one AWS VPN connection into interfaces 0 and 1 of this external VPN gateway, and put the other two public IP addresses from another AWS VPN connection into interfaces 2 and 3 of this external VPN gateway. 
When displaying highly available configuration status for the VPN tunnels connected to FOUR_IPS_REDUNDANCY external VPN gateway, Google will always detect whether interfaces 0 and 1 are connected on one interface of HA Cloud VPN gateway, and detect whether interfaces 2 and 3 are connected to another interface of the HA Cloud VPN gateway.", + "The external VPN gateway has only one public IP address which internally provides redundancy or failover.", + "The external VPN gateway has two public IP addresses which are redundant with each other, the following two types of setup on your on-premises side would have this type of redundancy: (1) Two separate on-premises gateways, each with one public IP address, the two on-premises gateways are redundant with each other. (2) A single on-premise gateway with two public IP addresses that are redundant with each other." + ], "type": "string" }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" + } + }, + "type": "object" + }, + "ExternalVpnGatewayInterface": { + "description": "The interface for the external VPN gateway.", + "id": "ExternalVpnGatewayInterface", + "properties": { + "id": { + "description": "The numeric ID of this interface. The allowed input values for this id for different redundancy types of external VPN gateway: - SINGLE_IP_INTERNALLY_REDUNDANT - 0 - TWO_IPS_REDUNDANCY - 0, 1 - FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 ", + "format": "uint32", + "type": "integer" }, - "validDiskSize": { - "description": "[Output Only] An optional textual description of the valid disk size, such as \"10GB-10TB\".", - "type": "string" - }, - "zone": { - "description": "[Output Only] URL of the zone where the disk type resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "ipAddress": { + "description": "IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine.", "type": "string" } }, "type": "object" }, - "DiskTypeAggregatedList": { - "id": "DiskTypeAggregatedList", + "ExternalVpnGatewayList": { + "description": "Response to the list request, and contains a list of externalVpnGateways.", + "id": "ExternalVpnGatewayList", "properties": { + "etag": { + "type": "string" + }, "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "DiskTypesScopedList", - "description": "[Output Only] Name of the scope containing this set of disk types." + "description": "A list of ExternalVpnGateway resources.", + "items": { + "$ref": "ExternalVpnGateway" }, - "description": "A list of DiskTypesScopedList resources.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#diskTypeAggregatedList", - "description": "[Output Only] Type of resource. Always compute#diskTypeAggregatedList.", + "default": "compute#externalVpnGatewayList", + "description": "[Output Only] Type of resource. 
Always compute#externalVpnGatewayList for lists of externalVpnGateways.", "type": "string" }, "nextPageToken": { @@ -33846,13 +38766,6 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "unreachables": { - "description": "[Output Only] Unreachable resources.", - "items": { - "type": "string" - }, - "type": "array" - }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -33867,10 +38780,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -33894,10 +38809,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -33941,24 +38858,200 @@ }, "type": "object" }, - "DiskTypeList": { - "description": "Contains a list of disk types.", - "id": "DiskTypeList", + "FileContentBuffer": { + "id": "FileContentBuffer", + "properties": { + "content": { + "description": "The raw content in the secure keys file.", + "format": "byte", + "type": "string" + }, + "fileType": { + "description": "The file type of source file.", + "enum": [ + "BIN", + "UNDEFINED", + "X509" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "Firewall": { + "description": "Represents a Firewall Rule resource. Firewall rules allow or deny ingress traffic to, and egress traffic from your instances. For more information, read Firewall rules.", + "id": "Firewall", + "properties": { + "allowed": { + "description": "The list of ALLOW rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", + "items": { + "properties": { + "IPProtocol": { + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number.", + "type": "string" + }, + "ports": { + "description": "An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. 
Example inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "array" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "denied": { + "description": "The list of DENY rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a denied connection.", + "items": { + "properties": { + "IPProtocol": { + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number.", + "type": "string" + }, + "ports": { + "description": "An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "array" + }, + "description": { + "description": "An optional description of this resource. Provide this field when you create the resource.", + "type": "string" + }, + "destinationRanges": { + "description": "If destination ranges are specified, the firewall rule applies only to traffic that has destination IP address in these ranges. These ranges must be expressed in CIDR format. Both IPv4 and IPv6 are supported.", + "items": { + "type": "string" + }, + "type": "array" + }, + "direction": { + "description": "Direction of traffic to which this firewall applies, either `INGRESS` or `EGRESS`. The default is `INGRESS`. For `EGRESS` traffic, you cannot specify the sourceTags fields.", + "enum": [ + "EGRESS", + "INGRESS" + ], + "enumDescriptions": [ + "Indicates that firewall should apply to outgoing traffic.", + "Indicates that firewall should apply to incoming traffic." + ], + "type": "string" + }, + "disabled": { + "description": "Denotes whether the firewall rule is disabled. When set to true, the firewall rule is not enforced and the network behaves as if it did not exist. If this is unspecified, the firewall rule will be enabled.", + "type": "boolean" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#firewall", + "description": "[Output Only] Type of the resource. Always compute#firewall for firewall rules.", + "type": "string" + }, + "logConfig": { + "$ref": "FirewallLogConfig", + "description": "This field denotes the logging options for a particular firewall rule. If logging is enabled, logs will be exported to Cloud Logging." + }, + "name": { + "annotations": { + "required": [ + "compute.firewalls.insert", + "compute.firewalls.patch" + ] + }, + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. 
The last character must be a lowercase letter or digit.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "network": { + "description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default If you choose to specify this field, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network - projects/myproject/global/networks/my-network - global/networks/default ", + "type": "string" + }, + "priority": { + "description": "Priority for this rule. This is an integer between `0` and `65535`, both inclusive. The default value is `1000`. Relative priorities determine which rule takes effect if multiple rules apply. Lower values indicate higher priority. For example, a rule with priority `0` has higher precedence than a rule with priority `1`. DENY rules take precedence over ALLOW rules if they have equal priority. Note that VPC networks have implied rules with a priority of `65535`. To avoid conflicts with the implied rules, use a priority number less than `65535`.", + "format": "int32", + "type": "integer" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "sourceRanges": { + "description": "If source ranges are specified, the firewall rule applies only to traffic that has a source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both fields are set, the rule applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. The connection does not need to match both fields for the rule to apply. Both IPv4 and IPv6 are supported.", + "items": { + "type": "string" + }, + "type": "array" + }, + "sourceServiceAccounts": { + "description": "If source service accounts are specified, the firewall rules apply only to traffic originating from an instance with a service account in this list. Source service accounts cannot be used to control traffic to an instance's external IP address because service accounts are associated with an instance, not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. If both are set, the firewall applies to traffic that has a source IP address within the sourceRanges OR a source IP that belongs to an instance with service account listed in sourceServiceAccount. The connection does not need to match both fields for the firewall to apply. sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags.", + "items": { + "type": "string" + }, + "type": "array" + }, + "sourceTags": { + "description": "If source tags are specified, the firewall rule applies only to traffic with source IPs that match the primary network interfaces of VM instances that have the tag and are in the same VPC network. Source tags cannot be used to control traffic to an instance's external IP address, it only applies to traffic between instances in the same virtual network. Because tags are associated with instances, not IP addresses. One or both of sourceRanges and sourceTags may be set. If both fields are set, the firewall applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. 
The connection does not need to match both fields for the firewall to apply.", + "items": { + "type": "string" + }, + "type": "array" + }, + "targetServiceAccounts": { + "description": "A list of service accounts indicating sets of instances located in the network that may make network connections as specified in allowed[]. targetServiceAccounts cannot be used at the same time as targetTags or sourceTags. If neither targetServiceAccounts nor targetTags are specified, the firewall rule applies to all instances on the specified network.", + "items": { + "type": "string" + }, + "type": "array" + }, + "targetTags": { + "description": "A list of tags that controls which instances the firewall rule applies to. If targetTags are specified, then the firewall rule applies only to instances in the VPC network that have one of those tags. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "FirewallList": { + "description": "Contains a list of firewalls.", + "id": "FirewallList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of DiskType resources.", + "description": "A list of Firewall resources.", "items": { - "$ref": "DiskType" + "$ref": "Firewall" }, "type": "array" }, "kind": { - "default": "compute#diskTypeList", - "description": "[Output Only] Type of resource. Always compute#diskTypeList for disk types.", + "default": "compute#firewallList", + "description": "[Output Only] Type of resource. Always compute#firewallList for lists of firewalls.", "type": "string" }, "nextPageToken": { @@ -33983,10 +39076,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -34010,10 +39105,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -34057,153 +39154,177 @@ }, "type": "object" }, - "DiskTypesScopedList": { - "id": "DiskTypesScopedList", + "FirewallLogConfig": { + "description": "The available logging options for a firewall rule.", + "id": "FirewallLogConfig", "properties": { - "diskTypes": { - "description": "[Output Only] A list of disk types contained in this scope.", - "items": { - "$ref": "DiskType" - }, - "type": "array" + "enable": { + "description": "This field denotes whether to enable logging for a particular firewall rule.", + "type": "boolean" }, - "warning": { - "description": "[Output Only] Informational warning which replaces the list of disk types when the list is empty.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "LARGE_DEPLOYMENT_WARNING", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "PARTIAL_SUCCESS", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "Warning about failed cleanup of transient changes made by a failed operation.", - "A link to a deprecated resource was created.", - "When deploying and at least one of the resources has a type marked as deprecated", - "The user created a boot disk that is larger than image size.", - "When deploying and at least one of the resources has a type marked as experimental", - "Warning that is present in an external api call", - "Warning that value of a field has been overridden. 
Deprecated unused field.", - "The operation involved use of an injected kernel, which is deprecated.", - "When deploying a deployment with a exceedingly large number of resources", - "A resource depends on a missing type", - "The route's nextHopIp address is not assigned to an instance on the network.", - "The route's next hop instance cannot ip forward.", - "The route's nextHopInstance URL refers to an instance that does not exist.", - "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", - "The route's next hop instance does not have a status of RUNNING.", - "Error which is not critical. We decided to continue the process despite the mentioned error.", - "No results are present on a particular list page.", - "Success is reported, but some results may be missing due to errors", - "The user attempted to use a resource that requires a TOS they have not accepted.", - "Warning that a resource is in use.", - "One or more of the resources set to auto-delete could not be deleted because they were in use.", - "When a resource schema validation is ignored.", - "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", - "When undeclared properties in the schema are present", - "A given scope cannot be reached." - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" + "metadata": { + "description": "This field can only be specified for a particular firewall rule if logging is enabled for that rule. This field denotes whether to include or exclude metadata for firewall logs.", + "enum": [ + "EXCLUDE_ALL_METADATA", + "INCLUDE_ALL_METADATA" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" } }, "type": "object" }, - "DisksAddResourcePoliciesRequest": { - "id": "DisksAddResourcePoliciesRequest", + "FirewallPoliciesListAssociationsResponse": { + "id": "FirewallPoliciesListAssociationsResponse", "properties": { - "resourcePolicies": { - "description": "Full or relative path to the resource policy to be added to this disk. You can only specify one resource policy.", + "associations": { + "description": "A list of associations.", "items": { - "type": "string" + "$ref": "FirewallPolicyAssociation" }, "type": "array" + }, + "kind": { + "default": "compute#firewallPoliciesListAssociationsResponse", + "description": "[Output Only] Type of firewallPolicy associations. 
Always compute#FirewallPoliciesListAssociations for lists of firewallPolicy associations.", + "type": "string" } }, "type": "object" }, - "DisksRemoveResourcePoliciesRequest": { - "id": "DisksRemoveResourcePoliciesRequest", + "FirewallPolicy": { + "description": "Represents a Firewall Policy resource.", + "id": "FirewallPolicy", "properties": { - "resourcePolicies": { - "description": "Resource policies to be removed from this disk.", + "associations": { + "description": "A list of associations that belong to this firewall policy.", + "items": { + "$ref": "FirewallPolicyAssociation" + }, + "type": "array" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "displayName": { + "description": "Deprecated, please use short name instead. User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. This field is not applicable to network firewall policies. This name must be set on creation and cannot be changed. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "fingerprint": { + "description": "Specifies a fingerprint for this resource, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make get() request to the firewall policy.", + "format": "byte", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#firewallPolicy", + "description": "[Output only] Type of the resource. Always compute#firewallPolicyfor firewall policies", + "type": "string" + }, + "name": { + "description": "Name of the resource. For Organization Firewall Policies it's a [Output Only] numeric ID allocated by Google Cloud which uniquely identifies the Organization Firewall Policy.", + "type": "string" + }, + "parent": { + "description": "[Output Only] The parent of the firewall policy. This field is not applicable to network firewall policies.", + "type": "string" + }, + "region": { + "description": "[Output Only] URL of the region where the regional firewall policy resides. This field is not applicable to global firewall policies. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, + "ruleTupleCount": { + "description": "[Output Only] Total count of all firewall policy rule tuples. 
A firewall policy can not exceed a set number of tuples.", + "format": "int32", + "type": "integer" + }, + "rules": { + "description": "A list of rules that belong to this policy. There must always be a default rule (rule with priority 2147483647 and match \"*\"). If no rules are provided when creating a firewall policy, a default rule with action \"allow\" will be added.", "items": { - "type": "string" + "$ref": "FirewallPolicyRule" }, "type": "array" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, + "shortName": { + "description": "User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. This field is not applicable to network firewall policies. This name must be set on creation and cannot be changed. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + } }, "type": "object" }, - "DisksResizeRequest": { - "id": "DisksResizeRequest", + "FirewallPolicyAssociation": { + "id": "FirewallPolicyAssociation", "properties": { - "sizeGb": { - "description": "The new size of the persistent disk, which is specified in GB.", - "format": "int64", + "attachmentTarget": { + "description": "The target that the firewall policy is attached to.", + "type": "string" + }, + "displayName": { + "description": "[Output Only] Deprecated, please use short name instead. The display name of the firewall policy of the association.", + "type": "string" + }, + "firewallPolicyId": { + "description": "[Output Only] The firewall policy ID of the association.", + "type": "string" + }, + "name": { + "description": "The name for an association.", + "type": "string" + }, + "shortName": { + "description": "[Output Only] The short name of the firewall policy of the association.", + "type": "string" } }, "type": "object" }, - "DisksScopedList": { - "id": "DisksScopedList", + "FirewallPolicyList": { + "id": "FirewallPolicyList", "properties": { - "disks": { - "description": "[Output Only] A list of disks contained in this scope.", + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of FirewallPolicy resources.", "items": { - "$ref": "Disk" + "$ref": "FirewallPolicy" }, "type": "array" }, + "kind": { + "default": "compute#firewallPolicyList", + "description": "[Output Only] Type of resource. Always compute#firewallPolicyList for lists of FirewallPolicies", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, "warning": { - "description": "[Output Only] Informational warning which replaces the list of disks when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -34216,10 +39337,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -34243,10 +39366,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -34290,130 +39415,421 @@ }, "type": "object" }, - "DisplayDevice": { - "description": "A set of Display Device options", - "id": "DisplayDevice", + "FirewallPolicyRule": { + "description": "Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny).", + "id": "FirewallPolicyRule", "properties": { - "enableDisplay": { - "description": "Defines whether the instance has Display enabled.", + "action": { + "description": "The Action to perform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", + "type": "string" + }, + "description": { + "description": "An optional description for this resource.", + "type": "string" + }, + "direction": { + "description": "The direction in which this rule applies.", + "enum": [ + "EGRESS", + "INGRESS" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "disabled": { + "description": "Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled.", + "type": "boolean" + }, + "enableLogging": { + "description": "Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. 
Note: you cannot enable logging on \"goto_next\" rules.", "type": "boolean" + }, + "kind": { + "default": "compute#firewallPolicyRule", + "description": "[Output only] Type of the resource. Always compute#firewallPolicyRule for firewall policy rules", + "type": "string" + }, + "match": { + "$ref": "FirewallPolicyRuleMatcher", + "description": "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced." + }, + "priority": { + "description": "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority.", + "format": "int32", + "type": "integer" + }, + "ruleName": { + "description": "An optional name for the rule. This field is not a unique identifier and can be updated.", + "type": "string" + }, + "ruleTupleCount": { + "description": "[Output Only] Calculation of the complexity of a single firewall policy rule.", + "format": "int32", + "type": "integer" + }, + "targetResources": { + "description": "A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule.", + "items": { + "type": "string" + }, + "type": "array" + }, + "targetSecureTags": { + "description": "A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256.", + "items": { + "$ref": "FirewallPolicyRuleSecureTag" + }, + "type": "array" + }, + "targetServiceAccounts": { + "description": "A list of service accounts indicating the sets of instances that are applied with this rule.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "DistributionPolicy": { - "id": "DistributionPolicy", + "FirewallPolicyRuleMatcher": { + "description": "Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified.", + "id": "FirewallPolicyRuleMatcher", "properties": { - "targetShape": { - "description": "The distribution shape to which the group converges either proactively or on resize events (depending on the value set in updatePolicy.instanceRedistributionType).", - "enum": [ - "ANY", - "BALANCED", - "EVEN" - ], - "enumDescriptions": [ - "The group picks zones for creating VM instances to fulfill the requested number of VMs within present resource constraints and to maximize utilization of unused zonal reservations. Recommended for batch workloads that do not require high availability.", - "The group prioritizes acquisition of resources, scheduling VMs in zones where resources are available while distributing VMs as evenly as possible across selected zones to minimize the impact of zonal failure. 
Recommended for highly available serving workloads.", - "The group schedules VM instance creation and deletion to achieve and maintain an even number of managed instances across the selected zones. The distribution is even when the number of managed instances does not differ by more than 1 between any two zones. Recommended for highly available serving workloads." - ], - "type": "string" + "destIpRanges": { + "description": "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000.", + "items": { + "type": "string" + }, + "type": "array" }, - "zones": { - "description": "Zones where the regional managed instance group will create and manage its instances.", + "layer4Configs": { + "description": "Pairs of IP protocols and ports that the rule should match.", "items": { - "$ref": "DistributionPolicyZoneConfiguration" + "$ref": "FirewallPolicyRuleMatcherLayer4Config" + }, + "type": "array" + }, + "srcIpRanges": { + "description": "CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000.", + "items": { + "type": "string" + }, + "type": "array" + }, + "srcSecureTags": { + "description": "List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256.", + "items": { + "$ref": "FirewallPolicyRuleSecureTag" }, "type": "array" } }, "type": "object" }, - "DistributionPolicyZoneConfiguration": { - "id": "DistributionPolicyZoneConfiguration", + "FirewallPolicyRuleMatcherLayer4Config": { + "id": "FirewallPolicyRuleMatcherLayer4Config", "properties": { - "zone": { - "annotations": { - "required": [ - "compute.regionInstanceGroupManagers.insert" - ] + "ipProtocol": { + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", + "type": "string" + }, + "ports": { + "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "items": { + "type": "string" }, - "description": "The URL of the zone. The zone must exist in the region where the managed instance group is located.", + "type": "array" + } + }, + "type": "object" + }, + "FirewallPolicyRuleSecureTag": { + "id": "FirewallPolicyRuleSecureTag", + "properties": { + "name": { + "description": "Name of the secure tag, created with TagManager's TagValue API.", + "pattern": "tagValues/[0-9]+", + "type": "string" + }, + "state": { + "description": "[Output Only] State of the secure tag, either `EFFECTIVE` or `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted or its network is deleted.", + "enum": [ + "EFFECTIVE", + "INEFFECTIVE" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" } }, "type": "object" }, - "Duration": { - "description": "A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like \"day\" or \"month\". 
Range is approximately 10,000 years.", - "id": "Duration", + "FixedOrPercent": { + "description": "Encapsulates numeric value that can be either absolute or relative.", + "id": "FixedOrPercent", "properties": { - "nanos": { - "description": "Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive.", + "calculated": { + "description": "[Output Only] Absolute value of VM instances calculated based on the specific mode. - If the value is fixed, then the calculated value is equal to the fixed value. - If the value is a percent, then the calculated value is percent/100 * targetSize. For example, the calculated value of a 80% of a managed instance group with 150 instances would be (80/100 * 150) = 120 VM instances. If there is a remainder, the number is rounded. ", "format": "int32", "type": "integer" }, - "seconds": { - "description": "Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years", - "format": "int64", - "type": "string" + "fixed": { + "description": "Specifies a fixed number of VM instances. This must be a positive integer.", + "format": "int32", + "type": "integer" + }, + "percent": { + "description": "Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "ExchangedPeeringRoute": { - "id": "ExchangedPeeringRoute", + "ForwardingRule": { + "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud Platform load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", + "id": "ForwardingRule", "properties": { - "destRange": { - "description": "The destination range of the route.", + "IPAddress": { + "description": "IP address for which this forwarding rule accepts traffic. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the referenced target or backendService. While creating a forwarding rule, specifying an IPAddress is required under the following circumstances: - When the target is set to targetGrpcProxy and validateForProxyless is set to true, the IPAddress should be set to 0.0.0.0. - When the target is a Private Service Connect Google APIs bundle, you must specify an IPAddress. Otherwise, you can optionally specify an IP address that references an existing static (reserved) IP address resource. When omitted, Google Cloud assigns an ephemeral IP address. 
Use one of the following formats to specify an IP address while creating a forwarding rule: * IP address number, as in `100.1.2.3` * IPv6 address range, as in `2600:1234::/96` * Full resource URL, as in https://www.googleapis.com/compute/v1/projects/ project_id/regions/region/addresses/address-name * Partial URL or by name, as in: - projects/project_id/regions/region/addresses/address-name - regions/region/addresses/address-name - global/addresses/address-name - address-name The forwarding rule's target or backendService, and in most cases, also the loadBalancingScheme, determine the type of IP address that you can use. For detailed information, see [IP address specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). When reading an IPAddress, the API always returns the IP address number.", "type": "string" }, - "imported": { - "description": "True if the peering route has been imported from a peer. The actual import happens if the field networkPeering.importCustomRoutes is true for this network, and networkPeering.exportCustomRoutes is true for the peer network, and the import does not result in a route conflict.", + "IPProtocol": { + "description": "The IP protocol to which this rule applies. For protocol forwarding, valid options are TCP, UDP, ESP, AH, SCTP, ICMP and L3_DEFAULT. The valid IP protocols are different for different load balancing products as described in [Load balancing features](https://cloud.google.com/load-balancing/docs/features#protocols_from_the_load_balancer_to_the_backends).", + "enum": [ + "AH", + "ESP", + "ICMP", + "L3_DEFAULT", + "SCTP", + "TCP", + "UDP" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "allPorts": { + "description": "This field can only be used: - If IPProtocol is one of TCP, UDP, or SCTP. - By internal TCP/UDP load balancers, backend service-based network load balancers, and internal and external protocol forwarding. Set this field to true to allow packets addressed to any port or packets lacking destination port information (for example, UDP fragments after the first fragment) to be forwarded to the backends configured with this forwarding rule. The ports, port_range, and allPorts fields are mutually exclusive.", "type": "boolean" }, - "nextHopRegion": { - "description": "The region of peering route next hop, only applies to dynamic routes.", + "allowGlobalAccess": { + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If the field is set to TRUE, clients can access ILB from all regions. Otherwise only allows access from clients in the same region as the internal load balancer.", + "type": "boolean" + }, + "backendService": { + "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for Internal TCP/UDP Load Balancing and Network Load Balancing; must be omitted for all other load balancer types.", "type": "string" }, - "priority": { - "description": "The priority of the peering route.", - "format": "uint32", - "type": "integer" + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" }, - "type": { - "description": "The type of the peering route.", + "description": { + "description": "An optional description of this resource. 
Provide this property when you create the resource.", + "type": "string" + }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a ForwardingRule. Include the fingerprint in patch request to ensure that you do not overwrite changes that were applied from another concurrent request. To see the latest fingerprint, make a get() request to retrieve a ForwardingRule.", + "format": "byte", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "ipVersion": { + "description": "The IP Version that will be used by this forwarding rule. Valid options are IPV4 or IPV6.", "enum": [ - "DYNAMIC_PEERING_ROUTE", - "STATIC_PEERING_ROUTE", - "SUBNET_PEERING_ROUTE" + "IPV4", + "IPV6", + "UNSPECIFIED_VERSION" ], "enumDescriptions": [ - "For routes exported from local network.", - "The peering route.", - "The peering route corresponding to subnetwork range." + "", + "", + "" + ], + "type": "string" + }, + "isMirroringCollector": { + "description": "Indicates whether or not this load balancer can be used as a collector for packet mirroring. To prevent mirroring loops, instances behind this load balancer will not have their traffic mirrored even if a PacketMirroring rule applies to them. This can only be set to true for load balancers that have their loadBalancingScheme set to INTERNAL.", + "type": "boolean" + }, + "kind": { + "default": "compute#forwardingRule", + "description": "[Output Only] Type of the resource. Always compute#forwardingRule for Forwarding Rule resources.", + "type": "string" + }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this resource, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a ForwardingRule.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "type": "object" + }, + "loadBalancingScheme": { + "description": "Specifies the forwarding rule type. For more information about forwarding rules, refer to Forwarding rule concepts.", + "enum": [ + "EXTERNAL", + "EXTERNAL_MANAGED", + "INTERNAL", + "INTERNAL_MANAGED", + "INTERNAL_SELF_MANAGED", + "INVALID" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "metadataFilters": { + "description": "Opaque filter criteria used by load balancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to load balancer, xDS clients present node metadata. When there is a match, the relevant configuration is made available to those proxies. Otherwise, all the resources (e.g. TargetHttpProxy, UrlMap) referenced by the ForwardingRule are not visible to those proxies. 
For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. If multiple metadataFilters are specified, all of them need to be satisfied in order to be considered a match. metadataFilters specified here will be applied before those specified in the UrlMap that this ForwardingRule references. metadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "items": { + "$ref": "MetadataFilter" + }, + "type": "array" + }, + "name": { + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. For Private Service Connect forwarding rules that forward traffic to Google APIs, the forwarding rule name must be a 1-20 characters string with lowercase letters and numbers and must start with a letter.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "network": { + "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "type": "string" + }, + "networkTier": { + "description": "This signifies the networking tier used for configuring this load balancer and can only take the following values: PREMIUM, STANDARD. For regional ForwardingRule, the valid values are PREMIUM and STANDARD. For GlobalForwardingRule, the valid value is PREMIUM. If this field is not specified, it is assumed to be PREMIUM. If IPAddress is specified, this value must be equal to the networkTier of the Address.", + "enum": [ + "FIXED_STANDARD", + "PREMIUM", + "STANDARD", + "STANDARD_OVERRIDES_FIXED_STANDARD" + ], + "enumDescriptions": [ + "Public internet quality with fixed bandwidth.", + "High quality, Google-grade network tier, support for all networking products.", + "Public internet quality, only limited support for other networking products.", + "(Output only) Temporary tier for FIXED_STANDARD when fixed standard tier is expired or not configured." + ], + "type": "string" + }, + "noAutomateDnsZone": { + "description": "This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field.", + "type": "boolean" + }, + "portRange": { + "description": "This field can only be used: - If IPProtocol is one of TCP, UDP, or SCTP. - By backend service-based network load balancers, target pool-based network load balancers, internal proxy load balancers, external proxy load balancers, Traffic Director, external protocol forwarding, and Classic VPN. Some products have restrictions on what ports can be used. See port specifications for details. 
Only packets addressed to ports in the specified range will be forwarded to the backends configured with this forwarding rule. The ports, port_range, and allPorts fields are mutually exclusive. For external forwarding rules, two or more forwarding rules cannot use the same [IPAddress, IPProtocol] pair, and cannot have overlapping portRanges. For internal forwarding rules within the same VPC network, two or more forwarding rules cannot use the same [IPAddress, IPProtocol] pair, and cannot have overlapping portRanges. @pattern: \\\\d+(?:-\\\\d+)?", + "type": "string" + }, + "ports": { + "description": "This field can only be used: - If IPProtocol is one of TCP, UDP, or SCTP. - By internal TCP/UDP load balancers, backend service-based network load balancers, and internal protocol forwarding. You can specify a list of up to five ports by number, separated by commas. The ports can be contiguous or discontiguous. Only packets addressed to these ports will be forwarded to the backends configured with this forwarding rule. For external forwarding rules, two or more forwarding rules cannot use the same [IPAddress, IPProtocol] pair, and cannot share any values defined in ports. For internal forwarding rules within the same VPC network, two or more forwarding rules cannot use the same [IPAddress, IPProtocol] pair, and cannot share any values defined in ports. The ports, port_range, and allPorts fields are mutually exclusive. @pattern: \\\\d+(?:-\\\\d+)?", + "items": { + "type": "string" + }, + "type": "array" + }, + "pscConnectionId": { + "description": "[Output Only] The PSC connection id of the PSC Forwarding Rule.", + "format": "uint64", + "type": "string" + }, + "pscConnectionStatus": { + "enum": [ + "ACCEPTED", + "CLOSED", + "NEEDS_ATTENTION", + "PENDING", + "REJECTED", + "STATUS_UNSPECIFIED" + ], + "enumDescriptions": [ + "The connection has been accepted by the producer.", + "The connection has been closed by the producer and will not serve traffic going forward.", + "The connection has been accepted by the producer, but the producer needs to take further action before the forwarding rule can serve traffic.", + "The connection is pending acceptance by the producer.", + "The connection has been rejected by the producer.", + "" ], "type": "string" + }, + "region": { + "description": "[Output Only] URL of the region where the regional forwarding rule resides. This field is not applicable to global forwarding rules. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "serviceDirectoryRegistrations": { + "description": "Service Directory resources to register this forwarding rule with. Currently, only supports a single Service Directory resource.", + "items": { + "$ref": "ForwardingRuleServiceDirectoryRegistration" + }, + "type": "array" + }, + "serviceLabel": { + "description": "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. 
This field is only used for internal load balancing.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "serviceName": { + "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.", + "type": "string" + }, + "subnetwork": { + "description": "This field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule, used in internal load balancing and network load balancing with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", + "type": "string" + }, + "target": { + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For more information, see the \"Target\" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: - vpc-sc - APIs that support VPC Service Controls. - all-apis - All supported Google APIs. ", + "type": "string" } }, "type": "object" }, - "ExchangedPeeringRoutesList": { - "id": "ExchangedPeeringRoutesList", + "ForwardingRuleAggregatedList": { + "id": "ForwardingRuleAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of ExchangedPeeringRoute resources.", - "items": { - "$ref": "ExchangedPeeringRoute" + "additionalProperties": { + "$ref": "ForwardingRulesScopedList", + "description": "Name of the scope containing this set of addresses." }, - "type": "array" + "description": "A list of ForwardingRulesScopedList resources.", + "type": "object" }, - "kind": { - "default": "compute#exchangedPeeringRoutesList", - "description": "[Output Only] Type of resource. Always compute#exchangedPeeringRoutesList for exchanged peering routes lists.", + "kind": { + "default": "compute#forwardingRuleAggregatedList", + "description": "[Output Only] Type of resource. Always compute#forwardingRuleAggregatedList for lists of forwarding rules.", "type": "string" }, "nextPageToken": { @@ -34424,6 +39840,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -34438,10 +39861,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -34465,10 +39890,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -34512,138 +39939,24 @@ }, "type": "object" }, - "Expr": { - "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", - "id": "Expr", - "properties": { - "description": { - "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", - "type": "string" - }, - "expression": { - "description": "Textual representation of an expression in Common Expression Language syntax.", - "type": "string" - }, - "location": { - "description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", - "type": "string" - }, - "title": { - "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", - "type": "string" - } - }, - "type": "object" - }, - "ExternalVpnGateway": { - "description": "Represents an external VPN gateway. External VPN gateway is the on-premises VPN gateway(s) or another cloud provider's VPN gateway that connects to your Google Cloud VPN gateway. To create a highly available VPN from Google Cloud Platform to your VPN gateway or another cloud provider's VPN gateway, you must create a external VPN gateway resource with information about the other gateway. 
For more information about using external VPN gateways, see Creating an HA VPN gateway and tunnel pair to a peer VPN.", - "id": "ExternalVpnGateway", - "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "interfaces": { - "description": "A list of interfaces for this external VPN gateway. If your peer-side gateway is an on-premises gateway and non-AWS cloud providers' gateway, at most two interfaces can be provided for an external VPN gateway. If your peer side is an AWS virtual private gateway, four interfaces should be provided for an external VPN gateway.", - "items": { - "$ref": "ExternalVpnGatewayInterface" - }, - "type": "array" - }, - "kind": { - "default": "compute#externalVpnGateway", - "description": "[Output Only] Type of the resource. Always compute#externalVpnGateway for externalVpnGateways.", - "type": "string" - }, - "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this ExternalVpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an ExternalVpnGateway.", - "format": "byte", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", - "type": "object" - }, - "name": { - "annotations": { - "required": [ - "compute.externalVpnGateways.insert" - ] - }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "redundancyType": { - "description": "Indicates the user-supplied redundancy type of this external VPN gateway.", - "enum": [ - "FOUR_IPS_REDUNDANCY", - "SINGLE_IP_INTERNALLY_REDUNDANT", - "TWO_IPS_REDUNDANCY" - ], - "enumDescriptions": [ - "The external VPN gateway has four public IP addresses; at the time of writing this API, the AWS virtual private gateway is an example which has four public IP addresses for high availability connections; there should be two VPN connections in the AWS virtual private gateway , each AWS VPN connection has two public IP addresses; please make sure to put two public IP addresses from one AWS VPN connection into interfaces 0 and 1 of this external VPN gateway, and put the other two public IP addresses from another AWS VPN connection into interfaces 2 and 3 of this external VPN gateway. When displaying highly available configuration status for the VPN tunnels connected to FOUR_IPS_REDUNDANCY external VPN gateway, Google will always detect whether interfaces 0 and 1 are connected on one interface of HA Cloud VPN gateway, and detect whether interfaces 2 and 3 are connected to another interface of the HA Cloud VPN gateway.", - "The external VPN gateway has only one public IP address which internally provide redundancy or failover.", - "The external VPN gateway has two public IP addresses which are redundant with each other, the following two types of setup on your on-premises side would have this type of redundancy: (1) Two separate on-premises gateways, each with one public IP address, the two on-premises gateways are redundant with each other. (2) A single on-premise gateway with two public IP addresses that are redundant with eatch other." - ], - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - } - }, - "type": "object" - }, - "ExternalVpnGatewayInterface": { - "description": "The interface for the external VPN gateway.", - "id": "ExternalVpnGatewayInterface", - "properties": { - "id": { - "description": "The numeric ID of this interface. The allowed input values for this id for different redundancy types of external VPN gateway: - SINGLE_IP_INTERNALLY_REDUNDANT - 0 - TWO_IPS_REDUNDANCY - 0, 1 - FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 ", - "format": "uint32", - "type": "integer" - }, - "ipAddress": { - "description": "IP address of the interface in the external VPN gateway. Only IPv4 is supported. 
This IP address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine.", - "type": "string" - } - }, - "type": "object" - }, - "ExternalVpnGatewayList": { - "description": "Response to the list request, and contains a list of externalVpnGateways.", - "id": "ExternalVpnGatewayList", + "ForwardingRuleList": { + "description": "Contains a list of ForwardingRule resources.", + "id": "ForwardingRuleList", "properties": { - "etag": { - "type": "string" - }, "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of ExternalVpnGateway resources.", + "description": "A list of ForwardingRule resources.", "items": { - "$ref": "ExternalVpnGateway" + "$ref": "ForwardingRule" }, "type": "array" }, "kind": { - "default": "compute#externalVpnGatewayList", - "description": "[Output Only] Type of resource. Always compute#externalVpnGatewayList for lists of externalVpnGateways.", + "default": "compute#forwardingRuleList", + "description": "Type of resource.", "type": "string" }, "nextPageToken": { @@ -34668,10 +39981,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -34695,10 +40010,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -34742,212 +40059,46 @@ }, "type": "object" }, - "FileContentBuffer": { - "id": "FileContentBuffer", + "ForwardingRuleReference": { + "id": "ForwardingRuleReference", "properties": { - "content": { - "description": "The raw content in the secure keys file.", - "format": "byte", - "type": "string" - }, - "fileType": { - "description": "The file type of source file.", - "enum": [ - "BIN", - "UNDEFINED", - "X509" - ], - "enumDescriptions": [ - "", - "", - "" - ], + "forwardingRule": { "type": "string" } }, "type": "object" }, - "Firewall": { - "description": "Represents a Firewall Rule resource. Firewall rules allow or deny ingress traffic to, and egress traffic from your instances. For more information, read Firewall rules.", - "id": "Firewall", + "ForwardingRuleServiceDirectoryRegistration": { + "description": "Describes the auto-registration of the Forwarding Rule to Service Directory. 
The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule.", + "id": "ForwardingRuleServiceDirectoryRegistration", "properties": { - "allowed": { - "description": "The list of ALLOW rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", - "items": { - "properties": { - "IPProtocol": { - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number.", - "type": "string" - }, - "ports": { - "description": "An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "type": "array" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "denied": { - "description": "The list of DENY rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a denied connection.", - "items": { - "properties": { - "IPProtocol": { - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number.", - "type": "string" - }, - "ports": { - "description": "An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "type": "array" - }, - "description": { - "description": "An optional description of this resource. Provide this field when you create the resource.", - "type": "string" - }, - "destinationRanges": { - "description": "If destination ranges are specified, the firewall rule applies only to traffic that has destination IP address in these ranges. These ranges must be expressed in CIDR format. Both IPv4 and IPv6 are supported.", - "items": { - "type": "string" - }, - "type": "array" - }, - "direction": { - "description": "Direction of traffic to which this firewall applies, either `INGRESS` or `EGRESS`. The default is `INGRESS`. For `INGRESS` traffic, you cannot specify the destinationRanges field, and for `EGRESS` traffic, you cannot specify the sourceRanges or sourceTags fields.", - "enum": [ - "EGRESS", - "INGRESS" - ], - "enumDescriptions": [ - "Indicates that firewall should apply to outgoing traffic.", - "Indicates that firewall should apply to incoming traffic." - ], - "type": "string" - }, - "disabled": { - "description": "Denotes whether the firewall rule is disabled. When set to true, the firewall rule is not enforced and the network behaves as if it did not exist. 
If this is unspecified, the firewall rule will be enabled.", - "type": "boolean" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#firewall", - "description": "[Output Only] Type of the resource. Always compute#firewall for firewall rules.", - "type": "string" - }, - "logConfig": { - "$ref": "FirewallLogConfig", - "description": "This field denotes the logging options for a particular firewall rule. If logging is enabled, logs will be exported to Cloud Logging." - }, - "name": { - "annotations": { - "required": [ - "compute.firewalls.insert", - "compute.firewalls.patch" - ] - }, - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "namespace": { + "description": "Service Directory namespace to register the forwarding rule under.", "type": "string" }, - "network": { - "description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default If you choose to specify this field, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network - projects/myproject/global/networks/my-network - global/networks/default ", + "service": { + "description": "Service Directory service to register the forwarding rule under.", "type": "string" }, - "priority": { - "description": "Priority for this rule. This is an integer between `0` and `65535`, both inclusive. The default value is `1000`. Relative priorities determine which rule takes effect if multiple rules apply. Lower values indicate higher priority. For example, a rule with priority `0` has higher precedence than a rule with priority `1`. DENY rules take precedence over ALLOW rules if they have equal priority. Note that VPC networks have implied rules with a priority of `65535`. To avoid conflicts with the implied rules, use a priority number less than `65535`.", - "format": "int32", - "type": "integer" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "serviceDirectoryRegion": { + "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region.", "type": "string" - }, - "sourceRanges": { - "description": "If source ranges are specified, the firewall rule applies only to traffic that has a source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both fields are set, the rule applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. 
The connection does not need to match both fields for the rule to apply. Both IPv4 and IPv6 are supported.", - "items": { - "type": "string" - }, - "type": "array" - }, - "sourceServiceAccounts": { - "description": "If source service accounts are specified, the firewall rules apply only to traffic originating from an instance with a service account in this list. Source service accounts cannot be used to control traffic to an instance's external IP address because service accounts are associated with an instance, not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. If both are set, the firewall applies to traffic that has a source IP address within the sourceRanges OR a source IP that belongs to an instance with service account listed in sourceServiceAccount. The connection does not need to match both fields for the firewall to apply. sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags.", - "items": { - "type": "string" - }, - "type": "array" - }, - "sourceTags": { - "description": "If source tags are specified, the firewall rule applies only to traffic with source IPs that match the primary network interfaces of VM instances that have the tag and are in the same VPC network. Source tags cannot be used to control traffic to an instance's external IP address, it only applies to traffic between instances in the same virtual network. Because tags are associated with instances, not IP addresses. One or both of sourceRanges and sourceTags may be set. If both fields are set, the firewall applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. The connection does not need to match both fields for the firewall to apply.", - "items": { - "type": "string" - }, - "type": "array" - }, - "targetServiceAccounts": { - "description": "A list of service accounts indicating sets of instances located in the network that may make network connections as specified in allowed[]. targetServiceAccounts cannot be used at the same time as targetTags or sourceTags. If neither targetServiceAccounts nor targetTags are specified, the firewall rule applies to all instances on the specified network.", - "items": { - "type": "string" - }, - "type": "array" - }, - "targetTags": { - "description": "A list of tags that controls which instances the firewall rule applies to. If targetTags are specified, then the firewall rule applies only to instances in the VPC network that have one of those tags. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", - "items": { - "type": "string" - }, - "type": "array" } }, "type": "object" }, - "FirewallList": { - "description": "Contains a list of firewalls.", - "id": "FirewallList", + "ForwardingRulesScopedList": { + "id": "ForwardingRulesScopedList", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of Firewall resources.", + "forwardingRules": { + "description": "A list of forwarding rules contained in this scope.", "items": { - "$ref": "Firewall" + "$ref": "ForwardingRule" }, "type": "array" }, - "kind": { - "default": "compute#firewallList", - "description": "[Output Only] Type of resource. 
Always compute#firewallList for lists of firewalls.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "Informational warning which replaces the list of forwarding rules when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -34960,10 +40111,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -34987,10 +40140,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -35029,26 +40184,219 @@ "type": "string" } }, - "type": "object" + "type": "object" + } + }, + "type": "object" + }, + "GRPCHealthCheck": { + "id": "GRPCHealthCheck", + "properties": { + "grpcServiceName": { + "description": "The gRPC service name for the health check. This field is optional. The value of grpc_service_name has the following meanings by convention: - Empty service_name means the overall status of all services at the backend. - Non-empty service_name means the health of that gRPC service, as defined by the owner of the service. The grpc_service_name can only be ASCII.", + "type": "string" + }, + "port": { + "description": "The TCP port number to which the health check prober sends packets. Valid values are 1 through 65535.", + "format": "int32", + "type": "integer" + }, + "portName": { + "description": "Not supported.", + "type": "string" + }, + "portSpecification": { + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. 
Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." + ], + "type": "string" + } + }, + "type": "object" + }, + "GlobalNetworkEndpointGroupsAttachEndpointsRequest": { + "id": "GlobalNetworkEndpointGroupsAttachEndpointsRequest", + "properties": { + "networkEndpoints": { + "description": "The list of network endpoints to be attached.", + "items": { + "$ref": "NetworkEndpoint" + }, + "type": "array" + } + }, + "type": "object" + }, + "GlobalNetworkEndpointGroupsDetachEndpointsRequest": { + "id": "GlobalNetworkEndpointGroupsDetachEndpointsRequest", + "properties": { + "networkEndpoints": { + "description": "The list of network endpoints to be detached.", + "items": { + "$ref": "NetworkEndpoint" + }, + "type": "array" + } + }, + "type": "object" + }, + "GlobalOrganizationSetPolicyRequest": { + "id": "GlobalOrganizationSetPolicyRequest", + "properties": { + "bindings": { + "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify bindings.", + "items": { + "$ref": "Binding" + }, + "type": "array" + }, + "etag": { + "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify the etag.", + "format": "byte", + "type": "string" + }, + "policy": { + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the 'resource'. The size of the policy is limited to a few 10s of KB. An empty policy is in general a valid policy but certain services (like Projects) might reject them." + } + }, + "type": "object" + }, + "GlobalSetLabelsRequest": { + "id": "GlobalSetLabelsRequest", + "properties": { + "labelFingerprint": { + "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. 
You must always provide an up-to-date fingerprint hash when updating or changing labels, otherwise the request will fail with error 412 conditionNotMet. Make a get() request to the resource to get the latest fingerprint.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "A list of labels to apply for this resource. Each label must comply with the requirements for labels. For example, \"webserver-frontend\": \"images\". A label value can also be empty (e.g. \"my-label\": \"\").", + "type": "object" + } + }, + "type": "object" + }, + "GlobalSetPolicyRequest": { + "id": "GlobalSetPolicyRequest", + "properties": { + "bindings": { + "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify bindings.", + "items": { + "$ref": "Binding" + }, + "type": "array" + }, + "etag": { + "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify the etag.", + "format": "byte", + "type": "string" + }, + "policy": { + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the 'resource'. The size of the policy is limited to a few 10s of KB. An empty policy is in general a valid policy but certain services (like Projects) might reject them." + } + }, + "type": "object" + }, + "GuestAttributes": { + "description": "A guest attributes entry.", + "id": "GuestAttributes", + "properties": { + "kind": { + "default": "compute#guestAttributes", + "description": "[Output Only] Type of the resource. Always compute#guestAttributes for guest attributes entry.", + "type": "string" + }, + "queryPath": { + "description": "The path to be queried. This can be the default namespace ('') or a nested namespace ('\\/') or a specified key ('\\/\\').", + "type": "string" + }, + "queryValue": { + "$ref": "GuestAttributesValue", + "description": "[Output Only] The value of the requested queried path." 
+ }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "variableKey": { + "description": "The key to search for.", + "type": "string" + }, + "variableValue": { + "description": "[Output Only] The value found for the requested key.", + "type": "string" + } + }, + "type": "object" + }, + "GuestAttributesEntry": { + "description": "A guest attributes namespace/key/value entry.", + "id": "GuestAttributesEntry", + "properties": { + "key": { + "description": "Key for the guest attribute entry.", + "type": "string" + }, + "namespace": { + "description": "Namespace for the guest attribute entry.", + "type": "string" + }, + "value": { + "description": "Value for the guest attribute entry.", + "type": "string" + } + }, + "type": "object" + }, + "GuestAttributesValue": { + "description": "Array of guest attribute namespace/key/value tuples.", + "id": "GuestAttributesValue", + "properties": { + "items": { + "items": { + "$ref": "GuestAttributesEntry" + }, + "type": "array" } }, "type": "object" }, - "FirewallLogConfig": { - "description": "The available logging options for a firewall rule.", - "id": "FirewallLogConfig", + "GuestOsFeature": { + "description": "Guest OS features.", + "id": "GuestOsFeature", "properties": { - "enable": { - "description": "This field denotes whether to enable logging for a particular firewall rule.", - "type": "boolean" - }, - "metadata": { - "description": "This field can only be specified for a particular firewall rule if logging is enabled for that rule. This field denotes whether to include or exclude metadata for firewall logs.", + "type": { + "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features.", "enum": [ - "EXCLUDE_ALL_METADATA", - "INCLUDE_ALL_METADATA" + "FEATURE_TYPE_UNSPECIFIED", + "GVNIC", + "MULTI_IP_SUBNET", + "SECURE_BOOT", + "SEV_CAPABLE", + "UEFI_COMPATIBLE", + "VIRTIO_SCSI_MULTIQUEUE", + "WINDOWS" ], "enumDescriptions": [ + "", + "", + "", + "", + "", + "", "", "" ], @@ -35057,148 +40405,295 @@ }, "type": "object" }, - "FirewallPoliciesListAssociationsResponse": { - "id": "FirewallPoliciesListAssociationsResponse", + "HTTP2HealthCheck": { + "id": "HTTP2HealthCheck", "properties": { - "associations": { - "description": "A list of associations.", - "items": { - "$ref": "FirewallPolicyAssociation" - }, - "type": "array" + "host": { + "description": "The value of the host header in the HTTP/2 health check request. If left empty (default value), the host header is set to the destination IP address to which health check packets are sent. The destination IP address depends on the type of load balancer. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest", + "type": "string" }, - "kind": { - "default": "compute#firewallPoliciesListAssociationsResponse", - "description": "[Output Only] Type of firewallPolicy associations. Always compute#FirewallPoliciesListAssociations for lists of firewallPolicy associations.", + "port": { + "description": "The TCP port number to which the health check prober sends packets. The default value is 443. 
Valid values are 1 through 65535.", + "format": "int32", + "type": "integer" + }, + "portName": { + "description": "Not supported.", + "type": "string" + }, + "portSpecification": { + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." + ], + "type": "string" + }, + "proxyHeader": { + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "requestPath": { + "description": "The request path of the HTTP/2 health check request. The default value is /.", + "type": "string" + }, + "response": { + "description": "Creates a content-based HTTP/2 health check. In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http", "type": "string" } }, "type": "object" }, - "FirewallPolicy": { - "description": "Represents a Firewall Policy resource.", - "id": "FirewallPolicy", + "HTTPHealthCheck": { + "id": "HTTPHealthCheck", "properties": { - "associations": { - "description": "A list of associations that belong to this firewall policy.", - "items": { - "$ref": "FirewallPolicyAssociation" - }, - "type": "array" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "host": { + "description": "The value of the host header in the HTTP health check request. 
If left empty (default value), the host header is set to the destination IP address to which health check packets are sent. The destination IP address depends on the type of load balancer. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest", "type": "string" }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" + "port": { + "description": "The TCP port number to which the health check prober sends packets. The default value is 80. Valid values are 1 through 65535.", + "format": "int32", + "type": "integer" }, - "displayName": { - "description": "Deprecated, please use short name instead. User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. This name must be set on creation and cannot be changed. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "portName": { + "description": "Not supported.", "type": "string" }, - "fingerprint": { - "description": "Specifies a fingerprint for this resource, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make get() request to the firewall policy.", - "format": "byte", + "portSpecification": { + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "The port number in the health check's port is used for health checking. 
Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." + ], "type": "string" }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", + "proxyHeader": { + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" }, - "kind": { - "default": "compute#firewallPolicy", - "description": "[Output only] Type of the resource. Always compute#firewallPolicyfor firewall policies", + "requestPath": { + "description": "The request path of the HTTP health check request. The default value is /.", "type": "string" }, - "name": { - "description": "[Output Only] Name of the resource. It is a numeric ID allocated by GCP which uniquely identifies the Firewall Policy.", + "response": { + "description": "Creates a content-based HTTP health check. In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http", "type": "string" - }, - "parent": { - "description": "[Output Only] The parent of the firewall policy.", + } + }, + "type": "object" + }, + "HTTPSHealthCheck": { + "id": "HTTPSHealthCheck", + "properties": { + "host": { + "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the host header is set to the destination IP address to which health check packets are sent. The destination IP address depends on the type of load balancer. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest", "type": "string" }, - "ruleTupleCount": { - "description": "[Output Only] Total count of all firewall policy rule tuples. A firewall policy can not exceed a set number of tuples.", + "port": { + "description": "The TCP port number to which the health check prober sends packets. The default value is 443. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, - "rules": { - "description": "A list of rules that belong to this policy. There must always be a default rule (rule with priority 2147483647 and match \"*\"). If no rules are provided when creating a firewall policy, a default rule with action \"allow\" will be added.", - "items": { - "$ref": "FirewallPolicyRule" - }, - "type": "array" + "portName": { + "description": "Not supported.", + "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "portSpecification": { + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. 
The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." + ], "type": "string" }, - "selfLinkWithId": { - "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "proxyHeader": { + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" }, - "shortName": { - "description": "User-provided name of the Organization firewall plicy. The name should be unique in the organization in which the firewall policy is created. This name must be set on creation and cannot be changed. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "requestPath": { + "description": "The request path of the HTTPS health check request. The default value is /.", + "type": "string" + }, + "response": { + "description": "Creates a content-based HTTPS health check. In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http", "type": "string" } }, "type": "object" }, - "FirewallPolicyAssociation": { - "id": "FirewallPolicyAssociation", + "HealthCheck": { + "description": "Represents a Health Check resource. 
Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/v1/healthChecks) * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.HealthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", + "id": "HealthCheck", "properties": { - "attachmentTarget": { - "description": "The target that the firewall policy is attached to.", + "checkIntervalSec": { + "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", + "format": "int32", + "type": "integer" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "displayName": { - "description": "[Output Only] Deprecated, please use short name instead. The display name of the firewall policy of the association.", + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "firewallPolicyId": { - "description": "[Output Only] The firewall policy ID of the association.", + "grpcHealthCheck": { + "$ref": "GRPCHealthCheck" + }, + "healthyThreshold": { + "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", + "format": "int32", + "type": "integer" + }, + "http2HealthCheck": { + "$ref": "HTTP2HealthCheck" + }, + "httpHealthCheck": { + "$ref": "HTTPHealthCheck" + }, + "httpsHealthCheck": { + "$ref": "HTTPSHealthCheck" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#healthCheck", + "description": "Type of the resource.", "type": "string" }, + "logConfig": { + "$ref": "HealthCheckLogConfig", + "description": "Configure logging on this health check." + }, "name": { - "description": "The name for an association.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" }, - "shortName": { - "description": "[Output Only] The short name of the firewall policy of the association.", + "region": { + "description": "[Output Only] Region where the health check resides. 
Not applicable to global health checks.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "sslHealthCheck": { + "$ref": "SSLHealthCheck" + }, + "tcpHealthCheck": { + "$ref": "TCPHealthCheck" + }, + "timeoutSec": { + "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec.", + "format": "int32", + "type": "integer" + }, + "type": { + "description": "Specifies the type of the healthCheck, either TCP, SSL, HTTP, HTTPS, HTTP2 or GRPC. Exactly one of the protocol-specific health check fields must be specified, which must match type field.", + "enum": [ + "GRPC", + "HTTP", + "HTTP2", + "HTTPS", + "INVALID", + "SSL", + "TCP" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ], "type": "string" + }, + "unhealthyThreshold": { + "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "FirewallPolicyList": { - "id": "FirewallPolicyList", + "HealthCheckList": { + "description": "Contains a list of HealthCheck resources.", + "id": "HealthCheckList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of FirewallPolicy resources.", + "description": "A list of HealthCheck resources.", "items": { - "$ref": "FirewallPolicy" + "$ref": "HealthCheck" }, "type": "array" }, "kind": { - "default": "compute#firewallPolicyList", - "description": "[Output Only] Type of resource. Always compute#firewallPolicyList for listsof FirewallPolicies", + "default": "compute#healthCheckList", + "description": "Type of resource.", "type": "string" }, "nextPageToken": { "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -35213,10 +40708,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -35240,10 +40737,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -35287,373 +40786,130 @@ }, "type": "object" }, - "FirewallPolicyRule": { - "description": "Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny).", - "id": "FirewallPolicyRule", + "HealthCheckLogConfig": { + "description": "Configuration of logging on a health check. If logging is enabled, logs will be exported to Stackdriver.", + "id": "HealthCheckLogConfig", "properties": { - "action": { - "description": "The Action to perform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", - "type": "string" - }, - "description": { - "description": "An optional description for this resource.", - "type": "string" - }, - "direction": { - "description": "The direction in which this rule applies.", - "enum": [ - "EGRESS", - "INGRESS" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "disabled": { - "description": "Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled.", - "type": "boolean" - }, - "enableLogging": { - "description": "Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on \"goto_next\" rules.", + "enable": { + "description": "Indicates whether or not to export logs. This is false by default, which means no health check logging will be done.", "type": "boolean" - }, - "kind": { - "default": "compute#firewallPolicyRule", - "description": "[Output only] Type of the resource. Always compute#firewallPolicyRule for firewall policy rules", - "type": "string" - }, - "match": { - "$ref": "FirewallPolicyRuleMatcher", - "description": "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced." - }, - "priority": { - "description": "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. 
Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", - "format": "int32", - "type": "integer" - }, - "ruleTupleCount": { - "description": "[Output Only] Calculation of the complexity of a single firewall policy rule.", - "format": "int32", - "type": "integer" - }, - "targetResources": { - "description": "A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule.", - "items": { - "type": "string" - }, - "type": "array" - }, - "targetServiceAccounts": { - "description": "A list of service accounts indicating the sets of instances that are applied with this rule.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "FirewallPolicyRuleMatcher": { - "description": "Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified.", - "id": "FirewallPolicyRuleMatcher", - "properties": { - "destIpRanges": { - "description": "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000.", - "items": { - "type": "string" - }, - "type": "array" - }, - "layer4Configs": { - "description": "Pairs of IP protocols and ports that the rule should match.", - "items": { - "$ref": "FirewallPolicyRuleMatcherLayer4Config" - }, - "type": "array" - }, - "srcIpRanges": { - "description": "CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000.", - "items": { - "type": "string" - }, - "type": "array" } }, "type": "object" }, - "FirewallPolicyRuleMatcherLayer4Config": { - "id": "FirewallPolicyRuleMatcherLayer4Config", + "HealthCheckReference": { + "description": "A full or valid partial URL to a health check. For example, the following are valid URLs: - https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check - projects/project-id/global/httpHealthChecks/health-check - global/httpHealthChecks/health-check ", + "id": "HealthCheckReference", "properties": { - "ipProtocol": { - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", + "healthCheck": { "type": "string" - }, - "ports": { - "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "FixedOrPercent": { - "description": "Encapsulates numeric value that can be either absolute or relative.", - "id": "FixedOrPercent", - "properties": { - "calculated": { - "description": "[Output Only] Absolute value of VM instances calculated based on the specific mode. - If the value is fixed, then the calculated value is equal to the fixed value. - If the value is a percent, then the calculated value is percent/100 * targetSize. For example, the calculated value of a 80% of a managed instance group with 150 instances would be (80/100 * 150) = 120 VM instances. If there is a remainder, the number is rounded. 
", - "format": "int32", - "type": "integer" - }, - "fixed": { - "description": "Specifies a fixed number of VM instances. This must be a positive integer.", - "format": "int32", - "type": "integer" - }, - "percent": { - "description": "Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%.", - "format": "int32", - "type": "integer" } }, "type": "object" }, - "ForwardingRule": { - "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud Platform load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", - "id": "ForwardingRule", + "HealthCheckService": { + "description": "Represents a Health-Check as a Service resource.", + "id": "HealthCheckService", "properties": { - "IPAddress": { - "description": "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule. If you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address: * IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in https://www.googleapis.com/compute/v1/projects/project_id/regions/region /addresses/address-name * Partial URL or by name, as in: - projects/project_id/regions/region/addresses/address-name - regions/region/addresses/address-name - global/addresses/address-name - address-name The loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, see [IP address specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). Must be set to `0.0.0.0` when the target is targetGrpcProxy that has validateForProxyless field set to true. For Private Service Connect forwarding rules that forward traffic to Google APIs, IP address must be provided.", - "type": "string" - }, - "IPProtocol": { - "description": "The IP protocol to which this rule applies. For protocol forwarding, valid options are TCP, UDP, ESP, AH, SCTP, ICMP and L3_DEFAULT. The valid IP protocols are different for different load balancing products as described in [Load balancing features](https://cloud.google.com/load-balancing/docs/features#protocols_from_the_load_balancer_to_the_backends).", - "enum": [ - "AH", - "ESP", - "ICMP", - "L3_DEFAULT", - "SCTP", - "TCP", - "UDP" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "allPorts": { - "description": "This field is used along with the backend_service field for Internal TCP/UDP Load Balancing or Network Load Balancing, or with the target field for internal and external TargetInstance. You can only use one of ports and port_range, or allPorts. The three are mutually exclusive. 
For TCP, UDP and SCTP traffic, packets addressed to any ports will be forwarded to the target or backendService.", - "type": "boolean" - }, - "allowGlobalAccess": { - "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If the field is set to TRUE, clients can access ILB from all regions. Otherwise only allows access from clients in the same region as the internal load balancer.", - "type": "boolean" - }, - "backendService": { - "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for Internal TCP/UDP Load Balancing and Network Load Balancing; must be omitted for all other load balancer types.", - "type": "string" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "fingerprint": { - "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a ForwardingRule. Include the fingerprint in patch request to ensure that you do not overwrite changes that were applied from another concurrent request. To see the latest fingerprint, make a get() request to retrieve a ForwardingRule.", - "format": "byte", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "ipVersion": { - "description": "The IP Version that will be used by this forwarding rule. Valid options are IPV4 or IPV6. This can only be specified for an external global forwarding rule.", - "enum": [ - "IPV4", - "IPV6", - "UNSPECIFIED_VERSION" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, - "isMirroringCollector": { - "description": "Indicates whether or not this load balancer can be used as a collector for packet mirroring. To prevent mirroring loops, instances behind this load balancer will not have their traffic mirrored even if a PacketMirroring rule applies to them. This can only be set to true for load balancers that have their loadBalancingScheme set to INTERNAL.", - "type": "boolean" + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" }, - "kind": { - "default": "compute#forwardingRule", - "description": "[Output Only] Type of the resource. Always compute#forwardingRule for Forwarding Rule resources.", + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this resource, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a ForwardingRule.", + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. 
This field is used in optimistic locking. This field will be ignored when inserting a HealthCheckService. An up-to-date fingerprint must be provided in order to patch/update the HealthCheckService; Otherwise, the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the HealthCheckService.", "format": "byte", "type": "string" }, - "labels": { - "additionalProperties": { + "healthChecks": { + "description": "A list of URLs to the HealthCheck resources. Must have at least one HealthCheck, and not more than 10 for regional HealthCheckService, and not more than 1 for global HealthCheckService. HealthCheck resources must have portSpecification=USE_SERVING_PORT or portSpecification=USE_FIXED_PORT. For regional HealthCheckService, the HealthCheck must be regional and in the same region. For global HealthCheckService, HealthCheck must be global. Mix of regional and global HealthChecks is not supported. Multiple regional HealthChecks must belong to the same region. Regional HealthChecks must belong to the same region as zones of NetworkEndpointGroups. For global HealthCheckService using global INTERNET_IP_PORT NetworkEndpointGroups, the global HealthChecks must specify sourceRegions, and HealthChecks that specify sourceRegions can only be used with global INTERNET_IP_PORT NetworkEndpointGroups.", + "items": { "type": "string" }, - "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", - "type": "object" + "type": "array" }, - "loadBalancingScheme": { - "description": "Specifies the forwarding rule type. For more information about forwarding rules, refer to Forwarding rule concepts.", + "healthStatusAggregationPolicy": { + "description": "Optional. Policy for how the results from multiple health checks for the same endpoint are aggregated. Defaults to NO_AGGREGATION if unspecified. - NO_AGGREGATION. An EndpointHealth message is returned for each pair in the health check service. - AND. If any health check of an endpoint reports UNHEALTHY, then UNHEALTHY is the HealthState of the endpoint. If all health checks report HEALTHY, the HealthState of the endpoint is HEALTHY. . This is only allowed with regional HealthCheckService.", "enum": [ - "EXTERNAL", - "EXTERNAL_MANAGED", - "INTERNAL", - "INTERNAL_MANAGED", - "INTERNAL_SELF_MANAGED", - "INVALID" + "AND", + "NO_AGGREGATION" ], "enumDescriptions": [ - "", - "", - "", - "", - "", - "" + "If any backend's health check reports UNHEALTHY, then UNHEALTHY is the HealthState of the entire health check service. If all backend's are healthy, the HealthState of the health check service is HEALTHY.", + "An EndpointHealth message is returned for each backend in the health check service." ], "type": "string" }, - "metadataFilters": { - "description": "Opaque filter criteria used by load balancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to load balancer, xDS clients present node metadata. When there is a match, the relevant configuration is made available to those proxies. Otherwise, all the resources (e.g. TargetHttpProxy, UrlMap) referenced by the ForwardingRule are not visible to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. 
If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. If multiple metadataFilters are specified, all of them need to be satisfied in order to be considered a match. metadataFilters specified here will be applifed before those specified in the UrlMap that this ForwardingRule references. metadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", - "items": { - "$ref": "MetadataFilter" - }, - "type": "array" - }, - "name": { - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. For Private Service Connect forwarding rules that forward traffic to Google APIs, the forwarding rule name must be a 1-20 characters string with lowercase letters and numbers and must start with a letter.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" }, - "network": { - "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", "type": "string" }, - "networkTier": { - "description": "This signifies the networking tier used for configuring this load balancer and can only take the following values: PREMIUM, STANDARD. For regional ForwardingRule, the valid values are PREMIUM and STANDARD. For GlobalForwardingRule, the valid value is PREMIUM. If this field is not specified, it is assumed to be PREMIUM. If IPAddress is specified, this value must be equal to the networkTier of the Address.", - "enum": [ - "FIXED_STANDARD", - "PREMIUM", - "STANDARD", - "STANDARD_OVERRIDES_FIXED_STANDARD" - ], - "enumDescriptions": [ - "Public internet quality with fixed bandwidth.", - "High quality, Google-grade network tier, support for all networking products.", - "Public internet quality, only limited support for other networking products.", - "(Output only) Temporary tier for FIXED_STANDARD when fixed standard tier is expired or not configured." - ], + "kind": { + "default": "compute#healthCheckService", + "description": "[Output only] Type of the resource. Always compute#healthCheckService for health check services.", "type": "string" }, - "portRange": { - "description": "This field can be used only if: - Load balancing scheme is one of EXTERNAL, INTERNAL_SELF_MANAGED or INTERNAL_MANAGED - IPProtocol is one of TCP, UDP, or SCTP. Packets addressed to ports in the specified range will be forwarded to target or backend_service. You can only use one of ports, port_range, or allPorts. The three are mutually exclusive. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint ports. Some types of forwarding target have constraints on the acceptable ports. 
For more information, see [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications). @pattern: \\\\d+(?:-\\\\d+)?", + "name": { + "description": "Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "ports": { - "description": "The ports field is only supported when the forwarding rule references a backend_service directly. Only packets addressed to the [specified list of ports]((https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications)) are forwarded to backends. You can only use one of ports and port_range, or allPorts. The three are mutually exclusive. You can specify a list of up to five ports, which can be non-contiguous. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint ports. @pattern: \\\\d+(?:-\\\\d+)?", + "networkEndpointGroups": { + "description": "A list of URLs to the NetworkEndpointGroup resources. Must not have more than 100. For regional HealthCheckService, NEGs must be in zones in the region of the HealthCheckService. For global HealthCheckServices, the NetworkEndpointGroups must be global INTERNET_IP_PORT.", "items": { "type": "string" }, "type": "array" }, - "pscConnectionId": { - "description": "[Output Only] The PSC connection id of the PSC Forwarding Rule.", - "format": "uint64", - "type": "string" - }, - "pscConnectionStatus": { - "enum": [ - "ACCEPTED", - "CLOSED", - "PENDING", - "REJECTED", - "STATUS_UNSPECIFIED" - ], - "enumDescriptions": [ - "The connection has been accepted by the producer.", - "The connection has been closed by the producer and will not serve traffic going forward.", - "The connection is pending acceptance by the producer.", - "The connection has been rejected by the producer.", - "" - ], - "type": "string" - }, - "region": { - "description": "[Output Only] URL of the region where the regional forwarding rule resides. This field is not applicable to global forwarding rules. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "serviceDirectoryRegistrations": { - "description": "Service Directory resources to register this forwarding rule with. Currently, only supports a single Service Directory resource.", + "notificationEndpoints": { + "description": "A list of URLs to the NotificationEndpoint resources. Must not have more than 10. A list of endpoints for receiving notifications of change in health status. For regional HealthCheckService, NotificationEndpoint must be regional and in the same region. For global HealthCheckService, NotificationEndpoint must be global.", "items": { - "$ref": "ForwardingRuleServiceDirectoryRegistration" + "type": "string" }, "type": "array" }, - "serviceLabel": { - "description": "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. 
Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "serviceName": { - "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.", + "region": { + "description": "[Output Only] URL of the region where the health check service resides. This field is not applicable to global health check services. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" }, - "subnetwork": { - "description": "This field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule, used in internal load balancing and network load balancing with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" - }, - "target": { + } + }, + "type": "object" + }, + "HealthCheckServiceReference": { + "description": "A full or valid partial URL to a health check service. For example, the following are valid URLs: - https://www.googleapis.com/compute/beta/projects/project-id/regions/us-west1/healthCheckServices/health-check-service - projects/project-id/regions/us-west1/healthCheckServices/health-check-service - regions/us-west1/healthCheckServices/health-check-service ", + "id": "HealthCheckServiceReference", + "properties": { + "healthCheckService": { "type": "string" } }, "type": "object" }, - "ForwardingRuleAggregatedList": { - "id": "ForwardingRuleAggregatedList", + "HealthCheckServicesList": { + "id": "HealthCheckServicesList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "ForwardingRulesScopedList", - "description": "Name of the scope containing this set of addresses." + "description": "A list of HealthCheckService resources.", + "items": { + "$ref": "HealthCheckService" }, - "description": "A list of ForwardingRulesScopedList resources.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#forwardingRuleAggregatedList", - "description": "[Output Only] Type of resource. Always compute#forwardingRuleAggregatedList for lists of forwarding rules.", + "default": "compute#healthCheckServicesList", + "description": "[Output Only] Type of the resource. 
Always compute#healthCheckServicesList for lists of HealthCheckServices.", "type": "string" }, "nextPageToken": { @@ -35664,13 +40920,6 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "unreachables": { - "description": "[Output Only] Unreachable resources.", - "items": { - "type": "string" - }, - "type": "array" - }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -35685,10 +40934,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -35712,10 +40963,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -35759,23 +41012,23 @@ }, "type": "object" }, - "ForwardingRuleList": { - "description": "Contains a list of ForwardingRule resources.", - "id": "ForwardingRuleList", + "HealthChecksAggregatedList": { + "id": "HealthChecksAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of ForwardingRule resources.", - "items": { - "$ref": "ForwardingRule" + "additionalProperties": { + "$ref": "HealthChecksScopedList", + "description": "Name of the scope containing this set of HealthChecks." 
}, - "type": "array" + "description": "A list of HealthChecksScopedList resources.", + "type": "object" }, "kind": { - "default": "compute#forwardingRuleList", + "default": "compute#healthChecksAggregatedList", "description": "Type of resource.", "type": "string" }, @@ -35787,6 +41040,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -35801,10 +41061,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -35828,10 +41090,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -35875,46 +41139,18 @@ }, "type": "object" }, - "ForwardingRuleReference": { - "id": "ForwardingRuleReference", - "properties": { - "forwardingRule": { - "type": "string" - } - }, - "type": "object" - }, - "ForwardingRuleServiceDirectoryRegistration": { - "description": "Describes the auto-registration of the Forwarding Rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule.", - "id": "ForwardingRuleServiceDirectoryRegistration", - "properties": { - "namespace": { - "description": "Service Directory namespace to register the forwarding rule under.", - "type": "string" - }, - "service": { - "description": "Service Directory service to register the forwarding rule under.", - "type": "string" - }, - "serviceDirectoryRegion": { - "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. 
All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region.", - "type": "string" - } - }, - "type": "object" - }, - "ForwardingRulesScopedList": { - "id": "ForwardingRulesScopedList", + "HealthChecksScopedList": { + "id": "HealthChecksScopedList", "properties": { - "forwardingRules": { - "description": "A list of forwarding rules contained in this scope.", + "healthChecks": { + "description": "A list of HealthChecks contained in this scope.", "items": { - "$ref": "ForwardingRule" + "$ref": "HealthCheck" }, "type": "array" }, "warning": { - "description": "Informational warning which replaces the list of forwarding rules when the list is empty.", + "description": "Informational warning which replaces the list of health checks when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -35927,10 +41163,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -35954,10 +41192,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -36001,384 +41241,303 @@ }, "type": "object" }, - "GRPCHealthCheck": { - "id": "GRPCHealthCheck", + "HealthStatus": { + "id": "HealthStatus", "properties": { - "grpcServiceName": { - "description": "The gRPC service name for the health check. This field is optional. The value of grpc_service_name has the following meanings by convention: - Empty service_name means the overall status of all services at the backend. - Non-empty service_name means the health of that gRPC service, as defined by the owner of the service. 
The grpc_service_name can only be ASCII.", + "annotations": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata defined as annotations for network endpoint.", + "type": "object" + }, + "forwardingRule": { + "description": "URL of the forwarding rule associated with the health status of the instance.", + "type": "string" + }, + "forwardingRuleIp": { + "description": "A forwarding rule IP address assigned to this instance.", + "type": "string" + }, + "healthState": { + "description": "Health state of the instance.", + "enum": [ + "HEALTHY", + "UNHEALTHY" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "instance": { + "description": "URL of the instance resource.", + "type": "string" + }, + "ipAddress": { + "description": "For target pool based Network Load Balancing, it indicates the forwarding rule's IP address assigned to this instance. For other types of load balancing, the field indicates VM internal ip.", "type": "string" }, "port": { - "description": "The port number for the health check request. Must be specified if port_name and port_specification are not set or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535.", + "description": "The named port of the instance group, not necessarily the port that is health-checked.", "format": "int32", "type": "integer" }, - "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. The port_name should conform to RFC1035.", + "weight": { "type": "string" }, - "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, gRPC health check follows behavior specified in port and portName fields.", + "weightError": { "enum": [ - "USE_FIXED_PORT", - "USE_NAMED_PORT", - "USE_SERVING_PORT" + "INVALID_WEIGHT", + "MISSING_WEIGHT", + "UNAVAILABLE_WEIGHT", + "WEIGHT_NONE" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The response to a Health Check probe had the HTTP response header field X-Load-Balancing-Endpoint-Weight, but its content was invalid (i.e., not a non-negative single-precision floating-point number in decimal string representation).", + "The response to a Health Check probe did not have the HTTP response header field X-Load-Balancing-Endpoint-Weight.", + "This is the value when the accompanied health status is either TIMEOUT (i.e.,the Health Check probe was not able to get a response in time) or UNKNOWN. For the latter, it should be typically because there has not been sufficient time to parse and report the weight for a new backend (which is with 0.0.0.0 ip address). 
However, it can be also due to an outage case for which the health status is explicitly reset to UNKNOWN.", + "This is the default value when WeightReportMode is DISABLE, and is also the initial value when WeightReportMode has just updated to ENABLE or DRY_RUN and there has not been sufficient time to parse and report the backend weight." ], "type": "string" } }, "type": "object" }, - "GlobalNetworkEndpointGroupsAttachEndpointsRequest": { - "id": "GlobalNetworkEndpointGroupsAttachEndpointsRequest", + "HealthStatusForNetworkEndpoint": { + "id": "HealthStatusForNetworkEndpoint", "properties": { - "networkEndpoints": { - "description": "The list of network endpoints to be attached.", - "items": { - "$ref": "NetworkEndpoint" - }, - "type": "array" + "backendService": { + "$ref": "BackendServiceReference", + "description": "URL of the backend service associated with the health state of the network endpoint." + }, + "forwardingRule": { + "$ref": "ForwardingRuleReference", + "description": "URL of the forwarding rule associated with the health state of the network endpoint." + }, + "healthCheck": { + "$ref": "HealthCheckReference", + "description": "URL of the health check associated with the health state of the network endpoint." + }, + "healthCheckService": { + "$ref": "HealthCheckServiceReference", + "description": "URL of the health check service associated with the health state of the network endpoint." + }, + "healthState": { + "description": "Health state of the network endpoint determined based on the health checks configured.", + "enum": [ + "DRAINING", + "HEALTHY", + "UNHEALTHY", + "UNKNOWN" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" } }, "type": "object" }, - "GlobalNetworkEndpointGroupsDetachEndpointsRequest": { - "id": "GlobalNetworkEndpointGroupsDetachEndpointsRequest", + "Help": { + "description": "Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit.", + "id": "Help", "properties": { - "networkEndpoints": { - "description": "The list of network endpoints to be detached.", + "links": { + "description": "URL(s) pointing to additional information on handling the current error.", "items": { - "$ref": "NetworkEndpoint" + "$ref": "HelpLink" }, "type": "array" } }, "type": "object" }, - "GlobalOrganizationSetPolicyRequest": { - "id": "GlobalOrganizationSetPolicyRequest", + "HelpLink": { + "description": "Describes a URL link.", + "id": "HelpLink", "properties": { - "bindings": { - "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify bindings.", - "items": { - "$ref": "Binding" - }, - "type": "array" - }, - "etag": { - "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify the etag.", - "format": "byte", + "description": { + "description": "Describes what the link offers.", "type": "string" }, - "policy": { - "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the 'resource'. The size of the policy is limited to a few 10s of KB. An empty policy is in general a valid policy but certain services (like Projects) might reject them." 
+ "url": { + "description": "The URL of the link.", + "type": "string" } }, "type": "object" }, - "GlobalSetLabelsRequest": { - "id": "GlobalSetLabelsRequest", + "HostRule": { + "description": "UrlMaps A host-matching rule for a URL. If matched, will use the named PathMatcher to select the BackendService.", + "id": "HostRule", "properties": { - "labelFingerprint": { - "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash when updating or changing labels, otherwise the request will fail with error 412 conditionNotMet. Make a get() request to the resource to get the latest fingerprint.", - "format": "byte", + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "A list of labels to apply for this resource. Each label key \u0026 value must comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. For example, \"webserver-frontend\": \"images\". A label value can also be empty (e.g. \"my-label\": \"\").", - "type": "object" - } - }, - "type": "object" - }, - "GlobalSetPolicyRequest": { - "id": "GlobalSetPolicyRequest", - "properties": { - "bindings": { - "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify bindings.", + "hosts": { + "description": "The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). In that case, * must be the first character, and if followed by anything, the immediate following character must be either - or .. * based matching is not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true.", "items": { - "$ref": "Binding" + "type": "string" }, "type": "array" }, - "etag": { - "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify the etag.", - "format": "byte", + "pathMatcher": { + "description": "The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion.", "type": "string" - }, - "policy": { - "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the 'resource'. The size of the policy is limited to a few 10s of KB. An empty policy is in general a valid policy but certain services (like Projects) might reject them." } }, "type": "object" }, - "GuestAttributes": { - "description": "A guest attributes entry.", - "id": "GuestAttributes", + "HttpFaultAbort": { + "description": "Specification for how requests are aborted as part of fault injection.", + "id": "HttpFaultAbort", "properties": { - "kind": { - "default": "compute#guestAttributes", - "description": "[Output Only] Type of the resource. Always compute#guestAttributes for guest attributes entry.", - "type": "string" - }, - "queryPath": { - "description": "The path to be queried. 
This can be the default namespace ('') or a nested namespace ('\\/') or a specified key ('\\/\\').", - "type": "string" - }, - "queryValue": { - "$ref": "GuestAttributesValue", - "description": "[Output Only] The value of the requested queried path." - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "variableKey": { - "description": "The key to search for.", - "type": "string" + "httpStatus": { + "description": "The HTTP status code used to abort the request. The value must be from 200 to 599 inclusive. For gRPC protocol, the gRPC status code is mapped to HTTP status code according to this mapping table. HTTP status 200 is mapped to gRPC status UNKNOWN. Injecting an OK status is currently not supported by Traffic Director.", + "format": "uint32", + "type": "integer" }, - "variableValue": { - "description": "[Output Only] The value found for the requested key.", - "type": "string" + "percentage": { + "description": "The percentage of traffic for connections, operations, or requests that is aborted as part of fault injection. The value must be from 0.0 to 100.0 inclusive.", + "format": "double", + "type": "number" } }, "type": "object" }, - "GuestAttributesEntry": { - "description": "A guest attributes namespace/key/value entry.", - "id": "GuestAttributesEntry", + "HttpFaultDelay": { + "description": "Specifies the delay introduced by the load balancer before forwarding the request to the backend service as part of fault injection.", + "id": "HttpFaultDelay", "properties": { - "key": { - "description": "Key for the guest attribute entry.", - "type": "string" - }, - "namespace": { - "description": "Namespace for the guest attribute entry.", - "type": "string" + "fixedDelay": { + "$ref": "Duration", + "description": "Specifies the value of the fixed delay interval." }, - "value": { - "description": "Value for the guest attribute entry.", - "type": "string" - } - }, - "type": "object" - }, - "GuestAttributesValue": { - "description": "Array of guest attribute namespace/key/value tuples.", - "id": "GuestAttributesValue", - "properties": { - "items": { - "items": { - "$ref": "GuestAttributesEntry" - }, - "type": "array" + "percentage": { + "description": "The percentage of traffic for connections, operations, or requests for which a delay is introduced as part of fault injection. The value must be from 0.0 to 100.0 inclusive.", + "format": "double", + "type": "number" } }, "type": "object" }, - "GuestOsFeature": { - "description": "Guest OS features.", - "id": "GuestOsFeature", + "HttpFaultInjection": { + "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by the load balancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the load balancer for a percentage of requests.", + "id": "HttpFaultInjection", "properties": { - "type": { - "description": "The ID of a supported feature. To add multiple values, use commas to separate values. 
Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - SECURE_BOOT - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE For more information, see Enabling guest operating system features.", - "enum": [ - "FEATURE_TYPE_UNSPECIFIED", - "GVNIC", - "MULTI_IP_SUBNET", - "SECURE_BOOT", - "SEV_CAPABLE", - "UEFI_COMPATIBLE", - "VIRTIO_SCSI_MULTIQUEUE", - "WINDOWS" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" + "abort": { + "$ref": "HttpFaultAbort", + "description": "The specification for how client requests are aborted as part of fault injection." + }, + "delay": { + "$ref": "HttpFaultDelay", + "description": "The specification for how client requests are delayed as part of fault injection, before being sent to a backend service." } }, "type": "object" - }, - "HTTP2HealthCheck": { - "id": "HTTP2HealthCheck", - "properties": { - "host": { - "description": "The value of the host header in the HTTP/2 health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", - "type": "string" - }, - "port": { - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", - "format": "int32", - "type": "integer" - }, - "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", - "type": "string" - }, - "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP2 health check follows behavior specified in port and portName fields.", - "enum": [ - "USE_FIXED_PORT", - "USE_NAMED_PORT", - "USE_SERVING_PORT" - ], - "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." - ], - "type": "string" + }, + "HttpHeaderAction": { + "description": "The request and response header transformations that take effect before the request is passed along to the selected backendService.", + "id": "HttpHeaderAction", + "properties": { + "requestHeadersToAdd": { + "description": "Headers to add to a matching request before forwarding the request to the backendService.", + "items": { + "$ref": "HttpHeaderOption" + }, + "type": "array" }, - "proxyHeader": { - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. 
The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" + "requestHeadersToRemove": { + "description": "A list of header names for headers that need to be removed from the request before forwarding the request to the backendService.", + "items": { + "type": "string" + }, + "type": "array" }, - "requestPath": { - "description": "The request path of the HTTP/2 health check request. The default value is /.", - "type": "string" + "responseHeadersToAdd": { + "description": "Headers to add the response before sending the response back to the client.", + "items": { + "$ref": "HttpHeaderOption" + }, + "type": "array" }, - "response": { - "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", - "type": "string" + "responseHeadersToRemove": { + "description": "A list of header names for headers that need to be removed from the response before sending the response back to the client.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "HTTPHealthCheck": { - "id": "HTTPHealthCheck", + "HttpHeaderMatch": { + "description": "matchRule criteria for request header matches.", + "id": "HttpHeaderMatch", "properties": { - "host": { - "description": "The value of the host header in the HTTP health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", + "exactMatch": { + "description": "The value should exactly match contents of exactMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", "type": "string" }, - "port": { - "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", - "format": "int32", - "type": "integer" - }, - "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "headerName": { + "description": "The name of the HTTP header to match. For matching against the HTTP request's authority, use a headerMatch with the header name \":authority\". For matching a request's method, use the headerName \":method\". When the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true, only non-binary user-specified custom metadata and the `content-type` header are supported. The following transport-level headers cannot be used in header matching rules: `:authority`, `:method`, `:path`, `:scheme`, `user-agent`, `accept-encoding`, `content-encoding`, `grpc-accept-encoding`, `grpc-encoding`, `grpc-previous-rpc-attempts`, `grpc-tags-bin`, `grpc-timeout` and `grpc-trace-bin`.", "type": "string" }, - "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. 
If not specified, HTTP health check follows behavior specified in port and portName fields.", - "enum": [ - "USE_FIXED_PORT", - "USE_NAMED_PORT", - "USE_SERVING_PORT" - ], - "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." - ], - "type": "string" + "invertMatch": { + "description": "If set to false, the headerMatch is considered a match if the preceding match criteria are met. If set to true, the headerMatch is considered a match if the preceding match criteria are NOT met. The default setting is false. ", + "type": "boolean" }, - "proxyHeader": { - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ], + "prefixMatch": { + "description": "The value of the header must start with the contents of prefixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", "type": "string" }, - "requestPath": { - "description": "The request path of the HTTP health check request. The default value is /.", + "presentMatch": { + "description": "A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "boolean" + }, + "rangeMatch": { + "$ref": "Int64RangeMatch", + "description": "The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer, number or is empty, the match fails. For example for a range [-5, 0] - -3 will match. - 0 will not match. - 0.25 will not match. - -3someString will not match. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. rangeMatch is not supported for load balancers that have loadBalancingScheme set to EXTERNAL." + }, + "regexMatch": { + "description": "The value of the header must match the regular expression specified in regexMatch. For more information about regular expression syntax, see Syntax. For matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. Regular expressions can only be used when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED.", "type": "string" }, - "response": { - "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", + "suffixMatch": { + "description": "The value of the header must end with the contents of suffixMatch. 
Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", "type": "string" } }, "type": "object" }, - "HTTPSHealthCheck": { - "id": "HTTPSHealthCheck", + "HttpHeaderOption": { + "description": "Specification determining how headers are added to requests or responses.", + "id": "HttpHeaderOption", "properties": { - "host": { - "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", - "type": "string" - }, - "port": { - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", - "format": "int32", - "type": "integer" - }, - "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", - "type": "string" - }, - "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTPS health check follows behavior specified in port and portName fields.", - "enum": [ - "USE_FIXED_PORT", - "USE_NAMED_PORT", - "USE_SERVING_PORT" - ], - "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." - ], - "type": "string" - }, - "proxyHeader": { - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ], + "headerName": { + "description": "The name of the header.", "type": "string" }, - "requestPath": { - "description": "The request path of the HTTPS health check request. The default value is /.", + "headerValue": { + "description": "The value of the header to add.", "type": "string" }, - "response": { - "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", - "type": "string" + "replace": { + "description": "If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. The default value is false. ", + "type": "boolean" } }, "type": "object" }, - "HealthCheck": { - "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/v1/healthChecks) * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.HealthChecks`). 
Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.HealthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", - "id": "HealthCheck", + "HttpHealthCheck": { + "description": "Represents a legacy HTTP Health Check resource. Legacy HTTP health checks are now only required by target pool-based network load balancers. For all other load balancers, including backend service-based network load balancers, and for managed instance group auto-healing, you must use modern (non-legacy) health checks. For more information, see Health checks overview .", + "id": "HttpHealthCheck", "properties": { "checkIntervalSec": { "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", @@ -36386,29 +41545,21 @@ "type": "integer" }, "creationTimestamp": { - "description": "[Output Only] Creation timestamp in 3339 text format.", + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "grpcHealthCheck": { - "$ref": "GRPCHealthCheck" - }, "healthyThreshold": { "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", "format": "int32", "type": "integer" }, - "http2HealthCheck": { - "$ref": "HTTP2HealthCheck" - }, - "httpHealthCheck": { - "$ref": "HTTPHealthCheck" - }, - "httpsHealthCheck": { - "$ref": "HTTPSHealthCheck" + "host": { + "description": "The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.", + "type": "string" }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -36416,60 +41567,33 @@ "type": "string" }, "kind": { - "default": "compute#healthCheck", - "description": "Type of the resource.", + "default": "compute#httpHealthCheck", + "description": "[Output Only] Type of the resource. Always compute#httpHealthCheck for HTTP health checks.", "type": "string" }, - "logConfig": { - "$ref": "HealthCheckLogConfig", - "description": "Configure logging on this health check." - }, "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "region": { - "description": "[Output Only] Region where the health check resides. Not applicable to global health checks.", + "port": { + "description": "The TCP port number for the HTTP health check request. The default value is 80.", + "format": "int32", + "type": "integer" + }, + "requestPath": { + "description": "The request path of the HTTP health check request. The default value is /. This field does not support query parameters.", "type": "string" }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "sslHealthCheck": { - "$ref": "SSLHealthCheck" - }, - "tcpHealthCheck": { - "$ref": "TCPHealthCheck" - }, "timeoutSec": { "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec.", "format": "int32", "type": "integer" }, - "type": { - "description": "Specifies the type of the healthCheck, either TCP, SSL, HTTP, HTTPS or HTTP2. Exactly one of the protocol-specific health check field must be specified, which must match type field.", - "enum": [ - "GRPC", - "HTTP", - "HTTP2", - "HTTPS", - "INVALID", - "SSL", - "TCP" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, "unhealthyThreshold": { "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", "format": "int32", @@ -36478,23 +41602,23 @@ }, "type": "object" }, - "HealthCheckList": { - "description": "Contains a list of HealthCheck resources.", - "id": "HealthCheckList", + "HttpHealthCheckList": { + "description": "Contains a list of HttpHealthCheck resources.", + "id": "HttpHealthCheckList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of HealthCheck resources.", + "description": "A list of HttpHealthCheck resources.", "items": { - "$ref": "HealthCheck" + "$ref": "HttpHealthCheck" }, "type": "array" }, "kind": { - "default": "compute#healthCheckList", + "default": "compute#httpHealthCheckList", "description": "Type of resource.", "type": "string" }, @@ -36520,10 +41644,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -36547,10 +41673,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -36594,130 +41722,306 @@ }, "type": "object" }, - "HealthCheckLogConfig": { - "description": "Configuration of logging on a health check. If logging is enabled, logs will be exported to Stackdriver.", - "id": "HealthCheckLogConfig", + "HttpQueryParameterMatch": { + "description": "HttpRouteRuleMatch criteria for a request's query parameter.", + "id": "HttpQueryParameterMatch", "properties": { - "enable": { - "description": "Indicates whether or not to export logs. This is false by default, which means no health check logging will be done.", + "exactMatch": { + "description": "The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch. Only one of presentMatch, exactMatch, or regexMatch must be set. ", + "type": "string" + }, + "name": { + "description": "The name of the query parameter to match. The query parameter must exist in the request, in the absence of which the request match fails.", + "type": "string" + }, + "presentMatch": { + "description": "Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not. Only one of presentMatch, exactMatch, or regexMatch must be set. ", "type": "boolean" + }, + "regexMatch": { + "description": "The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For more information about regular expression syntax, see Syntax. Only one of presentMatch, exactMatch, or regexMatch must be set. Regular expressions can only be used when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED. ", + "type": "string" } }, "type": "object" }, - "HealthCheckReference": { - "description": "A full or valid partial URL to a health check. For example, the following are valid URLs: - https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check - projects/project-id/global/httpHealthChecks/health-check - global/httpHealthChecks/health-check ", - "id": "HealthCheckReference", + "HttpRedirectAction": { + "description": "Specifies settings for an HTTP redirect.", + "id": "HttpRedirectAction", "properties": { - "healthCheck": { + "hostRedirect": { + "description": "The host that is used in the redirect response instead of the one that was supplied in the request. The value must be from 1 to 255 characters.", + "type": "string" + }, + "httpsRedirect": { + "description": "If set to true, the URL scheme in the redirected request is set to HTTPS. If set to false, the URL scheme of the redirected request remains the same as that of the request. This must only be set for URL maps used in TargetHttpProxys. 
Setting this true for TargetHttpsProxy is not permitted. The default is set to false.", + "type": "boolean" + }, + "pathRedirect": { + "description": "The path that is used in the redirect response instead of the one that was supplied in the request. pathRedirect cannot be supplied together with prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters.", + "type": "string" + }, + "prefixRedirect": { + "description": "The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request. prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters.", + "type": "string" + }, + "redirectResponseCode": { + "description": "The HTTP Status code to use for this RedirectAction. Supported values are: - MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - FOUND, which corresponds to 302. - SEE_OTHER which corresponds to 303. - TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method is retained. - PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method is retained. ", + "enum": [ + "FOUND", + "MOVED_PERMANENTLY_DEFAULT", + "PERMANENT_REDIRECT", + "SEE_OTHER", + "TEMPORARY_REDIRECT" + ], + "enumDescriptions": [ + "Http Status Code 302 - Found.", + "Http Status Code 301 - Moved Permanently.", + "Http Status Code 308 - Permanent Redirect maintaining HTTP method.", + "Http Status Code 303 - See Other.", + "Http Status Code 307 - Temporary Redirect maintaining HTTP method." + ], "type": "string" + }, + "stripQuery": { + "description": "If set to true, any accompanying query portion of the original URL is removed before redirecting the request. If set to false, the query portion of the original URL is retained. The default is set to false. ", + "type": "boolean" } }, "type": "object" }, - "HealthCheckService": { - "description": "Represents a Health-Check as a Service resource.", - "id": "HealthCheckService", + "HttpRetryPolicy": { + "description": "The retry policy associates with HttpRouteRule", + "id": "HttpRetryPolicy", "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" + "numRetries": { + "description": "Specifies the allowed number retries. This number must be \u003e 0. If not specified, defaults to 1.", + "format": "uint32", + "type": "integer" + }, + "perTryTimeout": { + "$ref": "Duration", + "description": "Specifies a non-zero timeout per retry attempt. If not specified, will use the timeout set in the HttpRouteAction field. If timeout in the HttpRouteAction field is not set, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." + }, + "retryConditions": { + "description": "Specifies one or more conditions when this retry policy applies. Valid values are: - 5xx: retry is attempted if the instance or endpoint responds with any 5xx response code, or if the instance or endpoint does not respond at all. For example, disconnects, reset, read timeout, connection failure, and refused streams. 
- gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. - connect-failure: a retry is attempted on failures connecting to the instance or endpoint. For example, connection timeouts. - retriable-4xx: a retry is attempted if the instance or endpoint responds with a 4xx response code. The only error that you can retry is error code 409. - refused-stream: a retry is attempted if the instance or endpoint resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. - cancelled: a retry is attempted if the gRPC status code in the response header is set to cancelled. - deadline-exceeded: a retry is attempted if the gRPC status code in the response header is set to deadline-exceeded. - internal: a retry is attempted if the gRPC status code in the response header is set to internal. - resource-exhausted: a retry is attempted if the gRPC status code in the response header is set to resource-exhausted. - unavailable: a retry is attempted if the gRPC status code in the response header is set to unavailable. Only the following codes are supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true. - cancelled - deadline-exceeded - internal - resource-exhausted - unavailable ", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "HttpRouteAction": { + "id": "HttpRouteAction", + "properties": { + "corsPolicy": { + "$ref": "CorsPolicy", + "description": "The specification for allowing client-side cross-origin requests. For more information about the W3C recommendation for cross-origin resource sharing (CORS), see Fetch API Living Standard. Not supported when the URL map is bound to a target gRPC proxy." + }, + "faultInjectionPolicy": { + "$ref": "HttpFaultInjection", + "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by a load balancer on a percentage of requests before sending those requests to the backend service. Similarly requests from clients can be aborted by the load balancer for a percentage of requests. timeout and retry_policy is ignored by clients that are configured with a fault_injection_policy if: 1. The traffic is generated by fault injection AND 2. The fault injection is not a delay fault injection. Fault injection is not supported with the global external HTTP(S) load balancer (classic). To see which load balancers support fault injection, see Load balancing: Routing and traffic management features." + }, + "maxStreamDuration": { + "$ref": "Duration", + "description": "Specifies the maximum duration (timeout) for streams on the selected route. Unlike the timeout field where the timeout duration starts from the time the request has been fully processed (known as *end-of-stream*), the duration in this field is computed from the beginning of the stream until the response has been processed, including all retries. A stream that does not complete in this duration is closed. If not specified, this field uses the maximum maxStreamDuration value among all backend services associated with the route. This field is only allowed if the Url map is used with backend services with loadBalancingScheme set to INTERNAL_SELF_MANAGED." 
+ }, + "requestMirrorPolicy": { + "$ref": "RequestMirrorPolicy", + "description": "Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. The load balancer does not wait for responses from the shadow service. Before sending traffic to the shadow service, the host / authority header is suffixed with -shadow. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." + }, + "retryPolicy": { + "$ref": "HttpRetryPolicy", + "description": "Specifies the retry policy associated with this route." + }, + "timeout": { + "$ref": "Duration", + "description": "Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (known as *end-of-stream*) up until the response has been processed. Timeout includes all retries. If not specified, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true." + }, + "urlRewrite": { + "$ref": "UrlRewrite", + "description": "The spec to modify the URL of the request, before forwarding the request to the matched service. urlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." }, + "weightedBackendServices": { + "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number. After a backend service is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", + "items": { + "$ref": "WeightedBackendService" + }, + "type": "array" + } + }, + "type": "object" + }, + "HttpRouteRule": { + "description": "The HttpRouteRule setting specifies how to match an HTTP request and the corresponding routing action that load balancing proxies perform.", + "id": "HttpRouteRule", + "properties": { "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "description": "The short description conveying the intent of this routeRule. The description can have a maximum length of 1024 characters.", "type": "string" }, - "fingerprint": { - "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a HealthCheckService. An up-to-date fingerprint must be provided in order to patch/update the HealthCheckService; Otherwise, the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the HealthCheckService.", - "format": "byte", - "type": "string" + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService. 
The headerAction value specified here is applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true." }, - "healthChecks": { - "description": "A list of URLs to the HealthCheck resources. Must have at least one HealthCheck, and not more than 10. HealthCheck resources must have portSpecification=USE_SERVING_PORT or portSpecification=USE_FIXED_PORT. For regional HealthCheckService, the HealthCheck must be regional and in the same region. For global HealthCheckService, HealthCheck must be global. Mix of regional and global HealthChecks is not supported. Multiple regional HealthChecks must belong to the same region. Regional HealthChecks must belong to the same region as zones of NEGs.", + "matchRules": { + "description": "The list of criteria for matching attributes of a request to this routeRule. This list has OR semantics: the request matches this routeRule when any of the matchRules are satisfied. However predicates within a given matchRule have AND semantics. All predicates within a matchRule must match for the request to match the rule.", "items": { - "type": "string" + "$ref": "HttpRouteRuleMatch" }, "type": "array" }, - "healthStatusAggregationPolicy": { - "description": "Optional. Policy for how the results from multiple health checks for the same endpoint are aggregated. Defaults to NO_AGGREGATION if unspecified. - NO_AGGREGATION. An EndpointHealth message is returned for each pair in the health check service. - AND. If any health check of an endpoint reports UNHEALTHY, then UNHEALTHY is the HealthState of the endpoint. If all health checks report HEALTHY, the HealthState of the endpoint is HEALTHY. .", - "enum": [ - "AND", - "NO_AGGREGATION" - ], - "enumDescriptions": [ - "If any backend's health check reports UNHEALTHY, then UNHEALTHY is the HealthState of the entire health check service. If all backend's are healthy, the HealthState of the health check service is HEALTHY.", - "An EndpointHealth message is returned for each backend in the health check service." - ], - "type": "string" + "priority": { + "description": "For routeRules within a given pathMatcher, priority determines the order in which a load balancer interprets routeRules. RouteRules are evaluated in order of priority, from the lowest to highest number. The priority of a rule decreases as its number increases (1, 2, 3, N+1). The first rule that matches the request is applied. You cannot configure two or more routeRules with the same priority. Priority for each rule must be set to a number from 0 to 2147483647 inclusive. Priority numbers can have gaps, which enable you to add or remove rules in the future without affecting the rest of the rules. For example, 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the future without any impact on existing rules.", + "format": "int32", + "type": "integer" }, - "id": { - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", - "format": "uint64", - "type": "string" + "routeAction": { + "$ref": "HttpRouteAction", + "description": "In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within a route rule's routeAction." }, - "kind": { - "default": "compute#healthCheckService", - "description": "[Output only] Type of the resource. Always compute#healthCheckServicefor health check services.", + "service": { + "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set.", "type": "string" }, - "name": { - "description": "Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "urlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When this rule is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy." + } + }, + "type": "object" + }, + "HttpRouteRuleMatch": { + "description": "HttpRouteRuleMatch specifies a set of criteria for matching requests to an HttpRouteRule. All specified criteria must be satisfied for a match to occur.", + "id": "HttpRouteRuleMatch", + "properties": { + "fullPathMatch": { + "description": "For satisfying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL. fullPathMatch must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified.", "type": "string" }, - "networkEndpointGroups": { - "description": "A list of URLs to the NetworkEndpointGroup resources. Must not have more than 100. For regional HealthCheckService, NEGs must be in zones in the region of the HealthCheckService.", + "headerMatches": { + "description": "Specifies a list of header match criteria, all of which must match corresponding headers in the request.", "items": { - "type": "string" + "$ref": "HttpHeaderMatch" }, "type": "array" }, - "notificationEndpoints": { - "description": "A list of URLs to the NotificationEndpoint resources. Must not have more than 10. A list of endpoints for receiving notifications of change in health status. 
For regional HealthCheckService, NotificationEndpoint must be regional and in the same region. For global HealthCheckService, NotificationEndpoint must be global.", + "ignoreCase": { + "description": "Specifies that prefixMatch and fullPathMatch matches are case sensitive. The default value is false. ignoreCase must not be used with regexMatch. Not supported when the URL map is bound to a target gRPC proxy.", + "type": "boolean" + }, + "metadataFilters": { + "description": "Opaque filter criteria used by the load balancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to the load balancer, xDS clients present node metadata. When there is a match, the relevant routing configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. If multiple metadata filters are specified, all of them need to be satisfied in order to be considered a match. metadataFilters specified here is applied after those specified in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to. metadataFilters only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true.", "items": { - "type": "string" + "$ref": "MetadataFilter" }, "type": "array" }, - "region": { - "description": "[Output Only] URL of the region where the health check service resides. This field is not applicable to global health check services. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "prefixMatch": { + "description": "For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /. The value must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified.", "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "queryParameterMatches": { + "description": "Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request. Not supported when the URL map is bound to a target gRPC proxy.", + "items": { + "$ref": "HttpQueryParameterMatch" + }, + "type": "array" + }, + "regexMatch": { + "description": "For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For more information about regular expression syntax, see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. Regular expressions can only be used when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED.", "type": "string" } }, "type": "object" }, - "HealthCheckServiceReference": { - "description": "A full or valid partial URL to a health check service. 
For example, the following are valid URLs: - https://www.googleapis.com/compute/beta/projects/project-id/regions/us-west1/healthCheckServices/health-check-service - projects/project-id/regions/us-west1/healthCheckServices/health-check-service - regions/us-west1/healthCheckServices/health-check-service ", - "id": "HealthCheckServiceReference", + "HttpsHealthCheck": { + "description": "Represents a legacy HTTPS Health Check resource. Legacy HTTPS health checks have been deprecated. If you are using a target pool-based network load balancer, you must use a legacy HTTP (not HTTPS) health check. For all other load balancers, including backend service-based network load balancers, and for managed instance group auto-healing, you must use modern (non-legacy) health checks. For more information, see Health checks overview .", + "id": "HttpsHealthCheck", "properties": { - "healthCheckService": { + "checkIntervalSec": { + "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", + "format": "int32", + "type": "integer" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "healthyThreshold": { + "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", + "format": "int32", + "type": "integer" + }, + "host": { + "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#httpsHealthCheck", + "description": "Type of the resource.", "type": "string" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "port": { + "description": "The TCP port number for the HTTPS health check request. The default value is 443.", + "format": "int32", + "type": "integer" + }, + "requestPath": { + "description": "The request path of the HTTPS health check request. The default value is \"/\".", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "timeoutSec": { + "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have a greater value than checkIntervalSec.", + "format": "int32", + "type": "integer" + }, + "unhealthyThreshold": { + "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. 
The default value is 2.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "HealthCheckServicesList": { - "id": "HealthCheckServicesList", + "HttpsHealthCheckList": { + "description": "Contains a list of HttpsHealthCheck resources.", + "id": "HttpsHealthCheckList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of HealthCheckService resources.", + "description": "A list of HttpsHealthCheck resources.", "items": { - "$ref": "HealthCheckService" + "$ref": "HttpsHealthCheck" }, "type": "array" }, "kind": { - "default": "compute#healthCheckServicesList", - "description": "[Output Only] Type of the resource. Always compute#healthCheckServicesList for lists of HealthCheckServices.", + "default": "compute#httpsHealthCheckList", + "description": "Type of resource.", "type": "string" }, "nextPageToken": { @@ -36742,10 +42046,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -36769,10 +42075,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -36816,141 +42124,258 @@ }, "type": "object" }, - "HealthChecksAggregatedList": { - "id": "HealthChecksAggregatedList", + "Image": { + "description": "Represents an Image resource. You can use images to create boot disks for your VM instances. For more information, read Images.", + "id": "Image", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "architecture": { + "description": "The architecture of the image. Valid values are ARM64 or X86_64.", + "enum": [ + "ARCHITECTURE_UNSPECIFIED", + "ARM64", + "X86_64" + ], + "enumDescriptions": [ + "Default value indicating Architecture is not set.", + "Machines with architecture ARM64", + "Machines with architecture X86_64" + ], "type": "string" }, - "items": { - "additionalProperties": { - "$ref": "HealthChecksScopedList", - "description": "Name of the scope containing this set of HealthChecks." 
+ "archiveSizeBytes": { + "description": "Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).", + "format": "int64", + "type": "string" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "The deprecation status associated with this image." + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "diskSizeGb": { + "description": "Size of the image when restored onto a persistent disk (in GB).", + "format": "int64", + "type": "string" + }, + "family": { + "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035.", + "type": "string" + }, + "guestOsFeatures": { + "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. To see a list of available options, see the guestOSfeatures[].type parameter.", + "items": { + "$ref": "GuestOsFeature" }, - "description": "A list of HealthChecksScopedList resources.", - "type": "object" + "type": "array" }, - "kind": { - "default": "compute#healthChecksAggregatedList", - "description": "Type of resource.", + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", "type": "string" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "imageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the image using a customer-supplied encryption key. After you encrypt an image with a customer-supplied key, you must provide the same key if you use the image later (e.g. to create a disk from the image). Customer-supplied encryption keys do not protect access to metadata of the disk. If you do not provide an encryption key when creating the image, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the image later." + }, + "kind": { + "default": "compute#image", + "description": "[Output Only] Type of the resource. Always compute#image for images.", "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this image, which is essentially a hash of the labels used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. 
To see the latest fingerprint, make a get() request to retrieve an image.", + "format": "byte", "type": "string" }, - "unreachables": { - "description": "[Output Only] Unreachable resources.", + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this image. These can be later modified by the setLabels method.", + "type": "object" + }, + "licenseCodes": { + "description": "Integer license codes indicating which licenses are attached to this image.", "items": { + "format": "int64", "type": "string" }, "type": "array" }, - "warning": { - "description": "[Output Only] Informational warning message.", + "licenses": { + "description": "Any applicable license URI.", + "items": { + "type": "string" + }, + "type": "array" + }, + "name": { + "annotations": { + "required": [ + "compute.images.insert" + ] + }, + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "rawDisk": { + "description": "The parameters of the raw disk image.", "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "containerType": { + "description": "The format used to encode and transmit the block device, which should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created.", "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "LARGE_DEPLOYMENT_WARNING", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "PARTIAL_SUCCESS", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" + "TAR" ], "enumDescriptions": [ - "Warning about failed cleanup of transient changes made by a failed operation.", - "A link to a deprecated resource was created.", - "When deploying and at least one of the resources has a type marked as deprecated", - "The user created a boot disk that is larger than image size.", - "When deploying and at least one of the resources has a type marked as experimental", - "Warning that is present in an external api call", - "Warning that value of a field has been overridden. 
Deprecated unused field.", - "The operation involved use of an injected kernel, which is deprecated.", - "When deploying a deployment with a exceedingly large number of resources", - "A resource depends on a missing type", - "The route's nextHopIp address is not assigned to an instance on the network.", - "The route's next hop instance cannot ip forward.", - "The route's nextHopInstance URL refers to an instance that does not exist.", - "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", - "The route's next hop instance does not have a status of RUNNING.", - "Error which is not critical. We decided to continue the process despite the mentioned error.", - "No results are present on a particular list page.", - "Success is reported, but some results may be missing due to errors", - "The user attempted to use a resource that requires a TOS they have not accepted.", - "Warning that a resource is in use.", - "One or more of the resources set to auto-delete could not be deleted because they were in use.", - "When a resource schema validation is ignored.", - "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", - "When undeclared properties in the schema are present", - "A given scope cannot be reached." + "" ], "type": "string" }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" + "sha1Checksum": { + "description": "[Deprecated] This field is deprecated. An optional SHA1 checksum of the disk image before unpackaging provided by the client when the disk image is created.", + "pattern": "[a-f0-9]{40}", + "type": "string" }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", + "source": { + "description": "The full Google Cloud Storage URL where the raw disk image archive is stored. The following are valid formats for the URL: - https://storage.googleapis.com/bucket_name/image_archive_name - https://storage.googleapis.com/bucket_name/folder_name/ image_archive_name In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL ", "type": "string" } }, "type": "object" + }, + "satisfiesPzs": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "shieldedInstanceInitialState": { + "$ref": "InitialStateConfig", + "description": "Set the secure boot keys of shielded instance." 
+ }, + "sourceDisk": { + "description": "URL of the source disk used to create this image. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL ", + "type": "string" + }, + "sourceDiskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." + }, + "sourceDiskId": { + "description": "[Output Only] The ID value of the disk used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given disk name.", + "type": "string" + }, + "sourceImage": { + "description": "URL of the source image used to create this image. The following are valid formats for the URL: - https://www.googleapis.com/compute/v1/projects/project_id/global/ images/image_name - projects/project_id/global/images/image_name In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL ", + "type": "string" + }, + "sourceImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key." + }, + "sourceImageId": { + "description": "[Output Only] The ID value of the image used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given image name.", + "type": "string" + }, + "sourceSnapshot": { + "description": "URL of the source snapshot used to create this image. The following are valid formats for the URL: - https://www.googleapis.com/compute/v1/projects/project_id/global/ snapshots/snapshot_name - projects/project_id/global/snapshots/snapshot_name In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL ", + "type": "string" + }, + "sourceSnapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key." + }, + "sourceSnapshotId": { + "description": "[Output Only] The ID value of the snapshot used to create this image. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given snapshot name.", + "type": "string" + }, + "sourceType": { + "default": "RAW", + "description": "The type of the image used to create this disk. The default and only valid value is RAW.", + "enum": [ + "RAW" + ], + "enumDescriptions": [ + "" + ], + "type": "string" + }, + "status": { + "description": "[Output Only] The status of the image. An image can be used to create other resources, such as instances, only after the image has been successfully created and the status is set to READY. 
Possible values are FAILED, PENDING, or READY.", + "enum": [ + "DELETING", + "FAILED", + "PENDING", + "READY" + ], + "enumDescriptions": [ + "Image is deleting.", + "Image creation failed due to an error.", + "Image hasn't been created as yet.", + "Image has been successfully created." + ], + "type": "string" + }, + "storageLocations": { + "description": "Cloud Storage bucket storage location of the image (regional or multi-regional).", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "HealthChecksScopedList": { - "id": "HealthChecksScopedList", + "ImageFamilyView": { + "id": "ImageFamilyView", "properties": { - "healthChecks": { - "description": "A list of HealthChecks contained in this scope.", + "image": { + "$ref": "Image", + "description": "The latest image that is part of the specified image family in the requested location, and that is not deprecated." + } + }, + "type": "object" + }, + "ImageList": { + "description": "Contains a list of images.", + "id": "ImageList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of Image resources.", "items": { - "$ref": "HealthCheck" + "$ref": "Image" }, "type": "array" }, + "kind": { + "default": "compute#imageList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "Informational warning which replaces the list of backend services when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -36963,10 +42388,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -36990,10 +42417,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -37037,356 +42466,312 @@ }, "type": "object" }, - "HealthStatus": { - "id": "HealthStatus", + "InitialStateConfig": { + "description": "Initial State for shielded instance, these are public keys which are safe to store in public", + "id": "InitialStateConfig", "properties": { - "annotations": { - "additionalProperties": { - "type": "string" + "dbs": { + "description": "The Key Database (db).", + "items": { + "$ref": "FileContentBuffer" }, - "description": "Metadata defined as annotations for network endpoint.", - "type": "object" - }, - "forwardingRule": { - "description": "URL of the forwarding rule associated with the health status of the instance.", - "type": "string" - }, - "forwardingRuleIp": { - "description": "A forwarding rule IP address assigned to this instance.", - "type": "string" - }, - "healthState": { - "description": "Health state of the instance.", - "enum": [ - "HEALTHY", - "UNHEALTHY" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "instance": { - "description": "URL of the instance resource.", - "type": "string" - }, - "ipAddress": { - "description": "For target pool based Network Load Balancing, it indicates the forwarding rule's IP address assigned to this instance. For other types of load balancing, the field indicates VM internal ip.", - "type": "string" - }, - "port": { - "description": "The named port of the instance group, not necessarily the port that is health-checked.", - "format": "int32", - "type": "integer" - }, - "weight": { - "type": "string" - }, - "weightError": { - "enum": [ - "INVALID_WEIGHT", - "MISSING_WEIGHT", - "UNAVAILABLE_WEIGHT", - "WEIGHT_NONE" - ], - "enumDescriptions": [ - "The response to a Health Check probe had the HTTP response header field X-Load-Balancing-Endpoint-Weight, but its content was invalid (i.e., not a non-negative single-precision floating-point number in decimal string representation).", - "The response to a Health Check probe did not have the HTTP response header field X-Load-Balancing-Endpoint-Weight.", - "This is the value when the accompanied health status is either TIMEOUT (i.e.,the Health Check probe was not able to get a response in time) or UNKNOWN. For the latter, it should be typically because there has not been sufficient time to parse and report the weight for a new backend (which is with 0.0.0.0 ip address). However, it can be also due to an outage case for which the health status is explicitly reset to UNKNOWN.", - "This is the default value when WeightReportMode is DISABLE, and is also the initial value when WeightReportMode has just updated to ENABLE or DRY_RUN and there has not been sufficient time to parse and report the backend weight." 
- ], - "type": "string" - } - }, - "type": "object" - }, - "HealthStatusForNetworkEndpoint": { - "id": "HealthStatusForNetworkEndpoint", - "properties": { - "backendService": { - "$ref": "BackendServiceReference", - "description": "URL of the backend service associated with the health state of the network endpoint." - }, - "forwardingRule": { - "$ref": "ForwardingRuleReference", - "description": "URL of the forwarding rule associated with the health state of the network endpoint." - }, - "healthCheck": { - "$ref": "HealthCheckReference", - "description": "URL of the health check associated with the health state of the network endpoint." - }, - "healthCheckService": { - "$ref": "HealthCheckServiceReference", - "description": "URL of the health check service associated with the health state of the network endpoint." + "type": "array" }, - "healthState": { - "description": "Health state of the network endpoint determined based on the health checks configured.", - "enum": [ - "DRAINING", - "HEALTHY", - "UNHEALTHY", - "UNKNOWN" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "HostRule": { - "description": "UrlMaps A host-matching rule for a URL. If matched, will use the named PathMatcher to select the BackendService.", - "id": "HostRule", - "properties": { - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" + "dbxs": { + "description": "The forbidden key database (dbx).", + "items": { + "$ref": "FileContentBuffer" + }, + "type": "array" }, - "hosts": { - "description": "The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or .. * based matching is not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true.", + "keks": { + "description": "The Key Exchange Key (KEK).", "items": { - "type": "string" + "$ref": "FileContentBuffer" }, "type": "array" }, - "pathMatcher": { - "description": "The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion.", - "type": "string" + "pk": { + "$ref": "FileContentBuffer", + "description": "The Platform Key (PK)." } }, "type": "object" }, - "HttpFaultAbort": { - "description": "Specification for how requests are aborted as part of fault injection.", - "id": "HttpFaultAbort", + "Instance": { + "description": "Represents an Instance resource. An instance is a virtual machine that is hosted on Google Cloud Platform. For more information, read Virtual Machine Instances.", + "id": "Instance", "properties": { - "httpStatus": { - "description": "The HTTP status code used to abort the request. The value must be from 200 to 599 inclusive. For gRPC protocol, the gRPC status code is mapped to HTTP status code according to this mapping table. HTTP status 200 is mapped to gRPC status UNKNOWN. Injecting an OK status is currently not supported by Traffic Director.", - "format": "uint32", - "type": "integer" + "advancedMachineFeatures": { + "$ref": "AdvancedMachineFeatures", + "description": "Controls for advanced machine-related behavior features." }, - "percentage": { - "description": "The percentage of traffic for connections, operations, or requests that is aborted as part of fault injection. 
The value must be from 0.0 to 100.0 inclusive.", - "format": "double", - "type": "number" - } - }, - "type": "object" - }, - "HttpFaultDelay": { - "description": "Specifies the delay introduced by the load balancer before forwarding the request to the backend service as part of fault injection.", - "id": "HttpFaultDelay", - "properties": { - "fixedDelay": { - "$ref": "Duration", - "description": "Specifies the value of the fixed delay interval." + "canIpForward": { + "description": "Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. For more information, see Enabling IP Forwarding .", + "type": "boolean" }, - "percentage": { - "description": "The percentage of traffic for connections, operations, or requests for which a delay is introduced as part of fault injection. The value must be from 0.0 to 100.0 inclusive.", - "format": "double", - "type": "number" - } - }, - "type": "object" - }, - "HttpFaultInjection": { - "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by the load balancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the load balancer for a percentage of requests.", - "id": "HttpFaultInjection", - "properties": { - "abort": { - "$ref": "HttpFaultAbort", - "description": "The specification for how client requests are aborted as part of fault injection." + "confidentialInstanceConfig": { + "$ref": "ConfidentialInstanceConfig" }, - "delay": { - "$ref": "HttpFaultDelay", - "description": "The specification for how client requests are delayed as part of fault injection, before being sent to a backend service." - } - }, - "type": "object" - }, - "HttpHeaderAction": { - "description": "The request and response header transformations that take effect before the request is passed along to the selected backendService.", - "id": "HttpHeaderAction", - "properties": { - "requestHeadersToAdd": { - "description": "Headers to add to a matching request before forwarding the request to the backendService.", - "items": { - "$ref": "HttpHeaderOption" - }, - "type": "array" + "cpuPlatform": { + "description": "[Output Only] The CPU platform used by this instance.", + "type": "string" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" }, - "requestHeadersToRemove": { - "description": "A list of header names for headers that need to be removed from the request before forwarding the request to the backendService.", - "items": { - "type": "string" - }, - "type": "array" + "deletionProtection": { + "description": "Whether the resource should be protected against deletion.", + "type": "boolean" }, - "responseHeadersToAdd": { - "description": "Headers to add the response before sending the response back to the client.", + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "disks": { + "description": "Array of disks associated with this instance. 
Persistent disks must be created before you can assign them.", "items": { - "$ref": "HttpHeaderOption" + "$ref": "AttachedDisk" }, "type": "array" }, - "responseHeadersToRemove": { - "description": "A list of header names for headers that need to be removed from the response before sending the response back to the client.", + "displayDevice": { + "$ref": "DisplayDevice", + "description": "Enables display device for the instance." + }, + "fingerprint": { + "description": "Specifies a fingerprint for this resource, which is essentially a hash of the instance's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update the instance. You must always provide an up-to-date fingerprint hash in order to update the instance. To see the latest fingerprint, make get() request to the instance.", + "format": "byte", + "type": "string" + }, + "guestAccelerators": { + "description": "A list of the type and count of accelerator cards attached to the instance.", "items": { - "type": "string" + "$ref": "AcceleratorConfig" }, "type": "array" - } - }, - "type": "object" - }, - "HttpHeaderMatch": { - "description": "matchRule criteria for request header matches.", - "id": "HttpHeaderMatch", - "properties": { - "exactMatch": { - "description": "The value should exactly match contents of exactMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + }, + "hostname": { + "description": "Specifies the hostname of the instance. The specified hostname must be RFC1035 compliant. If hostname is not specified, the default hostname is [INSTANCE_NAME].c.[PROJECT_ID].internal when using the global DNS, and [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal when using zonal DNS.", "type": "string" }, - "headerName": { - "description": "The name of the HTTP header to match. For matching against the HTTP request's authority, use a headerMatch with the header name \":authority\". For matching a request's method, use the headerName \":method\". When the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true, only non-binary user-specified custom metadata and the `content-type` header are supported. The following transport-level headers cannot be used in header matching rules: `:authority`, `:method`, `:path`, `:scheme`, `user-agent`, `accept-encoding`, `content-encoding`, `grpc-accept-encoding`, `grpc-encoding`, `grpc-previous-rpc-attempts`, `grpc-tags-bin`, `grpc-timeout` and `grpc-trace-bin`.", + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", "type": "string" }, - "invertMatch": { - "description": "If set to false, the headerMatch is considered a match if the preceding match criteria are met. If set to true, the headerMatch is considered a match if the preceding match criteria are NOT met. The default setting is false. ", - "type": "boolean" + "keyRevocationActionType": { + "description": "KeyRevocationActionType of the instance. Supported options are \"STOP\" and \"NONE\". The default value is \"NONE\" if it is not specified.", + "enum": [ + "KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED", + "NONE", + "STOP" + ], + "enumDescriptions": [ + "Default value. This value is unused.", + "Indicates user chose no operation.", + "Indicates user chose to opt for VM shutdown on key revocation." 
+ ], + "type": "string" }, - "prefixMatch": { - "description": "The value of the header must start with the contents of prefixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "kind": { + "default": "compute#instance", + "description": "[Output Only] Type of the resource. Always compute#instance for instances.", "type": "string" }, - "presentMatch": { - "description": "A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", - "type": "boolean" + "labelFingerprint": { + "description": "A fingerprint for this request, which is essentially a hash of the label's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. To see the latest fingerprint, make get() request to the instance.", + "format": "byte", + "type": "string" }, - "rangeMatch": { - "$ref": "Int64RangeMatch", - "description": "The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer, number or is empty, the match fails. For example for a range [-5, 0] - -3 will match. - 0 will not match. - 0.25 will not match. - -3someString will not match. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. rangeMatch is not supported for load balancers that have loadBalancingScheme set to EXTERNAL." + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this instance. These can be later modified by the setLabels method.", + "type": "object" }, - "regexMatch": { - "description": "The value of the header must match the regular expression specified in regexMatch. For more information about regular expression syntax, see Syntax. For matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "lastStartTimestamp": { + "description": "[Output Only] Last start timestamp in RFC3339 text format.", "type": "string" }, - "suffixMatch": { - "description": "The value of the header must end with the contents of suffixMatch. 
Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "lastStopTimestamp": { + "description": "[Output Only] Last stop timestamp in RFC3339 text format.", "type": "string" - } - }, - "type": "object" - }, - "HttpHeaderOption": { - "description": "Specification determining how headers are added to requests or responses.", - "id": "HttpHeaderOption", - "properties": { - "headerName": { - "description": "The name of the header.", + }, + "lastSuspendedTimestamp": { + "description": "[Output Only] Last suspended timestamp in RFC3339 text format.", "type": "string" }, - "headerValue": { - "description": "The value of the header to add.", + "machineType": { + "annotations": { + "required": [ + "compute.instances.insert" + ] + }, + "description": "Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/machine-type. This is provided by the client when the instance is created. For example, the following is a valid partial url to a predefined machine type: zones/us-central1-f/machineTypes/n1-standard-1 To create a custom machine type, provide a URL to a machine type in the following format, where CPUS is 1 or an even number up to 32 (2, 4, 6, ... 24, etc), and MEMORY is the total memory for this instance. Memory must be a multiple of 256 MB and must be supplied in MB (e.g. 5 GB of memory is 5120 MB): zones/zone/machineTypes/custom-CPUS-MEMORY For example: zones/us-central1-f/machineTypes/custom-4-5120 For a full list of restrictions, read the Specifications for custom machine types.", "type": "string" }, - "replace": { - "description": "If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. The default value is false. ", - "type": "boolean" - } - }, - "type": "object" - }, - "HttpHealthCheck": { - "description": "Represents a legacy HTTP Health Check resource. Legacy HTTP health checks are now only required by target pool-based network load balancers. For all other load balancers, including backend service-based network load balancers, and for managed instance group auto-healing, you must use modern (non-legacy) health checks. For more information, see Health checks overview .", - "id": "HttpHealthCheck", - "properties": { - "checkIntervalSec": { - "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", - "format": "int32", - "type": "integer" + "metadata": { + "$ref": "Metadata", + "description": "The metadata key/value pairs assigned to this instance. This includes custom metadata and predefined keys." }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "minCpuPlatform": { + "description": "Specifies a minimum CPU platform for the VM instance. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\".", "type": "string" }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "name": { + "annotations": { + "required": [ + "compute.instances.insert" + ] + }, + "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "healthyThreshold": { - "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", - "format": "int32", - "type": "integer" + "networkInterfaces": { + "description": "An array of network configurations for this instance. These specify how interfaces are configured to interact with other network services, such as connecting to the internet. Multiple interfaces are supported per instance.", + "items": { + "$ref": "NetworkInterface" + }, + "type": "array" }, - "host": { - "description": "The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.", - "type": "string" + "networkPerformanceConfig": { + "$ref": "NetworkPerformanceConfig" }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", + "params": { + "$ref": "InstanceParams", + "description": "Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload." + }, + "privateIpv6GoogleAccess": { + "description": "The private IPv6 google access type for the VM. If not specified, use INHERIT_FROM_SUBNETWORK as default.", + "enum": [ + "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE", + "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE", + "INHERIT_FROM_SUBNETWORK" + ], + "enumDescriptions": [ + "Bidirectional private IPv6 access to/from Google services. If specified, the subnetwork who is attached to the instance's default network interface will be assigned an internal IPv6 prefix if it doesn't have before.", + "Outbound private IPv6 access from VMs in this subnet to Google services. If specified, the subnetwork who is attached to the instance's default network interface will be assigned an internal IPv6 prefix if it doesn't have before.", + "Each network interface inherits PrivateIpv6GoogleAccess from its subnetwork." + ], "type": "string" }, - "kind": { - "default": "compute#httpHealthCheck", - "description": "[Output Only] Type of the resource. Always compute#httpHealthCheck for HTTP health checks.", + "reservationAffinity": { + "$ref": "ReservationAffinity", + "description": "Specifies the reservations that this instance can consume from." + }, + "resourcePolicies": { + "description": "Resource policies applied to this instance.", + "items": { + "type": "string" + }, + "type": "array" + }, + "resourceStatus": { + "$ref": "ResourceStatus", + "description": "[Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field." + }, + "satisfiesPzs": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" + }, + "scheduling": { + "$ref": "Scheduling", + "description": "Sets the scheduling options for this instance." + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "name": { - "description": "Name of the resource. Provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "serviceAccounts": { + "description": "A list of service accounts, with their specified scopes, authorized for this instance. Only one service account per VM instance is supported. Service accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. See Service Accounts for more information.", + "items": { + "$ref": "ServiceAccount" + }, + "type": "array" + }, + "shieldedInstanceConfig": { + "$ref": "ShieldedInstanceConfig" + }, + "shieldedInstanceIntegrityPolicy": { + "$ref": "ShieldedInstanceIntegrityPolicy" + }, + "sourceMachineImage": { + "description": "Source machine image", "type": "string" }, - "port": { - "description": "The TCP port number for the HTTP health check request. The default value is 80.", - "format": "int32", - "type": "integer" + "sourceMachineImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Source machine image encryption key when creating an instance from a machine image." }, - "requestPath": { - "description": "The request path of the HTTP health check request. The default value is /. This field does not support query parameters.", + "startRestricted": { + "description": "[Output Only] Whether a VM has been restricted for start because Compute Engine has detected suspicious activity.", + "type": "boolean" + }, + "status": { + "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see Instance life cycle.", + "enum": [ + "DEPROVISIONING", + "PROVISIONING", + "REPAIRING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" + ], + "enumDescriptions": [ + "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", + "All required resources have been allocated and the instance is being started.", + "The instance has stopped successfully.", + "The instance is currently stopping (either being deleted or killed).", + "The instance has suspended.", + "The instance is suspending.", + "The instance has stopped (either by explicit action or underlying failure)." + ], "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "statusMessage": { + "description": "[Output Only] An optional, human-readable explanation of the status.", "type": "string" }, - "timeoutSec": { - "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec.", - "format": "int32", - "type": "integer" + "tags": { + "$ref": "Tags", + "description": "Tags to apply to this instance. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during instance creation. 
The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035. Multiple tags can be specified via the 'tags.items' field." }, - "unhealthyThreshold": { - "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", - "format": "int32", - "type": "integer" + "zone": { + "description": "[Output Only] URL of the zone where the instance resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" } }, "type": "object" }, - "HttpHealthCheckList": { - "description": "Contains a list of HttpHealthCheck resources.", - "id": "HttpHealthCheckList", + "InstanceAggregatedList": { + "id": "InstanceAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of HttpHealthCheck resources.", - "items": { - "$ref": "HttpHealthCheck" + "additionalProperties": { + "$ref": "InstancesScopedList", + "description": "[Output Only] Name of the scope containing this set of instances." }, - "type": "array" + "description": "An object that contains a list of instances scoped by zone.", + "type": "object" }, "kind": { - "default": "compute#httpHealthCheckList", - "description": "Type of resource.", + "default": "compute#instanceAggregatedList", + "description": "[Output Only] Type of resource. Always compute#instanceAggregatedList for aggregated lists of Instance resources.", "type": "string" }, "nextPageToken": { @@ -37397,6 +42782,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -37411,10 +42803,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -37438,10 +42832,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -37485,306 +42881,263 @@ }, "type": "object" }, - "HttpQueryParameterMatch": { - "description": "HttpRouteRuleMatch criteria for a request's query parameter.", - "id": "HttpQueryParameterMatch", + "InstanceConsumptionData": { + "id": "InstanceConsumptionData", "properties": { - "exactMatch": { - "description": "The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch. Only one of presentMatch, exactMatch, or regexMatch must be set. ", - "type": "string" - }, - "name": { - "description": "The name of the query parameter to match. The query parameter must exist in the request, in the absence of which the request match fails.", - "type": "string" - }, - "presentMatch": { - "description": "Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not. Only one of presentMatch, exactMatch, or regexMatch must be set. ", - "type": "boolean" + "consumptionInfo": { + "$ref": "InstanceConsumptionInfo", + "description": "Resources consumed by the instance." }, - "regexMatch": { - "description": "The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For more information about regular expression syntax, see Syntax. Only one of presentMatch, exactMatch, or regexMatch must be set. regexMatch only applies when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED. ", + "instance": { + "description": "Server-defined URL for the instance.", "type": "string" } }, "type": "object" }, - "HttpRedirectAction": { - "description": "Specifies settings for an HTTP redirect.", - "id": "HttpRedirectAction", + "InstanceConsumptionInfo": { + "id": "InstanceConsumptionInfo", "properties": { - "hostRedirect": { - "description": "The host that is used in the redirect response instead of the one that was supplied in the request. The value must be from 1 to 255 characters.", - "type": "string" - }, - "httpsRedirect": { - "description": "If set to true, the URL scheme in the redirected request is set to HTTPS. If set to false, the URL scheme of the redirected request remains the same as that of the request. This must only be set for URL maps used in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. The default is set to false.", - "type": "boolean" - }, - "pathRedirect": { - "description": "The path that is used in the redirect response instead of the one that was supplied in the request. pathRedirect cannot be supplied together with prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. 
The value must be from 1 to 1024 characters.", - "type": "string" - }, - "prefixRedirect": { - "description": "The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request. prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters.", - "type": "string" - }, - "redirectResponseCode": { - "description": "The HTTP Status code to use for this RedirectAction. Supported values are: - MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - FOUND, which corresponds to 302. - SEE_OTHER which corresponds to 303. - TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method is retained. - PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method is retained. ", - "enum": [ - "FOUND", - "MOVED_PERMANENTLY_DEFAULT", - "PERMANENT_REDIRECT", - "SEE_OTHER", - "TEMPORARY_REDIRECT" - ], - "enumDescriptions": [ - "Http Status Code 302 - Found.", - "Http Status Code 301 - Moved Permanently.", - "Http Status Code 308 - Permanent Redirect maintaining HTTP method.", - "Http Status Code 303 - See Other.", - "Http Status Code 307 - Temporary Redirect maintaining HTTP method." - ], - "type": "string" + "guestCpus": { + "description": "The number of virtual CPUs that are available to the instance.", + "format": "int32", + "type": "integer" }, - "stripQuery": { - "description": "If set to true, any accompanying query portion of the original URL is removed before redirecting the request. If set to false, the query portion of the original URL is retained. The default is set to false. ", - "type": "boolean" - } - }, - "type": "object" - }, - "HttpRetryPolicy": { - "description": "The retry policy associates with HttpRouteRule", - "id": "HttpRetryPolicy", - "properties": { - "numRetries": { - "description": "Specifies the allowed number retries. This number must be \u003e 0. If not specified, defaults to 1.", - "format": "uint32", + "localSsdGb": { + "description": "The amount of local SSD storage available to the instance, defined in GiB.", + "format": "int32", "type": "integer" }, - "perTryTimeout": { - "$ref": "Duration", - "description": "Specifies a non-zero timeout per retry attempt. If not specified, will use the timeout set in the HttpRouteAction field. If timeout in the HttpRouteAction field is not set, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." + "memoryMb": { + "description": "The amount of physical memory available to the instance, defined in MiB.", + "format": "int32", + "type": "integer" }, - "retryConditions": { - "description": "Specifies one or more conditions when this retry policy applies. Valid values are: - 5xx: retry is attempted if the instance or endpoint responds with any 5xx response code, or if the instance or endpoint does not respond at all. For example, disconnects, reset, read timeout, connection failure, and refused streams. - gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. - connect-failure: a retry is attempted on failures connecting to the instance or endpoint. For example, connection timeouts. 
- retriable-4xx: a retry is attempted if the instance or endpoint responds with a 4xx response code. The only error that you can retry is error code 409. - refused-stream: a retry is attempted if the instance or endpoint resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. - cancelled: a retry is attempted if the gRPC status code in the response header is set to cancelled. - deadline-exceeded: a retry is attempted if the gRPC status code in the response header is set to deadline-exceeded. - internal: a retry is attempted if the gRPC status code in the response header is set to internal. - resource-exhausted: a retry is attempted if the gRPC status code in the response header is set to resource-exhausted. - unavailable: a retry is attempted if the gRPC status code in the response header is set to unavailable. Only the following codes are supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true. - cancelled - deadline-exceeded - internal - resource-exhausted - unavailable ", - "items": { - "type": "string" - }, - "type": "array" + "minNodeCpus": { + "description": "The minimal guaranteed number of virtual CPUs that are reserved.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "HttpRouteAction": { - "id": "HttpRouteAction", + "InstanceGroup": { + "description": "Represents an Instance Group resource. Instance Groups can be used to configure a target for load balancing. Instance groups can either be managed or unmanaged. To create managed instance groups, use the instanceGroupManager or regionInstanceGroupManager resource instead. Use zonal unmanaged instance groups if you need to apply load balancing to groups of heterogeneous instances or if you need to manage the instances yourself. You cannot create regional unmanaged instance groups. For more information, read Instance groups.", + "id": "InstanceGroup", "properties": { - "corsPolicy": { - "$ref": "CorsPolicy", - "description": "The specification for allowing client-side cross-origin requests. For more information about the W3C recommendation for cross-origin resource sharing (CORS), see Fetch API Living Standard. Not supported when the URL map is bound to a target gRPC proxy." - }, - "faultInjectionPolicy": { - "$ref": "HttpFaultInjection", - "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by a load balancer on a percentage of requests before sending those requests to the backend service. Similarly requests from clients can be aborted by the load balancer for a percentage of requests. For the requests impacted by fault injection, timeout and retry_policy is ignored by clients that are configured with a fault_injection_policy." - }, - "maxStreamDuration": { - "$ref": "Duration", - "description": "Specifies the maximum duration (timeout) for streams on the selected route. Unlike the timeout field where the timeout duration starts from the time the request has been fully processed (known as *end-of-stream*), the duration in this field is computed from the beginning of the stream until the response has been processed, including all retries. A stream that does not complete in this duration is closed. If not specified, this field uses the maximum maxStreamDuration value among all backend services associated with the route. 
This field is only allowed if the Url map is used with backend services with loadBalancingScheme set to INTERNAL_SELF_MANAGED." - }, - "requestMirrorPolicy": { - "$ref": "RequestMirrorPolicy", - "description": "Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. The load balancer does not wait for responses from the shadow service. Before sending traffic to the shadow service, the host / authority header is suffixed with -shadow. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." - }, - "retryPolicy": { - "$ref": "HttpRetryPolicy", - "description": "Specifies the retry policy associated with this route." - }, - "timeout": { - "$ref": "Duration", - "description": "Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (known as *end-of-stream*) up until the response has been processed. Timeout includes all retries. If not specified, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true." - }, - "urlRewrite": { - "$ref": "UrlRewrite", - "description": "The spec to modify the URL of the request, before forwarding the request to the matched service. urlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." + "creationTimestamp": { + "description": "[Output Only] The creation timestamp for this instance group in RFC3339 text format.", + "type": "string" }, - "weightedBackendServices": { - "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number. After a backend service is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", - "items": { - "$ref": "WeightedBackendService" - }, - "type": "array" - } - }, - "type": "object" - }, - "HttpRouteRule": { - "description": "The HttpRouteRule setting specifies how to match an HTTP request and the corresponding routing action that load balancing proxies perform.", - "id": "HttpRouteRule", - "properties": { "description": { - "description": "The short description conveying the intent of this routeRule. The description can have a maximum length of 1024 characters.", + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "headerAction": { - "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService. The headerAction value specified here is applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. 
Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true." - }, - "matchRules": { - "description": "The list of criteria for matching attributes of a request to this routeRule. This list has OR semantics: the request matches this routeRule when any of the matchRules are satisfied. However predicates within a given matchRule have AND semantics. All predicates within a matchRule must match for the request to match the rule.", - "items": { - "$ref": "HttpRouteRuleMatch" - }, - "type": "array" - }, - "priority": { - "description": "For routeRules within a given pathMatcher, priority determines the order in which a load balancer interprets routeRules. RouteRules are evaluated in order of priority, from the lowest to highest number. The priority of a rule decreases as its number increases (1, 2, 3, N+1). The first rule that matches the request is applied. You cannot configure two or more routeRules with the same priority. Priority for each rule must be set to a number from 0 to 2147483647 inclusive. Priority numbers can have gaps, which enable you to add or remove rules in the future without affecting the rest of the rules. For example, 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the future without any impact on existing rules.", - "format": "int32", - "type": "integer" - }, - "routeAction": { - "$ref": "HttpRouteAction", - "description": "In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a route rule's routeAction." - }, - "service": { - "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set.", + "fingerprint": { + "description": "[Output Only] The fingerprint of the named ports. The system uses this fingerprint to detect conflicts when multiple users change the named ports concurrently.", + "format": "byte", "type": "string" }, - "urlRedirect": { - "$ref": "HttpRedirectAction", - "description": "When this rule is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy." - } - }, - "type": "object" - }, - "HttpRouteRuleMatch": { - "description": "HttpRouteRuleMatch specifies a set of criteria for matching requests to an HttpRouteRule. 
All specified criteria must be satisfied for a match to occur.", - "id": "HttpRouteRuleMatch", - "properties": { - "fullPathMatch": { - "description": "For satisfying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL. fullPathMatch must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "id": { + "description": "[Output Only] A unique identifier for this instance group, generated by the server.", + "format": "uint64", "type": "string" }, - "headerMatches": { - "description": "Specifies a list of header match criteria, all of which must match corresponding headers in the request.", - "items": { - "$ref": "HttpHeaderMatch" - }, - "type": "array" - }, - "ignoreCase": { - "description": "Specifies that prefixMatch and fullPathMatch matches are case sensitive. The default value is false. ignoreCase must not be used with regexMatch. Not supported when the URL map is bound to a target gRPC proxy.", - "type": "boolean" + "kind": { + "default": "compute#instanceGroup", + "description": "[Output Only] The resource type, which is always compute#instanceGroup for instance groups.", + "type": "string" }, - "metadataFilters": { - "description": "Opaque filter criteria used by the load balancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to the load balancer, xDS clients present node metadata. When there is a match, the relevant routing configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. If multiple metadata filters are specified, all of them need to be satisfied in order to be considered a match. metadataFilters specified here is applied after those specified in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to. metadataFilters only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true.", - "items": { - "$ref": "MetadataFilter" + "name": { + "annotations": { + "required": [ + "compute.instanceGroups.insert" + ] }, - "type": "array" - }, - "prefixMatch": { - "description": "For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /. The value must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "description": "The name of the instance group. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" - }, - "queryParameterMatches": { - "description": "Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request. Not supported when the URL map is bound to a target gRPC proxy.", + }, + "namedPorts": { + "description": " Assigns a name to a port number. For example: {name: \"http\", port: 80} This allows the system to reference ports by the assigned name instead of a port number. Named ports can also contain multiple ports. 
For example: [{name: \"app1\", port: 8080}, {name: \"app1\", port: 8081}, {name: \"app2\", port: 8082}] Named ports apply to all instances in this instance group. ", "items": { - "$ref": "HttpQueryParameterMatch" + "$ref": "NamedPort" }, "type": "array" }, - "regexMatch": { - "description": "For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For more information about regular expression syntax, see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "network": { + "description": "[Output Only] The URL of the network to which all instances in the instance group belong. If your instance has multiple network interfaces, then the network and subnetwork fields only refer to the network and subnet used by your primary interface (nic0).", "type": "string" - } - }, - "type": "object" - }, - "HttpsHealthCheck": { - "description": "Represents a legacy HTTPS Health Check resource. Legacy HTTPS health checks have been deprecated. If you are using a target pool-based network load balancer, you must use a legacy HTTP (not HTTPS) health check. For all other load balancers, including backend service-based network load balancers, and for managed instance group auto-healing, you must use modern (non-legacy) health checks. For more information, see Health checks overview .", - "id": "HttpsHealthCheck", - "properties": { - "checkIntervalSec": { - "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", - "format": "int32", - "type": "integer" }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "region": { + "description": "[Output Only] The URL of the region where the instance group is located (for regional resources).", "type": "string" }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "selfLink": { + "description": "[Output Only] The URL for this instance group. The server generates this URL.", "type": "string" }, - "healthyThreshold": { - "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", + "size": { + "description": "[Output Only] The total number of instances in the instance group.", "format": "int32", "type": "integer" }, - "host": { - "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.", + "subnetwork": { + "description": "[Output Only] The URL of the subnetwork to which all instances in the instance group belong. If your instance has multiple network interfaces, then the network and subnetwork fields only refer to the network and subnet used by your primary interface (nic0).", "type": "string" }, + "zone": { + "description": "[Output Only] The URL of the zone where the instance group is located (for zonal resources).", + "type": "string" + } + }, + "type": "object" + }, + "InstanceGroupAggregatedList": { + "id": "InstanceGroupAggregatedList", + "properties": { "id": { - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", - "format": "uint64", + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, - "kind": { - "default": "compute#httpsHealthCheck", - "description": "Type of the resource.", - "type": "string" + "items": { + "additionalProperties": { + "$ref": "InstanceGroupsScopedList", + "description": "The name of the scope that contains this set of instance groups." + }, + "description": "A list of InstanceGroupsScopedList resources.", + "type": "object" }, - "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "kind": { + "default": "compute#instanceGroupAggregatedList", + "description": "[Output Only] The resource type, which is always compute#instanceGroupAggregatedList for aggregated lists of instance groups.", "type": "string" }, - "port": { - "description": "The TCP port number for the HTTPS health check request. The default value is 443.", - "format": "int32", - "type": "integer" - }, - "requestPath": { - "description": "The request path of the HTTPS health check request. The default value is \"/\".", + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "timeoutSec": { - "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have a greater value than checkIntervalSec.", - "format": "int32", - "type": "integer" + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" }, - "unhealthyThreshold": { - "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", - "format": "int32", - "type": "integer" + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "HttpsHealthCheckList": { - "description": "Contains a list of HttpsHealthCheck resources.", - "id": "HttpsHealthCheckList", + "InstanceGroupList": { + "description": "A list of InstanceGroup resources.", + "id": "InstanceGroupList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of HttpsHealthCheck resources.", + "description": "A list of InstanceGroup resources.", "items": { - "$ref": "HttpsHealthCheck" + "$ref": "InstanceGroup" }, "type": "array" }, "kind": { - "default": "compute#httpsHealthCheckList", - "description": "Type of resource.", + "default": "compute#instanceGroupList", + "description": "[Output Only] The resource type, which is always compute#instanceGroupList for instance group lists.", "type": "string" }, "nextPageToken": { @@ -37809,10 +43162,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -37836,10 +43191,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -37883,232 +43240,379 @@ }, "type": "object" }, - "Image": { - "description": "Represents an Image resource. You can use images to create boot disks for your VM instances. For more information, read Images.", - "id": "Image", + "InstanceGroupManager": { + "description": "Represents a Managed Instance Group resource. An instance group is a collection of VM instances that you can manage as a single entity. For more information, read Instance groups. For zonal Managed Instance Group, use the instanceGroupManagers resource. 
For regional Managed Instance Group, use the regionInstanceGroupManagers resource.", + "id": "InstanceGroupManager", "properties": { - "archiveSizeBytes": { - "description": "Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).", - "format": "int64", + "autoHealingPolicies": { + "description": "The autohealing policy for this managed instance group. You can specify only one value.", + "items": { + "$ref": "InstanceGroupManagerAutoHealingPolicy" + }, + "type": "array" + }, + "baseInstanceName": { + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert" + ] + }, + "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. The base instance name must comply with RFC1035.", + "pattern": "[a-z][-a-z0-9]{0,57}", "type": "string" }, "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "description": "[Output Only] The creation timestamp for this managed instance group in RFC3339 text format.", "type": "string" }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "The deprecation status associated with this image." + "currentActions": { + "$ref": "InstanceGroupManagerActionsSummary", + "description": "[Output Only] The list of instance actions and the number of instances in this managed instance group that are scheduled for each of those actions." }, "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "description": "An optional description of this resource.", "type": "string" }, - "diskSizeGb": { - "description": "Size of the image when restored onto a persistent disk (in GB).", - "format": "int64", - "type": "string" + "distributionPolicy": { + "$ref": "DistributionPolicy", + "description": "Policy specifying the intended distribution of managed instances across zones in a regional managed instance group." }, - "family": { - "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035.", + "fingerprint": { + "description": "Fingerprint of this resource. This field may be used in optimistic locking. It will be ignored when inserting an InstanceGroupManager. An up-to-date fingerprint must be provided in order to update the InstanceGroupManager, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an InstanceGroupManager.", + "format": "byte", "type": "string" }, - "guestOsFeatures": { - "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. To see a list of available options, see the guestOSfeatures[].type parameter.", - "items": { - "$ref": "GuestOsFeature" - }, - "type": "array" - }, "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier.", "format": "uint64", "type": "string" }, - "imageEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Encrypts the image using a customer-supplied encryption key. 
After you encrypt an image with a customer-supplied key, you must provide the same key if you use the image later (e.g. to create a disk from the image). Customer-supplied encryption keys do not protect access to metadata of the disk. If you do not provide an encryption key when creating the image, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the image later." + "instanceGroup": { + "description": "[Output Only] The URL of the Instance Group resource.", + "type": "string" + }, + "instanceTemplate": { + "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group. The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE.", + "type": "string" }, "kind": { - "default": "compute#image", - "description": "[Output Only] Type of the resource. Always compute#image for images.", + "default": "compute#instanceGroupManager", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManager for managed instance groups.", "type": "string" }, - "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this image, which is essentially a hash of the labels used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an image.", - "format": "byte", + "listManagedInstancesResults": { + "description": "Pagination behavior of the listManagedInstances API method for this managed instance group.", + "enum": [ + "PAGELESS", + "PAGINATED" + ], + "enumDescriptions": [ + "(Default) Pagination is disabled for the group's listManagedInstances API method. maxResults and pageToken query parameters are ignored and all instances are returned in a single response.", + "Pagination is enabled for the group's listManagedInstances API method. maxResults and pageToken query parameters are respected." + ], "type": "string" }, - "labels": { - "additionalProperties": { - "type": "string" + "name": { + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert", + "compute.regionInstanceGroupManagers.insert" + ] }, - "description": "Labels to apply to this image. These can be later modified by the setLabels method.", - "type": "object" + "description": "The name of the managed instance group. 
The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" }, - "licenseCodes": { - "description": "Integer license codes indicating which licenses are attached to this image.", + "namedPorts": { + "description": "Named ports configured for the Instance Groups complementary to this Instance Group Manager.", "items": { - "format": "int64", - "type": "string" + "$ref": "NamedPort" }, "type": "array" }, - "licenses": { - "description": "Any applicable license URI.", + "region": { + "description": "[Output Only] The URL of the region where the managed instance group resides (for regional resources).", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] The URL for this managed instance group. The server defines this URL.", + "type": "string" + }, + "statefulPolicy": { + "$ref": "StatefulPolicy", + "description": "Stateful configuration for this Instanced Group Manager" + }, + "status": { + "$ref": "InstanceGroupManagerStatus", + "description": "[Output Only] The status of this managed instance group." + }, + "targetPools": { + "description": "The URLs for all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.", "items": { "type": "string" }, "type": "array" }, - "name": { + "targetSize": { "annotations": { "required": [ - "compute.images.insert" + "compute.instanceGroupManagers.insert", + "compute.regionInstanceGroupManagers.insert" ] }, - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" + "description": "The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number.", + "format": "int32", + "type": "integer" }, - "rawDisk": { - "description": "The parameters of the raw disk image.", - "properties": { - "containerType": { - "description": "The format used to encode and transmit the block device, which should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created.", - "enum": [ - "TAR" - ], - "enumDescriptions": [ - "" - ], - "type": "string" - }, - "sha1Checksum": { - "description": "[Deprecated] This field is deprecated. An optional SHA1 checksum of the disk image before unpackaging provided by the client when the disk image is created.", - "pattern": "[a-f0-9]{40}", - "type": "string" - }, - "source": { - "description": "The full Google Cloud Storage URL where the raw disk image archive is stored. 
The following are valid formats for the URL: - https://storage.googleapis.com/bucket_name/image_archive_name - https://storage.googleapis.com/bucket_name/folder_name/ image_archive_name In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL ", - "type": "string" - } - }, - "type": "object" + "updatePolicy": { + "$ref": "InstanceGroupManagerUpdatePolicy", + "description": "The update policy for this managed instance group." }, - "satisfiesPzs": { - "description": "[Output Only] Reserved for future use.", - "type": "boolean" + "versions": { + "description": "Specifies the instance templates used by this managed instance group to create instances. Each version is defined by an instanceTemplate and a name. Every version can appear at most once per instance group. This field overrides the top-level instanceTemplate field. Read more about the relationships between these fields. Exactly one version must leave the targetSize field unset. That version will be applied to all remaining instances. For more information, read about canary updates.", + "items": { + "$ref": "InstanceGroupManagerVersion" + }, + "type": "array" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "zone": { + "description": "[Output Only] The URL of a zone where the managed instance group is located (for zonal resources).", "type": "string" + } + }, + "type": "object" + }, + "InstanceGroupManagerActionsSummary": { + "id": "InstanceGroupManagerActionsSummary", + "properties": { + "abandoning": { + "description": "[Output Only] The total number of instances in the managed instance group that are scheduled to be abandoned. Abandoning an instance removes it from the managed instance group without deleting it.", + "format": "int32", + "type": "integer" }, - "shieldedInstanceInitialState": { - "$ref": "InitialStateConfig", - "description": "Set the secure boot keys of shielded instance." + "creating": { + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be created or are currently being created. If the group fails to create any of these instances, it tries again until it creates the instance successfully. If you have disabled creation retries, this field will not be populated; instead, the creatingWithoutRetries field will be populated.", + "format": "int32", + "type": "integer" }, - "sourceDisk": { - "description": "URL of the source disk used to create this image. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL ", - "type": "string" + "creatingWithoutRetries": { + "description": "[Output Only] The number of instances that the managed instance group will attempt to create. The group attempts to create each instance only once. If the group fails to create any of these instances, it decreases the group's targetSize value accordingly.", + "format": "int32", + "type": "integer" }, - "sourceDiskEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source disk. 
Required if the source disk is protected by a customer-supplied encryption key." + "deleting": { + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be deleted or are currently being deleted.", + "format": "int32", + "type": "integer" }, - "sourceDiskId": { - "description": "[Output Only] The ID value of the disk used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given disk name.", - "type": "string" + "none": { + "description": "[Output Only] The number of instances in the managed instance group that are running and have no scheduled actions.", + "format": "int32", + "type": "integer" }, - "sourceImage": { - "description": "URL of the source image used to create this image. The following are valid formats for the URL: - https://www.googleapis.com/compute/v1/projects/project_id/global/ images/image_name - projects/project_id/global/images/image_name In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL ", - "type": "string" + "recreating": { + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be recreated or are currently being being recreated. Recreating an instance deletes the existing root persistent disk and creates a new disk from the image that is defined in the instance template.", + "format": "int32", + "type": "integer" }, - "sourceImageEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key." + "refreshing": { + "description": "[Output Only] The number of instances in the managed instance group that are being reconfigured with properties that do not require a restart or a recreate action. For example, setting or removing target pools for the instance.", + "format": "int32", + "type": "integer" }, - "sourceImageId": { - "description": "[Output Only] The ID value of the image used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given image name.", - "type": "string" + "restarting": { + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be restarted or are currently being restarted.", + "format": "int32", + "type": "integer" }, - "sourceSnapshot": { - "description": "URL of the source snapshot used to create this image. 
The following are valid formats for the URL: - https://www.googleapis.com/compute/v1/projects/project_id/global/ snapshots/snapshot_name - projects/project_id/global/snapshots/snapshot_name In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL ", + "resuming": { + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be resumed or are currently being resumed.", + "format": "int32", + "type": "integer" + }, + "starting": { + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be started or are currently being started.", + "format": "int32", + "type": "integer" + }, + "stopping": { + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be stopped or are currently being stopped.", + "format": "int32", + "type": "integer" + }, + "suspending": { + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be suspended or are currently being suspended.", + "format": "int32", + "type": "integer" + }, + "verifying": { + "description": "[Output Only] The number of instances in the managed instance group that are being verified. See the managedInstances[].currentAction property in the listManagedInstances method documentation.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "InstanceGroupManagerAggregatedList": { + "id": "InstanceGroupManagerAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, - "sourceSnapshotEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key." + "items": { + "additionalProperties": { + "$ref": "InstanceGroupManagersScopedList", + "description": "[Output Only] The name of the scope that contains this set of managed instance groups." + }, + "description": "A list of InstanceGroupManagersScopedList resources.", + "type": "object" }, - "sourceSnapshotId": { - "description": "[Output Only] The ID value of the snapshot used to create this image. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given snapshot name.", + "kind": { + "default": "compute#instanceGroupManagerAggregatedList", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerAggregatedList for an aggregated list of managed instance groups.", "type": "string" }, - "sourceType": { - "default": "RAW", - "description": "The type of the image used to create this disk. The default and only value is RAW", - "enum": [ - "RAW" - ], - "enumDescriptions": [ - "" - ], + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, - "status": { - "description": "[Output Only] The status of the image. 
An image can be used to create other resources, such as instances, only after the image has been successfully created and the status is set to READY. Possible values are FAILED, PENDING, or READY.", - "enum": [ - "DELETING", - "FAILED", - "PENDING", - "READY" - ], - "enumDescriptions": [ - "Image is deleting.", - "Image creation failed due to an error.", - "Image hasn't been created as yet.", - "Image has been successfully created." - ], + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "storageLocations": { - "description": "Cloud Storage bucket storage location of the image (regional or multi-regional).", + "unreachables": { + "description": "[Output Only] Unreachable resources.", "items": { "type": "string" }, - "type": "array" + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. 
We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "ImageFamilyView": { - "id": "ImageFamilyView", + "InstanceGroupManagerAutoHealingPolicy": { + "id": "InstanceGroupManagerAutoHealingPolicy", "properties": { - "image": { - "$ref": "Image", - "description": "The latest image that is part of the specified image family in the requested location, and that is not deprecated." + "healthCheck": { + "description": "The URL for the health check that signals autohealing.", + "type": "string" + }, + "initialDelaySec": { + "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. 
This value must be from range [0, 3600].", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "ImageList": { - "description": "Contains a list of images.", - "id": "ImageList", + "InstanceGroupManagerList": { + "description": "[Output Only] A list of managed instance groups.", + "id": "InstanceGroupManagerList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Image resources.", + "description": "A list of InstanceGroupManager resources.", "items": { - "$ref": "Image" + "$ref": "InstanceGroupManager" }, "type": "array" }, "kind": { - "default": "compute#imageList", - "description": "Type of resource.", + "default": "compute#instanceGroupManagerList", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups.", "type": "string" }, "nextPageToken": { @@ -38133,10 +43637,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -38160,10 +43666,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -38207,307 +43715,318 @@ }, "type": "object" }, - "InitialStateConfig": { - "description": "Initial State for shielded instance, these are public keys which are safe to store in public", - "id": "InitialStateConfig", + "InstanceGroupManagerStatus": { + "id": "InstanceGroupManagerStatus", "properties": { - "dbs": { - "description": "The Key Database (db).", - "items": { - "$ref": "FileContentBuffer" - }, - "type": "array" + "autoscaler": { + "description": "[Output Only] The URL of the Autoscaler that targets this instance group manager.", + "type": "string" }, - "dbxs": { - "description": "The forbidden key database (dbx).", - "items": { - "$ref": "FileContentBuffer" - }, - "type": "array" + "isStable": { + "description": "[Output Only] A bit indicating whether the managed instance group is in a stable state. 
A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.", + "type": "boolean" }, - "keks": { - "description": "The Key Exchange Key (KEK).", - "items": { - "$ref": "FileContentBuffer" - }, - "type": "array" + "stateful": { + "$ref": "InstanceGroupManagerStatusStateful", + "description": "[Output Only] Stateful status of the given Instance Group Manager." }, - "pk": { - "$ref": "FileContentBuffer", - "description": "The Platform Key (PK)." + "versionTarget": { + "$ref": "InstanceGroupManagerStatusVersionTarget", + "description": "[Output Only] A status of consistency of Instances' versions with their target version specified by version field on Instance Group Manager." } }, "type": "object" }, - "Instance": { - "description": "Represents an Instance resource. An instance is a virtual machine that is hosted on Google Cloud Platform. For more information, read Virtual Machine Instances.", - "id": "Instance", + "InstanceGroupManagerStatusStateful": { + "id": "InstanceGroupManagerStatusStateful", "properties": { - "advancedMachineFeatures": { - "$ref": "AdvancedMachineFeatures", - "description": "Controls for advanced machine-related behavior features." - }, - "canIpForward": { - "description": "Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. For more information, see Enabling IP Forwarding .", + "hasStatefulConfig": { + "description": "[Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful configuration even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions.", "type": "boolean" }, - "confidentialInstanceConfig": { - "$ref": "ConfidentialInstanceConfig" - }, - "cpuPlatform": { - "description": "[Output Only] The CPU platform used by this instance.", - "type": "string" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "deletionProtection": { - "description": "Whether the resource should be protected against deletion.", + "perInstanceConfigs": { + "$ref": "InstanceGroupManagerStatusStatefulPerInstanceConfigs", + "description": "[Output Only] Status of per-instance configurations on the instance." + } + }, + "type": "object" + }, + "InstanceGroupManagerStatusStatefulPerInstanceConfigs": { + "id": "InstanceGroupManagerStatusStatefulPerInstanceConfigs", + "properties": { + "allEffective": { + "description": "A bit indicating if all of the group's per-instance configurations (listed in the output of a listPerInstanceConfigs API call) have status EFFECTIVE or there are no per-instance-configs.", "type": "boolean" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "disks": { - "description": "Array of disks associated with this instance. 
Persistent disks must be created before you can assign them.", - "items": { - "$ref": "AttachedDisk" - }, - "type": "array" - }, - "displayDevice": { - "$ref": "DisplayDevice", - "description": "Enables display device for the instance." - }, - "fingerprint": { - "description": "Specifies a fingerprint for this resource, which is essentially a hash of the instance's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update the instance. You must always provide an up-to-date fingerprint hash in order to update the instance. To see the latest fingerprint, make get() request to the instance.", - "format": "byte", - "type": "string" - }, - "guestAccelerators": { - "description": "A list of the type and count of accelerator cards attached to the instance.", - "items": { - "$ref": "AcceleratorConfig" - }, - "type": "array" - }, - "hostname": { - "description": "Specifies the hostname of the instance. The specified hostname must be RFC1035 compliant. If hostname is not specified, the default hostname is [INSTANCE_NAME].c.[PROJECT_ID].internal when using the global DNS, and [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal when using zonal DNS.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#instance", - "description": "[Output Only] Type of the resource. Always compute#instance for instances.", - "type": "string" - }, - "labelFingerprint": { - "description": "A fingerprint for this request, which is essentially a hash of the label's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. To see the latest fingerprint, make get() request to the instance.", - "format": "byte", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to apply to this instance. These can be later modified by the setLabels method.", - "type": "object" - }, - "lastStartTimestamp": { - "description": "[Output Only] Last start timestamp in RFC3339 text format.", + } + }, + "type": "object" + }, + "InstanceGroupManagerStatusVersionTarget": { + "id": "InstanceGroupManagerStatusVersionTarget", + "properties": { + "isReached": { + "description": "[Output Only] A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances' target version are specified by version field on Instance Group Manager.", + "type": "boolean" + } + }, + "type": "object" + }, + "InstanceGroupManagerUpdatePolicy": { + "id": "InstanceGroupManagerUpdatePolicy", + "properties": { + "instanceRedistributionType": { + "description": "The instance redistribution policy for regional managed instance groups. Valid values are: - PROACTIVE (default): The group attempts to maintain an even distribution of VM instances across zones in the region. - NONE: For non-autoscaled groups, proactive redistribution is disabled. ", + "enum": [ + "NONE", + "PROACTIVE" + ], + "enumDescriptions": [ + "No action is being proactively performed in order to bring this IGM to its target instance distribution.", + "This IGM will actively converge to its target instance distribution." 
+ ], "type": "string" }, - "lastStopTimestamp": { - "description": "[Output Only] Last stop timestamp in RFC3339 text format.", - "type": "string" + "maxSurge": { + "$ref": "FixedOrPercent", + "description": "The maximum number of instances that can be created above the specified targetSize during the update process. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxSurge is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge." }, - "lastSuspendedTimestamp": { - "description": "[Output Only] Last suspended timestamp in RFC3339 text format.", - "type": "string" + "maxUnavailable": { + "$ref": "FixedOrPercent", + "description": "The maximum number of instances that can be unavailable during the update process. An instance is considered available if all of the following conditions are satisfied: - The instance's status is RUNNING. - If there is a health check on the instance group, the instance's health check status must be HEALTHY at least once. If there is no health check on the group, then the instance only needs to have a status of RUNNING to be considered available. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxUnavailable is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxUnavailable." }, - "machineType": { - "annotations": { - "required": [ - "compute.instances.insert" - ] - }, - "description": "Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/machine-type. This is provided by the client when the instance is created. For example, the following is a valid partial url to a predefined machine type: zones/us-central1-f/machineTypes/n1-standard-1 To create a custom machine type, provide a URL to a machine type in the following format, where CPUS is 1 or an even number up to 32 (2, 4, 6, ... 24, etc), and MEMORY is the total memory for this instance. Memory must be a multiple of 256 MB and must be supplied in MB (e.g. 5 GB of memory is 5120 MB): zones/zone/machineTypes/custom-CPUS-MEMORY For example: zones/us-central1-f/machineTypes/custom-4-5120 For a full list of restrictions, read the Specifications for custom machine types.", + "minimalAction": { + "description": "Minimal action to be taken on an instance. Use this option to minimize disruption as much as possible or to apply a more disruptive action than is necessary. - To limit disruption as much as possible, set the minimal action to REFRESH. If your update requires a more disruptive action, Compute Engine performs the necessary action to execute the update. - To apply a more disruptive action than is strictly necessary, set the minimal action to RESTART or REPLACE. For example, Compute Engine does not need to restart a VM to change its metadata. But if your application reads instance metadata only when a VM is restarted, you can set the minimal action to RESTART in order to pick up metadata changes. 
", + "enum": [ + "NONE", + "REFRESH", + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "Do not perform any action.", + "Updates applied in runtime, instances will not be disrupted.", + "Old instances will be deleted. New instances will be created from the target template.", + "Every instance will be restarted." + ], "type": "string" }, - "metadata": { - "$ref": "Metadata", - "description": "The metadata key/value pairs assigned to this instance. This includes custom metadata and predefined keys." - }, - "minCpuPlatform": { - "description": "Specifies a minimum CPU platform for the VM instance. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\".", + "mostDisruptiveAllowedAction": { + "description": "Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. If the Updater determines that the minimal update action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all.", + "enum": [ + "NONE", + "REFRESH", + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "Do not perform any action.", + "Updates applied in runtime, instances will not be disrupted.", + "Old instances will be deleted. New instances will be created from the target template.", + "Every instance will be restarted." + ], "type": "string" }, - "name": { - "annotations": { - "required": [ - "compute.instances.insert" - ] - }, - "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "replacementMethod": { + "description": "What action should be used to replace instances. See minimal_action.REPLACE", + "enum": [ + "RECREATE", + "SUBSTITUTE" + ], + "enumDescriptions": [ + "Instances will be recreated (with the same name)", + "Default option: instances will be deleted and created (with a new name)" + ], "type": "string" }, - "networkInterfaces": { - "description": "An array of network configurations for this instance. These specify how interfaces are configured to interact with other network services, such as connecting to the internet. Multiple interfaces are supported per instance.", - "items": { - "$ref": "NetworkInterface" - }, - "type": "array" - }, - "networkPerformanceConfig": { - "$ref": "NetworkPerformanceConfig" - }, - "privateIpv6GoogleAccess": { - "description": "The private IPv6 google access type for the VM. If not specified, use INHERIT_FROM_SUBNETWORK as default.", + "type": { + "description": "The type of update process. 
You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls).", "enum": [ - "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE", - "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE", - "INHERIT_FROM_SUBNETWORK" + "OPPORTUNISTIC", + "PROACTIVE" ], "enumDescriptions": [ - "Bidirectional private IPv6 access to/from Google services. If specified, the subnetwork who is attached to the instance's default network interface will be assigned an internal IPv6 prefix if it doesn't have before.", - "Outbound private IPv6 access from VMs in this subnet to Google services. If specified, the subnetwork who is attached to the instance's default network interface will be assigned an internal IPv6 prefix if it doesn't have before.", - "Each network interface inherits PrivateIpv6GoogleAccess from its subnetwork." + "No action is being proactively performed in order to bring this IGM to its target version distribution (regardless of whether this distribution is expressed using instanceTemplate or versions field).", + "This IGM will actively converge to its target version distribution (regardless of whether this distribution is expressed using instanceTemplate or versions field)." ], "type": "string" + } + }, + "type": "object" + }, + "InstanceGroupManagerVersion": { + "id": "InstanceGroupManagerVersion", + "properties": { + "instanceTemplate": { + "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create new instances in the managed instance group until the `targetSize` for this version is reached. The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE; in those cases, existing instances are updated until the `targetSize` for this version is reached.", + "type": "string" }, - "reservationAffinity": { - "$ref": "ReservationAffinity", - "description": "Specifies the reservations that this instance can consume from." + "name": { + "description": "Name of the version. Unique among all versions in the scope of this managed instance group.", + "type": "string" }, - "resourcePolicies": { - "description": "Resource policies applied to this instance.", + "targetSize": { + "$ref": "FixedOrPercent", + "description": "Specifies the intended number of instances to be created from the instanceTemplate. The final number of instances created from the template will be equal to: - If expressed as a fixed number, the minimum of either targetSize.fixed or instanceGroupManager.targetSize is used. - if expressed as a percent, the targetSize would be (targetSize.percent/100 * InstanceGroupManager.targetSize) If there is a remainder, the number is rounded. If unset, this version will update any remaining instances not updated by another version. Read Starting a canary update for more information." + } + }, + "type": "object" + }, + "InstanceGroupManagersAbandonInstancesRequest": { + "id": "InstanceGroupManagersAbandonInstancesRequest", + "properties": { + "instances": { + "description": "The URLs of one or more instances to abandon. 
This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", "items": { "type": "string" }, "type": "array" - }, - "satisfiesPzs": { - "description": "[Output Only] Reserved for future use.", + } + }, + "type": "object" + }, + "InstanceGroupManagersApplyUpdatesRequest": { + "description": "InstanceGroupManagers.applyUpdatesToInstances", + "id": "InstanceGroupManagersApplyUpdatesRequest", + "properties": { + "allInstances": { + "description": "Flag to update all instances instead of specified list of “instances”. If the flag is set to true then the instances may not be specified in the request.", "type": "boolean" }, - "scheduling": { - "$ref": "Scheduling", - "description": "Sets the scheduling options for this instance." - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "serviceAccounts": { - "description": "A list of service accounts, with their specified scopes, authorized for this instance. Only one service account per VM instance is supported. Service accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. See Service Accounts for more information.", + "instances": { + "description": "The list of URLs of one or more instances for which you want to apply updates. Each URL can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", "items": { - "$ref": "ServiceAccount" + "type": "string" }, "type": "array" }, - "shieldedInstanceConfig": { - "$ref": "ShieldedInstanceConfig" - }, - "shieldedInstanceIntegrityPolicy": { - "$ref": "ShieldedInstanceIntegrityPolicy" - }, - "sourceMachineImage": { - "description": "Source machine image", - "type": "string" - }, - "sourceMachineImageEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Source machine image encryption key when creating an instance from a machine image." - }, - "startRestricted": { - "description": "[Output Only] Whether a VM has been restricted for start because Compute Engine has detected suspicious activity.", - "type": "boolean" - }, - "status": { - "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see Instance life cycle.", + "minimalAction": { + "description": "The minimal action that you want to perform on each instance during the update: - REPLACE: At minimum, delete the instance and create it again. - RESTART: Stop the instance and start it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt the instance at all. By default, the minimum action is NONE. 
If your update requires a more disruptive action than you set with this flag, the necessary action is performed to execute the update.", "enum": [ - "DEPROVISIONING", - "PROVISIONING", - "REPAIRING", - "RUNNING", - "STAGING", - "STOPPED", - "STOPPING", - "SUSPENDED", - "SUSPENDING", - "TERMINATED" + "NONE", + "REFRESH", + "REPLACE", + "RESTART" ], "enumDescriptions": [ - "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", - "Resources are being allocated for the instance.", - "The instance is in repair.", - "The instance is running.", - "All required resources have been allocated and the instance is being started.", - "The instance has stopped successfully.", - "The instance is currently stopping (either being deleted or killed).", - "The instance has suspended.", - "The instance is suspending.", - "The instance has stopped (either by explicit action or underlying failure)." + "Do not perform any action.", + "Updates applied in runtime, instances will not be disrupted.", + "Old instances will be deleted. New instances will be created from the target template.", + "Every instance will be restarted." ], "type": "string" }, - "statusMessage": { - "description": "[Output Only] An optional, human-readable explanation of the status.", + "mostDisruptiveAllowedAction": { + "description": "The most disruptive action that you want to perform on each instance during the update: - REPLACE: Delete the instance and create it again. - RESTART: Stop the instance and start it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt the instance at all. By default, the most disruptive allowed action is REPLACE. If your update requires a more disruptive action than you set with this flag, the update request will fail.", + "enum": [ + "NONE", + "REFRESH", + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "Do not perform any action.", + "Updates applied in runtime, instances will not be disrupted.", + "Old instances will be deleted. New instances will be created from the target template.", + "Every instance will be restarted." + ], "type": "string" + } + }, + "type": "object" + }, + "InstanceGroupManagersCreateInstancesRequest": { + "description": "InstanceGroupManagers.createInstances", + "id": "InstanceGroupManagersCreateInstancesRequest", + "properties": { + "instances": { + "description": "[Required] List of specifications of per-instance configs.", + "items": { + "$ref": "PerInstanceConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstanceGroupManagersDeleteInstancesRequest": { + "id": "InstanceGroupManagersDeleteInstancesRequest", + "properties": { + "instances": { + "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "items": { + "type": "string" + }, + "type": "array" }, - "tags": { - "$ref": "Tags", - "description": "Tags to apply to this instance. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during instance creation. The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035. Multiple tags can be specified via the 'tags.items' field." - }, - "zone": { - "description": "[Output Only] URL of the zone where the instance resides. You must specify this field as part of the HTTP request URL. 
It is not settable as a field in the request body.", - "type": "string" + "skipInstancesOnValidationError": { + "description": "Specifies whether the request should proceed despite the inclusion of instances that are not members of the group or that are already in the process of being deleted or abandoned. If this field is set to `false` and such an instance is specified in the request, the operation fails. The operation always fails if the request contains a malformed instance URL or a reference to an instance that exists in a zone or region other than the group's zone or region.", + "type": "boolean" + } + }, + "type": "object" + }, + "InstanceGroupManagersDeletePerInstanceConfigsReq": { + "description": "InstanceGroupManagers.deletePerInstanceConfigs", + "id": "InstanceGroupManagersDeletePerInstanceConfigsReq", + "properties": { + "names": { + "description": "The list of instance names for which we want to delete per-instance configs on this managed instance group.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "InstanceAggregatedList": { - "id": "InstanceAggregatedList", + "InstanceGroupManagersListErrorsResponse": { + "id": "InstanceGroupManagersListErrorsResponse", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, "items": { - "additionalProperties": { - "$ref": "InstancesScopedList", - "description": "[Output Only] Name of the scope containing this set of instances." + "description": "[Output Only] The list of errors of the managed instance group.", + "items": { + "$ref": "InstanceManagedByIgmError" }, - "description": "An object that contains a list of instances scoped by zone.", - "type": "object" - }, - "kind": { - "default": "compute#instanceAggregatedList", - "description": "[Output Only] Type of resource. Always compute#instanceAggregatedList for aggregated lists of Instance resources.", - "type": "string" + "type": "array" }, "nextPageToken": { "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" + } + }, + "type": "object" + }, + "InstanceGroupManagersListManagedInstancesResponse": { + "id": "InstanceGroupManagersListManagedInstancesResponse", + "properties": { + "managedInstances": { + "description": "[Output Only] The list of instances in the managed instance group.", + "items": { + "$ref": "ManagedInstance" + }, + "type": "array" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" - }, - "unreachables": { - "description": "[Output Only] Unreachable resources.", + } + }, + "type": "object" + }, + "InstanceGroupManagersListPerInstanceConfigsResp": { + "id": "InstanceGroupManagersListPerInstanceConfigsResp", + "properties": { + "items": { + "description": "[Output Only] The list of PerInstanceConfig.", "items": { - "type": "string" + "$ref": "PerInstanceConfig" }, "type": "array" }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -38522,10 +44041,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -38549,10 +44070,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -38596,115 +44119,45 @@ }, "type": "object" }, - "InstanceGroup": { - "description": "Represents an Instance Group resource. Instance Groups can be used to configure a target for load balancing. Instance groups can either be managed or unmanaged. To create managed instance groups, use the instanceGroupManager or regionInstanceGroupManager resource instead. Use zonal unmanaged instance groups if you need to apply load balancing to groups of heterogeneous instances or if you need to manage the instances yourself. You cannot create regional unmanaged instance groups. For more information, read Instance groups.", - "id": "InstanceGroup", + "InstanceGroupManagersPatchPerInstanceConfigsReq": { + "description": "InstanceGroupManagers.patchPerInstanceConfigs", + "id": "InstanceGroupManagersPatchPerInstanceConfigsReq", "properties": { - "creationTimestamp": { - "description": "[Output Only] The creation timestamp for this instance group in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. 
Provide this property when you create the resource.", - "type": "string" - }, - "fingerprint": { - "description": "[Output Only] The fingerprint of the named ports. The system uses this fingerprint to detect conflicts when multiple users change the named ports concurrently.", - "format": "byte", - "type": "string" - }, - "id": { - "description": "[Output Only] A unique identifier for this instance group, generated by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#instanceGroup", - "description": "[Output Only] The resource type, which is always compute#instanceGroup for instance groups.", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "compute.instanceGroups.insert" - ] - }, - "description": "The name of the instance group. The name must be 1-63 characters long, and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "namedPorts": { - "description": " Assigns a name to a port number. For example: {name: \"http\", port: 80} This allows the system to reference ports by the assigned name instead of a port number. Named ports can also contain multiple ports. For example: [{name: \"http\", port: 80},{name: \"http\", port: 8080}] Named ports apply to all instances in this instance group. ", + "perInstanceConfigs": { + "description": "The list of per-instance configurations to insert or patch on this managed instance group.", "items": { - "$ref": "NamedPort" + "$ref": "PerInstanceConfig" }, "type": "array" - }, - "network": { - "description": "[Output Only] The URL of the network to which all instances in the instance group belong. If your instance has multiple network interfaces, then the network and subnetwork fields only refer to the network and subnet used by your primary interface (nic0).", - "type": "string" - }, - "region": { - "description": "[Output Only] The URL of the region where the instance group is located (for regional resources).", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] The URL for this instance group. The server generates this URL.", - "type": "string" - }, - "size": { - "description": "[Output Only] The total number of instances in the instance group.", - "format": "int32", - "type": "integer" - }, - "subnetwork": { - "description": "[Output Only] The URL of the subnetwork to which all instances in the instance group belong. If your instance has multiple network interfaces, then the network and subnetwork fields only refer to the network and subnet used by your primary interface (nic0).", - "type": "string" - }, - "zone": { - "description": "[Output Only] The URL of the zone where the instance group is located (for zonal resources).", - "type": "string" } }, "type": "object" }, - "InstanceGroupAggregatedList": { - "id": "InstanceGroupAggregatedList", + "InstanceGroupManagersRecreateInstancesRequest": { + "id": "InstanceGroupManagersRecreateInstancesRequest", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "additionalProperties": { - "$ref": "InstanceGroupsScopedList", - "description": "The name of the scope that contains this set of instance groups." 
- }, - "description": "A list of InstanceGroupsScopedList resources.", - "type": "object" - }, - "kind": { - "default": "compute#instanceGroupAggregatedList", - "description": "[Output Only] The resource type, which is always compute#instanceGroupAggregatedList for aggregated lists of instance groups.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "unreachables": { - "description": "[Output Only] Unreachable resources.", + "instances": { + "description": "The URLs of one or more instances to recreate. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", "items": { "type": "string" }, "type": "array" + } + }, + "type": "object" + }, + "InstanceGroupManagersScopedList": { + "id": "InstanceGroupManagersScopedList", + "properties": { + "instanceGroupManagers": { + "description": "[Output Only] The list of managed instance groups that are contained in the specified project and zone.", + "items": { + "$ref": "InstanceGroupManager" + }, + "type": "array" }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "[Output Only] The warning that replaces the list of managed instance groups when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -38717,10 +44170,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -38744,10 +44199,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -38791,24 +44248,78 @@ }, "type": "object" }, - "InstanceGroupList": { - "description": "A list of InstanceGroup resources.", - "id": "InstanceGroupList", + "InstanceGroupManagersSetInstanceTemplateRequest": { + "id": "InstanceGroupManagersSetInstanceTemplateRequest", + "properties": { + "instanceTemplate": { + "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group. The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE.", + "type": "string" + } + }, + "type": "object" + }, + "InstanceGroupManagersSetTargetPoolsRequest": { + "id": "InstanceGroupManagersSetTargetPoolsRequest", + "properties": { + "fingerprint": { + "description": "The fingerprint of the target pools information. Use this optional property to prevent conflicts when multiple users change the target pools settings concurrently. Obtain the fingerprint with the instanceGroupManagers.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", + "format": "byte", + "type": "string" + }, + "targetPools": { + "description": "The list of target pool URLs that instances in this managed instance group belong to. The managed instance group applies these target pools to all of the instances in the group. 
Existing instances and new instances in the group all receive these target pool settings.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstanceGroupManagersUpdatePerInstanceConfigsReq": { + "description": "InstanceGroupManagers.updatePerInstanceConfigs", + "id": "InstanceGroupManagersUpdatePerInstanceConfigsReq", + "properties": { + "perInstanceConfigs": { + "description": "The list of per-instance configurations to insert or patch on this managed instance group.", + "items": { + "$ref": "PerInstanceConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstanceGroupsAddInstancesRequest": { + "id": "InstanceGroupsAddInstancesRequest", + "properties": { + "instances": { + "description": "The list of instances to add to the instance group.", + "items": { + "$ref": "InstanceReference" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstanceGroupsListInstances": { + "id": "InstanceGroupsListInstances", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of InstanceGroup resources.", + "description": "A list of InstanceWithNamedPorts resources.", "items": { - "$ref": "InstanceGroup" + "$ref": "InstanceWithNamedPorts" }, "type": "array" }, "kind": { - "default": "compute#instanceGroupList", - "description": "[Output Only] The resource type, which is always compute#instanceGroupList for instance group lists.", + "default": "compute#instanceGroupsListInstances", + "description": "[Output Only] The resource type, which is always compute#instanceGroupsListInstances for the list of instances in the specified instance group.", "type": "string" }, "nextPageToken": { @@ -38833,10 +44344,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -38860,10 +44373,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -38907,225 +44422,175 @@ }, "type": "object" }, - "InstanceGroupManager": { - "description": "Represents a Managed Instance Group resource. An instance group is a collection of VM instances that you can manage as a single entity. For more information, read Instance groups. For zonal Managed Instance Group, use the instanceGroupManagers resource. 
For regional Managed Instance Group, use the regionInstanceGroupManagers resource.", - "id": "InstanceGroupManager", + "InstanceGroupsListInstancesRequest": { + "id": "InstanceGroupsListInstancesRequest", "properties": { - "autoHealingPolicies": { - "description": "The autohealing policy for this managed instance group. You can specify only one value.", - "items": { - "$ref": "InstanceGroupManagerAutoHealingPolicy" - }, - "type": "array" - }, - "baseInstanceName": { - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - }, - "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. The base instance name must comply with RFC1035.", - "pattern": "[a-z][-a-z0-9]{0,57}", - "type": "string" - }, - "creationTimestamp": { - "description": "[Output Only] The creation timestamp for this managed instance group in RFC3339 text format.", - "type": "string" - }, - "currentActions": { - "$ref": "InstanceGroupManagerActionsSummary", - "description": "[Output Only] The list of instance actions and the number of instances in this managed instance group that are scheduled for each of those actions." - }, - "description": { - "description": "An optional description of this resource.", - "type": "string" - }, - "distributionPolicy": { - "$ref": "DistributionPolicy", - "description": "Policy specifying the intended distribution of managed instances across zones in a regional managed instance group." - }, - "fingerprint": { - "description": "Fingerprint of this resource. This field may be used in optimistic locking. It will be ignored when inserting an InstanceGroupManager. An up-to-date fingerprint must be provided in order to update the InstanceGroupManager, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an InstanceGroupManager.", - "format": "byte", - "type": "string" - }, - "id": { - "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier.", - "format": "uint64", - "type": "string" - }, - "instanceGroup": { - "description": "[Output Only] The URL of the Instance Group resource.", - "type": "string" - }, - "instanceTemplate": { - "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group. The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE.", - "type": "string" - }, - "kind": { - "default": "compute#instanceGroupManager", - "description": "[Output Only] The resource type, which is always compute#instanceGroupManager for managed instance groups.", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert", - "compute.regionInstanceGroupManagers.insert" - ] - }, - "description": "The name of the managed instance group. 
The name must be 1-63 characters long, and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "namedPorts": { - "description": "Named ports configured for the Instance Groups complementary to this Instance Group Manager.", - "items": { - "$ref": "NamedPort" - }, - "type": "array" - }, - "region": { - "description": "[Output Only] The URL of the region where the managed instance group resides (for regional resources).", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] The URL for this managed instance group. The server defines this URL.", + "instanceState": { + "description": "A filter for the state of the instances in the instance group. Valid options are ALL or RUNNING. If you do not specify this parameter the list includes all instances regardless of their state.", + "enum": [ + "ALL", + "RUNNING" + ], + "enumDescriptions": [ + "Includes all instances in the generated list regardless of their state.", + "Includes instances in the generated list only if they have a RUNNING state." + ], "type": "string" - }, - "statefulPolicy": { - "$ref": "StatefulPolicy", - "description": "Stateful configuration for this Instanced Group Manager" - }, - "status": { - "$ref": "InstanceGroupManagerStatus", - "description": "[Output Only] The status of this managed instance group." - }, - "targetPools": { - "description": "The URLs for all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.", - "items": { - "type": "string" - }, - "type": "array" - }, - "targetSize": { - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert", - "compute.regionInstanceGroupManagers.insert" - ] - }, - "description": "The target number of running instances for this managed instance group. You can reduce this number by using the instanceGroupManager deleteInstances or abandonInstances methods. Resizing the group also changes this number.", - "format": "int32", - "type": "integer" - }, - "updatePolicy": { - "$ref": "InstanceGroupManagerUpdatePolicy", - "description": "The update policy for this managed instance group." - }, - "versions": { - "description": "Specifies the instance templates used by this managed instance group to create instances. Each version is defined by an instanceTemplate and a name. Every version can appear at most once per instance group. This field overrides the top-level instanceTemplate field. Read more about the relationships between these fields. Exactly one version must leave the targetSize field unset. That version will be applied to all remaining instances. 
For more information, read about canary updates.", + } + }, + "type": "object" + }, + "InstanceGroupsRemoveInstancesRequest": { + "id": "InstanceGroupsRemoveInstancesRequest", + "properties": { + "instances": { + "description": "The list of instances to remove from the instance group.", "items": { - "$ref": "InstanceGroupManagerVersion" + "$ref": "InstanceReference" }, "type": "array" - }, - "zone": { - "description": "[Output Only] The URL of a zone where the managed instance group is located (for zonal resources).", - "type": "string" } }, "type": "object" }, - "InstanceGroupManagerActionsSummary": { - "id": "InstanceGroupManagerActionsSummary", + "InstanceGroupsScopedList": { + "id": "InstanceGroupsScopedList", "properties": { - "abandoning": { - "description": "[Output Only] The total number of instances in the managed instance group that are scheduled to be abandoned. Abandoning an instance removes it from the managed instance group without deleting it.", - "format": "int32", - "type": "integer" - }, - "creating": { - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be created or are currently being created. If the group fails to create any of these instances, it tries again until it creates the instance successfully. If you have disabled creation retries, this field will not be populated; instead, the creatingWithoutRetries field will be populated.", - "format": "int32", - "type": "integer" - }, - "creatingWithoutRetries": { - "description": "[Output Only] The number of instances that the managed instance group will attempt to create. The group attempts to create each instance only once. If the group fails to create any of these instances, it decreases the group's targetSize value accordingly.", - "format": "int32", - "type": "integer" - }, - "deleting": { - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be deleted or are currently being deleted.", - "format": "int32", - "type": "integer" - }, - "none": { - "description": "[Output Only] The number of instances in the managed instance group that are running and have no scheduled actions.", - "format": "int32", - "type": "integer" - }, - "recreating": { - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be recreated or are currently being being recreated. Recreating an instance deletes the existing root persistent disk and creates a new disk from the image that is defined in the instance template.", - "format": "int32", - "type": "integer" - }, - "refreshing": { - "description": "[Output Only] The number of instances in the managed instance group that are being reconfigured with properties that do not require a restart or a recreate action. 
For example, setting or removing target pools for the instance.", - "format": "int32", - "type": "integer" - }, - "restarting": { - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be restarted or are currently being restarted.", - "format": "int32", - "type": "integer" - }, - "resuming": { - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be resumed or are currently being resumed.", - "format": "int32", - "type": "integer" - }, - "starting": { - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be started or are currently being started.", - "format": "int32", - "type": "integer" - }, - "stopping": { - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be stopped or are currently being stopped.", - "format": "int32", - "type": "integer" + "instanceGroups": { + "description": "[Output Only] The list of instance groups that are contained in this scope.", + "items": { + "$ref": "InstanceGroup" + }, + "type": "array" }, - "suspending": { - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be suspended or are currently being suspended.", - "format": "int32", - "type": "integer" + "warning": { + "description": "[Output Only] An informational warning that replaces the list of instance groups when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "InstanceGroupsSetNamedPortsRequest": { + "id": "InstanceGroupsSetNamedPortsRequest", + "properties": { + "fingerprint": { + "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request. A request with an incorrect fingerprint will fail with error 412 conditionNotMet.", + "format": "byte", + "type": "string" }, - "verifying": { - "description": "[Output Only] The number of instances in the managed instance group that are being verified. 
See the managedInstances[].currentAction property in the listManagedInstances method documentation.", - "format": "int32", - "type": "integer" + "namedPorts": { + "description": "The list of named ports to set for this instance group.", + "items": { + "$ref": "NamedPort" + }, + "type": "array" } }, "type": "object" }, - "InstanceGroupManagerAggregatedList": { - "id": "InstanceGroupManagerAggregatedList", + "InstanceList": { + "description": "Contains a list of instances.", + "id": "InstanceList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "InstanceGroupManagersScopedList", - "description": "[Output Only] The name of the scope that contains this set of managed instance groups." + "description": "A list of Instance resources.", + "items": { + "$ref": "Instance" }, - "description": "A list of InstanceGroupManagersScopedList resources.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#instanceGroupManagerAggregatedList", - "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerAggregatedList for an aggregated list of managed instance groups.", + "default": "compute#instanceList", + "description": "[Output Only] Type of resource. Always compute#instanceList for lists of Instance resources.", "type": "string" }, "nextPageToken": { @@ -39136,13 +44601,6 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "unreachables": { - "description": "[Output Only] Unreachable resources.", - "items": { - "type": "string" - }, - "type": "array" - }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -39157,10 +44615,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -39184,10 +44644,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -39231,39 +44693,24 @@ }, "type": "object" }, - "InstanceGroupManagerAutoHealingPolicy": { - "id": "InstanceGroupManagerAutoHealingPolicy", - "properties": { - "healthCheck": { - "description": "The URL for the health check that signals autohealing.", - "type": "string" - }, - "initialDelaySec": { - "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600].", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "InstanceGroupManagerList": { - "description": "[Output Only] A list of managed instance groups.", - "id": "InstanceGroupManagerList", + "InstanceListReferrers": { + "description": "Contains a list of instance referrers.", + "id": "InstanceListReferrers", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of InstanceGroupManager resources.", + "description": "A list of Reference resources.", "items": { - "$ref": "InstanceGroupManager" + "$ref": "Reference" }, "type": "array" }, "kind": { - "default": "compute#instanceGroupManagerList", - "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups.", + "default": "compute#instanceListReferrers", + "description": "[Output Only] Type of resource. Always compute#instanceListReferrers for lists of Instance referrers.", "type": "string" }, "nextPageToken": { @@ -39288,10 +44735,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -39315,10 +44764,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -39362,318 +44813,339 @@ }, "type": "object" }, - "InstanceGroupManagerStatus": { - "id": "InstanceGroupManagerStatus", + "InstanceManagedByIgmError": { + "id": "InstanceManagedByIgmError", "properties": { - "autoscaler": { - "description": "[Output Only] The URL of the Autoscaler that targets this instance group manager.", - "type": "string" - }, - "isStable": { - "description": "[Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.", - "type": "boolean" - }, - "stateful": { - "$ref": "InstanceGroupManagerStatusStateful", - "description": "[Output Only] Stateful status of the given Instance Group Manager." + "error": { + "$ref": "InstanceManagedByIgmErrorManagedInstanceError", + "description": "[Output Only] Contents of the error." }, - "versionTarget": { - "$ref": "InstanceGroupManagerStatusVersionTarget", - "description": "[Output Only] A status of consistency of Instances' versions with their target version specified by version field on Instance Group Manager." - } - }, - "type": "object" - }, - "InstanceGroupManagerStatusStateful": { - "id": "InstanceGroupManagerStatusStateful", - "properties": { - "hasStatefulConfig": { - "description": "[Output Only] A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful config even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions.", - "type": "boolean" + "instanceActionDetails": { + "$ref": "InstanceManagedByIgmErrorInstanceActionDetails", + "description": "[Output Only] Details of the instance action that triggered this error. May be null, if the error was not caused by an action on an instance. This field is optional." }, - "perInstanceConfigs": { - "$ref": "InstanceGroupManagerStatusStatefulPerInstanceConfigs", - "description": "[Output Only] Status of per-instance configs on the instance." 
- } - }, - "type": "object" - }, - "InstanceGroupManagerStatusStatefulPerInstanceConfigs": { - "id": "InstanceGroupManagerStatusStatefulPerInstanceConfigs", - "properties": { - "allEffective": { - "description": "A bit indicating if all of the group's per-instance configs (listed in the output of a listPerInstanceConfigs API call) have status EFFECTIVE or there are no per-instance-configs.", - "type": "boolean" - } - }, - "type": "object" - }, - "InstanceGroupManagerStatusVersionTarget": { - "id": "InstanceGroupManagerStatusVersionTarget", - "properties": { - "isReached": { - "description": "[Output Only] A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances' target version are specified by version field on Instance Group Manager.", - "type": "boolean" + "timestamp": { + "description": "[Output Only] The time that this error occurred. This value is in RFC3339 text format.", + "type": "string" } }, "type": "object" }, - "InstanceGroupManagerUpdatePolicy": { - "id": "InstanceGroupManagerUpdatePolicy", + "InstanceManagedByIgmErrorInstanceActionDetails": { + "id": "InstanceManagedByIgmErrorInstanceActionDetails", "properties": { - "instanceRedistributionType": { - "description": "The instance redistribution policy for regional managed instance groups. Valid values are: - PROACTIVE (default): The group attempts to maintain an even distribution of VM instances across zones in the region. - NONE: For non-autoscaled groups, proactive redistribution is disabled. ", - "enum": [ - "NONE", - "PROACTIVE" - ], - "enumDescriptions": [ - "No action is being proactively performed in order to bring this IGM to its target instance distribution.", - "This IGM will actively converge to its target instance distribution." - ], - "type": "string" - }, - "maxSurge": { - "$ref": "FixedOrPercent", - "description": "The maximum number of instances that can be created above the specified targetSize during the update process. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxSurge is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge." - }, - "maxUnavailable": { - "$ref": "FixedOrPercent", - "description": "The maximum number of instances that can be unavailable during the update process. An instance is considered available if all of the following conditions are satisfied: - The instance's status is RUNNING. - If there is a health check on the instance group, the instance's health check status must be HEALTHY at least once. If there is no health check on the group, then the instance only needs to have a status of RUNNING to be considered available. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxUnavailable is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxUnavailable." - }, - "minimalAction": { - "description": "Minimal action to be taken on an instance. 
You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a RESTART, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action.", + "action": { + "description": "[Output Only] Action that managed instance group was executing on the instance when the error occurred. Possible values:", "enum": [ + "ABANDONING", + "CREATING", + "CREATING_WITHOUT_RETRIES", + "DELETING", "NONE", - "REFRESH", - "REPLACE", - "RESTART" + "RECREATING", + "REFRESHING", + "RESTARTING", + "RESUMING", + "STARTING", + "STOPPING", + "SUSPENDING", + "VERIFYING" ], "enumDescriptions": [ - "Do not perform any action.", - "Updates applied in runtime, instances will not be disrupted.", - "Old instances will be deleted. New instances will be created from the target template.", - "Every instance will be restarted." + "The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group.", + "The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful.", + "The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's targetSize value is decreased.", + "The managed instance group is permanently deleting this instance.", + "The managed instance group has not scheduled any actions for this instance.", + "The managed instance group is recreating this instance.", + "The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance.", + "The managed instance group is restarting this instance.", + "The managed instance group is resuming this instance.", + "The managed instance group is starting this instance.", + "The managed instance group is stopping this instance.", + "The managed instance group is suspending this instance.", + "The managed instance group is verifying this already created instance. Verification happens every time the instance is (re)created or restarted and consists of: 1. Waiting until health check specified as part of this managed instance group's autohealing policy reports HEALTHY. Note: Applies only if autohealing policy has a health check specified 2. Waiting for addition verification steps performed as post-instance creation (subject to future extensions)." ], "type": "string" }, - "mostDisruptiveAllowedAction": { - "description": "Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. If the Updater determines that the minimal update action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all.", - "enum": [ - "NONE", - "REFRESH", - "REPLACE", - "RESTART" - ], - "enumDescriptions": [ - "Do not perform any action.", - "Updates applied in runtime, instances will not be disrupted.", - "Old instances will be deleted. 
New instances will be created from the target template.", - "Every instance will be restarted." - ], + "instance": { + "description": "[Output Only] The URL of the instance. The URL can be set even if the instance has not yet been created.", "type": "string" }, - "replacementMethod": { - "description": "What action should be used to replace instances. See minimal_action.REPLACE", - "enum": [ - "RECREATE", - "SUBSTITUTE" - ], - "enumDescriptions": [ - "Instances will be recreated (with the same name)", - "Default option: instances will be deleted and created (with a new name)" - ], + "version": { + "$ref": "ManagedInstanceVersion", + "description": "[Output Only] Version this instance was created from, or was being created from, but the creation failed. Corresponds to one of the versions that were set on the Instance Group Manager resource at the time this instance was being created." + } + }, + "type": "object" + }, + "InstanceManagedByIgmErrorManagedInstanceError": { + "id": "InstanceManagedByIgmErrorManagedInstanceError", + "properties": { + "code": { + "description": "[Output Only] Error code.", "type": "string" }, - "type": { - "description": "The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls).", - "enum": [ - "OPPORTUNISTIC", - "PROACTIVE" - ], - "enumDescriptions": [ - "No action is being proactively performed in order to bring this IGM to its target version distribution (regardless of whether this distribution is expressed using instanceTemplate or versions field).", - "This IGM will actively converge to its target version distribution (regardless of whether this distribution is expressed using instanceTemplate or versions field)." - ], + "message": { + "description": "[Output Only] Error message.", "type": "string" } }, "type": "object" }, - "InstanceGroupManagerVersion": { - "id": "InstanceGroupManagerVersion", + "InstanceMoveRequest": { + "id": "InstanceMoveRequest", "properties": { - "instanceTemplate": { - "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create new instances in the managed instance group until the `targetSize` for this version is reached. The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE; in those cases, existing instances are updated until the `targetSize` for this version is reached.", + "destinationZone": { + "description": "The URL of the destination zone to move the instance. This can be a full or partial URL. For example, the following are all valid URLs to a zone: - https://www.googleapis.com/compute/v1/projects/project/zones/zone - projects/project/zones/zone - zones/zone ", "type": "string" }, - "name": { - "description": "Name of the version. Unique among all versions in the scope of this managed instance group.", + "targetInstance": { + "description": "The URL of the target instance to move. This can be a full or partial URL. 
For example, the following are all valid URLs to an instance: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /instances/instance - projects/project/zones/zone/instances/instance - zones/zone/instances/instance ", "type": "string" - }, - "targetSize": { - "$ref": "FixedOrPercent", - "description": "Specifies the intended number of instances to be created from the instanceTemplate. The final number of instances created from the template will be equal to: - If expressed as a fixed number, the minimum of either targetSize.fixed or instanceGroupManager.targetSize is used. - if expressed as a percent, the targetSize would be (targetSize.percent/100 * InstanceGroupManager.targetSize) If there is a remainder, the number is rounded. If unset, this version will update any remaining instances not updated by another version. Read Starting a canary update for more information." } }, "type": "object" }, - "InstanceGroupManagersAbandonInstancesRequest": { - "id": "InstanceGroupManagersAbandonInstancesRequest", + "InstanceParams": { + "description": "Additional instance params.", + "id": "InstanceParams", "properties": { - "instances": { - "description": "The URLs of one or more instances to abandon. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { + "resourceManagerTags": { + "additionalProperties": { "type": "string" }, - "type": "array" + "description": "Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT \u0026 PATCH) when empty.", + "type": "object" } }, "type": "object" }, - "InstanceGroupManagersApplyUpdatesRequest": { - "description": "InstanceGroupManagers.applyUpdatesToInstances", - "id": "InstanceGroupManagersApplyUpdatesRequest", + "InstanceProperties": { + "id": "InstanceProperties", "properties": { - "allInstances": { - "description": "Flag to update all instances instead of specified list of “instances”. If the flag is set to true then the instances may not be specified in the request.", + "advancedMachineFeatures": { + "$ref": "AdvancedMachineFeatures", + "description": "Controls for advanced machine-related behavior features. Note that for MachineImage, this is not supported yet." + }, + "canIpForward": { + "description": "Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information.", "type": "boolean" }, - "instances": { - "description": "The list of URLs of one or more instances for which you want to apply updates. Each URL can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "confidentialInstanceConfig": { + "$ref": "ConfidentialInstanceConfig", + "description": "Specifies the Confidential Instance options. Note that for MachineImage, this is not supported yet." 
+ }, + "description": { + "description": "An optional text description for the instances that are created from these properties.", + "type": "string" + }, + "disks": { + "description": "An array of disks that are associated with the instances that are created from these properties.", "items": { - "type": "string" + "$ref": "AttachedDisk" }, "type": "array" }, - "minimalAction": { - "description": "The minimal action that you want to perform on each instance during the update: - REPLACE: At minimum, delete the instance and create it again. - RESTART: Stop the instance and start it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt the instance at all. By default, the minimum action is NONE. If your update requires a more disruptive action than you set with this flag, the necessary action is performed to execute the update.", + "guestAccelerators": { + "description": "A list of guest accelerator cards' type and count to use for instances created from these properties.", + "items": { + "$ref": "AcceleratorConfig" + }, + "type": "array" + }, + "keyRevocationActionType": { + "description": "KeyRevocationActionType of the instance. Supported options are \"STOP\" and \"NONE\". The default value is \"NONE\" if it is not specified.", "enum": [ + "KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED", "NONE", - "REFRESH", - "REPLACE", - "RESTART" + "STOP" ], "enumDescriptions": [ - "Do not perform any action.", - "Updates applied in runtime, instances will not be disrupted.", - "Old instances will be deleted. New instances will be created from the target template.", - "Every instance will be restarted." + "Default value. This value is unused.", + "Indicates user chose no operation.", + "Indicates user chose to opt for VM shutdown on key revocation." ], "type": "string" }, - "mostDisruptiveAllowedAction": { - "description": "The most disruptive action that you want to perform on each instance during the update: - REPLACE: Delete the instance and create it again. - RESTART: Stop the instance and start it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt the instance at all. By default, the most disruptive allowed action is REPLACE. If your update requires a more disruptive action than you set with this flag, the update request will fail.", + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to instances that are created from these properties.", + "type": "object" + }, + "machineType": { + "annotations": { + "required": [ + "compute.instanceTemplates.insert" + ] + }, + "description": "The machine type to use for instances that are created from these properties.", + "type": "string" + }, + "metadata": { + "$ref": "Metadata", + "description": "The metadata key/value pairs to assign to instances that are created from these properties. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." + }, + "minCpuPlatform": { + "description": "Minimum cpu/platform to be used by instances. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". 
For more information, read Specifying a Minimum CPU Platform.", + "type": "string" + }, + "networkInterfaces": { + "description": "An array of network access configurations for this interface.", + "items": { + "$ref": "NetworkInterface" + }, + "type": "array" + }, + "networkPerformanceConfig": { + "$ref": "NetworkPerformanceConfig", + "description": "Note that for MachineImage, this is not supported yet." + }, + "privateIpv6GoogleAccess": { + "description": "The private IPv6 google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default. Note that for MachineImage, this is not supported yet.", "enum": [ - "NONE", - "REFRESH", - "REPLACE", - "RESTART" + "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE", + "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE", + "INHERIT_FROM_SUBNETWORK" ], "enumDescriptions": [ - "Do not perform any action.", - "Updates applied in runtime, instances will not be disrupted.", - "Old instances will be deleted. New instances will be created from the target template.", - "Every instance will be restarted." + "Bidirectional private IPv6 access to/from Google services. If specified, the subnetwork who is attached to the instance's default network interface will be assigned an internal IPv6 prefix if it doesn't have before.", + "Outbound private IPv6 access from VMs in this subnet to Google services. If specified, the subnetwork who is attached to the instance's default network interface will be assigned an internal IPv6 prefix if it doesn't have before.", + "Each network interface inherits PrivateIpv6GoogleAccess from its subnetwork." ], "type": "string" - } - }, - "type": "object" - }, - "InstanceGroupManagersCreateInstancesRequest": { - "description": "InstanceGroupManagers.createInstances", - "id": "InstanceGroupManagersCreateInstancesRequest", - "properties": { - "instances": { - "description": "[Required] List of specifications of per-instance configs.", - "items": { - "$ref": "PerInstanceConfig" + }, + "reservationAffinity": { + "$ref": "ReservationAffinity", + "description": "Specifies the reservations that instances can consume from. Note that for MachineImage, this is not supported yet." + }, + "resourceManagerTags": { + "additionalProperties": { + "type": "string" }, - "type": "array" - } - }, - "type": "object" - }, - "InstanceGroupManagersDeleteInstancesRequest": { - "id": "InstanceGroupManagersDeleteInstancesRequest", - "properties": { - "instances": { - "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "description": "Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT \u0026 PATCH) when empty.", + "type": "object" + }, + "resourcePolicies": { + "description": "Resource policies (names, not URLs) applied to instances created from these properties. Note that for MachineImage, this is not supported yet.", "items": { "type": "string" }, "type": "array" }, - "skipInstancesOnValidationError": { - "description": "Specifies whether the request should proceed despite the inclusion of instances that are not members of the group or that are already in the process of being deleted or abandoned. If this field is set to `false` and such an instance is specified in the request, the operation fails. 
The operation always fails if the request contains a malformed instance URL or a reference to an instance that exists in a zone or region other than the group's zone or region.", - "type": "boolean" - } - }, - "type": "object" - }, - "InstanceGroupManagersDeletePerInstanceConfigsReq": { - "description": "InstanceGroupManagers.deletePerInstanceConfigs", - "id": "InstanceGroupManagersDeletePerInstanceConfigsReq", - "properties": { - "names": { - "description": "The list of instance names for which we want to delete per-instance configs on this managed instance group.", + "scheduling": { + "$ref": "Scheduling", + "description": "Specifies the scheduling options for the instances that are created from these properties." + }, + "serviceAccounts": { + "description": "A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from these properties. Use metadata queries to obtain the access tokens for these instances.", "items": { - "type": "string" + "$ref": "ServiceAccount" }, "type": "array" + }, + "shieldedInstanceConfig": { + "$ref": "ShieldedInstanceConfig", + "description": "Note that for MachineImage, this is not supported yet." + }, + "tags": { + "$ref": "Tags", + "description": "A list of tags to apply to the instances that are created from these properties. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." } }, "type": "object" }, - "InstanceGroupManagersListErrorsResponse": { - "id": "InstanceGroupManagersListErrorsResponse", + "InstanceReference": { + "id": "InstanceReference", "properties": { - "items": { - "description": "[Output Only] The list of errors of the managed instance group.", - "items": { - "$ref": "InstanceManagedByIgmError" - }, - "type": "array" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "instance": { + "description": "The URL for a specific instance. @required compute.instancegroups.addInstances/removeInstances", "type": "string" } }, "type": "object" }, - "InstanceGroupManagersListManagedInstancesResponse": { - "id": "InstanceGroupManagersListManagedInstancesResponse", + "InstanceTemplate": { + "description": "Represents an Instance Template resource. You can use instance templates to create VM instances and managed instance groups. For more information, read Instance Templates.", + "id": "InstanceTemplate", "properties": { - "managedInstances": { - "description": "[Output Only] The list of instances in the managed instance group.", - "items": { - "$ref": "ManagedInstance" + "creationTimestamp": { + "description": "[Output Only] The creation timestamp for this instance template in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] A unique identifier for this instance template. 
The server defines this identifier.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#instanceTemplate", + "description": "[Output Only] The resource type, which is always compute#instanceTemplate for instance templates.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.instanceTemplates.insert" + ] }, - "type": "array" + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "properties": { + "$ref": "InstanceProperties", + "description": "The instance properties for this instance template." + }, + "selfLink": { + "description": "[Output Only] The URL for this instance template. The server defines this URL.", + "type": "string" + }, + "sourceInstance": { + "description": "The source instance used to create the template. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /instances/instance - projects/project/zones/zone/instances/instance ", "type": "string" + }, + "sourceInstanceParams": { + "$ref": "SourceInstanceParams", + "description": "The source instance params to use to create this instance template." } }, "type": "object" }, - "InstanceGroupManagersListPerInstanceConfigsResp": { - "id": "InstanceGroupManagersListPerInstanceConfigsResp", + "InstanceTemplateList": { + "description": "A list of instance templates.", + "id": "InstanceTemplateList", "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, "items": { - "description": "[Output Only] The list of PerInstanceConfig.", + "description": "A list of InstanceTemplate resources.", "items": { - "$ref": "PerInstanceConfig" + "$ref": "InstanceTemplate" }, "type": "array" }, + "kind": { + "default": "compute#instanceTemplateList", + "description": "[Output Only] The resource type, which is always compute#instanceTemplatesListResponse for instance template lists.", + "type": "string" + }, "nextPageToken": { "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -39688,10 +45160,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -39715,10 +45189,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -39762,25 +45238,56 @@ }, "type": "object" }, - "InstanceGroupManagersPatchPerInstanceConfigsReq": { - "description": "InstanceGroupManagers.patchPerInstanceConfigs", - "id": "InstanceGroupManagersPatchPerInstanceConfigsReq", + "InstanceWithNamedPorts": { + "id": "InstanceWithNamedPorts", "properties": { - "perInstanceConfigs": { - "description": "The list of per-instance configs to insert or patch on this managed instance group.", + "instance": { + "description": "[Output Only] The URL of the instance.", + "type": "string" + }, + "namedPorts": { + "description": "[Output Only] The named ports that belong to this instance group.", "items": { - "$ref": "PerInstanceConfig" + "$ref": "NamedPort" }, "type": "array" + }, + "status": { + "description": "[Output Only] The status of the instance.", + "enum": [ + "DEPROVISIONING", + "PROVISIONING", + "REPAIRING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" + ], + "enumDescriptions": [ + "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", + "All required resources have been allocated and the instance is being started.", + "The instance has stopped successfully.", + "The instance is currently stopping (either being deleted or killed).", + "The instance has suspended.", + "The instance is suspending.", + "The instance has stopped (either by explicit action or underlying failure)." 
+ ], + "type": "string" } }, "type": "object" }, - "InstanceGroupManagersRecreateInstancesRequest": { - "id": "InstanceGroupManagersRecreateInstancesRequest", + "InstancesAddResourcePoliciesRequest": { + "id": "InstancesAddResourcePoliciesRequest", "properties": { - "instances": { - "description": "The URLs of one or more instances to recreate. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "resourcePolicies": { + "description": "Resource policies to be added to this instance.", "items": { "type": "string" }, @@ -39789,18 +45296,92 @@ }, "type": "object" }, - "InstanceGroupManagersScopedList": { - "id": "InstanceGroupManagersScopedList", + "InstancesGetEffectiveFirewallsResponse": { + "id": "InstancesGetEffectiveFirewallsResponse", "properties": { - "instanceGroupManagers": { - "description": "[Output Only] The list of managed instance groups that are contained in the specified project and zone.", + "firewallPolicys": { + "description": "Effective firewalls from firewall policies.", "items": { - "$ref": "InstanceGroupManager" + "$ref": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy" + }, + "type": "array" + }, + "firewalls": { + "description": "Effective firewalls on the instance.", + "items": { + "$ref": "Firewall" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy": { + "id": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy", + "properties": { + "displayName": { + "description": "[Output Only] Deprecated, please use short name instead. The display name of the firewall policy.", + "type": "string" + }, + "name": { + "description": "[Output Only] The name of the firewall policy.", + "type": "string" + }, + "rules": { + "description": "The rules that apply to the network.", + "items": { + "$ref": "FirewallPolicyRule" + }, + "type": "array" + }, + "shortName": { + "description": "[Output Only] The short name of the firewall policy.", + "type": "string" + }, + "type": { + "description": "[Output Only] The type of the firewall policy. Can be one of HIERARCHY, NETWORK, NETWORK_REGIONAL.", + "enum": [ + "HIERARCHY", + "NETWORK", + "NETWORK_REGIONAL", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "InstancesRemoveResourcePoliciesRequest": { + "id": "InstancesRemoveResourcePoliciesRequest", + "properties": { + "resourcePolicies": { + "description": "Resource policies to be removed from this instance.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstancesScopedList": { + "id": "InstancesScopedList", + "properties": { + "instances": { + "description": "[Output Only] A list of instances contained in this scope.", + "items": { + "$ref": "Instance" }, "type": "array" }, "warning": { - "description": "[Output Only] The warning that replaces the list of managed instance groups when the list is empty.", + "description": "[Output Only] Informational warning which replaces the list of instances when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -39813,10 +45394,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -39840,10 +45423,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -39887,26 +45472,65 @@ }, "type": "object" }, - "InstanceGroupManagersSetInstanceTemplateRequest": { - "id": "InstanceGroupManagersSetInstanceTemplateRequest", + "InstancesSetLabelsRequest": { + "id": "InstancesSetLabelsRequest", "properties": { - "instanceTemplate": { - "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group. The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE.", + "labelFingerprint": { + "description": "Fingerprint of the previous set of labels for this resource, used to prevent conflicts. Provide the latest fingerprint value when making a request to add or change labels.", + "format": "byte", "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" } }, "type": "object" }, - "InstanceGroupManagersSetTargetPoolsRequest": { - "id": "InstanceGroupManagersSetTargetPoolsRequest", + "InstancesSetMachineResourcesRequest": { + "id": "InstancesSetMachineResourcesRequest", "properties": { - "fingerprint": { - "description": "The fingerprint of the target pools information. Use this optional property to prevent conflicts when multiple users change the target pools settings concurrently. Obtain the fingerprint with the instanceGroupManagers.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", - "format": "byte", + "guestAccelerators": { + "description": "A list of the type and count of accelerator cards attached to the instance.", + "items": { + "$ref": "AcceleratorConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstancesSetMachineTypeRequest": { + "id": "InstancesSetMachineTypeRequest", + "properties": { + "machineType": { + "description": "Full or partial URL of the machine type resource. 
See Machine Types for a full list of machine types. For example: zones/us-central1-f/machineTypes/n1-standard-1", + "type": "string" + } + }, + "type": "object" + }, + "InstancesSetMinCpuPlatformRequest": { + "id": "InstancesSetMinCpuPlatformRequest", + "properties": { + "minCpuPlatform": { + "description": "Minimum cpu/platform this instance should be started at.", + "type": "string" + } + }, + "type": "object" + }, + "InstancesSetServiceAccountRequest": { + "id": "InstancesSetServiceAccountRequest", + "properties": { + "email": { + "description": "Email address of the service account.", "type": "string" }, - "targetPools": { - "description": "The list of target pool URLs that instances in this managed instance group belong to. The managed instance group applies these target pools to all of the instances in the group. Existing instances and new instances in the group all receive these target pool settings.", + "scopes": { + "description": "The list of scopes to be made available for this service account.", "items": { "type": "string" }, @@ -39915,191 +45539,478 @@ }, "type": "object" }, - "InstanceGroupManagersUpdatePerInstanceConfigsReq": { - "description": "InstanceGroupManagers.updatePerInstanceConfigs", - "id": "InstanceGroupManagersUpdatePerInstanceConfigsReq", + "InstancesStartWithEncryptionKeyRequest": { + "id": "InstancesStartWithEncryptionKeyRequest", "properties": { - "perInstanceConfigs": { - "description": "The list of per-instance configs to insert or patch on this managed instance group.", + "disks": { + "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key. In order to start the instance, the disk url and its corresponding key must be provided. If the disk is not protected with a customer-supplied encryption key it should not be specified.", "items": { - "$ref": "PerInstanceConfig" + "$ref": "CustomerEncryptionKeyProtectedDisk" }, "type": "array" } }, "type": "object" }, - "InstanceGroupsAddInstancesRequest": { - "id": "InstanceGroupsAddInstancesRequest", + "Int64RangeMatch": { + "description": "HttpRouteRuleMatch criteria for field values that must stay within the specified integer range.", + "id": "Int64RangeMatch", "properties": { - "instances": { - "description": "The list of instances to add to the instance group.", + "rangeEnd": { + "description": "The end of the range (exclusive) in signed long integer format.", + "format": "int64", + "type": "string" + }, + "rangeStart": { + "description": "The start of the range (inclusive) in signed long integer format.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "Interconnect": { + "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the GCP network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", + "id": "Interconnect", + "properties": { + "adminEnabled": { + "description": "Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. 
By default, the status is set to true.", + "type": "boolean" + }, + "circuitInfos": { + "description": "[Output Only] A list of CircuitInfo objects, that describe the individual circuits in this LAG.", "items": { - "$ref": "InstanceReference" + "$ref": "InterconnectCircuitInfo" + }, + "type": "array" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "customerName": { + "description": "Customer name, to put in the Letter of Authorization as the party authorized to request a crossconnect.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "expectedOutages": { + "description": "[Output Only] A list of outages expected for this Interconnect.", + "items": { + "$ref": "InterconnectOutageNotification" + }, + "type": "array" + }, + "googleIpAddress": { + "description": "[Output Only] IP address configured on the Google side of the Interconnect link. This can be used only for ping tests.", + "type": "string" + }, + "googleReferenceId": { + "description": "[Output Only] Google reference ID to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "interconnectAttachments": { + "description": "[Output Only] A list of the URLs of all InterconnectAttachments configured to use this Interconnect.", + "items": { + "type": "string" }, "type": "array" + }, + "interconnectType": { + "description": "Type of interconnect, which can take one of the following values: - PARTNER: A partner-managed interconnection shared between customers though a partner. - DEDICATED: A dedicated physical interconnection with the customer. Note that a value IT_PRIVATE has been deprecated in favor of DEDICATED.", + "enum": [ + "DEDICATED", + "IT_PRIVATE", + "PARTNER" + ], + "enumDescriptions": [ + "A dedicated physical interconnection with the customer.", + "[Deprecated] A private, physical interconnection with the customer.", + "A partner-managed interconnection shared between customers via partner." + ], + "type": "string" + }, + "kind": { + "default": "compute#interconnect", + "description": "[Output Only] Type of the resource. Always compute#interconnect for interconnects.", + "type": "string" + }, + "linkType": { + "description": "Type of link requested, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.", + "enum": [ + "LINK_TYPE_ETHERNET_100G_LR", + "LINK_TYPE_ETHERNET_10G_LR" + ], + "enumDescriptions": [ + "100G Ethernet, LR Optics.", + "10G Ethernet, LR Optics. [(rate_bps) = 10000000000];" + ], + "type": "string" + }, + "location": { + "description": "URL of the InterconnectLocation object that represents where this connection is to be provisioned.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.interconnects.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "nocContactEmail": { + "description": "Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Stackdriver logs alerting and Cloud Notifications.", + "type": "string" + }, + "operationalStatus": { + "description": "[Output Only] The current status of this Interconnect's functionality, which can take one of the following values: - OS_ACTIVE: A valid Interconnect, which is turned up and is ready to use. Attachments may be provisioned on this Interconnect. - OS_UNPROVISIONED: An Interconnect that has not completed turnup. No attachments may be provisioned on this Interconnect. - OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect. ", + "enum": [ + "OS_ACTIVE", + "OS_UNPROVISIONED" + ], + "enumDescriptions": [ + "The interconnect is valid, turned up, and ready to use. Attachments may be provisioned on this interconnect.", + "The interconnect has not completed turnup. No attachments may be provisioned on this interconnect." + ], + "type": "string" + }, + "peerIpAddress": { + "description": "[Output Only] IP address configured on the customer side of the Interconnect link. The customer should configure this IP address during turnup when prompted by Google NOC. This can be used only for ping tests.", + "type": "string" + }, + "provisionedLinkCount": { + "description": "[Output Only] Number of links actually provisioned in this interconnect.", + "format": "int32", + "type": "integer" + }, + "requestedLinkCount": { + "description": "Target number of physical links in the link bundle, as requested by the customer.", + "format": "int32", + "type": "integer" + }, + "satisfiesPzs": { + "description": "[Output Only] Set to true if the resource satisfies the zone separation organization policy constraints and false otherwise. Defaults to false if the field is not present.", + "type": "boolean" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "state": { + "description": "[Output Only] The current state of Interconnect functionality, which can take one of the following values: - ACTIVE: The Interconnect is valid, turned up and ready to use. Attachments may be provisioned on this Interconnect. - UNPROVISIONED: The Interconnect has not completed turnup. No attachments may be provisioned on this Interconnect. - UNDER_MAINTENANCE: The Interconnect is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect. ", + "enum": [ + "ACTIVE", + "UNPROVISIONED" + ], + "enumDescriptions": [ + "The interconnect is valid, turned up, and ready to use. Attachments may be provisioned on this interconnect.", + "The interconnect has not completed turnup. No attachments may be provisioned on this interconnect." 
+ ], + "type": "string" } }, "type": "object" }, - "InstanceGroupsListInstances": { - "id": "InstanceGroupsListInstances", + "InterconnectAttachment": { + "description": "Represents an Interconnect Attachment (VLAN) resource. You can use Interconnect attachments (VLANS) to connect your Virtual Private Cloud networks to your on-premises networks through an Interconnect. For more information, read Creating VLAN Attachments.", + "id": "InterconnectAttachment", "properties": { + "adminEnabled": { + "description": "Determines whether this Attachment will carry packets. Not present for PARTNER_PROVIDER.", + "type": "boolean" + }, + "bandwidth": { + "description": "Provisioned bandwidth capacity for the interconnect attachment. For attachments of type DEDICATED, the user can set the bandwidth. For attachments of type PARTNER, the Google Partner that is operating the interconnect must set the bandwidth. Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, and can take one of the following values: - BPS_50M: 50 Mbit/s - BPS_100M: 100 Mbit/s - BPS_200M: 200 Mbit/s - BPS_300M: 300 Mbit/s - BPS_400M: 400 Mbit/s - BPS_500M: 500 Mbit/s - BPS_1G: 1 Gbit/s - BPS_2G: 2 Gbit/s - BPS_5G: 5 Gbit/s - BPS_10G: 10 Gbit/s - BPS_20G: 20 Gbit/s - BPS_50G: 50 Gbit/s ", + "enum": [ + "BPS_100M", + "BPS_10G", + "BPS_1G", + "BPS_200M", + "BPS_20G", + "BPS_2G", + "BPS_300M", + "BPS_400M", + "BPS_500M", + "BPS_50G", + "BPS_50M", + "BPS_5G" + ], + "enumDescriptions": [ + "100 Mbit/s", + "10 Gbit/s", + "1 Gbit/s", + "200 Mbit/s", + "20 Gbit/s", + "2 Gbit/s", + "300 Mbit/s", + "400 Mbit/s", + "500 Mbit/s", + "50 Gbit/s", + "50 Mbit/s", + "5 Gbit/s" + ], + "type": "string" + }, + "candidateIpv6Subnets": { + "description": "This field is not available.", + "items": { + "type": "string" + }, + "type": "array" + }, + "candidateSubnets": { + "description": "Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress and customerRouterIpAddress for this attachment. All prefixes must be within link-local address space (169.254.0.0/16) and must be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29 from the supplied candidate prefix(es). The request will fail if all possible /29s are in use on Google's edge. 
If not supplied, Google will randomly select an unused /29 from all of link-local space.", + "items": { + "type": "string" + }, + "type": "array" + }, + "cloudRouterIpAddress": { + "description": "[Output Only] IPv4 address + prefix length to be configured on Cloud Router Interface for this interconnect attachment.", + "type": "string" + }, + "cloudRouterIpv6Address": { + "description": "[Output Only] IPv6 address + prefix length to be configured on Cloud Router Interface for this interconnect attachment.", + "type": "string" + }, + "cloudRouterIpv6InterfaceId": { + "description": "This field is not available.", + "type": "string" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "customerRouterIpAddress": { + "description": "[Output Only] IPv4 address + prefix length to be configured on the customer router subinterface for this interconnect attachment.", + "type": "string" + }, + "customerRouterIpv6Address": { + "description": "[Output Only] IPv6 address + prefix length to be configured on the customer router subinterface for this interconnect attachment.", + "type": "string" + }, + "customerRouterIpv6InterfaceId": { + "description": "This field is not available.", + "type": "string" + }, + "dataplaneVersion": { + "description": "[Output Only] Dataplane version for this InterconnectAttachment. This field is only present for Dataplane version 2 and higher. Absence of this field in the API output indicates that the Dataplane is version 1.", + "format": "int32", + "type": "integer" + }, + "description": { + "description": "An optional description of this resource.", + "type": "string" + }, + "edgeAvailabilityDomain": { + "description": "Desired availability domain for the attachment. Only available for type PARTNER, at creation time, and can take one of the following values: - AVAILABILITY_DOMAIN_ANY - AVAILABILITY_DOMAIN_1 - AVAILABILITY_DOMAIN_2 For improved reliability, customers should configure a pair of attachments, one per availability domain. The selected availability domain will be provided to the Partner via the pairing key, so that the provisioned circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.", + "enum": [ + "AVAILABILITY_DOMAIN_1", + "AVAILABILITY_DOMAIN_2", + "AVAILABILITY_DOMAIN_ANY" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "encryption": { + "description": "Indicates the user-supplied encryption option of this VLAN attachment (interconnectAttachment). Can only be specified at attachment creation for PARTNER or DEDICATED attachments. Possible values are: - NONE - This is the default value, which means that the VLAN attachment carries unencrypted traffic. VMs are able to send traffic to, or receive traffic from, such a VLAN attachment. - IPSEC - The VLAN attachment carries only encrypted traffic that is encrypted by an IPsec device, such as an HA VPN gateway or third-party IPsec VPN. VMs cannot directly send traffic to, or receive traffic from, such a VLAN attachment. To use *HA VPN over Cloud Interconnect*, the VLAN attachment must be created with this option. ", + "enum": [ + "IPSEC", + "NONE" + ], + "enumDescriptions": [ + "The interconnect attachment will carry only encrypted traffic that is encrypted by an IPsec device such as HA VPN gateway; VMs cannot directly send traffic to or receive traffic from such an interconnect attachment. 
To use HA VPN over Cloud Interconnect, the interconnect attachment must be created with this option.", + "This is the default value, which means the Interconnect Attachment will carry unencrypted traffic. VMs will be able to send traffic to or receive traffic from such interconnect attachment." + ], + "type": "string" + }, + "googleReferenceId": { + "description": "[Output Only] Google reference ID, to be used when raising support tickets with Google or otherwise to debug backend connectivity issues. [Deprecated] This field is not used.", + "type": "string" + }, "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", "type": "string" }, - "items": { - "description": "A list of InstanceWithNamedPorts resources.", + "interconnect": { + "description": "URL of the underlying Interconnect object that this attachment's traffic will traverse through.", + "type": "string" + }, + "ipsecInternalAddresses": { + "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool. Not currently available publicly. ", "items": { - "$ref": "InstanceWithNamedPorts" + "type": "string" }, "type": "array" }, "kind": { - "default": "compute#instanceGroupsListInstances", - "description": "[Output Only] The resource type, which is always compute#instanceGroupsListInstances for the list of instances in the specified instance group.", + "default": "compute#interconnectAttachment", + "description": "[Output Only] Type of the resource. Always compute#interconnectAttachment for interconnect attachments.", "type": "string" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "mtu": { + "description": "Maximum Transmission Unit (MTU), in bytes, of packets passing through this interconnect attachment. Only 1440 and 1500 are allowed. If not specified, the value will default to 1440.", + "format": "int32", + "type": "integer" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "operationalStatus": { + "description": "[Output Only] The current status of whether or not this interconnect attachment is functional, which can take one of the following values: - OS_ACTIVE: The attachment has been turned up and is ready to use. - OS_UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete. ", + "enum": [ + "OS_ACTIVE", + "OS_UNPROVISIONED" + ], + "enumDescriptions": [ + "Indicates that attachment has been turned up and is ready to use.", + "Indicates that attachment is not ready to use yet, because turnup is not complete." + ], + "type": "string" + }, + "pairingKey": { + "description": "[Output only for type PARTNER. Input only for PARTNER_PROVIDER. Not present for DEDICATED]. The opaque identifier of an PARTNER attachment used to initiate provisioning with a selected partner. Of the form \"XXXXX/region/domain\"", + "type": "string" + }, + "partnerAsn": { + "description": "Optional BGP ASN for the router supplied by a Layer 3 Partner if they configured BGP on behalf of the customer. Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED.", + "format": "int64", "type": "string" }, + "partnerMetadata": { + "$ref": "InterconnectAttachmentPartnerMetadata", + "description": "Informational metadata about Partner attachments from Partners to display to customers. Output only for for PARTNER type, mutable for PARTNER_PROVIDER, not available for DEDICATED." + }, + "privateInterconnectInfo": { + "$ref": "InterconnectAttachmentPrivateInfo", + "description": "[Output Only] Information specific to an InterconnectAttachment. This property is populated if the interconnect that this is attached to is of type DEDICATED." + }, + "region": { + "description": "[Output Only] URL of the region where the regional interconnect attachment resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, + "router": { + "description": "URL of the Cloud Router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the network \u0026 region within which the Cloud Router is configured.", + "type": "string" + }, + "satisfiesPzs": { + "description": "[Output Only] Set to true if the resource satisfies the zone separation organization policy constraints and false otherwise. Defaults to false if the field is not present.", + "type": "boolean" + }, "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "LARGE_DEPLOYMENT_WARNING", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "PARTIAL_SUCCESS", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "Warning about failed cleanup of transient changes made by a failed operation.", - "A link to a deprecated resource was created.", - "When deploying and at least one of the resources has a type marked as deprecated", - "The user created a boot disk that is larger than image size.", - "When deploying and at least one of the resources has a type marked as experimental", - "Warning that is present in an external api call", - "Warning that value of a field has been overridden. Deprecated unused field.", - "The operation involved use of an injected kernel, which is deprecated.", - "When deploying a deployment with a exceedingly large number of resources", - "A resource depends on a missing type", - "The route's nextHopIp address is not assigned to an instance on the network.", - "The route's next hop instance cannot ip forward.", - "The route's nextHopInstance URL refers to an instance that does not exist.", - "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", - "The route's next hop instance does not have a status of RUNNING.", - "Error which is not critical. We decided to continue the process despite the mentioned error.", - "No results are present on a particular list page.", - "Success is reported, but some results may be missing due to errors", - "The user attempted to use a resource that requires a TOS they have not accepted.", - "Warning that a resource is in use.", - "One or more of the resources set to auto-delete could not be deleted because they were in use.", - "When a resource schema validation is ignored.", - "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", - "When undeclared properties in the schema are present", - "A given scope cannot be reached." - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "InstanceGroupsListInstancesRequest": { - "id": "InstanceGroupsListInstancesRequest", - "properties": { - "instanceState": { - "description": "A filter for the state of the instances in the instance group. Valid options are ALL or RUNNING. If you do not specify this parameter the list includes all instances regardless of their state.", + "stackType": { + "description": "The stack type for this interconnect attachment to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at interconnect attachments creation and update interconnect attachment operations.", "enum": [ - "ALL", - "RUNNING" + "IPV4_IPV6", + "IPV4_ONLY" ], "enumDescriptions": [ - "Includes all instances in the generated list regardless of their state.", - "Includes instances in the generated list only if they have a RUNNING state." + "The interconnect attachment can have both IPv4 and IPv6 addresses.", + "The interconnect attachment will only be assigned IPv4 addresses." + ], + "type": "string" + }, + "state": { + "description": "[Output Only] The current state of this attachment's functionality. Enum values ACTIVE and UNPROVISIONED are shared by DEDICATED/PRIVATE, PARTNER, and PARTNER_PROVIDER interconnect attachments, while enum values PENDING_PARTNER, PARTNER_REQUEST_RECEIVED, and PENDING_CUSTOMER are used for only PARTNER and PARTNER_PROVIDER interconnect attachments. This state can take one of the following values: - ACTIVE: The attachment has been turned up and is ready to use. - UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete. - PENDING_PARTNER: A newly-created PARTNER attachment that has not yet been configured on the Partner side. - PARTNER_REQUEST_RECEIVED: A PARTNER attachment is in the process of provisioning after a PARTNER_PROVIDER attachment was created that references it. - PENDING_CUSTOMER: A PARTNER or PARTNER_PROVIDER attachment that is waiting for a customer to activate it. - DEFUNCT: The attachment was deleted externally and is no longer functional. This could be because the associated Interconnect was removed, or because the other side of a Partner attachment was deleted. ", + "enum": [ + "ACTIVE", + "DEFUNCT", + "PARTNER_REQUEST_RECEIVED", + "PENDING_CUSTOMER", + "PENDING_PARTNER", + "STATE_UNSPECIFIED", + "UNPROVISIONED" + ], + "enumDescriptions": [ + "Indicates that attachment has been turned up and is ready to use.", + "The attachment was deleted externally and is no longer functional. 
This could be because the associated Interconnect was wiped out, or because the other side of a Partner attachment was deleted.", + "A PARTNER attachment is in the process of provisioning after a PARTNER_PROVIDER attachment was created that references it.", + "PARTNER or PARTNER_PROVIDER attachment that is waiting for the customer to activate.", + "A newly created PARTNER attachment that has not yet been configured on the Partner side.", + "", + "Indicates that attachment is not ready to use yet, because turnup is not complete." + ], + "type": "string" + }, + "type": { + "description": "The type of interconnect attachment this is, which can take one of the following values: - DEDICATED: an attachment to a Dedicated Interconnect. - PARTNER: an attachment to a Partner Interconnect, created by the customer. - PARTNER_PROVIDER: an attachment to a Partner Interconnect, created by the partner. ", + "enum": [ + "DEDICATED", + "PARTNER", + "PARTNER_PROVIDER" + ], + "enumDescriptions": [ + "Attachment to a dedicated interconnect.", + "Attachment to a partner interconnect, created by the customer.", + "Attachment to a partner interconnect, created by the partner." ], "type": "string" + }, + "vlanTag8021q": { + "description": "The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. Only specified at creation time.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "InstanceGroupsRemoveInstancesRequest": { - "id": "InstanceGroupsRemoveInstancesRequest", + "InterconnectAttachmentAggregatedList": { + "id": "InterconnectAttachmentAggregatedList", "properties": { - "instances": { - "description": "The list of instances to remove from the instance group.", - "items": { - "$ref": "InstanceReference" + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "InterconnectAttachmentsScopedList", + "description": "Name of the scope containing this set of interconnect attachments." }, - "type": "array" - } - }, - "type": "object" - }, - "InstanceGroupsScopedList": { - "id": "InstanceGroupsScopedList", - "properties": { - "instanceGroups": { - "description": "[Output Only] The list of instance groups that are contained in this scope.", + "description": "A list of InterconnectAttachmentsScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#interconnectAttachmentAggregatedList", + "description": "[Output Only] Type of resource. Always compute#interconnectAttachmentAggregatedList for aggregated lists of interconnect attachments.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", "items": { - "$ref": "InstanceGroup" + "type": "string" }, "type": "array" }, "warning": { - "description": "[Output Only] An informational warning that replaces the list of instance groups when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -40112,10 +46023,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -40139,10 +46052,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -40186,42 +46101,24 @@ }, "type": "object" }, - "InstanceGroupsSetNamedPortsRequest": { - "id": "InstanceGroupsSetNamedPortsRequest", - "properties": { - "fingerprint": { - "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request. 
A request with an incorrect fingerprint will fail with error 412 conditionNotMet.", - "format": "byte", - "type": "string" - }, - "namedPorts": { - "description": "The list of named ports to set for this instance group.", - "items": { - "$ref": "NamedPort" - }, - "type": "array" - } - }, - "type": "object" - }, - "InstanceList": { - "description": "Contains a list of instances.", - "id": "InstanceList", + "InterconnectAttachmentList": { + "description": "Response to the list request, and contains a list of interconnect attachments.", + "id": "InterconnectAttachmentList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Instance resources.", + "description": "A list of InterconnectAttachment resources.", "items": { - "$ref": "Instance" + "$ref": "InterconnectAttachment" }, "type": "array" }, "kind": { - "default": "compute#instanceList", - "description": "[Output Only] Type of resource. Always compute#instanceList for lists of Instance resources.", + "default": "compute#interconnectAttachmentList", + "description": "[Output Only] Type of resource. Always compute#interconnectAttachmentList for lists of interconnect attachments.", "type": "string" }, "nextPageToken": { @@ -40246,10 +46143,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -40273,10 +46172,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -40320,36 +46221,49 @@ }, "type": "object" }, - "InstanceListReferrers": { - "description": "Contains a list of instance referrers.", - "id": "InstanceListReferrers", + "InterconnectAttachmentPartnerMetadata": { + "description": "Informational metadata about Partner attachments from Partners to display to customers. These fields are propagated from PARTNER_PROVIDER attachments to their corresponding PARTNER attachments.", + "id": "InterconnectAttachmentPartnerMetadata", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of Reference resources.", - "items": { - "$ref": "Reference" - }, - "type": "array" - }, - "kind": { - "default": "compute#instanceListReferrers", - "description": "[Output Only] Type of resource. 
Always compute#instanceListReferrers for lists of Instance referrers.", + "interconnectName": { + "description": "Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner's portal. For instance \"Chicago 1\". This value may be validated to match approved Partner values.", "type": "string" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "partnerName": { + "description": "Plain text name of the Partner providing this attachment. This value may be validated to match approved Partner values.", "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", + "portalUrl": { + "description": "URL of the Partner's portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values.", "type": "string" + } + }, + "type": "object" + }, + "InterconnectAttachmentPrivateInfo": { + "description": "Information for an interconnect attachment when this belongs to an interconnect of type DEDICATED.", + "id": "InterconnectAttachmentPrivateInfo", + "properties": { + "tag8021q": { + "description": "[Output Only] 802.1q encapsulation tag to be used for traffic between Google and the customer, going to and from this network and region.", + "format": "uint32", + "type": "integer" + } + }, + "type": "object" + }, + "InterconnectAttachmentsScopedList": { + "id": "InterconnectAttachmentsScopedList", + "properties": { + "interconnectAttachments": { + "description": "A list of interconnect attachments contained in this scope.", + "items": { + "$ref": "InterconnectAttachment" + }, + "type": "array" }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "Informational warning which replaces the list of addresses when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -40362,10 +46276,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -40389,10 +46305,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -40436,301 +46354,206 @@ }, "type": "object" }, - "InstanceManagedByIgmError": { - "id": "InstanceManagedByIgmError", - "properties": { - "error": { - "$ref": "InstanceManagedByIgmErrorManagedInstanceError", - "description": "[Output Only] Contents of the error." - }, - "instanceActionDetails": { - "$ref": "InstanceManagedByIgmErrorInstanceActionDetails", - "description": "[Output Only] Details of the instance action that triggered this error. May be null, if the error was not caused by an action on an instance. This field is optional." - }, - "timestamp": { - "description": "[Output Only] The time that this error occurred. This value is in RFC3339 text format.", - "type": "string" - } - }, - "type": "object" - }, - "InstanceManagedByIgmErrorInstanceActionDetails": { - "id": "InstanceManagedByIgmErrorInstanceActionDetails", - "properties": { - "action": { - "description": "[Output Only] Action that managed instance group was executing on the instance when the error occurred. Possible values:", - "enum": [ - "ABANDONING", - "CREATING", - "CREATING_WITHOUT_RETRIES", - "DELETING", - "NONE", - "RECREATING", - "REFRESHING", - "RESTARTING", - "RESUMING", - "STARTING", - "STOPPING", - "SUSPENDING", - "VERIFYING" - ], - "enumDescriptions": [ - "The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group.", - "The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful.", - "The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's targetSize value is decreased.", - "The managed instance group is permanently deleting this instance.", - "The managed instance group has not scheduled any actions for this instance.", - "The managed instance group is recreating this instance.", - "The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance.", - "The managed instance group is restarting this instance.", - "The managed instance group is resuming this instance.", - "The managed instance group is starting this instance.", - "The managed instance group is stopping this instance.", - "The managed instance group is suspending this instance.", - "The managed instance group is verifying this already created instance. Verification happens every time the instance is (re)created or restarted and consists of: 1. 
Waiting until health check specified as part of this managed instance group's autohealing policy reports HEALTHY. Note: Applies only if autohealing policy has a health check specified 2. Waiting for addition verification steps performed as post-instance creation (subject to future extensions)." - ], - "type": "string" - }, - "instance": { - "description": "[Output Only] The URL of the instance. The URL can be set even if the instance has not yet been created.", - "type": "string" - }, - "version": { - "$ref": "ManagedInstanceVersion", - "description": "[Output Only] Version this instance was created from, or was being created from, but the creation failed. Corresponds to one of the versions that were set on the Instance Group Manager resource at the time this instance was being created." - } - }, - "type": "object" - }, - "InstanceManagedByIgmErrorManagedInstanceError": { - "id": "InstanceManagedByIgmErrorManagedInstanceError", - "properties": { - "code": { - "description": "[Output Only] Error code.", - "type": "string" - }, - "message": { - "description": "[Output Only] Error message.", - "type": "string" - } - }, - "type": "object" - }, - "InstanceMoveRequest": { - "id": "InstanceMoveRequest", - "properties": { - "destinationZone": { - "description": "The URL of the destination zone to move the instance. This can be a full or partial URL. For example, the following are all valid URLs to a zone: - https://www.googleapis.com/compute/v1/projects/project/zones/zone - projects/project/zones/zone - zones/zone ", - "type": "string" - }, - "targetInstance": { - "description": "The URL of the target instance to move. This can be a full or partial URL. For example, the following are all valid URLs to an instance: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /instances/instance - projects/project/zones/zone/instances/instance - zones/zone/instances/instance ", - "type": "string" - } - }, - "type": "object" - }, - "InstanceProperties": { - "id": "InstanceProperties", + "InterconnectCircuitInfo": { + "description": "Describes a single physical circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only.", + "id": "InterconnectCircuitInfo", "properties": { - "advancedMachineFeatures": { - "$ref": "AdvancedMachineFeatures", - "description": "Controls for advanced machine-related behavior features. Note that for MachineImage, this is not supported yet." - }, - "canIpForward": { - "description": "Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information.", - "type": "boolean" - }, - "confidentialInstanceConfig": { - "$ref": "ConfidentialInstanceConfig", - "description": "Specifies the Confidential Instance options. Note that for MachineImage, this is not supported yet." 
- }, - "description": { - "description": "An optional text description for the instances that are created from these properties.", - "type": "string" - }, - "disks": { - "description": "An array of disks that are associated with the instances that are created from these properties.", - "items": { - "$ref": "AttachedDisk" - }, - "type": "array" - }, - "guestAccelerators": { - "description": "A list of guest accelerator cards' type and count to use for instances created from these properties.", - "items": { - "$ref": "AcceleratorConfig" - }, - "type": "array" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to apply to instances that are created from these properties.", - "type": "object" - }, - "machineType": { - "annotations": { - "required": [ - "compute.instanceTemplates.insert" - ] - }, - "description": "The machine type to use for instances that are created from these properties.", + "customerDemarcId": { + "description": "Customer-side demarc ID for this circuit.", "type": "string" }, - "metadata": { - "$ref": "Metadata", - "description": "The metadata key/value pairs to assign to instances that are created from these properties. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." - }, - "minCpuPlatform": { - "description": "Minimum cpu/platform to be used by instances. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". For more information, read Specifying a Minimum CPU Platform.", + "googleCircuitId": { + "description": "Google-assigned unique ID for this circuit. Assigned at circuit turn-up.", "type": "string" }, - "networkInterfaces": { - "description": "An array of network access configurations for this interface.", + "googleDemarcId": { + "description": "Google-side demarc ID for this circuit. Assigned at circuit turn-up and provided by Google to the customer in the LOA.", + "type": "string" + } + }, + "type": "object" + }, + "InterconnectDiagnostics": { + "description": "Diagnostics information about interconnect, contains detailed and current technical information about Google's side of the connection.", + "id": "InterconnectDiagnostics", + "properties": { + "arpCaches": { + "description": "A list of InterconnectDiagnostics.ARPEntry objects, describing individual neighbors currently seen by the Google router in the ARP cache for the Interconnect. This will be empty when the Interconnect is not bundled.", "items": { - "$ref": "NetworkInterface" + "$ref": "InterconnectDiagnosticsARPEntry" }, "type": "array" }, - "networkPerformanceConfig": { - "$ref": "NetworkPerformanceConfig", - "description": "Note that for MachineImage, this is not supported yet." - }, - "privateIpv6GoogleAccess": { - "description": "The private IPv6 google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default. Note that for MachineImage, this is not supported yet.", + "bundleAggregationType": { + "description": "The aggregation type of the bundle interface.", "enum": [ - "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE", - "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE", - "INHERIT_FROM_SUBNETWORK" + "BUNDLE_AGGREGATION_TYPE_LACP", + "BUNDLE_AGGREGATION_TYPE_STATIC" ], "enumDescriptions": [ - "Bidirectional private IPv6 access to/from Google services. 
If specified, the subnetwork who is attached to the instance's default network interface will be assigned an internal IPv6 prefix if it doesn't have before.", - "Outbound private IPv6 access from VMs in this subnet to Google services. If specified, the subnetwork who is attached to the instance's default network interface will be assigned an internal IPv6 prefix if it doesn't have before.", - "Each network interface inherits PrivateIpv6GoogleAccess from its subnetwork." + "LACP is enabled.", + "LACP is disabled." ], "type": "string" }, - "reservationAffinity": { - "$ref": "ReservationAffinity", - "description": "Specifies the reservations that instances can consume from. Note that for MachineImage, this is not supported yet." - }, - "resourceManagerTags": { - "additionalProperties": { - "type": "string" - }, - "description": "Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT \u0026 PATCH) when empty.", - "type": "object" - }, - "resourcePolicies": { - "description": "Resource policies (names, not URLs) applied to instances created from these properties. Note that for MachineImage, this is not supported yet.", - "items": { - "type": "string" - }, - "type": "array" - }, - "scheduling": { - "$ref": "Scheduling", - "description": "Specifies the scheduling options for the instances that are created from these properties." + "bundleOperationalStatus": { + "description": "The operational status of the bundle interface.", + "enum": [ + "BUNDLE_OPERATIONAL_STATUS_DOWN", + "BUNDLE_OPERATIONAL_STATUS_UP" + ], + "enumDescriptions": [ + "If bundleAggregationType is LACP: LACP is not established and/or all links in the bundle have DOWN operational status. If bundleAggregationType is STATIC: one or more links in the bundle has DOWN operational status.", + "If bundleAggregationType is LACP: LACP is established and at least one link in the bundle has UP operational status. If bundleAggregationType is STATIC: all links in the bundle (typically just one) have UP operational status." + ], + "type": "string" }, - "serviceAccounts": { - "description": "A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from these properties. Use metadata queries to obtain the access tokens for these instances.", + "links": { + "description": "A list of InterconnectDiagnostics.LinkStatus objects, describing the status for each link on the Interconnect.", "items": { - "$ref": "ServiceAccount" + "$ref": "InterconnectDiagnosticsLinkStatus" }, "type": "array" }, - "shieldedInstanceConfig": { - "$ref": "ShieldedInstanceConfig", - "description": "Note that for MachineImage, this is not supported yet." - }, - "tags": { - "$ref": "Tags", - "description": "A list of tags to apply to the instances that are created from these properties. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." 
+ "macAddress": { + "description": "The MAC address of the Interconnect's bundle interface.", + "type": "string" } }, "type": "object" }, - "InstanceReference": { - "id": "InstanceReference", + "InterconnectDiagnosticsARPEntry": { + "description": "Describing the ARP neighbor entries seen on this link", + "id": "InterconnectDiagnosticsARPEntry", "properties": { - "instance": { - "description": "The URL for a specific instance. @required compute.instancegroups.addInstances/removeInstances", + "ipAddress": { + "description": "The IP address of this ARP neighbor.", + "type": "string" + }, + "macAddress": { + "description": "The MAC address of this ARP neighbor.", "type": "string" } }, "type": "object" }, - "InstanceTemplate": { - "description": "Represents an Instance Template resource. You can use instance templates to create VM instances and managed instance groups. For more information, read Instance Templates.", - "id": "InstanceTemplate", + "InterconnectDiagnosticsLinkLACPStatus": { + "id": "InterconnectDiagnosticsLinkLACPStatus", "properties": { - "creationTimestamp": { - "description": "[Output Only] The creation timestamp for this instance template in RFC3339 text format.", + "googleSystemId": { + "description": "System ID of the port on Google's side of the LACP exchange.", "type": "string" }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "neighborSystemId": { + "description": "System ID of the port on the neighbor's side of the LACP exchange.", "type": "string" }, - "id": { - "description": "[Output Only] A unique identifier for this instance template. The server defines this identifier.", - "format": "uint64", + "state": { + "description": "The state of a LACP link, which can take one of the following values: - ACTIVE: The link is configured and active within the bundle. - DETACHED: The link is not configured within the bundle. This means that the rest of the object should be empty. ", + "enum": [ + "ACTIVE", + "DETACHED" + ], + "enumDescriptions": [ + "The link is configured and active within the bundle.", + "The link is not configured within the bundle, this means the rest of the object should be empty." + ], "type": "string" - }, - "kind": { - "default": "compute#instanceTemplate", - "description": "[Output Only] The resource type, which is always compute#instanceTemplate for instance templates.", + } + }, + "type": "object" + }, + "InterconnectDiagnosticsLinkOpticalPower": { + "id": "InterconnectDiagnosticsLinkOpticalPower", + "properties": { + "state": { + "description": "The status of the current value when compared to the warning and alarm levels for the receiving or transmitting transceiver. Possible states include: - OK: The value has not crossed a warning threshold. - LOW_WARNING: The value has crossed below the low warning threshold. - HIGH_WARNING: The value has crossed above the high warning threshold. - LOW_ALARM: The value has crossed below the low alarm threshold. - HIGH_ALARM: The value has crossed above the high alarm threshold. 
", + "enum": [ + "HIGH_ALARM", + "HIGH_WARNING", + "LOW_ALARM", + "LOW_WARNING", + "OK" + ], + "enumDescriptions": [ + "The value has crossed above the high alarm threshold.", + "The value of the current optical power has crossed above the high warning threshold.", + "The value of the current optical power has crossed below the low alarm threshold.", + "The value of the current optical power has crossed below the low warning threshold.", + "The value of the current optical power has not crossed a warning threshold." + ], "type": "string" }, - "name": { - "annotations": { - "required": [ - "compute.instanceTemplates.insert" - ] + "value": { + "description": "Value of the current receiving or transmitting optical power, read in dBm. Take a known good optical value, give it a 10% margin and trigger warnings relative to that value. In general, a -7dBm warning and a -11dBm alarm are good optical value estimates for most links.", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, + "InterconnectDiagnosticsLinkStatus": { + "id": "InterconnectDiagnosticsLinkStatus", + "properties": { + "arpCaches": { + "description": "A list of InterconnectDiagnostics.ARPEntry objects, describing the ARP neighbor entries seen on this link. This will be empty if the link is bundled", + "items": { + "$ref": "InterconnectDiagnosticsARPEntry" }, - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" + "type": "array" }, - "properties": { - "$ref": "InstanceProperties", - "description": "The instance properties for this instance template." + "circuitId": { + "description": "The unique ID for this link assigned during turn up by Google.", + "type": "string" }, - "selfLink": { - "description": "[Output Only] The URL for this instance template. The server defines this URL.", + "googleDemarc": { + "description": "The Demarc address assigned by Google and provided in the LoA.", "type": "string" }, - "sourceInstance": { - "description": "The source instance used to create the template. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /instances/instance - projects/project/zones/zone/instances/instance ", + "lacpStatus": { + "$ref": "InterconnectDiagnosticsLinkLACPStatus" + }, + "operationalStatus": { + "description": "The operational status of the link.", + "enum": [ + "LINK_OPERATIONAL_STATUS_DOWN", + "LINK_OPERATIONAL_STATUS_UP" + ], + "enumDescriptions": [ + "The interface is unable to communicate with the remote end.", + "The interface has low level communication with the remote end." + ], "type": "string" }, - "sourceInstanceParams": { - "$ref": "SourceInstanceParams", - "description": "The source instance params to use to create this instance template." + "receivingOpticalPower": { + "$ref": "InterconnectDiagnosticsLinkOpticalPower", + "description": "An InterconnectDiagnostics.LinkOpticalPower object, describing the current value and status of the received light level." 
+ }, + "transmittingOpticalPower": { + "$ref": "InterconnectDiagnosticsLinkOpticalPower", + "description": "An InterconnectDiagnostics.LinkOpticalPower object, describing the current value and status of the transmitted light level." } }, "type": "object" }, - "InstanceTemplateList": { - "description": "A list of instance templates.", - "id": "InstanceTemplateList", + "InterconnectList": { + "description": "Response to the list request, and contains a list of interconnects.", + "id": "InterconnectList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of InstanceTemplate resources.", + "description": "A list of Interconnect resources.", "items": { - "$ref": "InstanceTemplate" + "$ref": "Interconnect" }, "type": "array" }, "kind": { - "default": "compute#instanceTemplateList", - "description": "[Output Only] The resource type, which is always compute#instanceTemplatesListResponse for instance template lists.", + "default": "compute#interconnectList", + "description": "[Output Only] Type of resource. Always compute#interconnectList for lists of interconnects.", "type": "string" }, "nextPageToken": { @@ -40755,10 +46578,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -40782,10 +46607,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -40829,146 +46656,144 @@ }, "type": "object" }, - "InstanceWithNamedPorts": { - "id": "InstanceWithNamedPorts", + "InterconnectLocation": { + "description": "Represents an Interconnect Attachment (VLAN) Location resource. You can use this resource to find location details about an Interconnect attachment (VLAN). 
For more information about interconnect attachments, read Creating VLAN Attachments.", + "id": "InterconnectLocation", "properties": { - "instance": { - "description": "[Output Only] The URL of the instance.", + "address": { + "description": "[Output Only] The postal address of the Point of Presence, each line in the address is separated by a newline character.", "type": "string" }, - "namedPorts": { - "description": "[Output Only] The named ports that belong to this instance group.", - "items": { - "$ref": "NamedPort" - }, - "type": "array" + "availabilityZone": { + "description": "[Output Only] Availability zone for this InterconnectLocation. Within a metropolitan area (metro), maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\".", + "type": "string" }, - "status": { - "description": "[Output Only] The status of the instance.", + "city": { + "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\".", + "type": "string" + }, + "continent": { + "description": "[Output Only] Continent for this location, which can take one of the following values: - AFRICA - ASIA_PAC - EUROPE - NORTH_AMERICA - SOUTH_AMERICA ", "enum": [ - "DEPROVISIONING", - "PROVISIONING", - "REPAIRING", - "RUNNING", - "STAGING", - "STOPPED", - "STOPPING", - "SUSPENDED", - "SUSPENDING", - "TERMINATED" + "AFRICA", + "ASIA_PAC", + "C_AFRICA", + "C_ASIA_PAC", + "C_EUROPE", + "C_NORTH_AMERICA", + "C_SOUTH_AMERICA", + "EUROPE", + "NORTH_AMERICA", + "SOUTH_AMERICA" ], "enumDescriptions": [ - "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", - "Resources are being allocated for the instance.", - "The instance is in repair.", - "The instance is running.", - "All required resources have been allocated and the instance is being started.", - "The instance has stopped successfully.", - "The instance is currently stopping (either being deleted or killed).", - "The instance has suspended.", - "The instance is suspending.", - "The instance has stopped (either by explicit action or underlying failure)." + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" ], "type": "string" - } - }, - "type": "object" - }, - "InstancesAddResourcePoliciesRequest": { - "id": "InstancesAddResourcePoliciesRequest", - "properties": { - "resourcePolicies": { - "description": "Resource policies to be added to this instance.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "InstancesGetEffectiveFirewallsResponse": { - "id": "InstancesGetEffectiveFirewallsResponse", - "properties": { - "firewallPolicys": { - "description": "Effective firewalls from firewall policies.", - "items": { - "$ref": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy" - }, - "type": "array" }, - "firewalls": { - "description": "Effective firewalls on the instance.", - "items": { - "$ref": "Firewall" - }, - "type": "array" - } - }, - "type": "object" - }, - "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy": { - "id": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy", - "properties": { - "displayName": { - "description": "[Output Only] Deprecated, please use short name instead. 
The display name of the firewall policy.", + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "[Output Only] An optional description of the resource.", + "type": "string" + }, + "facilityProvider": { + "description": "[Output Only] The name of the provider for this facility (e.g., EQUINIX).", + "type": "string" + }, + "facilityProviderFacilityId": { + "description": "[Output Only] A provider-assigned Identifier for this facility (e.g., Ashburn-DC1).", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#interconnectLocation", + "description": "[Output Only] Type of the resource. Always compute#interconnectLocation for interconnect locations.", "type": "string" }, "name": { - "description": "[Output Only] The name of the firewall policy.", + "description": "[Output Only] Name of the resource.", "type": "string" }, - "rules": { - "description": "The rules that apply to the network.", + "peeringdbFacilityId": { + "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb).", + "type": "string" + }, + "regionInfos": { + "description": "[Output Only] A list of InterconnectLocation.RegionInfo objects, that describe parameters pertaining to the relation between this InterconnectLocation and various Google Cloud regions.", "items": { - "$ref": "FirewallPolicyRule" + "$ref": "InterconnectLocationRegionInfo" }, "type": "array" }, - "shortName": { - "description": "[Output Only] The short name of the firewall policy.", + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "type": { - "description": "[Output Only] The type of the firewall policy.", + "status": { + "description": "[Output Only] The status of this InterconnectLocation, which can take one of the following values: - CLOSED: The InterconnectLocation is closed and is unavailable for provisioning new Interconnects. - AVAILABLE: The InterconnectLocation is available for provisioning new Interconnects. ", "enum": [ - "HIERARCHY", - "UNSPECIFIED" + "AVAILABLE", + "CLOSED" ], "enumDescriptions": [ - "", - "" + "The InterconnectLocation is available for provisioning new Interconnects.", + "The InterconnectLocation is closed for provisioning new Interconnects." ], "type": "string" + }, + "supportsPzs": { + "description": "[Output Only] Set to true for locations that support physical zone separation. 
Defaults to false if the field is not present.", + "type": "boolean" } }, "type": "object" }, - "InstancesRemoveResourcePoliciesRequest": { - "id": "InstancesRemoveResourcePoliciesRequest", - "properties": { - "resourcePolicies": { - "description": "Resource policies to be removed from this instance.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "InstancesScopedList": { - "id": "InstancesScopedList", + "InterconnectLocationList": { + "description": "Response to the list request, and contains a list of interconnect locations.", + "id": "InterconnectLocationList", "properties": { - "instances": { - "description": "[Output Only] A list of instances contained in this scope.", + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of InterconnectLocation resources.", "items": { - "$ref": "Instance" + "$ref": "InterconnectLocation" }, "type": "array" }, + "kind": { + "default": "compute#interconnectLocationList", + "description": "[Output Only] Type of resource. Always compute#interconnectLocationList for lists of interconnect locations.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "[Output Only] Informational warning which replaces the list of instances when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -40981,10 +46806,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -41008,10 +46835,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -41036,393 +46865,210 @@ "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", "type": "string" }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "InstancesSetLabelsRequest": { - "id": "InstancesSetLabelsRequest", - "properties": { - "labelFingerprint": { - "description": "Fingerprint of the previous set of labels for this resource, used to prevent conflicts. Provide the latest fingerprint value when making a request to add or change labels.", - "format": "byte", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object" - } - }, - "type": "object" - }, - "InstancesSetMachineResourcesRequest": { - "id": "InstancesSetMachineResourcesRequest", - "properties": { - "guestAccelerators": { - "description": "A list of the type and count of accelerator cards attached to the instance.", - "items": { - "$ref": "AcceleratorConfig" - }, - "type": "array" - } - }, - "type": "object" - }, - "InstancesSetMachineTypeRequest": { - "id": "InstancesSetMachineTypeRequest", - "properties": { - "machineType": { - "description": "Full or partial URL of the machine type resource. See Machine Types for a full list of machine types. 
For example: zones/us-central1-f/machineTypes/n1-standard-1", - "type": "string" - } - }, - "type": "object" - }, - "InstancesSetMinCpuPlatformRequest": { - "id": "InstancesSetMinCpuPlatformRequest", - "properties": { - "minCpuPlatform": { - "description": "Minimum cpu/platform this instance should be started at.", - "type": "string" - } - }, - "type": "object" - }, - "InstancesSetServiceAccountRequest": { - "id": "InstancesSetServiceAccountRequest", - "properties": { - "email": { - "description": "Email address of the service account.", - "type": "string" - }, - "scopes": { - "description": "The list of scopes to be made available for this service account.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "InstancesStartWithEncryptionKeyRequest": { - "id": "InstancesStartWithEncryptionKeyRequest", - "properties": { - "disks": { - "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key. In order to start the instance, the disk url and its corresponding key must be provided. If the disk is not protected with a customer-supplied encryption key it should not be specified.", - "items": { - "$ref": "CustomerEncryptionKeyProtectedDisk" + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } }, - "type": "array" + "type": "object" } }, "type": "object" }, - "Int64RangeMatch": { - "description": "HttpRouteRuleMatch criteria for field values that must stay within the specified integer range.", - "id": "Int64RangeMatch", + "InterconnectLocationRegionInfo": { + "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", + "id": "InterconnectLocationRegionInfo", "properties": { - "rangeEnd": { - "description": "The end of the range (exclusive) in signed long integer format.", + "expectedRttMs": { + "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", "format": "int64", "type": "string" }, - "rangeStart": { - "description": "The start of the range (inclusive) in signed long integer format.", - "format": "int64", + "locationPresence": { + "description": "Identifies the network presence of this location.", + "enum": [ + "GLOBAL", + "LOCAL_REGION", + "LP_GLOBAL", + "LP_LOCAL_REGION" + ], + "enumDescriptions": [ + "This region is not in any common network presence with this InterconnectLocation.", + "This region shares the same regional network presence as this InterconnectLocation.", + "[Deprecated] This region is not in any common network presence with this InterconnectLocation.", + "[Deprecated] This region shares the same regional network presence as this InterconnectLocation." + ], + "type": "string" + }, + "region": { + "description": "URL for the region of this location.", "type": "string" } }, "type": "object" }, - "Interconnect": { - "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the GCP network and your on-premises network. 
For more information, read the Dedicated Interconnect Overview.", - "id": "Interconnect", + "InterconnectOutageNotification": { + "description": "Description of a planned outage on this Interconnect.", + "id": "InterconnectOutageNotification", "properties": { - "adminEnabled": { - "description": "Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true.", - "type": "boolean" - }, - "circuitInfos": { - "description": "[Output Only] A list of CircuitInfo objects, that describe the individual circuits in this LAG.", - "items": { - "$ref": "InterconnectCircuitInfo" - }, - "type": "array" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "customerName": { - "description": "Customer name, to put in the Letter of Authorization as the party authorized to request a crossconnect.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "expectedOutages": { - "description": "[Output Only] A list of outages expected for this Interconnect.", - "items": { - "$ref": "InterconnectOutageNotification" - }, - "type": "array" - }, - "googleIpAddress": { - "description": "[Output Only] IP address configured on the Google side of the Interconnect link. This can be used only for ping tests.", - "type": "string" - }, - "googleReferenceId": { - "description": "[Output Only] Google reference ID to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "interconnectAttachments": { - "description": "[Output Only] A list of the URLs of all InterconnectAttachments configured to use this Interconnect.", + "affectedCircuits": { + "description": "If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", "items": { "type": "string" }, "type": "array" }, - "interconnectType": { - "description": "Type of interconnect, which can take one of the following values: - PARTNER: A partner-managed interconnection shared between customers though a partner. - DEDICATED: A dedicated physical interconnection with the customer. Note that a value IT_PRIVATE has been deprecated in favor of DEDICATED.", - "enum": [ - "DEDICATED", - "IT_PRIVATE", - "PARTNER" - ], - "enumDescriptions": [ - "A dedicated physical interconnection with the customer.", - "[Deprecated] A private, physical interconnection with the customer.", - "A partner-managed interconnection shared between customers via partner." - ], + "description": { + "description": "A description about the purpose of the outage.", "type": "string" }, - "kind": { - "default": "compute#interconnect", - "description": "[Output Only] Type of the resource. 
Always compute#interconnect for interconnects.", + "endTime": { + "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", + "format": "int64", "type": "string" }, - "linkType": { - "description": "Type of link requested, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.", + "issueType": { + "description": "Form this outage is expected to take, which can take one of the following values: - OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", "enum": [ - "LINK_TYPE_ETHERNET_100G_LR", - "LINK_TYPE_ETHERNET_10G_LR" + "IT_OUTAGE", + "IT_PARTIAL_OUTAGE", + "OUTAGE", + "PARTIAL_OUTAGE" ], "enumDescriptions": [ - "100G Ethernet, LR Optics.", - "10G Ethernet, LR Optics. [(rate_bps) = 10000000000];" + "[Deprecated] The Interconnect may be completely out of service for some or all of the specified window.", + "[Deprecated] Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth.", + "The Interconnect may be completely out of service for some or all of the specified window.", + "Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth." ], "type": "string" }, - "location": { - "description": "URL of the InterconnectLocation object that represents where this connection is to be provisioned.", - "type": "string" - }, "name": { - "annotations": { - "required": [ - "compute.interconnects.insert" - ] - }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "nocContactEmail": { - "description": "Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Stackdriver logs alerting and Cloud Notifications.", + "description": "Unique identifier for this outage notification.", "type": "string" }, - "operationalStatus": { - "description": "[Output Only] The current status of this Interconnect's functionality, which can take one of the following values: - OS_ACTIVE: A valid Interconnect, which is turned up and is ready to use. Attachments may be provisioned on this Interconnect. - OS_UNPROVISIONED: An Interconnect that has not completed turnup. No attachments may be provisioned on this Interconnect. - OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal maintenance. 
No attachments may be provisioned or updated on this Interconnect. ", + "source": { + "description": "The party that generated this notification, which can take the following value: - GOOGLE: this notification as generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", "enum": [ - "OS_ACTIVE", - "OS_UNPROVISIONED" + "GOOGLE", + "NSRC_GOOGLE" ], "enumDescriptions": [ - "The interconnect is valid, turned up, and ready to use. Attachments may be provisioned on this interconnect.", - "The interconnect has not completed turnup. No attachments may be provisioned on this interconnect." + "This notification was generated by Google.", + "[Deprecated] This notification was generated by Google." ], "type": "string" }, - "peerIpAddress": { - "description": "[Output Only] IP address configured on the customer side of the Interconnect link. The customer should configure this IP address during turnup when prompted by Google NOC. This can be used only for ping tests.", - "type": "string" - }, - "provisionedLinkCount": { - "description": "[Output Only] Number of links actually provisioned in this interconnect.", - "format": "int32", - "type": "integer" - }, - "requestedLinkCount": { - "description": "Target number of physical links in the link bundle, as requested by the customer.", - "format": "int32", - "type": "integer" - }, - "satisfiesPzs": { - "description": "[Output Only] Set to true if the resource satisfies the zone separation organization policy constraints and false otherwise. Defaults to false if the field is not present.", - "type": "boolean" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "startTime": { + "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", + "format": "int64", "type": "string" }, "state": { - "description": "[Output Only] The current state of Interconnect functionality, which can take one of the following values: - ACTIVE: The Interconnect is valid, turned up and ready to use. Attachments may be provisioned on this Interconnect. - UNPROVISIONED: The Interconnect has not completed turnup. No attachments may be provisioned on this Interconnect. - UNDER_MAINTENANCE: The Interconnect is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect. ", + "description": "State of this notification, which can take one of the following values: - ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. - CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. - COMPLETED: The outage associated with this notification is complete. Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", "enum": [ "ACTIVE", - "UNPROVISIONED" + "CANCELLED", + "COMPLETED", + "NS_ACTIVE", + "NS_CANCELED" ], "enumDescriptions": [ - "The interconnect is valid, turned up, and ready to use. Attachments may be provisioned on this interconnect.", - "The interconnect has not completed turnup. No attachments may be provisioned on this interconnect." + "This outage notification is active. The event could be in the future, present, or past. 
See start_time and end_time for scheduling.", + "The outage associated with this notification was cancelled before the outage was due to start.", + "The outage associated with this notification is complete.", + "[Deprecated] This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", + "[Deprecated] The outage associated with this notification was canceled before the outage was due to start." ], "type": "string" } }, "type": "object" }, - "InterconnectAttachment": { - "description": "Represents an Interconnect Attachment (VLAN) resource. You can use Interconnect attachments (VLANS) to connect your Virtual Private Cloud networks to your on-premises networks through an Interconnect. For more information, read Creating VLAN Attachments.", - "id": "InterconnectAttachment", + "InterconnectsGetDiagnosticsResponse": { + "description": "Response for the InterconnectsGetDiagnosticsRequest.", + "id": "InterconnectsGetDiagnosticsResponse", "properties": { - "adminEnabled": { - "description": "Determines whether this Attachment will carry packets. Not present for PARTNER_PROVIDER.", + "result": { + "$ref": "InterconnectDiagnostics" + } + }, + "type": "object" + }, + "License": { + "description": "Represents a License resource. A License represents billing and aggregate usage data for public and marketplace images. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", + "id": "License", + "properties": { + "chargesUseFee": { + "description": "[Output Only] Deprecated. This field no longer reflects whether a license charges a usage fee.", "type": "boolean" }, - "bandwidth": { - "description": "Provisioned bandwidth capacity for the interconnect attachment. For attachments of type DEDICATED, the user can set the bandwidth. For attachments of type PARTNER, the Google Partner that is operating the interconnect must set the bandwidth. Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, and can take one of the following values: - BPS_50M: 50 Mbit/s - BPS_100M: 100 Mbit/s - BPS_200M: 200 Mbit/s - BPS_300M: 300 Mbit/s - BPS_400M: 400 Mbit/s - BPS_500M: 500 Mbit/s - BPS_1G: 1 Gbit/s - BPS_2G: 2 Gbit/s - BPS_5G: 5 Gbit/s - BPS_10G: 10 Gbit/s - BPS_20G: 20 Gbit/s - BPS_50G: 50 Gbit/s ", - "enum": [ - "BPS_100M", - "BPS_10G", - "BPS_1G", - "BPS_200M", - "BPS_20G", - "BPS_2G", - "BPS_300M", - "BPS_400M", - "BPS_500M", - "BPS_50G", - "BPS_50M", - "BPS_5G" - ], - "enumDescriptions": [ - "100 Mbit/s", - "10 Gbit/s", - "1 Gbit/s", - "200 Mbit/s", - "20 Gbit/s", - "2 Gbit/s", - "300 Mbit/s", - "400 Mbit/s", - "500 Mbit/s", - "50 Gbit/s", - "50 Mbit/s", - "5 Gbit/s" - ], - "type": "string" - }, - "candidateIpv6Subnets": { - "description": "Up to 16 candidate prefixes that control the allocation of cloudRouterIpv6Address and customerRouterIpv6Address for this attachment. Each prefix must be in the Global Unique Address (GUA) space. It is highly recommended that it be in a range owned by the requestor. A GUA in a range owned by Google will cause the request to fail. Google will select an available prefix from the supplied candidates or fail the request. 
If not supplied, a /125 from a Google-owned GUA block will be selected.", - "items": { - "type": "string" - }, - "type": "array" - }, - "candidateSubnets": { - "description": "Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress and customerRouterIpAddress for this attachment. All prefixes must be within link-local address space (169.254.0.0/16) and must be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29 from the supplied candidate prefix(es). The request will fail if all possible /29s are in use on Google's edge. If not supplied, Google will randomly select an unused /29 from all of link-local space.", - "items": { - "type": "string" - }, - "type": "array" - }, - "cloudRouterIpAddress": { - "description": "[Output Only] IPv4 address + prefix length to be configured on Cloud Router Interface for this interconnect attachment.", - "type": "string" - }, - "cloudRouterIpv6Address": { - "description": "[Output Only] IPv6 address + prefix length to be configured on Cloud Router Interface for this interconnect attachment.", - "type": "string" - }, - "cloudRouterIpv6InterfaceId": { - "description": "If supplied, the interface id (index within the subnet) to be used for the cloud router address. The id must be in the range of 1 to 6. If a subnet mask is supplied, it must be /125, and the subnet should either be 0 or match the selected subnet.", - "type": "string" - }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "customerRouterIpAddress": { - "description": "[Output Only] IPv4 address + prefix length to be configured on the customer router subinterface for this interconnect attachment.", + "description": { + "description": "An optional textual description of the resource; provided by the client when the resource is created.", "type": "string" }, - "customerRouterIpv6Address": { - "description": "[Output Only] IPv6 address + prefix length to be configured on the customer router subinterface for this interconnect attachment.", + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", "type": "string" }, - "customerRouterIpv6InterfaceId": { - "description": "If supplied, the interface id (index within the subnet) to be used for the customer router address. The id must be in the range of 1 to 6. If a subnet mask is supplied, it must be /125, and the subnet should either be 0 or match the selected subnet.", + "kind": { + "default": "compute#license", + "description": "[Output Only] Type of resource. Always compute#license for licenses.", "type": "string" }, - "dataplaneVersion": { - "description": "[Output only for types PARTNER and DEDICATED. Not present for PARTNER_PROVIDER.] Dataplane version for this InterconnectAttachment. This field is only present for Dataplane version 2 and higher. Absence of this field in the API output indicates that the Dataplane is version 1.", - "format": "int32", - "type": "integer" + "licenseCode": { + "description": "[Output Only] The unique code used to attach this license to images, snapshots, and disks.", + "format": "uint64", + "type": "string" }, - "description": { - "description": "An optional description of this resource.", + "name": { + "annotations": { + "required": [ + "compute.images.insert" + ] + }, + "description": "Name of the resource. 
The name must be 1-63 characters long and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "edgeAvailabilityDomain": { - "description": "Desired availability domain for the attachment. Only available for type PARTNER, at creation time, and can take one of the following values: - AVAILABILITY_DOMAIN_ANY - AVAILABILITY_DOMAIN_1 - AVAILABILITY_DOMAIN_2 For improved reliability, customers should configure a pair of attachments, one per availability domain. The selected availability domain will be provided to the Partner via the pairing key, so that the provisioned circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.", - "enum": [ - "AVAILABILITY_DOMAIN_1", - "AVAILABILITY_DOMAIN_2", - "AVAILABILITY_DOMAIN_ANY" - ], - "enumDescriptions": [ - "", - "", - "" - ], + "resourceRequirements": { + "$ref": "LicenseResourceRequirements" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "encryption": { - "description": "Indicates the user-supplied encryption option of this VLAN attachment (interconnectAttachment). Can only be specified at attachment creation for PARTNER or DEDICATED attachments. Possible values are: - NONE - This is the default value, which means that the VLAN attachment carries unencrypted traffic. VMs are able to send traffic to, or receive traffic from, such a VLAN attachment. - IPSEC - The VLAN attachment carries only encrypted traffic that is encrypted by an IPsec device, such as an HA VPN gateway or third-party IPsec VPN. VMs cannot directly send traffic to, or receive traffic from, such a VLAN attachment. To use *IPsec-encrypted Cloud Interconnect*, the VLAN attachment must be created with this option. Not currently available publicly. ", - "enum": [ - "IPSEC", - "NONE" - ], - "enumDescriptions": [ - "The interconnect attachment will carry only encrypted traffic that is encrypted by an IPsec device such as HA VPN gateway; VMs cannot directly send traffic to or receive traffic from such an interconnect attachment. To use IPsec-encrypted Cloud Interconnect, the interconnect attachment must be created with this option.", - "This is the default value, which means the Interconnect Attachment will carry unencrypted traffic. VMs will be able to send traffic to or receive traffic from such interconnect attachment." - ], + "transferable": { + "description": "If false, licenses will not be copied from the source resource when creating an image from a disk, disk from snapshot, or snapshot from disk.", + "type": "boolean" + } + }, + "type": "object" + }, + "LicenseCode": { + "description": "Represents a License Code resource. A License Code is a unique identifier used to represent a license resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", + "id": "LicenseCode", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "googleReferenceId": { - "description": "[Output Only] Google reference ID, to be used when raising support tickets with Google or otherwise to debug backend connectivity issues. 
[Deprecated] This field is not used.", + "description": { + "description": "[Output Only] Description of this License Code.", "type": "string" }, "id": { @@ -41430,152 +47076,120 @@ "format": "uint64", "type": "string" }, - "interconnect": { - "description": "URL of the underlying Interconnect object that this attachment's traffic will traverse through.", + "kind": { + "default": "compute#licenseCode", + "description": "[Output Only] Type of resource. Always compute#licenseCode for licenses.", "type": "string" }, - "ipsecInternalAddresses": { - "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool. Not currently available publicly. ", + "licenseAlias": { + "description": "[Output Only] URL and description aliases of Licenses with the same License Code.", "items": { - "type": "string" + "$ref": "LicenseCodeLicenseAlias" }, "type": "array" }, - "kind": { - "default": "compute#interconnectAttachment", - "description": "[Output Only] Type of the resource. Always compute#interconnectAttachment for interconnect attachments.", + "name": { + "annotations": { + "required": [ + "compute.licenses.insert" + ] + }, + "description": "[Output Only] Name of the resource. The name is 1-20 characters long and must be a valid 64 bit integer.", + "pattern": "[0-9]{0,20}?", "type": "string" }, - "mtu": { - "description": "Maximum Transmission Unit (MTU), in bytes, of packets passing through this interconnect attachment. Only 1440 and 1500 are allowed. If not specified, the value will default to 1440.", - "format": "int32", - "type": "integer" - }, - "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "operationalStatus": { - "description": "[Output Only] The current status of whether or not this interconnect attachment is functional, which can take one of the following values: - OS_ACTIVE: The attachment has been turned up and is ready to use. - OS_UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete. 
", + "state": { + "description": "[Output Only] Current state of this License Code.", "enum": [ - "OS_ACTIVE", - "OS_UNPROVISIONED" + "DISABLED", + "ENABLED", + "RESTRICTED", + "STATE_UNSPECIFIED", + "TERMINATED" ], "enumDescriptions": [ - "Indicates that attachment has been turned up and is ready to use.", - "Indicates that attachment is not ready to use yet, because turnup is not complete." + "Machines are not allowed to attach boot disks with this License Code. Requests to create new resources with this license will be rejected.", + "Use is allowed for anyone with USE_READ_ONLY access to this License Code.", + "Use of this license is limited to a project whitelist.", + "", + "Reserved state." ], "type": "string" }, - "pairingKey": { - "description": "[Output only for type PARTNER. Input only for PARTNER_PROVIDER. Not present for DEDICATED]. The opaque identifier of an PARTNER attachment used to initiate provisioning with a selected partner. Of the form \"XXXXX/region/domain\"", - "type": "string" - }, - "partnerAsn": { - "description": "Optional BGP ASN for the router supplied by a Layer 3 Partner if they configured BGP on behalf of the customer. Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED.", - "format": "int64", - "type": "string" - }, - "partnerMetadata": { - "$ref": "InterconnectAttachmentPartnerMetadata", - "description": "Informational metadata about Partner attachments from Partners to display to customers. Output only for for PARTNER type, mutable for PARTNER_PROVIDER, not available for DEDICATED." - }, - "privateInterconnectInfo": { - "$ref": "InterconnectAttachmentPrivateInfo", - "description": "[Output Only] Information specific to an InterconnectAttachment. This property is populated if the interconnect that this is attached to is of type DEDICATED." - }, - "region": { - "description": "[Output Only] URL of the region where the regional interconnect attachment resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", - "type": "string" - }, - "router": { - "description": "URL of the Cloud Router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the network \u0026 region within which the Cloud Router is configured.", - "type": "string" - }, - "satisfiesPzs": { - "description": "[Output Only] Set to true if the resource satisfies the zone separation organization policy constraints and false otherwise. Defaults to false if the field is not present.", + "transferable": { + "description": "[Output Only] If true, the license will remain attached when creating images or snapshots from disks. Otherwise, the license is not transferred.", "type": "boolean" + } + }, + "type": "object" + }, + "LicenseCodeLicenseAlias": { + "id": "LicenseCodeLicenseAlias", + "properties": { + "description": { + "description": "[Output Only] Description of this License Code.", + "type": "string" }, "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "description": "[Output Only] URL of license corresponding to this License Code.", "type": "string" - }, - "stackType": { - "description": "The stack type for this interconnect attachment to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. 
This field can be both set at interconnect attachments creation and update interconnect attachment operations.", - "enum": [ - "IPV4_IPV6", - "IPV4_ONLY" - ], - "enumDescriptions": [ - "The interconnect attachment can have both IPv4 and IPv6 addresses.", - "The interconnect attachment will only be assigned IPv4 addresses." - ], + } + }, + "type": "object" + }, + "LicenseResourceCommitment": { + "description": "Commitment for a particular license resource.", + "id": "LicenseResourceCommitment", + "properties": { + "amount": { + "description": "The number of licenses purchased.", + "format": "int64", "type": "string" }, - "state": { - "description": "[Output Only] The current state of this attachment's functionality. Enum values ACTIVE and UNPROVISIONED are shared by DEDICATED/PRIVATE, PARTNER, and PARTNER_PROVIDER interconnect attachments, while enum values PENDING_PARTNER, PARTNER_REQUEST_RECEIVED, and PENDING_CUSTOMER are used for only PARTNER and PARTNER_PROVIDER interconnect attachments. This state can take one of the following values: - ACTIVE: The attachment has been turned up and is ready to use. - UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete. - PENDING_PARTNER: A newly-created PARTNER attachment that has not yet been configured on the Partner side. - PARTNER_REQUEST_RECEIVED: A PARTNER attachment is in the process of provisioning after a PARTNER_PROVIDER attachment was created that references it. - PENDING_CUSTOMER: A PARTNER or PARTNER_PROVIDER attachment that is waiting for a customer to activate it. - DEFUNCT: The attachment was deleted externally and is no longer functional. This could be because the associated Interconnect was removed, or because the other side of a Partner attachment was deleted. ", - "enum": [ - "ACTIVE", - "DEFUNCT", - "PARTNER_REQUEST_RECEIVED", - "PENDING_CUSTOMER", - "PENDING_PARTNER", - "STATE_UNSPECIFIED", - "UNPROVISIONED" - ], - "enumDescriptions": [ - "Indicates that attachment has been turned up and is ready to use.", - "The attachment was deleted externally and is no longer functional. This could be because the associated Interconnect was wiped out, or because the other side of a Partner attachment was deleted.", - "A PARTNER attachment is in the process of provisioning after a PARTNER_PROVIDER attachment was created that references it.", - "PARTNER or PARTNER_PROVIDER attachment that is waiting for the customer to activate.", - "A newly created PARTNER attachment that has not yet been configured on the Partner side.", - "", - "Indicates that attachment is not ready to use yet, because turnup is not complete." - ], + "coresPerLicense": { + "description": "Specifies the core range of the instance for which this license applies.", "type": "string" }, - "type": { - "description": "The type of interconnect attachment this is, which can take one of the following values: - DEDICATED: an attachment to a Dedicated Interconnect. - PARTNER: an attachment to a Partner Interconnect, created by the customer. - PARTNER_PROVIDER: an attachment to a Partner Interconnect, created by the partner. ", - "enum": [ - "DEDICATED", - "PARTNER", - "PARTNER_PROVIDER" - ], - "enumDescriptions": [ - "Attachment to a dedicated interconnect.", - "Attachment to a partner interconnect, created by the customer.", - "Attachment to a partner interconnect, created by the partner." 
- ], + "license": { + "description": "Any applicable license URI.", "type": "string" + } + }, + "type": "object" + }, + "LicenseResourceRequirements": { + "id": "LicenseResourceRequirements", + "properties": { + "minGuestCpuCount": { + "description": "Minimum number of guest cpus required to use the Instance. Enforced at Instance creation and Instance start.", + "format": "int32", + "type": "integer" }, - "vlanTag8021q": { - "description": "The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. Only specified at creation time.", + "minMemoryMb": { + "description": "Minimum memory required to use the Instance. Enforced at Instance creation and Instance start.", "format": "int32", "type": "integer" } }, "type": "object" }, - "InterconnectAttachmentAggregatedList": { - "id": "InterconnectAttachmentAggregatedList", + "LicensesListResponse": { + "id": "LicensesListResponse", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "InterconnectAttachmentsScopedList", - "description": "Name of the scope containing this set of interconnect attachments." + "description": "A list of License resources.", + "items": { + "$ref": "License" }, - "description": "A list of InterconnectAttachmentsScopedList resources.", - "type": "object" - }, - "kind": { - "default": "compute#interconnectAttachmentAggregatedList", - "description": "[Output Only] Type of resource. Always compute#interconnectAttachmentAggregatedList for aggregated lists of interconnect attachments.", - "type": "string" + "type": "array" }, "nextPageToken": { "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", @@ -41585,13 +47199,6 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "unreachables": { - "description": "[Output Only] Unreachable resources.", - "items": { - "type": "string" - }, - "type": "array" - }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -41606,10 +47213,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -41633,10 +47242,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -41680,415 +47291,330 @@ }, "type": "object" }, - "InterconnectAttachmentList": { - "description": "Response to the list request, and contains a list of interconnect attachments.", - "id": "InterconnectAttachmentList", + "LocalDisk": { + "id": "LocalDisk", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" + "diskCount": { + "description": "Specifies the number of such disks.", + "format": "int32", + "type": "integer" }, - "items": { - "description": "A list of InterconnectAttachment resources.", - "items": { - "$ref": "InterconnectAttachment" - }, - "type": "array" + "diskSizeGb": { + "description": "Specifies the size of the disk in base-2 GB.", + "format": "int32", + "type": "integer" }, - "kind": { - "default": "compute#interconnectAttachmentList", - "description": "[Output Only] Type of resource. Always compute#interconnectAttachmentList for lists of interconnect attachments.", + "diskType": { + "description": "Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL.", "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + } + }, + "type": "object" + }, + "LocalizedMessage": { + "description": "Provides a localized error message that is safe to return to the user which can be attached to an RPC error.", + "id": "LocalizedMessage", + "properties": { + "locale": { + "description": "The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: \"en-US\", \"fr-CH\", \"es-MX\"", "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", + "message": { + "description": "The localized error message in the above locale.", "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "LARGE_DEPLOYMENT_WARNING", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "PARTIAL_SUCCESS", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "Warning about failed cleanup of transient changes made by a failed operation.", - "A link to a deprecated resource was created.", - "When deploying and at least one of the resources has a type marked as deprecated", - "The user created a boot disk that is larger than image size.", - "When deploying and at least one of the resources has a type marked as experimental", - "Warning that is present in an external api call", - "Warning that value of a field has been overridden. Deprecated unused field.", - "The operation involved use of an injected kernel, which is deprecated.", - "When deploying a deployment with a exceedingly large number of resources", - "A resource depends on a missing type", - "The route's nextHopIp address is not assigned to an instance on the network.", - "The route's next hop instance cannot ip forward.", - "The route's nextHopInstance URL refers to an instance that does not exist.", - "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", - "The route's next hop instance does not have a status of RUNNING.", - "Error which is not critical. We decided to continue the process despite the mentioned error.", - "No results are present on a particular list page.", - "Success is reported, but some results may be missing due to errors", - "The user attempted to use a resource that requires a TOS they have not accepted.", - "Warning that a resource is in use.", - "One or more of the resources set to auto-delete could not be deleted because they were in use.", - "When a resource schema validation is ignored.", - "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", - "When undeclared properties in the schema are present", - "A given scope cannot be reached." - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" } }, "type": "object" }, - "InterconnectAttachmentPartnerMetadata": { - "description": "Informational metadata about Partner attachments from Partners to display to customers. These fields are propagated from PARTNER_PROVIDER attachments to their corresponding PARTNER attachments.", - "id": "InterconnectAttachmentPartnerMetadata", + "LocationPolicy": { + "description": "Configuration for location policy among multiple possible locations (e.g. preferences for zone selection among zones in a single region).", + "id": "LocationPolicy", "properties": { - "interconnectName": { - "description": "Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner's portal. For instance \"Chicago 1\". This value may be validated to match approved Partner values.", - "type": "string" + "locations": { + "additionalProperties": { + "$ref": "LocationPolicyLocation" + }, + "description": "Location configurations mapped by location name. Currently only zone names are supported and must be represented as valid internal URLs, such as zones/us-central1-a.", + "type": "object" }, - "partnerName": { - "description": "Plain text name of the Partner providing this attachment. This value may be validated to match approved Partner values.", + "targetShape": { + "description": "Strategy for distributing VMs across zones in a region.", + "enum": [ + "ANY", + "ANY_SINGLE_ZONE", + "BALANCED" + ], + "enumDescriptions": [ + "GCE picks zones for creating VM instances to fulfill the requested number of VMs within present resource constraints and to maximize utilization of unused zonal reservations. Recommended for batch workloads that do not require high availability.", + "GCE always selects a single zone for all the VMs, optimizing for resource quotas, available reservations and general capacity. Recommended for batch workloads that cannot tollerate distribution over multiple zones. This the default shape in Bulk Insert and Capacity Advisor APIs.", + "GCE prioritizes acquisition of resources, scheduling VMs in zones where resources are available while distributing VMs as evenly as possible across allowed zones to minimize the impact of zonal failure. Recommended for highly available serving workloads." + ], "type": "string" + } + }, + "type": "object" + }, + "LocationPolicyLocation": { + "id": "LocationPolicyLocation", + "properties": { + "constraints": { + "$ref": "LocationPolicyLocationConstraints", + "description": "Constraints that the caller requires on the result distribution in this zone." }, - "portalUrl": { - "description": "URL of the Partner's portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values.", + "preference": { + "description": "Preference for a given location. 
Set to either ALLOW or DENY.", + "enum": [ + "ALLOW", + "DENY", + "PREFERENCE_UNSPECIFIED" + ], + "enumDescriptions": [ + "Location is allowed for use.", + "Location is prohibited.", + "Default value, unused." + ], "type": "string" } }, "type": "object" }, - "InterconnectAttachmentPrivateInfo": { - "description": "Information for an interconnect attachment when this belongs to an interconnect of type DEDICATED.", - "id": "InterconnectAttachmentPrivateInfo", + "LocationPolicyLocationConstraints": { + "description": "Per-zone constraints on location policy for this zone.", + "id": "LocationPolicyLocationConstraints", "properties": { - "tag8021q": { - "description": "[Output Only] 802.1q encapsulation tag to be used for traffic between Google and the customer, going to and from this network and region.", - "format": "uint32", + "maxCount": { + "description": "Maximum number of items that are allowed to be placed in this zone. The value must be non-negative.", + "format": "int32", "type": "integer" } }, "type": "object" }, - "InterconnectAttachmentsScopedList": { - "id": "InterconnectAttachmentsScopedList", + "LogConfig": { + "description": "This is deprecated and has no effect. Do not use.", + "id": "LogConfig", "properties": { - "interconnectAttachments": { - "description": "A list of interconnect attachments contained in this scope.", - "items": { - "$ref": "InterconnectAttachment" - }, - "type": "array" + "cloudAudit": { + "$ref": "LogConfigCloudAuditOptions", + "description": "This is deprecated and has no effect. Do not use." }, - "warning": { - "description": "Informational warning which replaces the list of addresses when the list is empty.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "LARGE_DEPLOYMENT_WARNING", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "PARTIAL_SUCCESS", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "Warning about failed cleanup of transient changes made by a failed operation.", - "A link to a deprecated resource was created.", - "When deploying and at least one of the resources has a type marked as deprecated", - "The user created a boot disk that is larger than image size.", - "When deploying and at least one of the resources has a type marked as experimental", - "Warning that is present in an external api call", - "Warning that value of a field has been overridden. 
Deprecated unused field.", - "The operation involved use of an injected kernel, which is deprecated.", - "When deploying a deployment with a exceedingly large number of resources", - "A resource depends on a missing type", - "The route's nextHopIp address is not assigned to an instance on the network.", - "The route's next hop instance cannot ip forward.", - "The route's nextHopInstance URL refers to an instance that does not exist.", - "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", - "The route's next hop instance does not have a status of RUNNING.", - "Error which is not critical. We decided to continue the process despite the mentioned error.", - "No results are present on a particular list page.", - "Success is reported, but some results may be missing due to errors", - "The user attempted to use a resource that requires a TOS they have not accepted.", - "Warning that a resource is in use.", - "One or more of the resources set to auto-delete could not be deleted because they were in use.", - "When a resource schema validation is ignored.", - "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", - "When undeclared properties in the schema are present", - "A given scope cannot be reached." - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" + "counter": { + "$ref": "LogConfigCounterOptions", + "description": "This is deprecated and has no effect. Do not use." + }, + "dataAccess": { + "$ref": "LogConfigDataAccessOptions", + "description": "This is deprecated and has no effect. Do not use." } }, "type": "object" }, - "InterconnectCircuitInfo": { - "description": "Describes a single physical circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only.", - "id": "InterconnectCircuitInfo", + "LogConfigCloudAuditOptions": { + "description": "This is deprecated and has no effect. Do not use.", + "id": "LogConfigCloudAuditOptions", "properties": { - "customerDemarcId": { - "description": "Customer-side demarc ID for this circuit.", - "type": "string" - }, - "googleCircuitId": { - "description": "Google-assigned unique ID for this circuit. Assigned at circuit turn-up.", - "type": "string" + "authorizationLoggingOptions": { + "$ref": "AuthorizationLoggingOptions", + "description": "This is deprecated and has no effect. Do not use." 
}, - "googleDemarcId": { - "description": "Google-side demarc ID for this circuit. Assigned at circuit turn-up and provided by Google to the customer in the LOA.", + "logName": { + "description": "This is deprecated and has no effect. Do not use.", + "enum": [ + "ADMIN_ACTIVITY", + "DATA_ACCESS", + "UNSPECIFIED_LOG_NAME" + ], + "enumDescriptions": [ + "This is deprecated and has no effect. Do not use.", + "This is deprecated and has no effect. Do not use.", + "This is deprecated and has no effect. Do not use." + ], "type": "string" } }, "type": "object" }, - "InterconnectDiagnostics": { - "description": "Diagnostics information about interconnect, contains detailed and current technical information about Google's side of the connection.", - "id": "InterconnectDiagnostics", + "LogConfigCounterOptions": { + "description": "This is deprecated and has no effect. Do not use.", + "id": "LogConfigCounterOptions", "properties": { - "arpCaches": { - "description": "A list of InterconnectDiagnostics.ARPEntry objects, describing individual neighbors currently seen by the Google router in the ARP cache for the Interconnect. This will be empty when the Interconnect is not bundled.", + "customFields": { + "description": "This is deprecated and has no effect. Do not use.", "items": { - "$ref": "InterconnectDiagnosticsARPEntry" + "$ref": "LogConfigCounterOptionsCustomField" }, "type": "array" }, - "links": { - "description": "A list of InterconnectDiagnostics.LinkStatus objects, describing the status for each link on the Interconnect.", - "items": { - "$ref": "InterconnectDiagnosticsLinkStatus" - }, - "type": "array" + "field": { + "description": "This is deprecated and has no effect. Do not use.", + "type": "string" }, - "macAddress": { - "description": "The MAC address of the Interconnect's bundle interface.", + "metric": { + "description": "This is deprecated and has no effect. Do not use.", "type": "string" } }, "type": "object" }, - "InterconnectDiagnosticsARPEntry": { - "description": "Describing the ARP neighbor entries seen on this link", - "id": "InterconnectDiagnosticsARPEntry", + "LogConfigCounterOptionsCustomField": { + "description": "This is deprecated and has no effect. Do not use.", + "id": "LogConfigCounterOptionsCustomField", "properties": { - "ipAddress": { - "description": "The IP address of this ARP neighbor.", + "name": { + "description": "This is deprecated and has no effect. Do not use.", "type": "string" }, - "macAddress": { - "description": "The MAC address of this ARP neighbor.", + "value": { + "description": "This is deprecated and has no effect. Do not use.", "type": "string" } }, "type": "object" }, - "InterconnectDiagnosticsLinkLACPStatus": { - "id": "InterconnectDiagnosticsLinkLACPStatus", + "LogConfigDataAccessOptions": { + "description": "This is deprecated and has no effect. Do not use.", + "id": "LogConfigDataAccessOptions", "properties": { - "googleSystemId": { - "description": "System ID of the port on Google's side of the LACP exchange.", - "type": "string" - }, - "neighborSystemId": { - "description": "System ID of the port on the neighbor's side of the LACP exchange.", - "type": "string" - }, - "state": { - "description": "The state of a LACP link, which can take one of the following values: - ACTIVE: The link is configured and active within the bundle. - DETACHED: The link is not configured within the bundle. This means that the rest of the object should be empty. ", + "logMode": { + "description": "This is deprecated and has no effect. 
Do not use.", "enum": [ - "ACTIVE", - "DETACHED" + "LOG_FAIL_CLOSED", + "LOG_MODE_UNSPECIFIED" ], "enumDescriptions": [ - "The link is configured and active within the bundle.", - "The link is not configured within the bundle, this means the rest of the object should be empty." + "This is deprecated and has no effect. Do not use.", + "This is deprecated and has no effect. Do not use." ], "type": "string" } }, "type": "object" }, - "InterconnectDiagnosticsLinkOpticalPower": { - "id": "InterconnectDiagnosticsLinkOpticalPower", + "MachineImage": { + "description": "Represents a machine image resource. A machine image is a Compute Engine resource that stores all the configuration, metadata, permissions, and data from one or more disks required to create a Virtual machine (VM) instance. For more information, see Machine images.", + "id": "MachineImage", "properties": { - "state": { - "description": "The status of the current value when compared to the warning and alarm levels for the receiving or transmitting transceiver. Possible states include: - OK: The value has not crossed a warning threshold. - LOW_WARNING: The value has crossed below the low warning threshold. - HIGH_WARNING: The value has crossed above the high warning threshold. - LOW_ALARM: The value has crossed below the low alarm threshold. - HIGH_ALARM: The value has crossed above the high alarm threshold. ", + "creationTimestamp": { + "description": "[Output Only] The creation timestamp for this machine image in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "guestFlush": { + "description": "[Input Only] Whether to attempt an application consistent machine image by informing the OS to prepare for the snapshot process.", + "type": "boolean" + }, + "id": { + "description": "[Output Only] A unique identifier for this machine image. The server defines this identifier.", + "format": "uint64", + "type": "string" + }, + "instanceProperties": { + "$ref": "InstanceProperties", + "description": "[Output Only] Properties of source instance" + }, + "kind": { + "default": "compute#machineImage", + "description": "[Output Only] The resource type, which is always compute#machineImage for machine image.", + "type": "string" + }, + "machineImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the machine image using a customer-supplied encryption key. After you encrypt a machine image using a customer-supplied key, you must provide the same key if you use the machine image later. For example, you must provide the encryption key when you create an instance from the encrypted machine image in a future request. Customer-supplied encryption keys do not protect access to metadata of the machine image. If you do not provide an encryption key when creating the machine image, then the machine image will be encrypted using an automatically generated key and you do not need to provide a key to use the machine image later." + }, + "name": { + "annotations": { + "required": [ + "compute.machineImages.insert" + ] + }, + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "satisfiesPzs": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" + }, + "savedDisks": { + "description": "An array of Machine Image specific properties for disks attached to the source instance", + "items": { + "$ref": "SavedDisk" + }, + "type": "array" + }, + "selfLink": { + "description": "[Output Only] The URL for this machine image. The server defines this URL.", + "type": "string" + }, + "sourceDiskEncryptionKeys": { + "description": "[Input Only] The customer-supplied encryption key of the disks attached to the source instance. Required if the source disk is protected by a customer-supplied encryption key.", + "items": { + "$ref": "SourceDiskEncryptionKey" + }, + "type": "array" + }, + "sourceInstance": { + "description": "The source instance used to create the machine image. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /instances/instance - projects/project/zones/zone/instances/instance ", + "type": "string" + }, + "sourceInstanceProperties": { + "$ref": "SourceInstanceProperties", + "description": "[Output Only] DEPRECATED: Please use instance_properties instead for source instance related properties. New properties will not be added to this field." + }, + "status": { + "description": "[Output Only] The status of the machine image. One of the following values: INVALID, CREATING, READY, DELETING, and UPLOADING.", "enum": [ - "HIGH_ALARM", - "HIGH_WARNING", - "LOW_ALARM", - "LOW_WARNING", - "OK" + "CREATING", + "DELETING", + "INVALID", + "READY", + "UPLOADING" ], "enumDescriptions": [ - "The value has crossed above the high alarm threshold.", - "The value of the current optical power has crossed above the high warning threshold.", - "The value of the current optical power has crossed below the low alarm threshold.", - "The value of the current optical power has crossed below the low warning threshold.", - "The value of the current optical power has not crossed a warning threshold." + "", + "", + "", + "", + "" ], "type": "string" }, - "value": { - "description": "Value of the current receiving or transmitting optical power, read in dBm. Take a known good optical value, give it a 10% margin and trigger warnings relative to that value. In general, a -7dBm warning and a -11dBm alarm are good optical value estimates for most links.", - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "InterconnectDiagnosticsLinkStatus": { - "id": "InterconnectDiagnosticsLinkStatus", - "properties": { - "arpCaches": { - "description": "A list of InterconnectDiagnostics.ARPEntry objects, describing the ARP neighbor entries seen on this link. 
This will be empty if the link is bundled", + "storageLocations": { + "description": "The regional or multi-regional Cloud Storage bucket location where the machine image is stored.", "items": { - "$ref": "InterconnectDiagnosticsARPEntry" + "type": "string" }, "type": "array" }, - "circuitId": { - "description": "The unique ID for this link assigned during turn up by Google.", - "type": "string" - }, - "googleDemarc": { - "description": "The Demarc address assigned by Google and provided in the LoA.", + "totalStorageBytes": { + "description": "[Output Only] Total size of the storage used by the machine image.", + "format": "int64", "type": "string" - }, - "lacpStatus": { - "$ref": "InterconnectDiagnosticsLinkLACPStatus" - }, - "receivingOpticalPower": { - "$ref": "InterconnectDiagnosticsLinkOpticalPower", - "description": "An InterconnectDiagnostics.LinkOpticalPower object, describing the current value and status of the received light level." - }, - "transmittingOpticalPower": { - "$ref": "InterconnectDiagnosticsLinkOpticalPower", - "description": "An InterconnectDiagnostics.LinkOpticalPower object, describing the current value and status of the transmitted light level." } }, "type": "object" }, - "InterconnectList": { - "description": "Response to the list request, and contains a list of interconnects.", - "id": "InterconnectList", + "MachineImageList": { + "description": "A list of machine images.", + "id": "MachineImageList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Interconnect resources.", + "description": "A list of MachineImage resources.", "items": { - "$ref": "Interconnect" + "$ref": "MachineImage" }, "type": "array" }, "kind": { - "default": "compute#interconnectList", - "description": "[Output Only] Type of resource. Always compute#interconnectList for lists of interconnects.", + "default": "compute#machineImageList", + "description": "[Output Only] The resource type, which is always compute#machineImagesListResponse for machine image lists.", "type": "string" }, "nextPageToken": { @@ -42113,10 +47639,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -42140,10 +47668,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -42187,88 +47717,95 @@ }, "type": "object" }, - "InterconnectLocation": { - "description": "Represents an Interconnect Attachment (VLAN) Location resource. You can use this resource to find location details about an Interconnect attachment (VLAN). For more information about interconnect attachments, read Creating VLAN Attachments.", - "id": "InterconnectLocation", + "MachineType": { + "description": "Represents a Machine Type resource. You can use specific machine types for your VM instances based on performance and pricing requirements. For more information, read Machine Types.", + "id": "MachineType", "properties": { - "address": { - "description": "[Output Only] The postal address of the Point of Presence, each line in the address is separated by a newline character.", - "type": "string" - }, - "availabilityZone": { - "description": "[Output Only] Availability zone for this InterconnectLocation. Within a metropolitan area (metro), maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\".", - "type": "string" - }, - "city": { - "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\".", - "type": "string" - }, - "continent": { - "description": "[Output Only] Continent for this location, which can take one of the following values: - AFRICA - ASIA_PAC - EUROPE - NORTH_AMERICA - SOUTH_AMERICA ", - "enum": [ - "AFRICA", - "ASIA_PAC", - "C_AFRICA", - "C_ASIA_PAC", - "C_EUROPE", - "C_NORTH_AMERICA", - "C_SOUTH_AMERICA", - "EUROPE", - "NORTH_AMERICA", - "SOUTH_AMERICA" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" + "accelerators": { + "description": "[Output Only] A list of accelerator configurations assigned to this machine type.", + "items": { + "properties": { + "guestAcceleratorCount": { + "description": "Number of accelerator cards exposed to the guest.", + "format": "int32", + "type": "integer" + }, + "guestAcceleratorType": { + "description": "The accelerator type resource name, not a full URL, e.g. 'nvidia-tesla-k80'.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "description": { - "description": "[Output Only] An optional description of the resource.", - "type": "string" + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this machine type. Only applicable if the machine type is unavailable." 
}, - "facilityProvider": { - "description": "[Output Only] The name of the provider for this facility (e.g., EQUINIX).", + "description": { + "description": "[Output Only] An optional textual description of the resource.", "type": "string" }, - "facilityProviderFacilityId": { - "description": "[Output Only] A provider-assigned Identifier for this facility (e.g., Ashburn-DC1).", - "type": "string" + "guestCpus": { + "description": "[Output Only] The number of virtual CPUs that are available to the instance.", + "format": "int32", + "type": "integer" }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", "type": "string" }, + "imageSpaceGb": { + "description": "[Deprecated] This property is deprecated and will never be populated with any relevant values.", + "format": "int32", + "type": "integer" + }, + "isSharedCpu": { + "description": "[Output Only] Whether this machine type has a shared CPU. See Shared-core machine types for more information.", + "type": "boolean" + }, "kind": { - "default": "compute#interconnectLocation", - "description": "[Output Only] Type of the resource. Always compute#interconnectLocation for interconnect locations.", + "default": "compute#machineType", + "description": "[Output Only] The type of the resource. Always compute#machineType for machine types.", "type": "string" }, - "name": { - "description": "[Output Only] Name of the resource.", + "maximumPersistentDisks": { + "description": "[Output Only] Maximum persistent disks allowed.", + "format": "int32", + "type": "integer" + }, + "maximumPersistentDisksSizeGb": { + "description": "[Output Only] Maximum total persistent disks size (GB) allowed.", + "format": "int64", "type": "string" }, - "peeringdbFacilityId": { - "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb).", + "memoryMb": { + "description": "[Output Only] The amount of physical memory available to the instance, defined in MB.", + "format": "int32", + "type": "integer" + }, + "name": { + "description": "[Output Only] Name of the resource.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "regionInfos": { - "description": "[Output Only] A list of InterconnectLocation.RegionInfo objects, that describe parameters pertaining to the relation between this InterconnectLocation and various Google Cloud regions.", + "scratchDisks": { + "description": "[Output Only] A list of extended scratch disks assigned to the instance.", "items": { - "$ref": "InterconnectLocationRegionInfo" + "properties": { + "diskGb": { + "description": "Size of the scratch disk, defined in GB.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" }, "type": "array" }, @@ -42276,43 +47813,31 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "status": { - "description": "[Output Only] The status of this InterconnectLocation, which can take one of the following values: - CLOSED: The InterconnectLocation is closed and is unavailable for provisioning new Interconnects. - AVAILABLE: The InterconnectLocation is available for provisioning new Interconnects. ", - "enum": [ - "AVAILABLE", - "CLOSED" - ], - "enumDescriptions": [ - "The InterconnectLocation is available for provisioning new Interconnects.", - "The InterconnectLocation is closed for provisioning new Interconnects." 
- ], + "zone": { + "description": "[Output Only] The name of the zone where the machine type resides, such as us-central1-a.", "type": "string" - }, - "supportsPzs": { - "description": "[Output Only] Set to true for locations that support physical zone separation. Defaults to false if the field is not present.", - "type": "boolean" } }, "type": "object" }, - "InterconnectLocationList": { - "description": "Response to the list request, and contains a list of interconnect locations.", - "id": "InterconnectLocationList", + "MachineTypeAggregatedList": { + "id": "MachineTypeAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of InterconnectLocation resources.", - "items": { - "$ref": "InterconnectLocation" + "additionalProperties": { + "$ref": "MachineTypesScopedList", + "description": "[Output Only] Name of the scope containing this set of machine types." }, - "type": "array" + "description": "A list of MachineTypesScopedList resources.", + "type": "object" }, "kind": { - "default": "compute#interconnectLocationList", - "description": "[Output Only] Type of resource. Always compute#interconnectLocationList for lists of interconnect locations.", + "default": "compute#machineTypeAggregatedList", + "description": "[Output Only] Type of resource. Always compute#machineTypeAggregatedList for aggregated lists of machine types.", "type": "string" }, "nextPageToken": { @@ -42323,6 +47848,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -42337,10 +47869,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -42364,10 +47898,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -42411,323 +47947,138 @@ }, "type": "object" }, - "InterconnectLocationRegionInfo": { - "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", - "id": "InterconnectLocationRegionInfo", - "properties": { - "expectedRttMs": { - "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", - "format": "int64", - "type": "string" - }, - "locationPresence": { - "description": "Identifies the network presence of this location.", - "enum": [ - "GLOBAL", - "LOCAL_REGION", - "LP_GLOBAL", - "LP_LOCAL_REGION" - ], - "enumDescriptions": [ - "This region is not in any common network presence with this InterconnectLocation.", - "This region shares the same regional network presence as this InterconnectLocation.", - "[Deprecated] This region is not in any common network presence with this InterconnectLocation.", - "[Deprecated] This region shares the same regional network presence as this InterconnectLocation." - ], - "type": "string" - }, - "region": { - "description": "URL for the region of this location.", - "type": "string" - } - }, - "type": "object" - }, - "InterconnectOutageNotification": { - "description": "Description of a planned outage on this Interconnect.", - "id": "InterconnectOutageNotification", - "properties": { - "affectedCircuits": { - "description": "If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", - "items": { - "type": "string" - }, - "type": "array" - }, - "description": { - "description": "A description about the purpose of the outage.", - "type": "string" - }, - "endTime": { - "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", - "format": "int64", - "type": "string" - }, - "issueType": { - "description": "Form this outage is expected to take, which can take one of the following values: - OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", - "enum": [ - "IT_OUTAGE", - "IT_PARTIAL_OUTAGE", - "OUTAGE", - "PARTIAL_OUTAGE" - ], - "enumDescriptions": [ - "[Deprecated] The Interconnect may be completely out of service for some or all of the specified window.", - "[Deprecated] Some circuits comprising the Interconnect will be out of service during the expected window. 
The interconnect as a whole should remain up, albeit with reduced bandwidth.", - "The Interconnect may be completely out of service for some or all of the specified window.", - "Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth." - ], - "type": "string" - }, - "name": { - "description": "Unique identifier for this outage notification.", - "type": "string" - }, - "source": { - "description": "The party that generated this notification, which can take the following value: - GOOGLE: this notification as generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", - "enum": [ - "GOOGLE", - "NSRC_GOOGLE" - ], - "enumDescriptions": [ - "This notification was generated by Google.", - "[Deprecated] This notification was generated by Google." - ], - "type": "string" - }, - "startTime": { - "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", - "format": "int64", - "type": "string" - }, - "state": { - "description": "State of this notification, which can take one of the following values: - ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. - CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. - COMPLETED: The outage associated with this notification is complete. Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", - "enum": [ - "ACTIVE", - "CANCELLED", - "COMPLETED", - "NS_ACTIVE", - "NS_CANCELED" - ], - "enumDescriptions": [ - "This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", - "The outage associated with this notification was cancelled before the outage was due to start.", - "The outage associated with this notification is complete.", - "[Deprecated] This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", - "[Deprecated] The outage associated with this notification was canceled before the outage was due to start." - ], - "type": "string" - } - }, - "type": "object" - }, - "InterconnectsGetDiagnosticsResponse": { - "description": "Response for the InterconnectsGetDiagnosticsRequest.", - "id": "InterconnectsGetDiagnosticsResponse", - "properties": { - "result": { - "$ref": "InterconnectDiagnostics" - } - }, - "type": "object" - }, - "License": { - "description": "Represents a License resource. A License represents billing and aggregate usage data for public and marketplace images. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", - "id": "License", - "properties": { - "chargesUseFee": { - "description": "[Output Only] Deprecated. This field no longer reflects whether a license charges a usage fee.", - "type": "boolean" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional textual description of the resource; provided by the client when the resource is created.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#license", - "description": "[Output Only] Type of resource. Always compute#license for licenses.", - "type": "string" - }, - "licenseCode": { - "description": "[Output Only] The unique code used to attach this license to images, snapshots, and disks.", - "format": "uint64", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "compute.images.insert" - ] - }, - "description": "Name of the resource. The name must be 1-63 characters long and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "resourceRequirements": { - "$ref": "LicenseResourceRequirements" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "transferable": { - "description": "If false, licenses will not be copied from the source resource when creating an image from a disk, disk from snapshot, or snapshot from disk.", - "type": "boolean" - } - }, - "type": "object" - }, - "LicenseCode": { - "description": "Represents a License Code resource. A License Code is a unique identifier used to represent a license resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", - "id": "LicenseCode", + "MachineTypeList": { + "description": "Contains a list of machine types.", + "id": "MachineTypeList", "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "[Output Only] Description of this License Code.", - "type": "string" - }, "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#licenseCode", - "description": "[Output Only] Type of resource. Always compute#licenseCode for licenses.", + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, - "licenseAlias": { - "description": "[Output Only] URL and description aliases of Licenses with the same License Code.", + "items": { + "description": "A list of MachineType resources.", "items": { - "$ref": "LicenseCodeLicenseAlias" + "$ref": "MachineType" }, "type": "array" }, - "name": { - "annotations": { - "required": [ - "compute.licenses.insert" - ] - }, - "description": "[Output Only] Name of the resource. The name is 1-20 characters long and must be a valid 64 bit integer.", - "pattern": "[0-9]{0,20}?", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "state": { - "description": "[Output Only] Current state of this License Code.", - "enum": [ - "DISABLED", - "ENABLED", - "RESTRICTED", - "STATE_UNSPECIFIED", - "TERMINATED" - ], - "enumDescriptions": [ - "Machines are not allowed to attach boot disks with this License Code. Requests to create new resources with this license will be rejected.", - "Use is allowed for anyone with USE_READ_ONLY access to this License Code.", - "Use of this license is limited to a project whitelist.", - "", - "Reserved state." - ], + "kind": { + "default": "compute#machineTypeList", + "description": "[Output Only] Type of resource. 
Always compute#machineTypeList for lists of machine types.", "type": "string" }, - "transferable": { - "description": "[Output Only] If true, the license will remain attached when creating images or snapshots from disks. Otherwise, the license is not transferred.", - "type": "boolean" - } - }, - "type": "object" - }, - "LicenseCodeLicenseAlias": { - "id": "LicenseCodeLicenseAlias", - "properties": { - "description": { - "description": "[Output Only] Description of this License Code.", + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, "selfLink": { - "description": "[Output Only] URL of license corresponding to this License Code.", - "type": "string" - } - }, - "type": "object" - }, - "LicenseResourceCommitment": { - "description": "Commitment for a particular license resource.", - "id": "LicenseResourceCommitment", - "properties": { - "amount": { - "description": "The number of licenses purchased.", - "format": "int64", - "type": "string" - }, - "coresPerLicense": { - "description": "Specifies the core range of the instance for which this license applies.", - "type": "string" - }, - "license": { - "description": "Any applicable license URI.", + "description": "[Output Only] Server-defined URL for this resource.", "type": "string" - } - }, - "type": "object" - }, - "LicenseResourceRequirements": { - "id": "LicenseResourceRequirements", - "properties": { - "minGuestCpuCount": { - "description": "Minimum number of guest cpus required to use the Instance. Enforced at Instance creation and Instance start.", - "format": "int32", - "type": "integer" }, - "minMemoryMb": { - "description": "Minimum memory required to use the Instance. Enforced at Instance creation and Instance start.", - "format": "int32", - "type": "integer" + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "LicensesListResponse": { - "id": "LicensesListResponse", + "MachineTypesScopedList": { + "id": "MachineTypesScopedList", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of License resources.", + "machineTypes": { + "description": "[Output Only] A list of machine types contained in this scope.", "items": { - "$ref": "License" + "$ref": "MachineType" }, "type": "array" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "[Output Only] An informational warning that appears when the machine types list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -42740,10 +48091,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -42767,10 +48120,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -42814,488 +48169,514 @@ }, "type": "object" }, - "LocalDisk": { - "id": "LocalDisk", + "ManagedInstance": { + "description": "A Managed Instance resource.", + "id": "ManagedInstance", "properties": { - "diskCount": { - "description": "Specifies the number of such disks.", - "format": "int32", - "type": "integer" + "currentAction": { + "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: - NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. - CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. - CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's targetSize value is decreased instead. - RECREATING The managed instance group is recreating this instance. - DELETING The managed instance group is permanently deleting this instance. - ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. - RESTARTING The managed instance group is restarting the instance. - REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance. - VERIFYING The managed instance group has created the instance and it is in the process of being verified. ", + "enum": [ + "ABANDONING", + "CREATING", + "CREATING_WITHOUT_RETRIES", + "DELETING", + "NONE", + "RECREATING", + "REFRESHING", + "RESTARTING", + "RESUMING", + "STARTING", + "STOPPING", + "SUSPENDING", + "VERIFYING" + ], + "enumDescriptions": [ + "The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group.", + "The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful.", + "The managed instance group is attempting to create this instance only once. 
If the group fails to create this instance, it does not try again and the group's targetSize value is decreased.", + "The managed instance group is permanently deleting this instance.", + "The managed instance group has not scheduled any actions for this instance.", + "The managed instance group is recreating this instance.", + "The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance.", + "The managed instance group is restarting this instance.", + "The managed instance group is resuming this instance.", + "The managed instance group is starting this instance.", + "The managed instance group is stopping this instance.", + "The managed instance group is suspending this instance.", + "The managed instance group is verifying this already created instance. Verification happens every time the instance is (re)created or restarted and consists of: 1. Waiting until health check specified as part of this managed instance group's autohealing policy reports HEALTHY. Note: Applies only if autohealing policy has a health check specified 2. Waiting for addition verification steps performed as post-instance creation (subject to future extensions)." + ], + "type": "string" }, - "diskSizeGb": { - "description": "Specifies the size of the disk in base-2 GB.", - "format": "int32", - "type": "integer" + "id": { + "description": "[Output only] The unique identifier for this resource. This field is empty when instance does not exist.", + "format": "uint64", + "type": "string" }, - "diskType": { - "description": "Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL.", + "instance": { + "description": "[Output Only] The URL of the instance. The URL can exist even if the instance has not yet been created.", "type": "string" - } - }, - "type": "object" - }, - "LocationPolicy": { - "description": "Configuration for location policy among multiple possible locations (e.g. preferences for zone selection among zones in a single region).", - "id": "LocationPolicy", - "properties": { - "locations": { - "additionalProperties": { - "$ref": "LocationPolicyLocation" + }, + "instanceHealth": { + "description": "[Output Only] Health state of the instance per health-check.", + "items": { + "$ref": "ManagedInstanceInstanceHealth" }, - "description": "Location configurations mapped by location name. Currently only zone names are supported and must be represented as valid internal URLs, such as zones/us-central1-a.", - "type": "object" + "type": "array" + }, + "instanceStatus": { + "description": "[Output Only] The status of the instance. 
This field is empty when the instance does not exist.", + "enum": [ + "DEPROVISIONING", + "PROVISIONING", + "REPAIRING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" + ], + "enumDescriptions": [ + "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", + "All required resources have been allocated and the instance is being started.", + "The instance has stopped successfully.", + "The instance is currently stopping (either being deleted or killed).", + "The instance has suspended.", + "The instance is suspending.", + "The instance has stopped (either by explicit action or underlying failure)." + ], + "type": "string" + }, + "lastAttempt": { + "$ref": "ManagedInstanceLastAttempt", + "description": "[Output Only] Information about the last attempt to create or delete the instance." + }, + "preservedStateFromConfig": { + "$ref": "PreservedState", + "description": "[Output Only] Preserved state applied from per-instance config for this instance." + }, + "preservedStateFromPolicy": { + "$ref": "PreservedState", + "description": "[Output Only] Preserved state generated based on stateful policy for this instance." + }, + "version": { + "$ref": "ManagedInstanceVersion", + "description": "[Output Only] Intended version of this instance." } }, "type": "object" }, - "LocationPolicyLocation": { - "id": "LocationPolicyLocation", + "ManagedInstanceInstanceHealth": { + "id": "ManagedInstanceInstanceHealth", "properties": { - "preference": { - "description": "Preference for a given location.", + "detailedHealthState": { + "description": "[Output Only] The current detailed instance health state.", "enum": [ - "ALLOW", - "DENY", - "PREFERENCE_UNSPECIFIED" + "DRAINING", + "HEALTHY", + "TIMEOUT", + "UNHEALTHY", + "UNKNOWN" ], "enumDescriptions": [ - "Location is allowed for use.", - "Location is prohibited.", - "Default value, unused." + "The instance is being drained. The existing connections to the instance have time to complete, but the new ones are being refused.", + "The instance is reachable i.e. a connection to the application health checking endpoint can be established, and conforms to the requirements defined by the health check.", + "The instance is unreachable i.e. a connection to the application health checking endpoint cannot be established, or the server does not respond within the specified timeout.", + "The instance is reachable, but does not conform to the requirements defined by the health check.", + "The health checking system is aware of the instance but its health is not known at the moment." ], "type": "string" + }, + "healthCheck": { + "description": "[Output Only] The URL for the health check that verifies whether the instance is healthy.", + "type": "string" } }, "type": "object" }, - "LogConfig": { - "description": "This is deprecated and has no effect. Do not use.", - "id": "LogConfig", + "ManagedInstanceLastAttempt": { + "id": "ManagedInstanceLastAttempt", "properties": { - "cloudAudit": { - "$ref": "LogConfigCloudAuditOptions", - "description": "This is deprecated and has no effect. Do not use." - }, - "counter": { - "$ref": "LogConfigCounterOptions", - "description": "This is deprecated and has no effect. Do not use." - }, - "dataAccess": { - "$ref": "LogConfigDataAccessOptions", - "description": "This is deprecated and has no effect. 
Do not use." + "errors": { + "description": "[Output Only] Encountered errors during the last attempt to create or delete the instance.", + "properties": { + "errors": { + "description": "[Output Only] The array of errors encountered while processing this operation.", + "items": { + "properties": { + "code": { + "description": "[Output Only] The error type identifier for this error.", + "type": "string" + }, + "errorDetails": { + "description": "[Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED.", + "items": { + "properties": { + "errorInfo": { + "$ref": "ErrorInfo" + }, + "help": { + "$ref": "Help" + }, + "localizedMessage": { + "$ref": "LocalizedMessage" + }, + "quotaInfo": { + "$ref": "QuotaExceededInfo" + } + }, + "type": "object" + }, + "type": "array" + }, + "location": { + "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional.", + "type": "string" + }, + "message": { + "description": "[Output Only] An optional, human-readable error message.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" } }, "type": "object" }, - "LogConfigCloudAuditOptions": { - "description": "This is deprecated and has no effect. Do not use.", - "id": "LogConfigCloudAuditOptions", + "ManagedInstanceVersion": { + "id": "ManagedInstanceVersion", "properties": { - "authorizationLoggingOptions": { - "$ref": "AuthorizationLoggingOptions", - "description": "This is deprecated and has no effect. Do not use." + "instanceTemplate": { + "description": "[Output Only] The intended template of the instance. This field is empty when current_action is one of { DELETING, ABANDONING }.", + "type": "string" }, - "logName": { - "description": "This is deprecated and has no effect. Do not use.", - "enum": [ - "ADMIN_ACTIVITY", - "DATA_ACCESS", - "UNSPECIFIED_LOG_NAME" - ], - "enumDescriptions": [ - "This is deprecated and has no effect. Do not use.", - "This is deprecated and has no effect. Do not use.", - "This is deprecated and has no effect. Do not use." - ], + "name": { + "description": "[Output Only] Name of the version.", "type": "string" } }, "type": "object" }, - "LogConfigCounterOptions": { - "description": "This is deprecated and has no effect. Do not use.", - "id": "LogConfigCounterOptions", + "Metadata": { + "description": "A metadata key/value entry.", + "id": "Metadata", "properties": { - "customFields": { - "description": "This is deprecated and has no effect. Do not use.", + "fingerprint": { + "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the resource.", + "format": "byte", + "type": "string" + }, + "items": { + "description": "Array of key/value pairs. 
The total size of all keys and values must be less than 512 KB.", "items": { - "$ref": "LogConfigCounterOptionsCustomField" + "description": "Metadata", + "properties": { + "key": { + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + }, + "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.", + "pattern": "[a-zA-Z0-9-_]{1,128}", + "type": "string" + }, + "value": { + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + }, + "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB).", + "type": "string" + } + }, + "type": "object" }, "type": "array" }, - "field": { - "description": "This is deprecated and has no effect. Do not use.", + "kind": { + "default": "compute#metadata", + "description": "[Output Only] Type of the resource. Always compute#metadata for metadata.", "type": "string" + } + }, + "type": "object" + }, + "MetadataFilter": { + "description": "Opaque filter criteria used by load balancers to restrict routing configuration to a limited set of load balancing proxies. Proxies and sidecars involved in load balancing would typically present metadata to the load balancers that need to match criteria specified here. If a match takes place, the relevant configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. An example for using metadataFilters would be: if load balancing involves Envoys, they receive routing configuration when values in metadataFilters match values supplied in of their XDS requests to loadbalancers.", + "id": "MetadataFilter", + "properties": { + "filterLabels": { + "description": "The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria This list must not be empty and can have at the most 64 entries.", + "items": { + "$ref": "MetadataFilterLabelMatch" + }, + "type": "array" }, - "metric": { - "description": "This is deprecated and has no effect. Do not use.", + "filterMatchCriteria": { + "description": "Specifies how individual filter label matches within the list of filterLabels and contributes toward the overall metadataFilter match. Supported values are: - MATCH_ANY: at least one of the filterLabels must have a matching label in the provided metadata. - MATCH_ALL: all filterLabels must have matching labels in the provided metadata. ", + "enum": [ + "MATCH_ALL", + "MATCH_ANY", + "NOT_SET" + ], + "enumDescriptions": [ + "Specifies that all filterLabels must match for the metadataFilter to be considered a match.", + "Specifies that any filterLabel must match for the metadataFilter to be considered a match.", + "Indicates that the match criteria was not set. A metadataFilter must never be created with this value." 
+ ], "type": "string" } }, "type": "object" }, - "LogConfigCounterOptionsCustomField": { - "description": "This is deprecated and has no effect. Do not use.", - "id": "LogConfigCounterOptionsCustomField", + "MetadataFilterLabelMatch": { + "description": "MetadataFilter label name value pairs that are expected to match corresponding labels presented as metadata to the load balancer.", + "id": "MetadataFilterLabelMatch", "properties": { "name": { - "description": "This is deprecated and has no effect. Do not use.", + "description": "Name of metadata label. The name can have a maximum length of 1024 characters and must be at least 1 character long.", "type": "string" }, "value": { - "description": "This is deprecated and has no effect. Do not use.", + "description": "The value of the label must match the specified value. value can have a maximum length of 1024 characters.", "type": "string" } }, "type": "object" }, - "LogConfigDataAccessOptions": { - "description": "This is deprecated and has no effect. Do not use.", - "id": "LogConfigDataAccessOptions", + "NamedPort": { + "description": "The named port. For example: \u003c\"http\", 80\u003e.", + "id": "NamedPort", "properties": { - "logMode": { - "description": "This is deprecated and has no effect. Do not use.", - "enum": [ - "LOG_FAIL_CLOSED", - "LOG_MODE_UNSPECIFIED" - ], - "enumDescriptions": [ - "This is deprecated and has no effect. Do not use.", - "This is deprecated and has no effect. Do not use." - ], + "name": { + "description": "The name for this named port. The name must be 1-63 characters long, and comply with RFC1035.", "type": "string" + }, + "port": { + "description": "The port number, which can be a value between 1 and 65535.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "MachineImage": { - "description": "Represents a machine image resource. A machine image is a Compute Engine resource that stores all the configuration, metadata, permissions, and data from one or more disks required to create a Virtual machine (VM) instance. For more information, see Machine images.", - "id": "MachineImage", + "Network": { + "description": "Represents a VPC Network resource. Networks connect resources to each other and to the internet. For more information, read Virtual Private Cloud (VPC) Network.", + "id": "Network", "properties": { + "IPv4Range": { + "description": "Deprecated in favor of subnet mode networks. The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}", + "type": "string" + }, + "autoCreateSubnetworks": { + "description": "Must be set to create a VPC network. If not set, a legacy network is created. When set to true, the VPC network is created in auto mode. When set to false, the VPC network is created in custom mode. An auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges. For custom mode VPC networks, you can add subnets using the subnetworks insert method.", + "type": "boolean" + }, "creationTimestamp": { - "description": "[Output Only] The creation timestamp for this machine image in RFC3339 text format.", + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, "description": { - "description": "An optional description of this resource. 
Provide this property when you create the resource.", + "description": "An optional description of this resource. Provide this field when you create the resource.", "type": "string" }, - "guestFlush": { - "description": "[Input Only] Whether to attempt an application consistent machine image by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + "enableUlaInternalIpv6": { + "description": "Enable ULA internal ipv6 on this network. Enabling this feature will assign a /48 from google defined ULA prefix fd20::/20. .", "type": "boolean" }, + "firewallPolicy": { + "description": "[Output Only] URL of the firewall policy the network is associated with.", + "type": "string" + }, + "gatewayIPv4": { + "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", + "type": "string" + }, "id": { - "description": "[Output Only] A unique identifier for this machine image. The server defines this identifier.", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", "type": "string" }, - "instanceProperties": { - "$ref": "InstanceProperties", - "description": "[Output Only] Properties of source instance" + "internalIpv6Range": { + "description": "When enabling ula internal ipv6, caller optionally can specify the /48 range they want from the google defined ULA prefix fd20::/20. The input must be a valid /48 ULA IPv6 address and must be within the fd20::/20. Operation will fail if the speficied /48 is already in used by another resource. If the field is not speficied, then a /48 range will be randomly allocated from fd20::/20 and returned via this field. .", + "type": "string" }, "kind": { - "default": "compute#machineImage", - "description": "[Output Only] The resource type, which is always compute#machineImage for machine image.", + "default": "compute#network", + "description": "[Output Only] Type of the resource. Always compute#network for networks.", "type": "string" }, - "machineImageEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Encrypts the machine image using a customer-supplied encryption key. After you encrypt a machine image using a customer-supplied key, you must provide the same key if you use the machine image later. For example, you must provide the encryption key when you create an instance from the encrypted machine image in a future request. Customer-supplied encryption keys do not protect access to metadata of the machine image. If you do not provide an encryption key when creating the machine image, then the machine image will be encrypted using an automatically generated key and you do not need to provide a key to use the machine image later." + "mtu": { + "description": "Maximum Transmission Unit in bytes. The minimum value for this field is 1300 and the maximum value is 8896. The suggested value is 1500, which is the default MTU used on the Internet, or 8896 if you want to use Jumbo frames. If unspecified, the value defaults to 1460.", + "format": "int32", + "type": "integer" }, "name": { "annotations": { "required": [ - "compute.machineImages.insert" - ] - }, - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "satisfiesPzs": { - "description": "[Output Only] Reserved for future use.", - "type": "boolean" - }, - "savedDisks": { - "description": "An array of Machine Image specific properties for disks attached to the source instance", - "items": { - "$ref": "SavedDisk" - }, - "type": "array" - }, - "selfLink": { - "description": "[Output Only] The URL for this machine image. The server defines this URL.", - "type": "string" - }, - "sourceDiskEncryptionKeys": { - "description": "[Input Only] The customer-supplied encryption key of the disks attached to the source instance. Required if the source disk is protected by a customer-supplied encryption key.", - "items": { - "$ref": "SourceDiskEncryptionKey" - }, - "type": "array" - }, - "sourceInstance": { - "description": "The source instance used to create the machine image. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /instances/instance - projects/project/zones/zone/instances/instance ", - "type": "string" - }, - "sourceInstanceProperties": { - "$ref": "SourceInstanceProperties", - "description": "[Output Only] DEPRECATED: Please use instance_properties instead for source instance related properties. New properties will not be added to this field." - }, - "status": { - "description": "[Output Only] The status of the machine image. One of the following values: INVALID, CREATING, READY, DELETING, and UPLOADING.", - "enum": [ - "CREATING", - "DELETING", - "INVALID", - "READY", - "UPLOADING" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "storageLocations": { - "description": "The regional or multi-regional Cloud Storage bucket location where the machine image is stored.", - "items": { - "type": "string" - }, - "type": "array" - }, - "totalStorageBytes": { - "description": "[Output Only] Total size of the storage used by the machine image.", - "format": "int64", + "compute.networks.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" - } - }, - "type": "object" - }, - "MachineImageList": { - "description": "A list of machine images.", - "id": "MachineImageList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + }, + "networkFirewallPolicyEnforcementOrder": { + "description": "The network firewall policy enforcement order. Can be either AFTER_CLASSIC_FIREWALL or BEFORE_CLASSIC_FIREWALL. 
Defaults to AFTER_CLASSIC_FIREWALL if the field is not specified.", + "enum": [ + "AFTER_CLASSIC_FIREWALL", + "BEFORE_CLASSIC_FIREWALL" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" }, - "items": { - "description": "A list of MachineImage resources.", + "peerings": { + "description": "[Output Only] A list of network peerings for the resource.", "items": { - "$ref": "MachineImage" + "$ref": "NetworkPeering" }, "type": "array" }, - "kind": { - "default": "compute#machineImageList", - "description": "[Output Only] The resource type, which is always compute#machineImagesListResponse for machine image lists.", - "type": "string" + "routingConfig": { + "$ref": "NetworkRoutingConfig", + "description": "The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce." }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", "type": "string" }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "LARGE_DEPLOYMENT_WARNING", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "PARTIAL_SUCCESS", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "Warning about failed cleanup of transient changes made by a failed operation.", - "A link to a deprecated resource was created.", - "When deploying and at least one of the resources has a type marked as deprecated", - "The user created a boot disk that is larger than image size.", - "When deploying and at least one of the resources has a type marked as experimental", - "Warning that is present in an external api call", - "Warning that value of a field has been overridden. 
Deprecated unused field.", - "The operation involved use of an injected kernel, which is deprecated.", - "When deploying a deployment with a exceedingly large number of resources", - "A resource depends on a missing type", - "The route's nextHopIp address is not assigned to an instance on the network.", - "The route's next hop instance cannot ip forward.", - "The route's nextHopInstance URL refers to an instance that does not exist.", - "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", - "The route's next hop instance does not have a status of RUNNING.", - "Error which is not critical. We decided to continue the process despite the mentioned error.", - "No results are present on a particular list page.", - "Success is reported, but some results may be missing due to errors", - "The user attempted to use a resource that requires a TOS they have not accepted.", - "Warning that a resource is in use.", - "One or more of the resources set to auto-delete could not be deleted because they were in use.", - "When a resource schema validation is ignored.", - "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", - "When undeclared properties in the schema are present", - "A given scope cannot be reached." - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } + "subnetworks": { + "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this VPC network.", + "items": { + "type": "string" }, - "type": "object" + "type": "array" } }, "type": "object" }, - "MachineType": { - "description": "Represents a Machine Type resource. You can use specific machine types for your VM instances based on performance and pricing requirements. 
For more information, read Machine Types.", - "id": "MachineType", + "NetworkAttachment": { + "description": "NetworkAttachments A network attachment resource ...", + "id": "NetworkAttachment", "properties": { - "accelerators": { - "description": "[Output Only] A list of accelerator configurations assigned to this machine type.", + "connectionEndpoints": { + "description": "[Output Only] An array of connections for all the producers connected to this network attachment.", "items": { - "properties": { - "guestAcceleratorCount": { - "description": "Number of accelerator cards exposed to the guest.", - "format": "int32", - "type": "integer" - }, - "guestAcceleratorType": { - "description": "The accelerator type resource name, not a full URL, e.g. 'nvidia-tesla-k80'.", - "type": "string" - } - }, - "type": "object" + "$ref": "NetworkAttachmentConnectedEndpoint" }, "type": "array" }, + "connectionPreference": { + "enum": [ + "ACCEPT_AUTOMATIC", + "ACCEPT_MANUAL", + "INVALID" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this machine type. Only applicable if the machine type is unavailable." - }, "description": { - "description": "[Output Only] An optional textual description of the resource.", + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "guestCpus": { - "description": "[Output Only] The number of virtual CPUs that are available to the instance.", - "format": "int32", - "type": "integer" + "fingerprint": { + "description": "[Output Only] Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", + "format": "byte", + "type": "string" }, "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "description": "[Output Only] The unique identifier for the resource type. The server generates this identifier.", "format": "uint64", "type": "string" }, - "imageSpaceGb": { - "description": "[Deprecated] This property is deprecated and will never be populated with any relevant values.", - "format": "int32", - "type": "integer" - }, - "isSharedCpu": { - "description": "[Output Only] Whether this machine type has a shared CPU. See Shared-core machine types for more information.", - "type": "boolean" - }, "kind": { - "default": "compute#machineType", - "description": "[Output Only] The type of the resource. 
Always compute#machineType for machine types.", - "type": "string" - }, - "maximumPersistentDisks": { - "description": "[Output Only] Maximum persistent disks allowed.", - "format": "int32", - "type": "integer" - }, - "maximumPersistentDisksSizeGb": { - "description": "[Output Only] Maximum total persistent disks size (GB) allowed.", - "format": "int64", + "default": "compute#networkAttachment", + "description": "[Output Only] Type of the resource.", "type": "string" }, - "memoryMb": { - "description": "[Output Only] The amount of physical memory available to the instance, defined in MB.", - "format": "int32", - "type": "integer" - }, "name": { - "description": "[Output Only] Name of the resource.", + "annotations": { + "required": [ + "compute.networkAttachments.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "scratchDisks": { - "description": "[Output Only] A list of extended scratch disks assigned to the instance.", + "network": { + "description": "[Output Only] The URL of the network which the Network Attachment belongs to.", + "type": "string" + }, + "producerAcceptLists": { + "description": "Projects that are allowed to connect to this network attachment. The project can be specified using its id or number.", "items": { - "properties": { - "diskGb": { - "description": "Size of the scratch disk, defined in GB.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" + "type": "string" + }, + "type": "array" + }, + "producerRejectLists": { + "description": "Projects that are not allowed to connect to this network attachment. The project can be specified using its id or number.", + "items": { + "type": "string" }, "type": "array" }, + "region": { + "description": "[Output Only] URL of the region where the network attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. 
It is not settable as a field in the request body.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "zone": { - "description": "[Output Only] The name of the zone where the machine type resides, such as us-central1-a.", + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource's resource id.", "type": "string" + }, + "subnetworks": { + "description": "An array of URLs where each entry is the URL of a subnet provided by the service consumer to use for endpoints in the producers that connect to this network attachment.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "MachineTypeAggregatedList": { - "id": "MachineTypeAggregatedList", + "NetworkAttachmentAggregatedList": { + "description": "Contains a list of NetworkAttachmentsScopedList.", + "id": "NetworkAttachmentAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", @@ -43303,15 +48684,14 @@ }, "items": { "additionalProperties": { - "$ref": "MachineTypesScopedList", - "description": "[Output Only] Name of the scope containing this set of machine types." + "$ref": "NetworkAttachmentsScopedList", + "description": "Name of the scope containing this set of NetworkAttachments." }, - "description": "A list of MachineTypesScopedList resources.", + "description": "A list of NetworkAttachmentsScopedList resources.", "type": "object" }, "kind": { - "default": "compute#machineTypeAggregatedList", - "description": "[Output Only] Type of resource. Always compute#machineTypeAggregatedList for aggregated lists of machine types.", + "default": "compute#networkAttachmentAggregatedList", "type": "string" }, "nextPageToken": { @@ -43322,13 +48702,6 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "unreachables": { - "description": "[Output Only] Unreachable resources.", - "items": { - "type": "string" - }, - "type": "array" - }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -43343,10 +48716,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -43370,10 +48745,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -43417,24 +48794,68 @@ }, "type": "object" }, - "MachineTypeList": { - "description": "Contains a list of machine types.", - "id": "MachineTypeList", + "NetworkAttachmentConnectedEndpoint": { + "description": "[Output Only] A connection connected to this network attachment.", + "id": "NetworkAttachmentConnectedEndpoint", + "properties": { + "ipAddress": { + "description": "The IP address assigned to the producer instance network interface. This value will be a range in case of Serverless.", + "type": "string" + }, + "projectIdOrNum": { + "description": "The project id or number of the interface to which the IP was assigned.", + "type": "string" + }, + "secondaryIpCidrRanges": { + "description": "Alias IP ranges from the same subnetwork", + "items": { + "type": "string" + }, + "type": "array" + }, + "status": { + "description": "The status of a connected endpoint to this network attachment.", + "enum": [ + "ACCEPTED", + "CLOSED", + "NEEDS_ATTENTION", + "PENDING", + "REJECTED", + "STATUS_UNSPECIFIED" + ], + "enumDescriptions": [ + "The consumer allows traffic from the producer to reach its VPC.", + "The consumer network attachment no longer exists.", + "The consumer needs to take further action before traffic can be served.", + "The consumer neither allows nor prohibits traffic from the producer to reach its VPC.", + "The consumer prohibits traffic from the producer to reach its VPC.", + "" + ], + "type": "string" + }, + "subnetwork": { + "description": "The subnetwork used to assign the IP to the producer instance network interface.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkAttachmentList": { + "id": "NetworkAttachmentList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of MachineType resources.", + "description": "A list of NetworkAttachment resources.", "items": { - "$ref": "MachineType" + "$ref": "NetworkAttachment" }, "type": "array" }, "kind": { - "default": "compute#machineTypeList", - "description": "[Output Only] Type of resource. 
Always compute#machineTypeList for lists of machine types.", + "default": "compute#networkAttachmentList", "type": "string" }, "nextPageToken": { @@ -43459,10 +48880,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -43486,10 +48909,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -43533,18 +48958,18 @@ }, "type": "object" }, - "MachineTypesScopedList": { - "id": "MachineTypesScopedList", + "NetworkAttachmentsScopedList": { + "id": "NetworkAttachmentsScopedList", "properties": { - "machineTypes": { - "description": "[Output Only] A list of machine types contained in this scope.", + "networkAttachments": { + "description": "A list of NetworkAttachments contained in this scope.", "items": { - "$ref": "MachineType" + "$ref": "NetworkAttachment" }, "type": "array" }, "warning": { - "description": "[Output Only] An informational warning that appears when the machine types list is empty.", + "description": "Informational warning which replaces the list of network attachments when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -43557,10 +48982,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -43584,10 +49011,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -43611,381 +49040,305 @@ "key": { "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "ManagedInstance": { - "description": "A Managed Instance resource.", - "id": "ManagedInstance", - "properties": { - "currentAction": { - "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: - NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. - CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. - CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's targetSize value is decreased instead. - RECREATING The managed instance group is recreating this instance. - DELETING The managed instance group is permanently deleting this instance. - ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. - RESTARTING The managed instance group is restarting the instance. - REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance. - VERIFYING The managed instance group has created the instance and it is in the process of being verified. ", - "enum": [ - "ABANDONING", - "CREATING", - "CREATING_WITHOUT_RETRIES", - "DELETING", - "NONE", - "RECREATING", - "REFRESHING", - "RESTARTING", - "RESUMING", - "STARTING", - "STOPPING", - "SUSPENDING", - "VERIFYING" - ], - "enumDescriptions": [ - "The managed instance group is abandoning this instance. 
The instance will be removed from the instance group and from any target pools that are associated with this group.", - "The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful.", - "The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's targetSize value is decreased.", - "The managed instance group is permanently deleting this instance.", - "The managed instance group has not scheduled any actions for this instance.", - "The managed instance group is recreating this instance.", - "The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance.", - "The managed instance group is restarting this instance.", - "The managed instance group is resuming this instance.", - "The managed instance group is starting this instance.", - "The managed instance group is stopping this instance.", - "The managed instance group is suspending this instance.", - "The managed instance group is verifying this already created instance. Verification happens every time the instance is (re)created or restarted and consists of: 1. Waiting until health check specified as part of this managed instance group's autohealing policy reports HEALTHY. Note: Applies only if autohealing policy has a health check specified 2. Waiting for addition verification steps performed as post-instance creation (subject to future extensions)." - ], - "type": "string" - }, - "id": { - "description": "[Output only] The unique identifier for this resource. This field is empty when instance does not exist.", - "format": "uint64", - "type": "string" - }, - "instance": { - "description": "[Output Only] The URL of the instance. The URL can exist even if the instance has not yet been created.", - "type": "string" - }, - "instanceHealth": { - "description": "[Output Only] Health state of the instance per health-check.", - "items": { - "$ref": "ManagedInstanceInstanceHealth" - }, - "type": "array" - }, - "instanceStatus": { - "description": "[Output Only] The status of the instance. This field is empty when the instance does not exist.", - "enum": [ - "DEPROVISIONING", - "PROVISIONING", - "REPAIRING", - "RUNNING", - "STAGING", - "STOPPED", - "STOPPING", - "SUSPENDED", - "SUSPENDING", - "TERMINATED" - ], - "enumDescriptions": [ - "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", - "Resources are being allocated for the instance.", - "The instance is in repair.", - "The instance is running.", - "All required resources have been allocated and the instance is being started.", - "The instance has stopped successfully.", - "The instance is currently stopping (either being deleted or killed).", - "The instance has suspended.", - "The instance is suspending.", - "The instance has stopped (either by explicit action or underlying failure)." - ], - "type": "string" - }, - "lastAttempt": { - "$ref": "ManagedInstanceLastAttempt", - "description": "[Output Only] Information about the last attempt to create or delete the instance." - }, - "preservedStateFromConfig": { - "$ref": "PreservedState", - "description": "[Output Only] Preserved state applied from per-instance config for this instance." 
- }, - "preservedStateFromPolicy": { - "$ref": "PreservedState", - "description": "[Output Only] Preserved state generated based on stateful policy for this instance." - }, - "version": { - "$ref": "ManagedInstanceVersion", - "description": "[Output Only] Intended version of this instance." - } - }, - "type": "object" - }, - "ManagedInstanceInstanceHealth": { - "id": "ManagedInstanceInstanceHealth", - "properties": { - "detailedHealthState": { - "description": "[Output Only] The current detailed instance health state.", - "enum": [ - "DRAINING", - "HEALTHY", - "TIMEOUT", - "UNHEALTHY", - "UNKNOWN" - ], - "enumDescriptions": [ - "The instance is being drained. The existing connections to the instance have time to complete, but the new ones are being refused.", - "The instance is reachable i.e. a connection to the application health checking endpoint can be established, and conforms to the requirements defined by the health check.", - "The instance is unreachable i.e. a connection to the application health checking endpoint cannot be established, or the server does not respond within the specified timeout.", - "The instance is reachable, but does not conform to the requirements defined by the health check.", - "The health checking system is aware of the instance but its health is not known at the moment." - ], - "type": "string" - }, - "healthCheck": { - "description": "[Output Only] The URL for the health check that verifies whether the instance is healthy.", - "type": "string" - } - }, - "type": "object" - }, - "ManagedInstanceLastAttempt": { - "id": "ManagedInstanceLastAttempt", - "properties": { - "errors": { - "description": "[Output Only] Encountered errors during the last attempt to create or delete the instance.", - "properties": { - "errors": { - "description": "[Output Only] The array of errors encountered while processing this operation.", - "items": { - "properties": { - "code": { - "description": "[Output Only] The error type identifier for this error.", - "type": "string" - }, - "location": { - "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional.", - "type": "string" - }, - "message": { - "description": "[Output Only] An optional, human-readable error message.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "ManagedInstanceVersion": { - "id": "ManagedInstanceVersion", - "properties": { - "instanceTemplate": { - "description": "[Output Only] The intended template of the instance. This field is empty when current_action is one of { DELETING, ABANDONING }.", - "type": "string" - }, - "name": { - "description": "[Output Only] Name of the version.", - "type": "string" - } - }, - "type": "object" - }, - "Metadata": { - "description": "A metadata key/value entry.", - "id": "Metadata", - "properties": { - "fingerprint": { - "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the resource.", - "format": "byte", - "type": "string" - }, - "items": { - "description": "Array of key/value pairs. 
The total size of all keys and values must be less than 512 KB.", - "items": { - "description": "Metadata", - "properties": { - "key": { - "annotations": { - "required": [ - "compute.instances.insert", - "compute.projects.setCommonInstanceMetadata" - ] + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } }, - "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.", - "pattern": "[a-zA-Z0-9-_]{1,128}", - "type": "string" + "type": "object" }, - "value": { - "annotations": { - "required": [ - "compute.instances.insert", - "compute.projects.setCommonInstanceMetadata" - ] - }, - "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB).", - "type": "string" - } + "type": "array" }, - "type": "object" + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } }, - "type": "array" - }, - "kind": { - "default": "compute#metadata", - "description": "[Output Only] Type of the resource. Always compute#metadata for metadata.", - "type": "string" + "type": "object" } }, "type": "object" }, - "MetadataFilter": { - "description": "Opaque filter criteria used by load balancers to restrict routing configuration to a limited set of load balancing proxies. Proxies and sidecars involved in load balancing would typically present metadata to the load balancers that need to match criteria specified here. If a match takes place, the relevant configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. An example for using metadataFilters would be: if load balancing involves Envoys, they receive routing configuration when values in metadataFilters match values supplied in of their XDS requests to loadbalancers.", - "id": "MetadataFilter", + "NetworkEdgeSecurityService": { + "description": "Represents a Google Cloud Armor network edge security service resource.", + "id": "NetworkEdgeSecurityService", "properties": { - "filterLabels": { - "description": "The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria This list must not be empty and can have at the most 64 entries.", - "items": { - "$ref": "MetadataFilterLabelMatch" - }, - "type": "array" + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" }, - "filterMatchCriteria": { - "description": "Specifies how individual filter label matches within the list of filterLabels and contributes toward the overall metadataFilter match. Supported values are: - MATCH_ANY: at least one of the filterLabels must have a matching label in the provided metadata. - MATCH_ALL: all filterLabels must have matching labels in the provided metadata. 
", - "enum": [ - "MATCH_ALL", - "MATCH_ANY", - "NOT_SET" - ], - "enumDescriptions": [ - "Specifies that all filterLabels must match for the metadataFilter to be considered a match.", - "Specifies that any filterLabel must match for the metadataFilter to be considered a match.", - "Indicates that the match criteria was not set. A metadataFilter must never be created with this value." - ], + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" - } - }, - "type": "object" - }, - "MetadataFilterLabelMatch": { - "description": "MetadataFilter label name value pairs that are expected to match corresponding labels presented as metadata to the load balancer.", - "id": "MetadataFilterLabelMatch", - "properties": { - "name": { - "description": "Name of metadata label. The name can have a maximum length of 1024 characters and must be at least 1 character long.", + }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a NetworkEdgeSecurityService. An up-to-date fingerprint must be provided in order to update the NetworkEdgeSecurityService, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a NetworkEdgeSecurityService.", + "format": "byte", "type": "string" }, - "value": { - "description": "The value of the label must match the specified value. value can have a maximum length of 1024 characters.", + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", "type": "string" - } - }, - "type": "object" - }, - "NamedPort": { - "description": "The named port. For example: \u003c\"http\", 80\u003e.", - "id": "NamedPort", - "properties": { - "name": { - "description": "The name for this named port. The name must be 1-63 characters long, and comply with RFC1035.", + }, + "kind": { + "default": "compute#networkEdgeSecurityService", + "description": "[Output only] Type of the resource. Always compute#networkEdgeSecurityService for NetworkEdgeSecurityServices", "type": "string" }, - "port": { - "description": "The port number, which can be a value between 1 and 65535.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "Network": { - "description": "Represents a VPC Network resource. Networks connect resources to each other and to the internet. For more information, read Virtual Private Cloud (VPC) Network.", - "id": "Network", - "properties": { - "IPv4Range": { - "description": "Deprecated in favor of subnet mode networks. The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", - "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}", + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "autoCreateSubnetworks": { - "description": "Must be set to create a VPC network. If not set, a legacy network is created. When set to true, the VPC network is created in auto mode. When set to false, the VPC network is created in custom mode. An auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges. For custom mode VPC networks, you can add subnets using the subnetworks insert method.", - "type": "boolean" + "region": { + "description": "[Output Only] URL of the region where the resource resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "securityPolicy": { + "description": "The resource URL for the network edge security service associated with this network edge security service.", "type": "string" }, - "description": { - "description": "An optional description of this resource. Provide this field when you create the resource.", + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "gatewayIPv4": { - "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", - "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEdgeSecurityServiceAggregatedList": { + "id": "NetworkEdgeSecurityServiceAggregatedList", + "properties": { + "etag": { "type": "string" }, "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, + "items": { + "additionalProperties": { + "$ref": "NetworkEdgeSecurityServicesScopedList", + "description": "Name of the scope containing this set of security policies." + }, + "description": "A list of NetworkEdgeSecurityServicesScopedList resources.", + "type": "object" + }, "kind": { - "default": "compute#network", - "description": "[Output Only] Type of the resource. Always compute#network for networks.", + "default": "compute#networkEdgeSecurityServiceAggregatedList", + "description": "[Output Only] Type of resource. Always compute#networkEdgeSecurityServiceAggregatedList for lists of Network Edge Security Services.", "type": "string" }, - "mtu": { - "description": "Maximum Transmission Unit in bytes. The minimum value for this field is 1460 and the maximum value is 1500 bytes. If unspecified, defaults to 1460.", - "format": "int32", - "type": "integer" + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" }, - "name": { - "annotations": { - "required": [ - "compute.networks.insert" - ] - }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "peerings": { - "description": "[Output Only] A list of network peerings for the resource.", + "unreachables": { + "description": "[Output Only] Unreachable resources.", "items": { - "$ref": "NetworkPeering" + "type": "string" }, "type": "array" }, - "routingConfig": { - "$ref": "NetworkRoutingConfig", - "description": "The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce." - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "subnetworks": { - "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this VPC network.", + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "NetworkEdgeSecurityServicesScopedList": { + "id": "NetworkEdgeSecurityServicesScopedList", + "properties": { + "networkEdgeSecurityServices": { + "description": "A list of NetworkEdgeSecurityServices contained in this scope.", "items": { - "type": "string" + "$ref": "NetworkEdgeSecurityService" }, "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of security policies when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" @@ -44076,7 +49429,7 @@ "type": "string" }, "networkEndpointType": { - "description": "Type of network endpoints in this network endpoint group. Can be one of GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, INTERNET_IP_PORT, SERVERLESS, PRIVATE_SERVICE_CONNECT.", + "description": "Type of network endpoints in this network endpoint group. Can be one of GCE_VM_IP, GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, INTERNET_IP_PORT, SERVERLESS, PRIVATE_SERVICE_CONNECT.", "enum": [ "GCE_VM_IP", "GCE_VM_IP_PORT", @@ -44097,8 +49450,11 @@ ], "type": "string" }, + "pscData": { + "$ref": "NetworkEndpointGroupPscData" + }, "pscTargetService": { - "description": "The target service url used to set up private service connection to a Google API. An example value is: \"asia-northeast3-cloudkms.googleapis.com\"", + "description": "The target service url used to set up private service connection to a Google API or a PSC Producer Service Attachment. An example value is: \"asia-northeast3-cloudkms.googleapis.com\"", "type": "string" }, "region": { @@ -44174,10 +49530,184 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "NetworkEndpointGroupAppEngine": { + "description": "Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present. Note: App Engine service must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupAppEngine", + "properties": { + "service": { + "description": "Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: \"default\", \"my-service\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse service and version fields from a request URL. 
URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs \"foo1-dot-appname.appspot.com/v1\" and \"foo1-dot-appname.appspot.com/v2\" can be backed by the same Serverless NEG with URL mask \"\u003cservice\u003e-dot-appname.appspot.com/\u003cversion\u003e\". The URL mask will parse them to { service = \"foo1\", version = \"v1\" } and { service = \"foo1\", version = \"v2\" } respectively.", + "type": "string" + }, + "version": { + "description": "Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: \"v1\", \"v2\".", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupCloudFunction": { + "description": "Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupCloudFunction", + "properties": { + "function": { + "description": "A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: \"func1\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. For example, request URLs \" mydomain.com/function1\" and \"mydomain.com/function2\" can be backed by the same Serverless NEG with URL mask \"/\u003cfunction\u003e\". The URL mask will parse them to { function = \"function1\" } and { function = \"function2\" } respectively.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupCloudRun": { + "description": "Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupCloudRun", + "properties": { + "service": { + "description": "Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: \"run-service\".", + "type": "string" + }, + "tag": { + "description": "Optional Cloud Run tag represents the \"named-revision\" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: \"revision-0010\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse \u003cservice\u003e and \u003ctag\u003e fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs \"foo1.domain.com/bar1\" and \"foo1.domain.com/bar2\" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask \"\u003ctag\u003e.domain.com/\u003cservice\u003e\". 
The URL mask will parse them to { service=\"bar1\", tag=\"foo1\" } and { service=\"bar2\", tag=\"foo2\" } respectively.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupList": { + "id": "NetworkEndpointGroupList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of NetworkEndpointGroup resources.", + "items": { + "$ref": "NetworkEndpointGroup" + }, + "type": "array" + }, + "kind": { + "default": "compute#networkEndpointGroupList", + "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupList for network endpoint group lists.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -44201,10 +49731,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -44248,174 +49780,42 @@ }, "type": "object" }, - "NetworkEndpointGroupAppEngine": { - "description": "Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present. 
Note: App Engine service must be in the same project and located in the same region as the Serverless NEG.", - "id": "NetworkEndpointGroupAppEngine", - "properties": { - "service": { - "description": "Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: \"default\", \"my-service\".", - "type": "string" - }, - "urlMask": { - "description": "A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs \"foo1-dot-appname.appspot.com/v1\" and \"foo1-dot-appname.appspot.com/v2\" can be backed by the same Serverless NEG with URL mask \"-dot-appname.appspot.com/\". The URL mask will parse them to { service = \"foo1\", version = \"v1\" } and { service = \"foo1\", version = \"v2\" } respectively.", - "type": "string" - }, - "version": { - "description": "Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: \"v1\", \"v2\".", - "type": "string" - } - }, - "type": "object" - }, - "NetworkEndpointGroupCloudFunction": { - "description": "Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG.", - "id": "NetworkEndpointGroupCloudFunction", - "properties": { - "function": { - "description": "A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: \"func1\".", - "type": "string" - }, - "urlMask": { - "description": "A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. For example, request URLs \" mydomain.com/function1\" and \"mydomain.com/function2\" can be backed by the same Serverless NEG with URL mask \"/\". The URL mask will parse them to { function = \"function1\" } and { function = \"function2\" } respectively.", - "type": "string" - } - }, - "type": "object" - }, - "NetworkEndpointGroupCloudRun": { - "description": "Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG.", - "id": "NetworkEndpointGroupCloudRun", + "NetworkEndpointGroupPscData": { + "description": "All data that is specifically relevant to only network endpoint groups of type PRIVATE_SERVICE_CONNECT.", + "id": "NetworkEndpointGroupPscData", "properties": { - "service": { - "description": "Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: \"run-service\".", + "consumerPscAddress": { + "description": "[Output Only] Address allocated from given subnetwork for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as an endpoint in L7 PSC-XLB.", "type": "string" }, - "tag": { - "description": "Optional Cloud Run tag represents the \"named-revision\" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. 
Example value: \"revision-0010\".", + "pscConnectionId": { + "description": "[Output Only] The PSC connection id of the PSC Network Endpoint Group Consumer.", + "format": "uint64", "type": "string" }, - "urlMask": { - "description": "A template to parse service and tag fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs \"foo1.domain.com/bar1\" and \"foo1.domain.com/bar2\" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask \".domain.com/\". The URL mask will parse them to { service=\"bar1\", tag=\"foo1\" } and { service=\"bar2\", tag=\"foo2\" } respectively.", + "pscConnectionStatus": { + "description": "[Output Only] The connection status of the PSC Forwarding Rule.", + "enum": [ + "ACCEPTED", + "CLOSED", + "NEEDS_ATTENTION", + "PENDING", + "REJECTED", + "STATUS_UNSPECIFIED" + ], + "enumDescriptions": [ + "The connection has been accepted by the producer.", + "The connection has been closed by the producer and will not serve traffic going forward.", + "The connection has been accepted by the producer, but the producer needs to take further action before the forwarding rule can serve traffic.", + "The connection is pending acceptance by the producer.", + "The connection has been rejected by the producer.", + "" + ], "type": "string" } }, "type": "object" }, - "NetworkEndpointGroupList": { - "id": "NetworkEndpointGroupList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of NetworkEndpointGroup resources.", - "items": { - "$ref": "NetworkEndpointGroup" - }, - "type": "array" - }, - "kind": { - "default": "compute#networkEndpointGroupList", - "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupList for network endpoint group lists.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "LARGE_DEPLOYMENT_WARNING", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "PARTIAL_SUCCESS", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "Warning about failed cleanup of transient changes made by a failed operation.", - "A link to a deprecated resource was created.", - "When deploying and at least one of the resources has a type marked as deprecated", - "The user created a boot disk that is larger than image size.", - "When deploying and at least one of the resources has a type marked as experimental", - "Warning that is present in an external api call", - "Warning that value of a field has been overridden. Deprecated unused field.", - "The operation involved use of an injected kernel, which is deprecated.", - "When deploying a deployment with a exceedingly large number of resources", - "A resource depends on a missing type", - "The route's nextHopIp address is not assigned to an instance on the network.", - "The route's next hop instance cannot ip forward.", - "The route's nextHopInstance URL refers to an instance that does not exist.", - "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", - "The route's next hop instance does not have a status of RUNNING.", - "Error which is not critical. We decided to continue the process despite the mentioned error.", - "No results are present on a particular list page.", - "Success is reported, but some results may be missing due to errors", - "The user attempted to use a resource that requires a TOS they have not accepted.", - "Warning that a resource is in use.", - "One or more of the resources set to auto-delete could not be deleted because they were in use.", - "When a resource schema validation is ignored.", - "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", - "When undeclared properties in the schema are present", - "A given scope cannot be reached." - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, "NetworkEndpointGroupsAttachEndpointsRequest": { "id": "NetworkEndpointGroupsAttachEndpointsRequest", "properties": { @@ -44497,10 +49897,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -44524,10 +49926,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -44595,10 +49999,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -44622,10 +50028,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -44709,6 +50117,11 @@ "format": "byte", "type": "string" }, + "internalIpv6PrefixLength": { + "description": "The prefix length of the primary internal IPv6 range.", + "format": "int32", + "type": "integer" + }, "ipv6AccessConfigs": { "description": "An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access.", "items": { @@ -44720,11 +50133,11 @@ "description": "[Output Only] One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork. Valid only if stackType is IPV4_IPV6.", "enum": [ "EXTERNAL", - "UNSPECIFIED_IPV6_ACCESS_TYPE" + "INTERNAL" ], "enumDescriptions": [ "This network interface can have external IPv6.", - "IPv6 access type not set. Means this network interface hasn't been turned on IPv6 yet." + "This network interface can have internal IPv6." ], "type": "string" }, @@ -44738,7 +50151,7 @@ "type": "string" }, "name": { - "description": "[Output Only] The name of the network interface, which is generated by the server. For network devices, these are eth0, eth1, etc.", + "description": "[Output Only] The name of the network interface, which is generated by the server. For a VM, the network interface uses the nicN naming format. Where N is a value between 0 and 7. The default interface value is nic0.", "type": "string" }, "network": { @@ -44772,13 +50185,11 @@ "description": "The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at instance creation and update network interface operations.", "enum": [ "IPV4_IPV6", - "IPV4_ONLY", - "UNSPECIFIED_STACK_TYPE" + "IPV4_ONLY" ], "enumDescriptions": [ "The network interface can have both IPv4 and IPv6 addresses.", - "The network interface will be assigned IPv4 address.", - "" + "The network interface will be assigned IPv4 address." ], "type": "string" }, @@ -44831,10 +50242,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -44858,10 +50271,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -44946,6 +50361,18 @@ "format": "int32", "type": "integer" }, + "stackType": { + "description": "Which IP version(s) of traffic and routes are allowed to be imported or exported between peer networks. The default value is IPV4_ONLY.", + "enum": [ + "IPV4_IPV6", + "IPV4_ONLY" + ], + "enumDescriptions": [ + "This Peering will allow IPv4 traffic and routes to be exchanged. Additionally if the matching peering is IPV4_IPV6, IPv6 traffic and routes will be exchanged as well.", + "This Peering will only allow IPv4 traffic and routes to be exchanged, even if the matching peering is IPV4_IPV6." + ], + "type": "string" + }, "state": { "description": "[Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The peering is `ACTIVE` when there's a matching configuration in the peer network.", "enum": [ @@ -45171,6 +50598,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "shareSettings": { + "$ref": "ShareSettings", + "description": "Share-settings for the node group" + }, "size": { "description": "[Output Only] The total number of nodes in the node group.", "format": "int32", @@ -45247,10 +50678,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -45274,10 +50707,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -45395,10 +50830,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -45422,10 +50859,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -45494,6 +50933,10 @@ }, "type": "array" }, + "consumedResources": { + "$ref": "InstanceConsumptionInfo", + "description": "Node resources that are reserved by all instances." + }, "cpuOvercommitType": { "description": "CPU overcommit.", "enum": [ @@ -45515,6 +50958,13 @@ }, "type": "array" }, + "instanceConsumptionData": { + "description": "Instance data that shows consumed resources on the node.", + "items": { + "$ref": "InstanceConsumptionData" + }, + "type": "array" + }, "instances": { "description": "Instances scheduled on this node.", "items": { @@ -45558,6 +51008,10 @@ "" ], "type": "string" + }, + "totalResources": { + "$ref": "InstanceConsumptionInfo", + "description": "Total amount of available resources on the node." } }, "type": "object" @@ -45627,10 +51081,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -45654,10 +51110,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -45725,10 +51183,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -45752,10 +51212,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -45960,10 +51422,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -45987,10 +51451,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -46076,10 +51542,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -46103,10 +51571,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -46189,10 +51659,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -46216,10 +51688,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -46373,10 +51847,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -46400,10 +51876,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -46489,10 +51967,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -46516,10 +51996,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -46587,10 +52069,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -46614,10 +52098,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -46721,7 +52207,7 @@ }, "resendInterval": { "$ref": "Duration", - "description": "Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, must be in the range between 600 seconds to 3600 seconds. Nanos are disallowed." + "description": "Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, must be in the range between 600 seconds to 3600 seconds. Nanos are disallowed. Can only be set for regional notification endpoints." }, "retryDurationSec": { "description": "How much time (in seconds) is spent attempting notification retries until a successful response is received. Default is 30s. Limit is 20m (1200s). 
Must be a positive number.", @@ -46772,10 +52258,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -46799,10 +52287,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -46877,6 +52367,27 @@ "description": "[Output Only] The error type identifier for this error.", "type": "string" }, + "errorDetails": { + "description": "[Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED.", + "items": { + "properties": { + "errorInfo": { + "$ref": "ErrorInfo" + }, + "help": { + "$ref": "Help" + }, + "localizedMessage": { + "$ref": "LocalizedMessage" + }, + "quotaInfo": { + "$ref": "QuotaExceededInfo" + } + }, + "type": "object" + }, + "type": "array" + }, "location": { "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional.", "type": "string" @@ -46991,10 +52502,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -47018,10 +52531,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -47120,10 +52635,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -47147,10 +52664,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -47236,10 +52755,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -47263,10 +52784,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -47334,10 +52857,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -47361,10 +52886,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -47417,22 +52944,22 @@ "description": "The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s." }, "consecutiveErrors": { - "description": "Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5.", + "description": "Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, "consecutiveGatewayFailure": { - "description": "The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3.", + "description": "The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3. 
Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, "enforcingConsecutiveErrors": { - "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0.", + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, "enforcingConsecutiveGatewayFailure": { - "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -47658,10 +53185,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -47685,10 +53214,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -47822,10 +53353,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -47849,10 +53382,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -47989,10 +53524,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -48016,10 +53553,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -48069,7 +53608,7 @@ "properties": { "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a path matcher's defaultRouteAction." + "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. 
URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within a path matcher's defaultRouteAction." }, "defaultService": { "description": "The full or partial URL to the BackendService resource. This URL is used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: - https://www.googleapis.com/compute/v1/projects/project /global/backendServices/backendService - compute/v1/projects/project/global/backendServices/backendService - global/backendServices/backendService If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified. Only one of defaultService, defaultUrlRedirect , or defaultRouteAction.weightedBackendService must be set. Authorization requires one or more of the following Google IAM permissions on the specified resource default_service: - compute.backendBuckets.use - compute.backendServices.use ", @@ -48121,7 +53660,7 @@ }, "routeAction": { "$ref": "HttpRouteAction", - "description": "In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for external HTTP(S) load balancers support only the urlRewrite action within a path rule's routeAction." + "description": "In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within a path rule's routeAction." }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set.", @@ -48138,12 +53677,12 @@ "id": "PerInstanceConfig", "properties": { "fingerprint": { - "description": "Fingerprint of this per-instance config. This field can be used in optimistic locking. It is ignored when inserting a per-instance config. An up-to-date fingerprint must be provided in order to update an existing per-instance config or the field needs to be unset.", + "description": "Fingerprint of this per-instance config. This field can be used in optimistic locking. It is ignored when inserting a per-instance config. 
An up-to-date fingerprint must be provided in order to update an existing per-instance configuration or the field needs to be unset.", "format": "byte", "type": "string" }, "name": { - "description": "The name of a per-instance config and its corresponding instance. Serves as a merge key during UpdatePerInstanceConfigs operations, that is, if a per-instance config with the same name exists then it will be updated, otherwise a new one will be created for the VM instance with the same name. An attempt to create a per-instance config for a VM instance that either doesn't exist or is not part of the group will result in an error.", + "description": "The name of a per-instance configuration and its corresponding instance. Serves as a merge key during UpdatePerInstanceConfigs operations, that is, if a per-instance configuration with the same name exists then it will be updated, otherwise a new one will be created for the VM instance with the same name. An attempt to create a per-instance configconfiguration for a VM instance that either doesn't exist or is not part of the group will result in an error.", "type": "string" }, "preservedState": { @@ -48151,7 +53690,7 @@ "description": "The intended preserved state for the given instance. Does not contain preserved state generated from a stateful policy." }, "status": { - "description": "The status of applying this per-instance config on the corresponding managed instance.", + "description": "The status of applying this per-instance configuration on the corresponding managed instance.", "enum": [ "APPLYING", "DELETING", @@ -48161,12 +53700,12 @@ "UNAPPLIED_DELETION" ], "enumDescriptions": [ - "The per-instance config is being applied to the instance, but is not yet effective, possibly waiting for the instance to, for example, REFRESH.", - "The per-instance config deletion is being applied on the instance, possibly waiting for the instance to, for example, REFRESH.", - "The per-instance config is effective on the instance, meaning that all disks, ips and metadata specified in this config are attached or set on the instance.", - "*[Default]* The default status, when no per-instance config exists.", - "The per-instance config is set on an instance but not been applied yet.", - "The per-instance config has been deleted, but the deletion is not yet applied." + "The per-instance configuration is being applied to the instance, but is not yet effective, possibly waiting for the instance to, for example, REFRESH.", + "The per-instance configuration deletion is being applied on the instance, possibly waiting for the instance to, for example, REFRESH.", + "The per-instance configuration is effective on the instance, meaning that all disks, ips and metadata specified in this configuration are attached or set on the instance.", + "*[Default]* The default status, when no per-instance configuration exists.", + "The per-instance configuration is set on an instance but not been applied yet.", + "The per-instance configuration has been deleted, but the deletion is not yet applied." ], "type": "string" } @@ -48351,6 +53890,22 @@ "$ref": "UsageExportLocation", "description": "The naming prefix for daily usage reports and the Google Cloud Storage bucket where they are stored." 
}, + "vmDnsSetting": { + "description": "[Output Only] Default internal DNS setting used by VMs running in this project.", + "enum": [ + "GLOBAL_DEFAULT", + "UNSPECIFIED_VM_DNS_SETTING", + "ZONAL_DEFAULT", + "ZONAL_ONLY" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, "xpnProjectStatus": { "description": "[Output Only] The role this project has in a shared VPC configuration. Currently, only projects with the host role, which is specified by the value HOST, are differentiated.", "enum": [ @@ -48566,10 +54121,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -48593,10 +54150,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -48799,10 +54358,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -48826,10 +54387,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -48914,10 +54477,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -48941,10 +54506,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -49055,10 +54622,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -49082,10 +54651,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -49163,6 +54734,7 @@ "COMMITTED_N2A_CPUS", "COMMITTED_N2D_CPUS", "COMMITTED_N2_CPUS", + "COMMITTED_NVIDIA_A100_80GB_GPUS", "COMMITTED_NVIDIA_A100_GPUS", "COMMITTED_NVIDIA_K80_GPUS", "COMMITTED_NVIDIA_P100_GPUS", @@ -49175,13 +54747,18 @@ "CPUS_ALL_REGIONS", "DISKS_TOTAL_GB", "E2_CPUS", + "EXTERNAL_MANAGED_FORWARDING_RULES", "EXTERNAL_NETWORK_LB_FORWARDING_RULES", "EXTERNAL_PROTOCOL_FORWARDING_RULES", "EXTERNAL_VPN_GATEWAYS", "FIREWALLS", "FORWARDING_RULES", + "GLOBAL_EXTERNAL_MANAGED_BACKEND_SERVICES", "GLOBAL_EXTERNAL_MANAGED_FORWARDING_RULES", + "GLOBAL_EXTERNAL_PROXY_LB_BACKEND_SERVICES", "GLOBAL_INTERNAL_ADDRESSES", + "GLOBAL_INTERNAL_MANAGED_BACKEND_SERVICES", + "GLOBAL_INTERNAL_TRAFFIC_DIRECTOR_BACKEND_SERVICES", "GPUS_ALL_REGIONS", "HEALTH_CHECKS", "IMAGES", @@ -49212,6 +54789,7 @@ "NETWORK_FIREWALL_POLICIES", "NODE_GROUPS", "NODE_TEMPLATES", + "NVIDIA_A100_80GB_GPUS", "NVIDIA_A100_GPUS", "NVIDIA_K80_GPUS", "NVIDIA_P100_GPUS", @@ -49225,6 +54803,7 @@ "PD_EXTREME_TOTAL_PROVISIONED_IOPS", "PREEMPTIBLE_CPUS", "PREEMPTIBLE_LOCAL_SSD_GB", + "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS", "PREEMPTIBLE_NVIDIA_A100_GPUS", "PREEMPTIBLE_NVIDIA_K80_GPUS", "PREEMPTIBLE_NVIDIA_P100_GPUS", @@ -49239,7 +54818,11 @@ "PUBLIC_ADVERTISED_PREFIXES", "PUBLIC_DELEGATED_PREFIXES", "REGIONAL_AUTOSCALERS", + "REGIONAL_EXTERNAL_MANAGED_BACKEND_SERVICES", + "REGIONAL_EXTERNAL_NETWORK_LB_BACKEND_SERVICES", "REGIONAL_INSTANCE_GROUP_MANAGERS", + "REGIONAL_INTERNAL_LB_BACKEND_SERVICES", + "REGIONAL_INTERNAL_MANAGED_BACKEND_SERVICES", "RESERVATIONS", "RESOURCE_POLICIES", "ROUTERS", @@ -49255,6 +54838,7 @@ "SSL_CERTIFICATES", "STATIC_ADDRESSES", "STATIC_BYOIP_ADDRESSES", + "STATIC_EXTERNAL_IPV6_ADDRESS_RANGES", "SUBNETWORKS", "T2A_CPUS", "T2D_CPUS", @@ -49301,6 +54885,7 @@ "", "", "", + "", "Guest CPUs", "", "", @@ -49380,6 +54965,17 @@ "", "", "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", "The total number of snapshots allowed for a single project.", "", "", @@ -49398,6 +54994,7 @@ "", "", "", + "", "" ], "type": "string" @@ -49414,6 +55011,33 @@ }, "type": "object" }, + "QuotaExceededInfo": { + "description": "Additional details for quota exceeded error for resource quota.", + "id": "QuotaExceededInfo", + "properties": { + "dimensions": { + "additionalProperties": { + "type": "string" + }, + "description": "The map holding related quota dimensions.", + "type": "object" + }, + "limit": { + "description": "Current effective quota limit. 
The limit's unit depends on the quota type or metric.", + "format": "double", + "type": "number" + }, + "limitName": { + "description": "The name of the quota limit.", + "type": "string" + }, + "metricName": { + "description": "The Compute Engine quota metric name.", + "type": "string" + } + }, + "type": "object" + }, "Reference": { "description": "Represents a reference to a resource.", "id": "Reference", @@ -49547,10 +55171,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -49574,10 +55200,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -49662,10 +55290,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -49689,10 +55319,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -49815,10 +55447,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -49842,10 +55476,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -49945,10 +55581,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -49972,10 +55610,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -50024,7 +55664,7 @@ "id": "RegionInstanceGroupManagerPatchInstanceConfigReq", "properties": { "perInstanceConfigs": { - "description": "The list of per-instance configs to insert or patch on this managed instance group.", + "description": "The list of per-instance configurations to insert or patch on this managed instance group.", "items": { "$ref": "PerInstanceConfig" }, @@ -50038,7 +55678,7 @@ "id": "RegionInstanceGroupManagerUpdateInstanceConfigReq", "properties": { "perInstanceConfigs": { - "description": "The list of per-instance configs to insert or patch on this managed instance group.", + "description": "The list of per-instance configurations to insert or patch on this managed instance group.", "items": { "$ref": "PerInstanceConfig" }, @@ -50186,10 +55826,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -50213,10 +55855,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -50359,10 +56003,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -50386,10 +56032,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -50516,10 +56164,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -50543,10 +56193,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -50590,6 +56242,63 @@ }, "type": "object" }, + "RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse": { + "id": "RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse", + "properties": { + "firewallPolicys": { + "description": "Effective firewalls from firewall policy.", + "items": { + "$ref": "RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy" + }, + "type": "array" + }, + "firewalls": { + "description": "Effective firewalls on the network.", + "items": { + "$ref": "Firewall" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy": { + "id": "RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy", + "properties": { + "displayName": { + "description": "[Output Only] The display name of the firewall policy.", + "type": "string" + }, + "name": { + "description": "[Output Only] The name of the firewall policy.", + "type": "string" + }, + "rules": { + "description": "The rules that apply to the network.", + "items": { + "$ref": "FirewallPolicyRule" + }, + "type": "array" + }, + "type": { + "description": "[Output Only] The type of the firewall policy. 
Can be one of HIERARCHY, NETWORK, NETWORK_REGIONAL.", + "enum": [ + "HIERARCHY", + "NETWORK", + "NETWORK_REGIONAL", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "RegionSetLabelsRequest": { "id": "RegionSetLabelsRequest", "properties": { @@ -50634,7 +56343,7 @@ "id": "RegionTargetHttpsProxiesSetSslCertificatesRequest", "properties": { "sslCertificates": { - "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", + "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource.", "items": { "type": "string" }, @@ -50658,7 +56367,7 @@ "id": "RequestMirrorPolicy", "properties": { "backendService": { - "description": "The full or partial URL to the BackendService resource being mirrored to.", + "description": "The full or partial URL to the BackendService resource being mirrored to. The backend service configured for a mirroring policy must reference backends that are of the same type as the original backend service matched in the URL map. Serverless NEG backends are not currently supported as a mirrored backend service. ", "type": "string" } }, @@ -50700,6 +56409,13 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "resourcePolicies": { + "additionalProperties": { + "type": "string" + }, + "description": "Resource policies to be added to this reservation. The key is defined by user, and the value is resource policy url. This is to define placement policy with reservation.", + "type": "object" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -50710,7 +56426,7 @@ }, "shareSettings": { "$ref": "ShareSettings", - "description": "Share-settings for shared-reservation" + "description": "Specify share-settings to create a shared reservation. This property is optional. For more information about the syntax and options for this field and its subfields, see the guide for creating a shared reservation." }, "specificReservation": { "$ref": "AllocationSpecificSKUReservation", @@ -50829,10 +56545,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -50856,10 +56574,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -50944,10 +56664,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -50971,10 +56693,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -51053,10 +56777,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -51080,10 +56806,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -51141,7 +56869,7 @@ "type": "string" }, "type": { - "description": "Type of resource for which this commitment applies. Possible values are VCPU and MEMORY", + "description": "Type of resource for which this commitment applies. Possible values are VCPU, MEMORY, LOCAL_SSD, and ACCELERATOR.", "enum": [ "ACCELERATOR", "LOCAL_SSD", @@ -51195,10 +56923,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -51222,10 +56952,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -51397,10 +57129,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -51424,10 +57158,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -51476,7 +57212,7 @@ "id": "ResourcePolicyDailyCycle", "properties": { "daysInCycle": { - "description": "Defines a schedule with units measured in months. The value determines how many months pass between the start of each cycle.", + "description": "Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle.", "format": "int32", "type": "integer" }, @@ -51496,7 +57232,7 @@ "id": "ResourcePolicyGroupPlacementPolicy", "properties": { "availabilityDomainCount": { - "description": "The number of availability domains instances will be spread across. If two instances are in different availability domain, they will not be put in the same low latency network", + "description": "The number of availability domains to spread instances across. If two instances are in different availability domain, they are not in the same low latency network.", "format": "int32", "type": "integer" }, @@ -51513,7 +57249,7 @@ "type": "string" }, "vmCount": { - "description": "Number of vms in this placement group", + "description": "Number of VMs in this placement group. Google does not recommend that you use this field unless you use a compact policy and you want your policy to work only if it contains this exact number of VMs.", "format": "int32", "type": "integer" } @@ -51622,10 +57358,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -51649,10 +57387,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -51863,6 +57603,17 @@ }, "type": "object" }, + "ResourceStatus": { + "description": "Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls.", + "id": "ResourceStatus", + "properties": { + "physicalHost": { + "description": "[Output Only] An opaque ID of the host on which the VM is running.", + "type": "string" + } + }, + "type": "object" + }, "Route": { "description": "Represents a Route resource. A route defines a path from VM instances in the VPC network to a specific destination. This destination can be inside or outside the VPC network. For more information, read the Routes overview.", "id": "Route", @@ -51958,6 +57709,22 @@ "format": "uint32", "type": "integer" }, + "routeStatus": { + "description": "[Output only] The status of the route.", + "enum": [ + "ACTIVE", + "DROPPED", + "INACTIVE", + "PENDING" + ], + "enumDescriptions": [ + "This route is processed and active.", + "The route is dropped due to the VPC exceeding the dynamic route limit. For dynamic route limit, please refer to the Learned route example", + "This route is processed but inactive due to failure from the backend. The backend may have rejected the route", + "This route is being processed internally. The status will change once processed." + ], + "type": "string" + }, "routeType": { "description": "[Output Only] The type of this route, which can be one of the following values: - 'TRANSIT' for a transit route that this router learned from another Cloud Router and will readvertise to one of its BGP peers - 'SUBNET' for a route from a subnet of the VPC - 'BGP' for a route learned from a BGP peer of this router - 'STATIC' for a static route", "enum": [ @@ -52005,10 +57772,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -52032,10 +57801,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -52153,10 +57924,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -52180,10 +57953,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -52251,7 +58026,7 @@ "type": "string" }, "encryptedInterconnectRouter": { - "description": "Indicates if a router is dedicated for use with encrypted VLAN attachments (interconnectAttachments). Not currently available publicly. ", + "description": "Indicates if a router is dedicated for use with encrypted VLAN attachments (interconnectAttachments).", "type": "boolean" }, "id": { @@ -52271,6 +58046,13 @@ "description": "[Output Only] Type of resource. Always compute#router for routers.", "type": "string" }, + "md5AuthenticationKeys": { + "description": "Keys used for MD5 authentication.", + "items": { + "$ref": "RouterMd5AuthenticationKey" + }, + "type": "array" + }, "name": { "annotations": { "required": [ @@ -52374,10 +58156,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -52401,10 +58185,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -52580,6 +58366,10 @@ ], "type": "string" }, + "md5AuthenticationKeyName": { + "description": "Present if MD5 authentication is enabled for the peering. Must be the name of one of the entries in the Router.md5_authentication_keys. The field must comply with RFC1035.", + "type": "string" + }, "name": { "annotations": { "required": [ @@ -52634,7 +58424,7 @@ "type": "integer" }, "sessionInitializationMode": { - "description": "The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE.", + "description": "The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is DISABLED.", "enum": [ "ACTIVE", "DISABLED", @@ -52745,10 +58535,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -52772,10 +58564,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -52819,6 +58613,31 @@ }, "type": "object" }, + "RouterMd5AuthenticationKey": { + "id": "RouterMd5AuthenticationKey", + "properties": { + "key": { + "annotations": { + "required": [ + "compute.routers.insert" + ] + }, + "description": "[Input only] Value of the key. For patch and update calls, it can be skipped to copy the value from the previous configuration. This is allowed if the key with the same name existed before the operation. Maximum length is 80 characters. Can only contain printable ASCII characters.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.routers.insert", + "compute.routers.update" + ] + }, + "description": "Name used to identify the key. Must be unique within a router. Must be referenced by at least one bgpPeer. Must comply with RFC1035.", + "type": "string" + } + }, + "type": "object" + }, "RouterNat": { "description": "Represents a Nat resource. It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided.", "id": "RouterNat", @@ -52837,6 +58656,21 @@ "enableEndpointIndependentMapping": { "type": "boolean" }, + "endpointTypes": { + "description": "List of NAT-ted endpoint types supported by the Nat Gateway. If the list is empty, then it will be equivalent to include ENDPOINT_TYPE_VM", + "items": { + "enum": [ + "ENDPOINT_TYPE_SWG", + "ENDPOINT_TYPE_VM" + ], + "enumDescriptions": [ + "This is used for Secure Web Gateway endpoints.", + "This is the default." + ], + "type": "string" + }, + "type": "array" + }, "icmpIdleTimeoutSec": { "description": "Timeout (in seconds) for ICMP connections. Defaults to 30s if not set.", "format": "int32", @@ -53083,14 +58917,26 @@ "bfdStatus": { "$ref": "BfdStatus" }, + "enableIpv6": { + "description": "Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default.", + "type": "boolean" + }, "ipAddress": { "description": "IP address of the local BGP interface.", "type": "string" }, + "ipv6NexthopAddress": { + "description": "IPv6 address of the local BGP interface.", + "type": "string" + }, "linkedVpnTunnel": { "description": "URL of the VPN tunnel that this BGP peer controls.", "type": "string" }, + "md5AuthEnabled": { + "description": "Informs whether MD5 authentication is enabled on this BGP peer.", + "type": "boolean" + }, "name": { "description": "Name of this BGP peer. 
Unique within the Routers resource.", "type": "string" @@ -53104,12 +58950,16 @@ "description": "IP address of the remote BGP interface.", "type": "string" }, + "peerIpv6NexthopAddress": { + "description": "IPv6 address of the remote BGP interface.", + "type": "string" + }, "routerApplianceInstance": { "description": "[Output only] URI of the VM instance that is used as third-party router appliances such as Next Gen Firewalls, Virtual Routers, or Router Appliances. The VM instance is the peer side of the BGP session.", "type": "string" }, "state": { - "description": "BGP state as specified in RFC1771.", + "description": "The state of the BGP session. For a list of possible values for this field, see BGP session states.", "type": "string" }, "status": { @@ -53126,6 +58976,18 @@ ], "type": "string" }, + "statusReason": { + "description": "Indicates why particular status was returned.", + "enum": [ + "MD5_AUTH_INTERNAL_PROBLEM", + "STATUS_REASON_UNSPECIFIED" + ], + "enumDescriptions": [ + "Indicates internal problems with configuration of MD5 authentication. This particular reason can only be returned when md5AuthEnabled is true and status is DOWN.", + "" + ], + "type": "string" + }, "uptime": { "description": "Time this session has been up. Format: 14 years, 51 weeks, 6 days, 23 hours, 59 minutes, 59 seconds", "type": "string" @@ -53284,10 +59146,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -53311,10 +59175,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -53428,25 +59294,25 @@ "id": "SSLHealthCheck", "properties": { "port": { - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. The default value is 443. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. 
USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, SSL health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." ], "type": "string" }, @@ -53463,11 +59329,11 @@ "type": "string" }, "request": { - "description": "The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.", + "description": "Instructs the health check prober to send this exact ASCII string, up to 1024 bytes in length, after establishing the TCP connection and SSL handshake.", "type": "string" }, "response": { - "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.", + "description": "Creates a content-based SSL health check. 
In addition to establishing a TCP connection and the TLS handshake, you can configure the health check to pass only when the backend sends this exact response ASCII string, up to 1024 bytes in length. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-ssl-tcp", "type": "string" } }, @@ -53590,6 +59456,20 @@ "description": "An instance-attached disk resource.", "id": "SavedDisk", "properties": { + "architecture": { + "description": "[Output Only] The architecture of the attached disk.", + "enum": [ + "ARCHITECTURE_UNSPECIFIED", + "ARM64", + "X86_64" + ], + "enumDescriptions": [ + "Default value indicating Architecture is not set.", + "Machines with architecture ARM64", + "Machines with architecture X86_64" + ], + "type": "string" + }, "kind": { "default": "compute#savedDisk", "description": "[Output Only] Type of the resource. Always compute#savedDisk for attached disks.", @@ -53650,7 +59530,7 @@ "type": "object" }, "Scheduling": { - "description": "Sets the scheduling options for an Instance. NextID: 21", + "description": "Sets the scheduling options for an Instance.", "id": "Scheduling", "properties": { "automaticRestart": { @@ -53688,7 +59568,7 @@ "type": "array" }, "onHostMaintenance": { - "description": "Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Set VM availability policies.", + "description": "Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Set VM host maintenance policy.", "enum": [ "MIGRATE", "TERMINATE" @@ -53766,6 +59646,136 @@ }, "type": "object" }, + "SecurityPoliciesAggregatedList": { + "id": "SecurityPoliciesAggregatedList", + "properties": { + "etag": { + "type": "string" + }, + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "SecurityPoliciesScopedList", + "description": "Name of the scope containing this set of security policies." + }, + "description": "A list of SecurityPoliciesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#securityPoliciesAggregatedList", + "description": "[Output Only] Type of resource. Always compute#securityPolicyAggregatedList for lists of Security Policies.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "SecurityPoliciesListPreconfiguredExpressionSetsResponse": { "id": "SecurityPoliciesListPreconfiguredExpressionSetsResponse", "properties": { @@ -53775,6 +59785,108 @@ }, "type": "object" }, + "SecurityPoliciesScopedList": { + "id": "SecurityPoliciesScopedList", + "properties": { + "securityPolicies": { + "description": "A list of SecurityPolicies contained in this scope.", + "items": { + "$ref": "SecurityPolicy" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of security policies when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "SecurityPoliciesWafConfig": { "id": "SecurityPoliciesWafConfig", "properties": { @@ -53798,6 +59910,9 @@ "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, + "ddosProtectionConfig": { + "$ref": "SecurityPolicyDdosProtectionConfig" + }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" @@ -53825,8 +59940,12 @@ "recaptchaOptionsConfig": { "$ref": "SecurityPolicyRecaptchaOptionsConfig" }, + "region": { + "description": "[Output Only] URL of the region where the regional security policy resides. This field is not applicable to global security policies.", + "type": "string" + }, "rules": { - "description": "A list of rules that belong to this policy. 
There must always be a default rule (rule with priority 2147483647 and match \"*\"). If no rules are provided when creating a security policy, a default rule with action \"allow\" will be added.", + "description": "A list of rules that belong to this policy. There must always be a default rule which is a rule with priority 2147483647 and match all condition (for the match condition this means match \"*\" for srcIpRanges and for the networkMatch condition every field must be either match \"*\" or not set). If no rules are provided when creating a security policy, a default rule with action \"allow\" will be added.", "items": { "$ref": "SecurityPolicyRule" }, @@ -53837,12 +59956,14 @@ "type": "string" }, "type": { - "description": "The type indicates the intended use of the security policy. CLOUD_ARMOR - Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. CLOUD_ARMOR_EDGE - Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache.", + "description": "The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. This field can be set only at resource creation time.", "enum": [ "CLOUD_ARMOR", - "CLOUD_ARMOR_EDGE" + "CLOUD_ARMOR_EDGE", + "CLOUD_ARMOR_NETWORK" ], "enumDescriptions": [ + "", "", "" ], @@ -53888,6 +60009,10 @@ "SecurityPolicyAdvancedOptionsConfig": { "id": "SecurityPolicyAdvancedOptionsConfig", "properties": { + "jsonCustomConfig": { + "$ref": "SecurityPolicyAdvancedOptionsConfigJsonCustomConfig", + "description": "Custom configuration to apply the JSON parsing. Only applicable when json_parsing is set to STANDARD." + }, "jsonParsing": { "enum": [ "DISABLED", @@ -53913,6 +60038,36 @@ }, "type": "object" }, + "SecurityPolicyAdvancedOptionsConfigJsonCustomConfig": { + "id": "SecurityPolicyAdvancedOptionsConfigJsonCustomConfig", + "properties": { + "contentTypes": { + "description": "A list of custom Content-Type header values to apply the JSON parsing. 
As per RFC 1341, a Content-Type header value has the following format: Content-Type := type \"/\" subtype *[\";\" parameter] When configuring a custom Content-Type header value, only the type/subtype needs to be specified, and the parameters should be excluded.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "SecurityPolicyDdosProtectionConfig": { + "id": "SecurityPolicyDdosProtectionConfig", + "properties": { + "ddosProtection": { + "enum": [ + "ADVANCED", + "STANDARD" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "SecurityPolicyList": { "id": "SecurityPolicyList", "properties": { @@ -53950,10 +60105,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -53977,10 +60134,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -54170,15 +60329,21 @@ "type": "string" }, "enforceOnKey": { - "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if this field 'enforce_on_key' is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforce_on_key_name\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key type defaults to ALL. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforce_on_key_name\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. 
Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", "enum": [ "ALL", "HTTP_COOKIE", "HTTP_HEADER", + "HTTP_PATH", "IP", + "REGION_CODE", + "SNI", "XFF_IP" ], "enumDescriptions": [ + "", + "", + "", "", "", "", @@ -54192,7 +60357,7 @@ "type": "string" }, "exceedAction": { - "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are \"deny()\" where valid values for status are 403, 404, 429, and 502, and \"redirect\" where the redirect parameters come from exceed_redirect_options below.", + "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are \"deny(status)\", where valid values for status are 403, 404, 429, and 502, and \"redirect\" where the redirect parameters come from exceedRedirectOptions below.", "type": "string" }, "exceedRedirectOptions": { @@ -54492,10 +60657,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -54519,10 +60686,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -54584,6 +60753,7 @@ "enum": [ "ACCEPTED", "CLOSED", + "NEEDS_ATTENTION", "PENDING", "REJECTED", "STATUS_UNSPECIFIED" @@ -54591,6 +60761,7 @@ "enumDescriptions": [ "The connection has been accepted by the producer.", "The connection has been closed by the producer.", + "The connection has been accepted by the producer, but the producer needs to take further action before the forwarding rule can serve traffic.", "The connection is pending acceptance by the producer.", "The consumer is still connected but not using the connection.", "" @@ -54608,6 +60779,10 @@ "format": "uint32", "type": "integer" }, + "networkUrl": { + "description": "The network URL for the network to set the limit for.", + "type": "string" + }, "projectIdOrNum": { "description": "The project id or number for the project to set the limit for.", "type": "string" @@ -54656,10 +60831,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -54683,10 +60860,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -54754,10 +60933,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -54781,10 +60962,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -54843,11 +61026,13 @@ "description": "Type of sharing for this shared-reservation", "enum": [ "LOCAL", + "ORGANIZATION", "SHARE_TYPE_UNSPECIFIED", "SPECIFIC_PROJECTS" ], "enumDescriptions": [ "Default value.", + "Shared-reservation is open to entire Organization", "Default value. This value is unused.", "Shared-reservation is open to specific projects" ], @@ -54952,6 +61137,20 @@ "description": "Represents a Persistent Disk Snapshot resource. You can use snapshots to back up data on a regular interval. For more information, read Creating persistent disk snapshots.", "id": "Snapshot", "properties": { + "architecture": { + "description": "[Output Only] The architecture of the snapshot. Valid values are ARM64 or X86_64.", + "enum": [ + "ARCHITECTURE_UNSPECIFIED", + "ARM64", + "X86_64" + ], + "enumDescriptions": [ + "Default value indicating Architecture is not set.", + "Machines with architecture ARM64", + "Machines with architecture X86_64" + ], + "type": "string" + }, "autoCreated": { "description": "[Output Only] Set to true if snapshots are automatically created by applying resource policy on the target disk.", "type": "boolean" @@ -54960,6 +61159,11 @@ "description": "Creates the new snapshot in the snapshot chain labeled with the specified name. 
The chain name must be 1-63 characters long and comply with RFC1035. This is an uncommon option only for advanced service owners who needs to create separate snapshot chains, for example, for chargeback tracking. When you describe your snapshot resource, this field is visible only if it has a non-empty value.", "type": "string" }, + "creationSizeBytes": { + "description": "[Output Only] Size in bytes of the snapshot at creation time.", + "format": "int64", + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -55041,6 +61245,18 @@ "$ref": "CustomerEncryptionKey", "description": "Encrypts the snapshot using a customer-supplied encryption key. After you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the snapshot later. For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request. Customer-supplied encryption keys do not protect access to metadata of the snapshot. If you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." }, + "snapshotType": { + "description": "Indicates the type of the snapshot.", + "enum": [ + "ARCHIVE", + "STANDARD" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, "sourceDisk": { "description": "The source disk used to create this snapshot.", "type": "string" @@ -55053,6 +61269,14 @@ "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name.", "type": "string" }, + "sourceSnapshotSchedulePolicy": { + "description": "[Output Only] URL of the resource policy which created this scheduled snapshot.", + "type": "string" + }, + "sourceSnapshotSchedulePolicyId": { + "description": "[Output Only] ID of the resource policy which created this scheduled snapshot.", + "type": "string" + }, "status": { "description": "[Output Only] The status of the snapshot. This can be CREATING, DELETING, FAILED, READY, or UPLOADING.", "enum": [ @@ -55140,10 +61364,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -55167,10 +61393,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -55272,6 +61500,20 @@ }, "type": "array" }, + "keyRevocationActionType": { + "description": "KeyRevocationActionType of the instance. Supported options are \"STOP\" and \"NONE\". The default value is \"NONE\" if it is not specified.", + "enum": [ + "KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED", + "NONE", + "STOP" + ], + "enumDescriptions": [ + "Default value. This value is unused.", + "Indicates user chose no operation.", + "Indicates user chose to opt for VM shutdown on key revocation." + ], + "type": "string" + }, "labels": { "additionalProperties": { "type": "string" @@ -55444,10 +61686,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -55471,10 +61715,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -55560,10 +61806,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -55587,10 +61835,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -55732,10 +61982,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -55759,10 +62011,142 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "SslPoliciesAggregatedList": { + "id": "SslPoliciesAggregatedList", + "properties": { + "etag": { + "type": "string" + }, + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "SslPoliciesScopedList", + "description": "Name of the scope containing this set of SSL policies." + }, + "description": "A list of SslPoliciesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#sslPoliciesAggregatedList", + "description": "[Output Only] Type of resource. Always compute#sslPolicyAggregatedList for lists of SSL Policies.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -55847,10 +62231,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -55874,10 +62260,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -55933,6 +62321,108 @@ }, "type": "object" }, + "SslPoliciesScopedList": { + "id": "SslPoliciesScopedList", + "properties": { + "sslPolicies": { + "description": "A list of SslPolicies contained in this scope.", + "items": { + "$ref": "SslPolicy" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of SSL policies when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "SslPolicy": { "description": "Represents an SSL Policy resource. Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.", "id": "SslPolicy", @@ -56009,6 +62499,10 @@ ], "type": "string" }, + "region": { + "description": "[Output Only] URL of the region where the regional SSL policy resides. 
This field is not applicable to global SSL policies.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -56028,10 +62522,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -56055,10 +62551,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -56172,7 +62670,7 @@ "type": "boolean" }, "externalIpv6Prefix": { - "description": "[Output Only] The range of external IPv6 addresses that are owned by this subnetwork.", + "description": "The external IPv6 address range that is owned by this subnetwork.", "type": "string" }, "fingerprint": { @@ -56189,24 +62687,28 @@ "format": "uint64", "type": "string" }, + "internalIpv6Prefix": { + "description": "[Output Only] The internal IPv6 address range that is assigned to this subnetwork.", + "type": "string" + }, "ipCidrRange": { "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 100.64.0.0/10. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. This field is set at resource creation time. The range can be any range listed in the Valid ranges list. The range can be expanded after creation using expandIpCidrRange.", "type": "string" }, "ipv6AccessType": { - "description": "The access type of IPv6 address this subnet holds. It's immutable and can only be specified during creation or the first time the subnet is updated into IPV4_IPV6 dual stack. If the ipv6_type is EXTERNAL then this subnet cannot enable direct path.", + "description": "The access type of IPv6 address this subnet holds. It's immutable and can only be specified during creation or the first time the subnet is updated into IPV4_IPV6 dual stack.", "enum": [ "EXTERNAL", - "UNSPECIFIED_IPV6_ACCESS_TYPE" + "INTERNAL" ], "enumDescriptions": [ - "VMs on this subnet will be assigned IPv6 addresses that are accesible via the Internet, as well as the VPC network.", - "IPv6 access type not set. Means this subnet hasn't been turned on IPv6 yet." + "VMs on this subnet will be assigned IPv6 addresses that are accessible via the Internet, as well as the VPC network.", + "VMs on this subnet will be assigned IPv6 addresses that are only accessible over the VPC network." 
], "type": "string" }, "ipv6CidrRange": { - "description": "[Output Only] The range of internal IPv6 addresses that are owned by this subnetwork. Note this will be for private google access only eventually.", + "description": "[Output Only] This field is for internal use.", "type": "string" }, "kind": { @@ -56232,7 +62734,7 @@ "type": "boolean" }, "privateIpv6GoogleAccess": { - "description": "The private IPv6 google access type for the VMs in this subnet. This is an expanded field of enablePrivateV6Access. If both fields are set, privateIpv6GoogleAccess will take priority. This field can be both set at resource creation time and updated using patch.", + "description": "This field is for internal use. This field can be both set at resource creation time and updated using patch.", "enum": [ "DISABLE_GOOGLE_ACCESS", "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE", @@ -56291,16 +62793,14 @@ "type": "string" }, "stackType": { - "description": "The stack type for this subnet to identify whether the IPv6 feature is enabled or not. If not specified IPV4_ONLY will be used. This field can be both set at resource creation time and updated using patch.", + "description": "The stack type for the subnet. If set to IPV4_ONLY, new VMs in the subnet are assigned IPv4 addresses only. If set to IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 addresses. If not specified, IPV4_ONLY is used. This field can be both set at resource creation time and updated using patch.", "enum": [ "IPV4_IPV6", - "IPV4_ONLY", - "UNSPECIFIED_STACK_TYPE" + "IPV4_ONLY" ], "enumDescriptions": [ "New VMs in this subnet can have both IPv4 and IPv6 addresses.", - "New VMs in this subnet will only be assigned IPv4 addresses.", - "" + "New VMs in this subnet will only be assigned IPv4 addresses." ], "type": "string" }, @@ -56368,10 +62868,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -56395,10 +62897,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -56484,10 +62988,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -56511,10 +63017,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -56587,7 +63095,7 @@ "type": "boolean" }, "filterExpr": { - "description": "Can only be specified if VPC flow logs for this subnetwork is enabled. Export filter used to define which VPC flow logs should be logged.", + "description": "Can only be specified if VPC flow logs for this subnetwork is enabled. The filter expression is used to define which VPC flow logs should be exported to Cloud Logging.", "type": "string" }, "flowSampling": { @@ -56668,10 +63176,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -56695,10 +63205,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -56773,25 +63285,25 @@ "id": "TCPHealthCheck", "properties": { "port": { - "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. The default value is 80. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, TCP health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. 
For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." ], "type": "string" }, @@ -56808,11 +63320,11 @@ "type": "string" }, "request": { - "description": "The application data to send once the TCP connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.", + "description": "Instructs the health check prober to send this exact ASCII string, up to 1024 bytes in length, after establishing the TCP connection.", "type": "string" }, "response": { - "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.", + "description": "Creates a content-based TCP health check. In addition to establishing a TCP connection, you can configure the health check to pass only when the backend sends this exact response ASCII string, up to 1024 bytes in length. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-ssl-tcp", "type": "string" } }, @@ -56935,10 +63447,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -56962,10 +63476,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -57033,10 +63549,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -57060,10 +63578,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -57238,10 +63758,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -57265,10 +63787,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -57336,10 +63860,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -57363,10 +63889,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -57410,6 +63938,16 @@ }, "type": "object" }, + "TargetHttpsProxiesSetCertificateMapRequest": { + "id": "TargetHttpsProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { + "description": "URL of the Certificate Map to associate with this TargetHttpsProxy.", + "type": "string" + } + }, + "type": "object" + }, "TargetHttpsProxiesSetQuicOverrideRequest": { "id": "TargetHttpsProxiesSetQuicOverrideRequest", "properties": { @@ -57451,6 +63989,10 @@ "description": "Optional. A URL referring to a networksecurity.AuthorizationPolicy resource that describes how the proxy should authorize inbound traffic. If left blank, access will not be restricted by an authorization policy. Refer to the AuthorizationPolicy resource for additional details. authorizationPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. Note: This field currently has no impact.", "type": "string" }, + "certificateMap": { + "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. 
If set, sslCertificates will be ignored.", + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -57576,10 +64118,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -57603,10 +64147,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -57692,10 +64238,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -57719,10 +64267,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -57798,7 +64348,7 @@ "type": "string" }, "natPolicy": { - "description": "NAT option controlling how IPs are NAT'ed to the instance. Currently only NO_NAT (default value) is supported.", + "description": "Must have a value of NO_NAT. 
Protocol forwarding delivers packets while preserving the destination IP address of the forwarding rule referencing the target instance.", "enum": [ "NO_NAT" ], @@ -57871,10 +64421,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -57898,10 +64450,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -57987,10 +64541,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -58014,10 +64570,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -58085,10 +64643,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -58112,10 +64672,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -58293,10 +64855,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -58320,10 +64884,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -58426,10 +64992,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -58453,10 +65021,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -58576,10 +65146,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -58603,10 +65175,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -58669,6 +65243,16 @@ }, "type": "object" }, + "TargetSslProxiesSetCertificateMapRequest": { + "id": "TargetSslProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { + "description": "URL of the Certificate Map to associate with this TargetSslProxy.", + "type": "string" + } + }, + "type": "object" + }, "TargetSslProxiesSetProxyHeaderRequest": { "id": "TargetSslProxiesSetProxyHeaderRequest", "properties": { @@ -58704,6 +65288,10 @@ "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a SSL Proxy load balancer. Global forwarding rules reference a target SSL proxy, and the target proxy then references an external backend service. For more information, read Using Target Proxies.", "id": "TargetSslProxy", "properties": { + "certificateMap": { + "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. 
If set, sslCertificates will be ignored.", + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -58803,10 +65391,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -58830,10 +65420,114 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "TargetTcpProxiesScopedList": { + "id": "TargetTcpProxiesScopedList", + "properties": { + "targetTcpProxies": { + "description": "A list of TargetTcpProxies contained in this scope.", + "items": { + "$ref": "TargetTcpProxy" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of backend services when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -58948,6 +65642,10 @@ ], "type": "string" }, + "region": { + "description": "[Output Only] URL of the region where the regional TCP proxy resides. 
This field is not applicable to global TCP proxy.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -58959,6 +65657,133 @@ }, "type": "object" }, + "TargetTcpProxyAggregatedList": { + "id": "TargetTcpProxyAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "TargetTcpProxiesScopedList", + "description": "Name of the scope containing this set of TargetTcpProxies." + }, + "description": "A list of TargetTcpProxiesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#targetTcpProxyAggregatedList", + "description": "[Output Only] Type of resource. Always compute#targetTcpProxyAggregatedList for lists of Target TCP Proxies.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "TargetTcpProxyList": { "description": "Contains a list of TargetTcpProxy resources.", "id": "TargetTcpProxyList", @@ -59001,10 +65826,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -59028,10 +65855,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -59206,10 +66035,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -59233,10 +66064,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -59322,10 +66155,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -59349,10 +66184,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -59420,10 +66257,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -59447,10 +66286,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -59591,7 +66432,7 @@ }, "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within defaultRouteAction. defaultRouteAction has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." + "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. 
URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within defaultRouteAction. defaultRouteAction has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." }, "defaultService": { "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of defaultService, defaultUrlRedirect , or defaultRouteAction.weightedBackendService must be set. defaultService has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true.", @@ -59703,10 +66544,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -59730,10 +66573,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -59916,10 +66761,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -59943,10 +66790,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -60014,10 +66863,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -60041,10 +66892,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -60091,6 +66944,23 @@ "UrlMapsValidateRequest": { "id": "UrlMapsValidateRequest", "properties": { + "loadBalancingSchemes": { + "description": "Specifies the load balancer type(s) this validation request is for. Use EXTERNAL_MANAGED for HTTP/HTTPS External Global Load Balancer with Advanced Traffic Management. Use EXTERNAL for Classic HTTP/HTTPS External Global Load Balancer. Other load balancer types are not supported. For more information, refer to Choosing a load balancer. If unspecified, the load balancing scheme will be inferred from the backend service resources this URL map references. If that can not be inferred (for example, this URL map only references backend buckets, or this Url map is for rewrites and redirects only and doesn't reference any backends), EXTERNAL will be used as the default type. 
If specified, the scheme(s) must not conflict with the load balancing scheme of the backend service resources this Url map references.", + "items": { + "enum": [ + "EXTERNAL", + "EXTERNAL_MANAGED", + "LOAD_BALANCING_SCHEME_UNSPECIFIED" + ], + "enumDescriptions": [ + "Signifies that this will be used for Classic L7 External Load Balancing.", + "Signifies that this will be used for Envoy-based L7 External Load Balancing.", + "If unspecified, the validation will try to infer the scheme from the backend service resources this Url map references. If the inferrence is not possible, EXTERNAL will be used as the default type." + ], + "type": "string" + }, + "type": "array" + }, "resource": { "$ref": "UrlMap", "description": "Content of the UrlMap to be validated." @@ -60126,14 +66996,64 @@ "description": "Subnetwork which the current user has compute.subnetworks.use permission on.", "id": "UsableSubnetwork", "properties": { + "externalIpv6Prefix": { + "description": "[Output Only] The external IPv6 address range that is assigned to this subnetwork.", + "type": "string" + }, + "internalIpv6Prefix": { + "description": "[Output Only] The internal IPv6 address range that is assigned to this subnetwork.", + "type": "string" + }, "ipCidrRange": { "description": "The range of internal addresses that are owned by this subnetwork.", "type": "string" }, + "ipv6AccessType": { + "description": "The access type of IPv6 address this subnet holds. It's immutable and can only be specified during creation or the first time the subnet is updated into IPV4_IPV6 dual stack.", + "enum": [ + "EXTERNAL", + "INTERNAL" + ], + "enumDescriptions": [ + "VMs on this subnet will be assigned IPv6 addresses that are accessible via the Internet, as well as the VPC network.", + "VMs on this subnet will be assigned IPv6 addresses that are only accessible over the VPC network." + ], + "type": "string" + }, "network": { "description": "Network URL.", "type": "string" }, + "purpose": { + "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", + "enum": [ + "INTERNAL_HTTPS_LOAD_BALANCER", + "PRIVATE", + "PRIVATE_RFC_1918", + "PRIVATE_SERVICE_CONNECT", + "REGIONAL_MANAGED_PROXY" + ], + "enumDescriptions": [ + "Subnet reserved for Internal HTTP(S) Load Balancing.", + "Regular user created or automatically created subnet.", + "Regular user created or automatically created subnet.", + "Subnetworks created for Private Service Connect in the producer network.", + "Subnetwork used for Regional Internal/External HTTP(S) Load Balancing." + ], + "type": "string" + }, + "role": { + "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" + ], + "enumDescriptions": [ + "The ACTIVE subnet that is currently used.", + "The BACKUP subnet that could be promoted to ACTIVE." 
+ ], + "type": "string" + }, "secondaryIpRanges": { "description": "Secondary IP ranges.", "items": { @@ -60141,6 +67061,18 @@ }, "type": "array" }, + "stackType": { + "description": "The stack type for the subnet. If set to IPV4_ONLY, new VMs in the subnet are assigned IPv4 addresses only. If set to IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 addresses. If not specified, IPV4_ONLY is used. This field can be both set at resource creation time and updated using patch.", + "enum": [ + "IPV4_IPV6", + "IPV4_ONLY" + ], + "enumDescriptions": [ + "New VMs in this subnet can have both IPv4 and IPv6 addresses.", + "New VMs in this subnet will only be assigned IPv4 addresses." + ], + "type": "string" + }, "subnetwork": { "description": "Subnetwork URL.", "type": "string" @@ -60204,10 +67136,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -60231,10 +67165,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -60338,6 +67274,13 @@ "format": "int32", "type": "integer" }, + "ruleMappings": { + "description": "Information about mappings provided by rules in this NAT.", + "items": { + "$ref": "VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings" + }, + "type": "array" + }, "sourceAliasIpRange": { "description": "Alias IP range for this interface endpoint. It will be a private (RFC 1918) IP range. Examples: \"10.33.4.55/32\", or \"192.168.5.0/24\".", "type": "string" @@ -60349,6 +67292,42 @@ }, "type": "object" }, + "VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings": { + "description": "Contains information of NAT Mappings provided by a NAT Rule.", + "id": "VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings", + "properties": { + "drainNatIpPortRanges": { + "description": "List of all drain IP:port-range mappings assigned to this interface by this rule. These ranges are inclusive, that is, both the first and the last ports can be used for NAT. Example: [\"2.2.2.2:12345-12355\", \"1.1.1.1:2234-2234\"].", + "items": { + "type": "string" + }, + "type": "array" + }, + "natIpPortRanges": { + "description": "A list of all IP:port-range mappings assigned to this interface by this rule. These ranges are inclusive, that is, both the first and the last ports can be used for NAT. 
Example: [\"2.2.2.2:12345-12355\", \"1.1.1.1:2234-2234\"].", + "items": { + "type": "string" + }, + "type": "array" + }, + "numTotalDrainNatPorts": { + "description": "Total number of drain ports across all NAT IPs allocated to this interface by this rule. It equals the aggregated port number in the field drain_nat_ip_port_ranges.", + "format": "int32", + "type": "integer" + }, + "numTotalNatPorts": { + "description": "Total number of ports across all NAT IPs allocated to this interface by this rule. It equals the aggregated port number in the field nat_ip_port_ranges.", + "format": "int32", + "type": "integer" + }, + "ruleNumber": { + "description": "Rule number of the NAT Rule.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "VmEndpointNatMappingsList": { "description": "Contains a list of VmEndpointNatMappings.", "id": "VmEndpointNatMappingsList", @@ -60391,10 +67370,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -60418,10 +67399,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -60527,7 +67510,7 @@ "type": "string" }, "stackType": { - "description": "The stack type for this VPN gateway to identify the IP protocols that are enabled. If not specified, IPV4_ONLY will be used.", + "description": "The stack type for this VPN gateway to identify the IP protocols that are enabled. Possible values are: IPV4_ONLY, IPV4_IPV6. If not specified, IPV4_ONLY will be used.", "enum": [ "IPV4_IPV6", "IPV4_ONLY" @@ -60597,10 +67580,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -60624,10 +67609,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -60713,10 +67700,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -60740,10 +67729,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -60886,11 +67877,11 @@ "type": "integer" }, "interconnectAttachment": { - "description": "URL of the VLAN attachment (interconnectAttachment) resource for this VPN gateway interface. When the value of this field is present, the VPN gateway is used for IPsec-encrypted Cloud Interconnect; all egress or ingress traffic for this VPN gateway interface goes through the specified VLAN attachment resource. Not currently available publicly. ", + "description": "URL of the VLAN attachment (interconnectAttachment) resource for this VPN gateway interface. When the value of this field is present, the VPN gateway is used for HA VPN over Cloud Interconnect; all egress or ingress traffic for this VPN gateway interface goes through the specified VLAN attachment resource.", "type": "string" }, "ipAddress": { - "description": "[Output Only] IP address for this VPN interface associated with the VPN gateway. The IP address could be either a regional external IP address or a regional internal IP address. The two IP addresses for a VPN gateway must be all regional external or regional internal IP addresses. There cannot be a mix of regional external IP addresses and regional internal IP addresses. 
For IPsec-encrypted Cloud Interconnect, the IP addresses for both interfaces could either be regional internal IP addresses or regional external IP addresses. For regular (non IPsec-encrypted Cloud Interconnect) HA VPN tunnels, the IP address must be a regional external IP address.", + "description": "[Output Only] IP address for this VPN interface associated with the VPN gateway. The IP address could be either a regional external IP address or a regional internal IP address. The two IP addresses for a VPN gateway must be all regional external or regional internal IP addresses. There cannot be a mix of regional external IP addresses and regional internal IP addresses. For HA VPN over Cloud Interconnect, the IP addresses for both interfaces could either be regional internal IP addresses or regional external IP addresses. For regular (non HA VPN over Cloud Interconnect) HA VPN tunnels, the IP address must be a regional external IP address.", "type": "string" } }, @@ -60929,10 +67920,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -60956,10 +67949,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -61056,7 +68051,7 @@ "type": "string" }, "peerExternalGatewayInterface": { - "description": "The interface ID of the external VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created.", + "description": "The interface ID of the external VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created. Possible values are: `0`, `1`, `2`, `3`. The number of IDs in use depends on the external VPN gateway redundancy type.", "format": "int32", "type": "integer" }, @@ -61138,7 +68133,7 @@ "type": "string" }, "vpnGatewayInterface": { - "description": "The interface ID of the VPN gateway with which this VPN tunnel is associated.", + "description": "The interface ID of the VPN gateway with which this VPN tunnel is associated. 
Possible values are: `0`, `1`.", "format": "int32", "type": "integer" } @@ -61194,10 +68189,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -61221,10 +68218,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -61310,10 +68309,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -61337,10 +68338,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -61408,10 +68411,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -61435,10 +68440,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -61577,10 +68584,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -61604,10 +68613,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", @@ -61780,10 +68791,12 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", "NEXT_HOP_NOT_RUNNING", @@ -61807,10 +68820,12 @@ "Warning that is present in an external api call", "Warning that value of a field has been overridden. 
Deprecated unused field.", "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", "The route's nextHopInstance URL refers to an instance that does not exist.", "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", "The route's next hop instance does not have a status of RUNNING.", diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index d339ed68426a..df7f8c82d3b4 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -8,35 +8,35 @@ // // For product documentation, see: https://cloud.google.com/compute/ // -// Creating a client +// # Creating a client // // Usage example: // -// import "google.golang.org/api/compute/v1" -// ... -// ctx := context.Background() -// computeService, err := compute.NewService(ctx) +// import "google.golang.org/api/compute/v1" +// ... +// ctx := context.Background() +// computeService, err := compute.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // -// Other authentication options +// # Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // -// computeService, err := compute.NewService(ctx, option.WithScopes(compute.DevstorageReadWriteScope)) +// computeService, err := compute.NewService(ctx, option.WithScopes(compute.DevstorageReadWriteScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // -// computeService, err := compute.NewService(ctx, option.WithAPIKey("AIza...")) +// computeService, err := compute.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// computeService, err := compute.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// computeService, err := compute.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package compute // import "google.golang.org/api/compute/v1" @@ -108,7 +108,7 @@ const ( // NewService creates a new Service. 
func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { - scopesOption := option.WithScopes( + scopesOption := internaloption.WithDefaultScopes( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly", @@ -177,7 +177,10 @@ func New(client *http.Client) (*Service, error) { s.Licenses = NewLicensesService(s) s.MachineImages = NewMachineImagesService(s) s.MachineTypes = NewMachineTypesService(s) + s.NetworkAttachments = NewNetworkAttachmentsService(s) + s.NetworkEdgeSecurityServices = NewNetworkEdgeSecurityServicesService(s) s.NetworkEndpointGroups = NewNetworkEndpointGroupsService(s) + s.NetworkFirewallPolicies = NewNetworkFirewallPoliciesService(s) s.Networks = NewNetworksService(s) s.NodeGroups = NewNodeGroupsService(s) s.NodeTemplates = NewNodeTemplatesService(s) @@ -197,11 +200,15 @@ func New(client *http.Client) (*Service, error) { s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) s.RegionInstances = NewRegionInstancesService(s) s.RegionNetworkEndpointGroups = NewRegionNetworkEndpointGroupsService(s) + s.RegionNetworkFirewallPolicies = NewRegionNetworkFirewallPoliciesService(s) s.RegionNotificationEndpoints = NewRegionNotificationEndpointsService(s) s.RegionOperations = NewRegionOperationsService(s) + s.RegionSecurityPolicies = NewRegionSecurityPoliciesService(s) s.RegionSslCertificates = NewRegionSslCertificatesService(s) + s.RegionSslPolicies = NewRegionSslPoliciesService(s) s.RegionTargetHttpProxies = NewRegionTargetHttpProxiesService(s) s.RegionTargetHttpsProxies = NewRegionTargetHttpsProxiesService(s) + s.RegionTargetTcpProxies = NewRegionTargetTcpProxiesService(s) s.RegionUrlMaps = NewRegionUrlMapsService(s) s.Regions = NewRegionsService(s) s.Reservations = NewReservationsService(s) @@ -301,8 +308,14 @@ type Service struct { MachineTypes *MachineTypesService + NetworkAttachments *NetworkAttachmentsService + + NetworkEdgeSecurityServices *NetworkEdgeSecurityServicesService + NetworkEndpointGroups *NetworkEndpointGroupsService + NetworkFirewallPolicies *NetworkFirewallPoliciesService + Networks *NetworksService NodeGroups *NodeGroupsService @@ -341,16 +354,24 @@ type Service struct { RegionNetworkEndpointGroups *RegionNetworkEndpointGroupsService + RegionNetworkFirewallPolicies *RegionNetworkFirewallPoliciesService + RegionNotificationEndpoints *RegionNotificationEndpointsService RegionOperations *RegionOperationsService + RegionSecurityPolicies *RegionSecurityPoliciesService + RegionSslCertificates *RegionSslCertificatesService + RegionSslPolicies *RegionSslPoliciesService + RegionTargetHttpProxies *RegionTargetHttpProxiesService RegionTargetHttpsProxies *RegionTargetHttpsProxiesService + RegionTargetTcpProxies *RegionTargetTcpProxiesService + RegionUrlMaps *RegionUrlMapsService Regions *RegionsService @@ -706,6 +727,24 @@ type MachineTypesService struct { s *Service } +func NewNetworkAttachmentsService(s *Service) *NetworkAttachmentsService { + rs := &NetworkAttachmentsService{s: s} + return rs +} + +type NetworkAttachmentsService struct { + s *Service +} + +func NewNetworkEdgeSecurityServicesService(s *Service) *NetworkEdgeSecurityServicesService { + rs := &NetworkEdgeSecurityServicesService{s: s} + return rs +} + +type NetworkEdgeSecurityServicesService struct { + s *Service +} + func NewNetworkEndpointGroupsService(s *Service) *NetworkEndpointGroupsService { rs := &NetworkEndpointGroupsService{s: s} return rs @@ -715,6 +754,15 @@ type 
NetworkEndpointGroupsService struct { s *Service } +func NewNetworkFirewallPoliciesService(s *Service) *NetworkFirewallPoliciesService { + rs := &NetworkFirewallPoliciesService{s: s} + return rs +} + +type NetworkFirewallPoliciesService struct { + s *Service +} + func NewNetworksService(s *Service) *NetworksService { rs := &NetworksService{s: s} return rs @@ -886,6 +934,15 @@ type RegionNetworkEndpointGroupsService struct { s *Service } +func NewRegionNetworkFirewallPoliciesService(s *Service) *RegionNetworkFirewallPoliciesService { + rs := &RegionNetworkFirewallPoliciesService{s: s} + return rs +} + +type RegionNetworkFirewallPoliciesService struct { + s *Service +} + func NewRegionNotificationEndpointsService(s *Service) *RegionNotificationEndpointsService { rs := &RegionNotificationEndpointsService{s: s} return rs @@ -904,6 +961,15 @@ type RegionOperationsService struct { s *Service } +func NewRegionSecurityPoliciesService(s *Service) *RegionSecurityPoliciesService { + rs := &RegionSecurityPoliciesService{s: s} + return rs +} + +type RegionSecurityPoliciesService struct { + s *Service +} + func NewRegionSslCertificatesService(s *Service) *RegionSslCertificatesService { rs := &RegionSslCertificatesService{s: s} return rs @@ -913,6 +979,15 @@ type RegionSslCertificatesService struct { s *Service } +func NewRegionSslPoliciesService(s *Service) *RegionSslPoliciesService { + rs := &RegionSslPoliciesService{s: s} + return rs +} + +type RegionSslPoliciesService struct { + s *Service +} + func NewRegionTargetHttpProxiesService(s *Service) *RegionTargetHttpProxiesService { rs := &RegionTargetHttpProxiesService{s: s} return rs @@ -931,6 +1006,15 @@ type RegionTargetHttpsProxiesService struct { s *Service } +func NewRegionTargetTcpProxiesService(s *Service) *RegionTargetTcpProxiesService { + rs := &RegionTargetTcpProxiesService{s: s} + return rs +} + +type RegionTargetTcpProxiesService struct { + s *Service +} + func NewRegionUrlMapsService(s *Service) *RegionUrlMapsService { rs := &RegionUrlMapsService{s: s} return rs @@ -1347,6 +1431,9 @@ type AcceleratorTypeAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -1354,6 +1441,9 @@ type AcceleratorTypeAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -1531,6 +1621,9 @@ type AcceleratorTypeListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -1538,6 +1631,9 @@ type AcceleratorTypeListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -1694,6 +1790,9 @@ type AcceleratorTypesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -1701,6 +1800,9 @@ type AcceleratorTypesScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -1936,6 +2038,16 @@ type Address struct { // "UNSPECIFIED_VERSION" IpVersion string `json:"ipVersion,omitempty"` + // Ipv6EndpointType: The endpoint type of this address, which should be + // VM or NETLB. This is used for deciding which type of endpoint this + // address can be used after the external IPv6 address reservation. + // + // Possible values: + // "NETLB" - Reserved IPv6 address can be used on network load + // balancer. + // "VM" - Reserved IPv6 address can be used on VM. + Ipv6EndpointType string `json:"ipv6EndpointType,omitempty"` + // Kind: [Output Only] Type of the resource. Always compute#address for // addresses. Kind string `json:"kind,omitempty"` @@ -1986,20 +2098,20 @@ type Address struct { // NAT_AUTO for the regional external IP addresses used by Cloud NAT // when allocating addresses using automatic NAT IP address allocation. // - IPSEC_INTERCONNECT for addresses created from a private IP range - // that are reserved for a VLAN attachment in an *IPsec-encrypted Cloud + // that are reserved for a VLAN attachment in an *HA VPN over Cloud // Interconnect* configuration. These addresses are regional resources. - // Not currently available publicly. - `SHARED_LOADBALANCER_VIP` for an - // internal IP address that is assigned to multiple internal forwarding - // rules. - `PRIVATE_SERVICE_CONNECT` for a private network address that - // is used to configure Private Service Connect. Only global internal - // addresses can use this purpose. 
+ // - `SHARED_LOADBALANCER_VIP` for an internal IP address that is + // assigned to multiple internal forwarding rules. - + // `PRIVATE_SERVICE_CONNECT` for a private network address that is used + // to configure Private Service Connect. Only global internal addresses + // can use this purpose. // // Possible values: // "DNS_RESOLVER" - DNS resolver address in the subnetwork. // "GCE_ENDPOINT" - VM internal/alias IP, Internal LB service IP, etc. // "IPSEC_INTERCONNECT" - A regional internal IP address range - // reserved for the VLAN attachment that is used in IPsec-encrypted - // Cloud Interconnect. This regional internal IP address range must not + // reserved for the VLAN attachment that is used in HA VPN over Cloud + // Interconnect. This regional internal IP address range must not // overlap with any IP address range of subnet/route in the VPC network // and its peering networks. After the VLAN attachment is created with // the reserved IP address range, when creating a new VPN gateway, its @@ -2009,6 +2121,8 @@ type Address struct { // "PRIVATE_SERVICE_CONNECT" - A private network IP address that can // be used to configure Private Service Connect. This purpose can be // specified only for GLOBAL addresses of Type INTERNAL + // "SERVERLESS" - A regional internal IP address range reserved for + // Serverless. // "SHARED_LOADBALANCER_VIP" - A private network IP address that can // be shared by multiple Internal Load Balancer forwarding rules. // "VPC_PEERING" - IP range for peer networks. @@ -2153,6 +2267,9 @@ type AddressAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -2160,6 +2277,9 @@ type AddressAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -2336,6 +2456,9 @@ type AddressListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -2343,6 +2466,9 @@ type AddressListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. 
+ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -2497,6 +2623,9 @@ type AddressesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -2504,6 +2633,9 @@ type AddressesScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -2624,6 +2756,13 @@ type AdvancedMachineFeatures struct { // processor is assumed. ThreadsPerCore int64 `json:"threadsPerCore,omitempty"` + // VisibleCoreCount: The number of physical cores to expose to an + // instance. Multiply by the number of threads per core to compute the + // total number of virtual CPUs to expose to the instance. If unset, the + // number of cores is inferred from the instance's nominal CPU count and + // the underlying platform's SMT width. + VisibleCoreCount int64 `json:"visibleCoreCount,omitempty"` + // ForceSendFields is a list of field names (e.g. // "EnableNestedVirtualization") to unconditionally include in API // requests. By default, fields with empty or default values are omitted @@ -2774,8 +2913,12 @@ func (s *AllocationSpecificSKUAllocationReservedInstanceProperties) MarshalJSON( } // AllocationSpecificSKUReservation: This reservation type allows to pre -// allocate specific instance configuration. Next ID: 5 +// allocate specific instance configuration. Next ID: 6 type AllocationSpecificSKUReservation struct { + // AssuredCount: [Output Only] Indicates how many instances are actually + // usable currently. + AssuredCount int64 `json:"assuredCount,omitempty,string"` + // Count: Specifies the number of resources that are allocated. Count int64 `json:"count,omitempty,string"` @@ -2785,7 +2928,7 @@ type AllocationSpecificSKUReservation struct { // InstanceProperties: The instance properties for the reservation. InstanceProperties *AllocationSpecificSKUAllocationReservedInstanceProperties `json:"instanceProperties,omitempty"` - // ForceSendFields is a list of field names (e.g. "Count") to + // ForceSendFields is a list of field names (e.g. "AssuredCount") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -2793,10 +2936,10 @@ type AllocationSpecificSKUReservation struct { // This may be used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Count") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AssuredCount") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -2810,6 +2953,16 @@ func (s *AllocationSpecificSKUReservation) MarshalJSON() ([]byte, error) { // AttachedDisk: An instance-attached disk resource. type AttachedDisk struct { + // Architecture: [Output Only] The architecture of the attached disk. + // Valid values are ARM64 or X86_64. + // + // Possible values: + // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture + // is not set. + // "ARM64" - Machines with architecture ARM64 + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + // AutoDelete: Specifies whether the disk will be auto-deleted when the // instance is deleted (but not when the disk is detached from the // instance). @@ -2849,6 +3002,11 @@ type AttachedDisk struct { // DiskSizeGb: The size of the disk in GB. DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + // ForceAttach: [Input Only] Whether to force attach the regional disk + // even if it's currently attached to another instance. If you try to + // force attach a zonal disk to an instance, you will receive an error. + ForceAttach bool `json:"forceAttach,omitempty"` + // GuestOsFeatures: A list of features to enable on the guest operating // system. Applicable only for bootable images. Read Enabling guest // operating system features to see a list of available options. @@ -2867,11 +3025,10 @@ type AttachedDisk struct { InitializeParams *AttachedDiskInitializeParams `json:"initializeParams,omitempty"` // Interface: Specifies the disk interface to use for attaching this - // disk, which is either SCSI or NVME. The default is SCSI. Persistent - // disks must always use SCSI and the request will fail if you attempt - // to attach a persistent disk in any other format than SCSI. Local SSDs - // can use either NVME or SCSI. For performance characteristics of SCSI - // over NVMe, see Local SSD performance. + // disk, which is either SCSI or NVME. For most machine types, the + // default is SCSI. Local SSDs can use either NVME or SCSI. In certain + // configurations, persistent disks can use NVMe. For more information, + // see About persistent disks. // // Possible values: // "NVME" @@ -2919,7 +3076,7 @@ type AttachedDisk struct { // "SCRATCH" Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoDelete") to + // ForceSendFields is a list of field names (e.g. "Architecture") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -2927,10 +3084,10 @@ type AttachedDisk struct { // This may be used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoDelete") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Architecture") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -2945,9 +3102,21 @@ func (s *AttachedDisk) MarshalJSON() ([]byte, error) { // AttachedDiskInitializeParams: [Input Only] Specifies the parameters // for a new disk that will be created alongside the new instance. Use // initialization parameters to create boot disks or local SSDs attached -// to the new instance. This property is mutually exclusive with the -// source property; you can only define one or the other, but not both. +// to the new instance. This field is persisted and returned for +// instanceTemplate and not returned in the context of instance. This +// property is mutually exclusive with the source property; you can only +// define one or the other, but not both. type AttachedDiskInitializeParams struct { + // Architecture: The architecture of the attached disk. Valid values are + // arm64 or x86_64. + // + // Possible values: + // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture + // is not set. + // "ARM64" - Machines with architecture ARM64 + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + // Description: An optional description. Provide this property when // creating the disk. Description string `json:"description,omitempty"` @@ -2970,13 +3139,15 @@ type AttachedDiskInitializeParams struct { // URL. For example: // https://www.googleapis.com/compute/v1/projects/project/zones/zone // /diskTypes/pd-standard For a full list of acceptable values, see - // Persistent disk types. If you define this field, you can provide - // either the full or partial URL. For example, the following are valid - // values: - + // Persistent disk types. If you specify this field when creating a VM, + // you can provide either the full or partial URL. For example, the + // following values are valid: - // https://www.googleapis.com/compute/v1/projects/project/zones/zone // /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - // - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this - // is the name of the disk type, not URL. + // - zones/zone/diskTypes/diskType If you specify this field when + // creating or updating an instance template or all-instances + // configuration, specify the type of the disk, not the URL. For + // example: pd-standard. DiskType string `json:"diskType,omitempty"` // Labels: Labels to apply to this disk. These can be later modified by @@ -3006,6 +3177,13 @@ type AttachedDiskInitializeParams struct { // see the Extreme persistent disk documentation. ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + // ResourceManagerTags: Resource manager tags to be bound to the disk. + // Tag keys and values have the same definition as resource manager + // tags. 
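The ForceSendFields and NullFields comments above describe how zero-valued fields are handled during JSON marshalling of these generated structs. A minimal sketch of that mechanism, using the AttachedDisk type from this file (the scenario itself is only illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	disk := &compute.AttachedDisk{
		AutoDelete: false,
		// Without ForceSendFields, a false value would be omitted from the
		// request body; listing the field forces it to be serialized.
		ForceSendFields: []string{"AutoDelete"},
	}
	b, err := json.Marshal(disk)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // prints {"autoDelete":false}
}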
Keys must be in the format `tagKeys/{tag_key_id}`, and values + // are in the format `tagValues/456`. The field is ignored (both PUT & + // PATCH) when empty. + ResourceManagerTags map[string]string `json:"resourceManagerTags,omitempty"` + // ResourcePolicies: Resource policies applied to this disk for // automatic snapshot creations. Specified using the full or partial // URL. For instance template, specify only the resource policy name. @@ -3030,10 +3208,10 @@ type AttachedDiskInitializeParams struct { // SourceImageEncryptionKey: The customer-supplied encryption key of the // source image. Required if the source image is protected by a - // customer-supplied encryption key. Instance templates do not store - // customer-supplied encryption keys, so you cannot create disks for - // instances in a managed instance group if the source images are - // encrypted with your own keys. + // customer-supplied encryption key. InstanceTemplate and + // InstancePropertiesPatch do not store customer-supplied encryption + // keys, so you cannot create disks for instances in a managed instance + // group if the source images are encrypted with your own keys. SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` // SourceSnapshot: The source snapshot to create this disk. When @@ -3048,7 +3226,7 @@ type AttachedDiskInitializeParams struct { // the source snapshot. SourceSnapshotEncryptionKey *CustomerEncryptionKey `json:"sourceSnapshotEncryptionKey,omitempty"` - // ForceSendFields is a list of field names (e.g. "Description") to + // ForceSendFields is a list of field names (e.g. "Architecture") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -3056,7 +3234,7 @@ type AttachedDiskInitializeParams struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include + // NullFields is a list of field names (e.g. "Architecture") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -3412,6 +3590,9 @@ type AutoscalerAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -3419,6 +3600,9 @@ type AutoscalerAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. 
// "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -3595,6 +3779,9 @@ type AutoscalerListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -3602,6 +3789,9 @@ type AutoscalerListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -3872,6 +4062,9 @@ type AutoscalersScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -3879,6 +4072,9 @@ type AutoscalersScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -4421,7 +4617,9 @@ type Backend struct { // offering 0% of its available capacity. The valid ranges are 0.0 and // [0.1,1.0]. You cannot configure a setting larger than 0 and smaller // than 0.1. You cannot configure a setting of 0 when there is only one - // backend attached to the backend service. + // backend attached to the backend service. Not available with backends + // that don't support using a balancingMode. This includes backends such + // as global internet NEGs, regional serverless NEGs, and PSC NEGs. CapacityScaler float64 `json:"capacityScaler,omitempty"` // Description: An optional description of this resource. Provide this @@ -4479,7 +4677,7 @@ type Backend struct { MaxRatePerInstance float64 `json:"maxRatePerInstance,omitempty"` // MaxUtilization: Optional parameter to define a target capacity for - // the UTILIZATIONbalancing mode. The valid range is [0.0, 1.0]. For + // the UTILIZATION balancing mode. The valid range is [0.0, 1.0]. For // usage guidelines, see Utilization balancing mode. MaxUtilization float64 `json:"maxUtilization,omitempty"` @@ -4536,6 +4734,16 @@ type BackendBucket struct { // CdnPolicy: Cloud CDN configuration for this BackendBucket. 
CdnPolicy *BackendBucketCdnPolicy `json:"cdnPolicy,omitempty"` + // CompressionMode: Compress text responses using Brotli or gzip + // compression, based on the client's Accept-Encoding header. + // + // Possible values: + // "AUTOMATIC" - Automatically uses the best compression based on the + // Accept-Encoding header sent by the client. + // "DISABLED" - Disables compression. Existing compressed responses + // cached by Cloud CDN will not be served to clients. + CompressionMode string `json:"compressionMode,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -4804,7 +5012,7 @@ type BackendBucketCdnPolicyCacheKeyPolicy struct { IncludeHttpHeaders []string `json:"includeHttpHeaders,omitempty"` // QueryStringWhitelist: Names of query string parameters to include in - // cache keys. All other parameters will be excluded. '&' and '=' will + // cache keys. Default parameters are always included. '&' and '=' will // be percent encoded and not treated as delimiters. QueryStringWhitelist []string `json:"queryStringWhitelist,omitempty"` @@ -4947,6 +5155,9 @@ type BackendBucketListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -4954,6 +5165,9 @@ type BackendBucketListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -5072,9 +5286,9 @@ type BackendService struct { // Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session // affinity. If set to 0, the cookie is non-persistent and lasts only // until the end of the browser session (or equivalent). The maximum - // allowed value is one day (86,400). Not supported when the backend - // service is referenced by a URL map that is bound to target gRPC proxy - // that has validateForProxyless field set to true. + // allowed value is two weeks (1,209,600). Not supported when the + // backend service is referenced by a URL map that is bound to target + // gRPC proxy that has validateForProxyless field set to true. AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` // Backends: The list of backends that serve this BackendService. @@ -5086,6 +5300,16 @@ type BackendService struct { CircuitBreakers *CircuitBreakers `json:"circuitBreakers,omitempty"` + // CompressionMode: Compress text responses using Brotli or gzip + // compression, based on the client's Accept-Encoding header. + // + // Possible values: + // "AUTOMATIC" - Automatically uses the best compression based on the + // Accept-Encoding header sent by the client. + // "DISABLED" - Disables compression. 
Existing compressed responses + // cached by Cloud CDN will not be served to clients. + CompressionMode string `json:"compressionMode,omitempty"` + ConnectionDraining *ConnectionDraining `json:"connectionDraining,omitempty"` // ConnectionTrackingPolicy: Connection Tracking configuration for this @@ -5192,6 +5416,14 @@ type BackendService struct { // "INVALID_LOAD_BALANCING_SCHEME" LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` + // LocalityLbPolicies: A list of locality load balancing policies to be + // used in order of preference. Either the policy or the customPolicy + // field should be set. Overrides any value set in the localityLbPolicy + // field. localityLbPolicies is only supported when the BackendService + // is referenced by a URL Map that is referenced by a target gRPC proxy + // that has the validateForProxyless field set to true. + LocalityLbPolicies []*BackendServiceLocalityLoadBalancingPolicyConfig `json:"localityLbPolicies,omitempty"` + // LocalityLbPolicy: The load balancing algorithm used within the scope // of the locality. The possible values are: - ROUND_ROBIN: This is a // simple policy in which each healthy backend is selected in round @@ -5277,11 +5509,9 @@ type BackendService struct { // hosts from the load balancing pool for the backend service. If not // set, this feature is considered disabled. This field is applicable to // either: - A regional backend service with the service_protocol set to - // HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to + // HTTP, HTTPS, HTTP2, or GRPC, and load_balancing_scheme set to // INTERNAL_MANAGED. - A global backend service with the - // load_balancing_scheme set to INTERNAL_SELF_MANAGED. Not supported - // when the backend service is referenced by a URL map that is bound to - // target gRPC proxy that has validateForProxyless field set to true. + // load_balancing_scheme set to INTERNAL_SELF_MANAGED. OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` // Port: Deprecated in favor of portName. The TCP port to connect on the @@ -5336,6 +5566,11 @@ type BackendService struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // ServiceBindings: URLs of networkservices.ServiceBinding resources. + // Can only be set if load balancing scheme is INTERNAL_SELF_MANAGED. If + // set, lists of backends and health checks must be both empty. + ServiceBindings []string `json:"serviceBindings,omitempty"` + // SessionAffinity: Type of session affinity to use. The default is // NONE. Only NONE and HEADER_FIELD are supported when the backend // service is referenced by a URL map that is bound to target gRPC proxy @@ -5374,10 +5609,15 @@ type BackendService struct { Subsetting *Subsetting `json:"subsetting,omitempty"` - // TimeoutSec: Not supported when the backend service is referenced by a - // URL map that is bound to target gRPC proxy that has - // validateForProxyless field set to true. Instead, use - // maxStreamDuration. + // TimeoutSec: The backend service timeout has a different meaning + // depending on the type of load balancer. For more information see, + // Backend service settings. The default is 30 seconds. The full range + // of timeout values allowed goes from 1 through 2,147,483,647 seconds. + // This value can be overridden in the PathMatcher configuration of the + // UrlMap that references this backend service. 
Not supported when the + // backend service is referenced by a URL map that is bound to target + // gRPC proxy that has validateForProxyless field set to true. Instead, + // use maxStreamDuration. TimeoutSec int64 `json:"timeoutSec,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -5490,6 +5730,9 @@ type BackendServiceAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -5497,6 +5740,9 @@ type BackendServiceAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -5857,6 +6103,10 @@ type BackendServiceConnectionTrackingPolicy struct { // "NEVER_PERSIST" ConnectionPersistenceOnUnhealthyBackends string `json:"connectionPersistenceOnUnhealthyBackends,omitempty"` + // EnableStrongAffinity: Enable Strong Session Affinity for Network Load + // Balancing. This option is not available publicly. + EnableStrongAffinity bool `json:"enableStrongAffinity,omitempty"` + // IdleTimeoutSec: Specifies how long to keep a Connection Tracking // entry while there is no matching traffic (in seconds). For Internal // TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the @@ -6150,6 +6400,9 @@ type BackendServiceListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -6157,6 +6410,9 @@ type BackendServiceListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -6256,11 +6512,137 @@ func (s *BackendServiceListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendServiceLocalityLoadBalancingPolicyConfig: Container for either +// a built-in LB policy supported by gRPC or Envoy or a custom one +// implemented by the end user. 
+type BackendServiceLocalityLoadBalancingPolicyConfig struct { + CustomPolicy *BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy `json:"customPolicy,omitempty"` + + Policy *BackendServiceLocalityLoadBalancingPolicyConfigPolicy `json:"policy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomPolicy") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomPolicy") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceLocalityLoadBalancingPolicyConfig) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceLocalityLoadBalancingPolicyConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy: The +// configuration for a custom policy implemented by the user and +// deployed with the client. +type BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy struct { + // Data: An optional, arbitrary JSON object with configuration data, + // understood by a locally installed custom policy implementation. + Data string `json:"data,omitempty"` + + // Name: Identifies the custom policy. The value should match the type + // the custom implementation is registered with on the gRPC clients. It + // should follow protocol buffer message naming conventions and include + // the full path (e.g. myorg.CustomLbPolicy). The maximum length is 256 + // characters. Note that specifying the same custom policy more than + // once for a backend is not a valid configuration and will be rejected. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Data") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Data") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendServiceLocalityLoadBalancingPolicyConfigPolicy: The +// configuration for a built-in load balancing policy. +type BackendServiceLocalityLoadBalancingPolicyConfigPolicy struct { + // Name: The name of a locality load balancer policy to be used. The + // value should be one of the predefined ones as supported by + // localityLbPolicy, although at the moment only ROUND_ROBIN is + // supported. This field should only be populated when the customPolicy + // field is not used. Note that specifying the same policy more than + // once for a backend is not a valid configuration and will be rejected. + // + // Possible values: + // "INVALID_LB_POLICY" + // "LEAST_REQUEST" - An O(1) algorithm which selects two random + // healthy hosts and picks the host which has fewer active requests. + // "MAGLEV" - This algorithm implements consistent hashing to + // backends. Maglev can be used as a drop in replacement for the ring + // hash load balancer. Maglev is not as stable as ring hash but has + // faster table lookup build times and host selection times. For more + // information about Maglev, see + // https://ai.google/research/pubs/pub44824 + // "ORIGINAL_DESTINATION" - Backend host is selected based on the + // client connection metadata, i.e., connections are opened to the same + // address as the destination address of the incoming connection before + // the connection was redirected to the load balancer. + // "RANDOM" - The load balancer selects a random healthy host. + // "RING_HASH" - The ring/modulo hash load balancer implements + // consistent hashing to backends. The algorithm has the property that + // the addition/removal of a host from a set of N hosts only affects 1/N + // of the requests. + // "ROUND_ROBIN" - This is a simple policy in which each healthy + // backend is selected in round robin order. This is the default. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceLocalityLoadBalancingPolicyConfigPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceLocalityLoadBalancingPolicyConfigPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BackendServiceLogConfig: The available logging options for the load // balancer traffic served by this backend service. 
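A minimal sketch of how the locality load balancing policy configuration documented above could be populated; the custom policy name "myorg.CustomLbPolicy" and its Data payload are hypothetical placeholders:

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	be := &compute.BackendService{
		Name: "example-backend-service",
		LocalityLbPolicies: []*compute.BackendServiceLocalityLoadBalancingPolicyConfig{
			{
				// Built-in policy; per the comment above, only ROUND_ROBIN is
				// currently documented as supported here.
				Policy: &compute.BackendServiceLocalityLoadBalancingPolicyConfigPolicy{
					Name: "ROUND_ROBIN",
				},
			},
			{
				// Hypothetical custom policy implemented and registered on the client.
				CustomPolicy: &compute.BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy{
					Name: "myorg.CustomLbPolicy",
					Data: `{"threshold": 0.5}`,
				},
			},
		},
	}
	fmt.Println(be.Name, len(be.LocalityLbPolicies))
}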
type BackendServiceLogConfig struct { - // Enable: This field denotes whether to enable logging for the load - // balancer traffic served by this backend service. + // Enable: Denotes whether to enable logging for the load balancer + // traffic served by this backend service. The default value is false. Enable bool `json:"enable,omitempty"` // SampleRate: This field can only be specified if logging is enabled @@ -6390,6 +6772,9 @@ type BackendServicesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -6397,6 +6782,9 @@ type BackendServicesScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -6757,24 +7145,29 @@ type Binding struct { // (https://cloud.google.com/iam/help/conditions/resource-policies). Condition *Expr `json:"condition,omitempty"` - // Members: Specifies the principals requesting access for a Cloud - // Platform resource. `members` can have the following values: * + // Members: Specifies the principals requesting access for a Google + // Cloud resource. `members` can have the following values: * // `allUsers`: A special identifier that represents anyone who is on the // internet; with or without a Google account. * // `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. * - // `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . * - // `serviceAccount:{emailid}`: An email address that represents a - // service account. For example, - // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An - // email address that represents a Google group. For example, - // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An - // email address (plus unique identifier) representing a user that has - // been recently deleted. For example, - // `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered - // user retains the role in the binding. * + // who is authenticated with a Google account or a service account. Does + // not include identities that come from external identity providers + // (IdPs) through identity federation. * `user:{emailid}`: An email + // address that represents a specific Google account. For example, + // `alice@example.com` . * `serviceAccount:{emailid}`: An email address + // that represents a Google service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. 
* + // `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: + // An identifier for a Kubernetes service account + // (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). + // For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. + // * `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. * + // `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a user that has been recently + // deleted. For example, `alice@example.com?uid=123456789012345678901`. + // If the user is recovered, this value reverts to `user:{emailid}` and + // the recovered user retains the role in the binding. * // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address // (plus unique identifier) representing a service account that has been // recently deleted. For example, @@ -6830,7 +7223,8 @@ type BulkInsertInstanceResource struct { // to be created. Required if sourceInstanceTemplate is not provided. InstanceProperties *InstanceProperties `json:"instanceProperties,omitempty"` - // LocationPolicy: Policy for chosing target zone. + // LocationPolicy: Policy for chosing target zone. For more information, + // see Create VMs in bulk . LocationPolicy *LocationPolicy `json:"locationPolicy,omitempty"` // MinCount: The minimum number of instances to create. If no min_count @@ -7019,28 +7413,36 @@ func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { // CircuitBreakers: Settings controlling the volume of requests, // connections and retries to this backend service. type CircuitBreakers struct { - // MaxConnections: Not supported when the backend service is referenced - // by a URL map that is bound to target gRPC proxy that has - // validateForProxyless field set to true. + // MaxConnections: The maximum number of connections to the backend + // service. If not specified, there is no limit. Not supported when the + // backend service is referenced by a URL map that is bound to target + // gRPC proxy that has validateForProxyless field set to true. MaxConnections int64 `json:"maxConnections,omitempty"` - // MaxPendingRequests: Not supported when the backend service is - // referenced by a URL map that is bound to target gRPC proxy that has - // validateForProxyless field set to true. + // MaxPendingRequests: The maximum number of pending requests allowed to + // the backend service. If not specified, there is no limit. Not + // supported when the backend service is referenced by a URL map that is + // bound to target gRPC proxy that has validateForProxyless field set to + // true. MaxPendingRequests int64 `json:"maxPendingRequests,omitempty"` // MaxRequests: The maximum number of parallel requests that allowed to // the backend service. If not specified, there is no limit. MaxRequests int64 `json:"maxRequests,omitempty"` - // MaxRequestsPerConnection: Not supported when the backend service is - // referenced by a URL map that is bound to target gRPC proxy that has - // validateForProxyless field set to true. + // MaxRequestsPerConnection: Maximum requests for a single connection to + // the backend service. This parameter is respected by both the HTTP/1.1 + // and HTTP/2 implementations. If not specified, there is no limit. + // Setting this parameter to 1 will effectively disable keep alive. 
Not + // supported when the backend service is referenced by a URL map that is + // bound to target gRPC proxy that has validateForProxyless field set to + // true. MaxRequestsPerConnection int64 `json:"maxRequestsPerConnection,omitempty"` - // MaxRetries: Not supported when the backend service is referenced by a - // URL map that is bound to target gRPC proxy that has - // validateForProxyless field set to true. + // MaxRetries: The maximum number of parallel retries allowed to the + // backend cluster. If not specified, the default is 1. Not supported + // when the backend service is referenced by a URL map that is bound to + // target gRPC proxy that has validateForProxyless field set to true. MaxRetries int64 `json:"maxRetries,omitempty"` // ForceSendFields is a list of field names (e.g. "MaxConnections") to @@ -7117,6 +7519,10 @@ type Commitment struct { // license commitment. LicenseResource *LicenseResourceCommitment `json:"licenseResource,omitempty"` + // MergeSourceCommitments: List of source commitments to be merged into + // a new commitment. + MergeSourceCommitments []string `json:"mergeSourceCommitments,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -7150,6 +7556,10 @@ type Commitment struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SplitSourceCommitment: Source commitment to be splitted into a new + // commitment. + SplitSourceCommitment string `json:"splitSourceCommitment,omitempty"` + // StartTimestamp: [Output Only] Commitment start time in RFC3339 text // format. StartTimestamp string `json:"startTimestamp,omitempty"` @@ -7160,6 +7570,7 @@ type Commitment struct { // // Possible values: // "ACTIVE" + // "CANCELLED" // "CREATING" // "EXPIRED" // "NOT_YET_ACTIVE" @@ -7185,6 +7596,7 @@ type Commitment struct { // "GENERAL_PURPOSE_N2D" // "GENERAL_PURPOSE_T2D" // "MEMORY_OPTIMIZED" + // "MEMORY_OPTIMIZED_M3" // "TYPE_UNSPECIFIED" Type string `json:"type,omitempty"` @@ -7295,6 +7707,9 @@ type CommitmentAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -7302,6 +7717,9 @@ type CommitmentAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -7478,6 +7896,9 @@ type CommitmentListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
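A minimal sketch of the CircuitBreakers settings described above, attached to a backend service; the specific limits are illustrative values only:

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	cb := &compute.CircuitBreakers{
		MaxConnections:     1024,
		MaxPendingRequests: 256,
		MaxRequests:        512,
		// Per the field comments above, setting MaxRequestsPerConnection to 1
		// would effectively disable keep-alive; leaving it unset means no limit.
		MaxRetries: 3,
	}
	be := &compute.BackendService{
		Name:            "example-backend-service",
		CircuitBreakers: cb,
	}
	fmt.Printf("%s: max retries %d\n", be.Name, be.CircuitBreakers.MaxRetries)
}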
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -7485,6 +7906,9 @@ type CommitmentListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -7640,6 +8064,9 @@ type CommitmentsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -7647,6 +8074,9 @@ type CommitmentsScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -7982,7 +8412,9 @@ type CorsPolicy struct { // AllowOriginRegexes: Specifies a regular expression that matches // allowed origins. For more information about the regular expression // syntax, see Syntax. An origin is allowed if it matches either an item - // in allowOrigins or an item in allowOriginRegexes. + // in allowOrigins or an item in allowOriginRegexes. Regular expressions + // can only be used when the loadBalancingScheme is set to + // INTERNAL_SELF_MANAGED. AllowOriginRegexes []string `json:"allowOriginRegexes,omitempty"` // AllowOrigins: Specifies the list of origins that is allowed to do @@ -8200,6 +8632,16 @@ func (s *DeprecationStatus) MarshalJSON() ([]byte, error) { // persistent disks. The regionDisks resource represents a regional // persistent disk. For more information, read Regional resources. type Disk struct { + // Architecture: The architecture of the disk. Valid values are ARM64 or + // X86_64. + // + // Possible values: + // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture + // is not set. + // "ARM64" - Machines with architecture ARM64 + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -8287,6 +8729,10 @@ type Disk struct { // Options: Internal use only. Options string `json:"options,omitempty"` + // Params: Input only. 
[Input Only] Additional params passed with the + // request, but not persisted as part of resource payload. + Params *DiskParams `json:"params,omitempty"` + // PhysicalBlockSizeBytes: Physical block size of the persistent disk, // in bytes. If not present in a request, a default value is used. The // currently supported size is 4096, other sizes may be added in the @@ -8438,21 +8884,20 @@ type Disk struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Architecture") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Architecture") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -8542,6 +8987,9 @@ type DiskAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -8549,6 +8997,9 @@ type DiskAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -8805,6 +9256,9 @@ type DiskListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -8812,6 +9266,9 @@ type DiskListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -8950,6 +9407,39 @@ func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskParams: Additional disk params. +type DiskParams struct { + // ResourceManagerTags: Resource manager tags to be bound to the disk. + // Tag keys and values have the same definition as resource manager + // tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values + // are in the format `tagValues/456`. The field is ignored (both PUT & + // PATCH) when empty. + ResourceManagerTags map[string]string `json:"resourceManagerTags,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ResourceManagerTags") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ResourceManagerTags") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DiskParams) MarshalJSON() ([]byte, error) { + type NoMethod DiskParams + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskType: Represents a Disk Type resource. Google Compute Engine has // two Disk Type resources: * Regional // (/compute/docs/reference/rest/v1/regionDiskTypes) * Zonal @@ -9113,6 +9603,9 @@ type DiskTypeAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -9120,6 +9613,9 @@ type DiskTypeAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. 
+ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -9296,6 +9792,9 @@ type DiskTypeListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -9303,6 +9802,9 @@ type DiskTypeListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -9458,6 +9960,9 @@ type DiskTypesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -9465,6 +9970,9 @@ type DiskTypesScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -9704,6 +10212,9 @@ type DisksScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -9711,6 +10222,9 @@ type DisksScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. 
// "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -9954,6 +10468,65 @@ func (s *Duration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ErrorInfo: Describes the cause of the error with structured details. +// Example of an error when contacting the "pubsub.googleapis.com" API +// when it is not enabled: { "reason": "API_DISABLED" "domain": +// "googleapis.com" "metadata": { "resource": "projects/123", "service": +// "pubsub.googleapis.com" } } This response indicates that the +// pubsub.googleapis.com API is not enabled. Example of an error that is +// returned when attempting to create a Spanner instance in a region +// that is out of stock: { "reason": "STOCKOUT" "domain": +// "spanner.googleapis.com", "metadata": { "availableRegions": +// "us-central1,us-east2" } } +type ErrorInfo struct { + // Domain: The logical grouping to which the "reason" belongs. The error + // domain is typically the registered service name of the tool or + // product that generates the error. Example: "pubsub.googleapis.com". + // If the error is generated by some common infrastructure, the error + // domain must be a globally unique value that identifies the + // infrastructure. For Google API infrastructure, the error domain is + // "googleapis.com". + Domain string `json:"domain,omitempty"` + + // Metadatas: Additional structured details about this error. Keys + // should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the + // units should be contained in the key, not the value. For example, + // rather than {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number + // of instances that can be created in a single (batch) request. + Metadatas map[string]string `json:"metadatas,omitempty"` + + // Reason: The reason of the error. This is a constant value that + // identifies the proximate cause of the error. Error reasons are unique + // within a particular domain of errors. This should be at most 63 + // characters and match a regular expression of `A-Z+[A-Z0-9]`, which + // represents UPPER_SNAKE_CASE. + Reason string `json:"reason,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Domain") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Domain") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ErrorInfo) MarshalJSON() ([]byte, error) { + type NoMethod ErrorInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ExchangedPeeringRoute struct { // DestRange: The destination range of the route. DestRange string `json:"destRange,omitempty"` @@ -10082,6 +10655,9 @@ type ExchangedPeeringRoutesListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -10089,6 +10665,9 @@ type ExchangedPeeringRoutesListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -10485,6 +11064,9 @@ type ExternalVpnGatewayListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -10492,6 +11074,9 @@ type ExternalVpnGatewayListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -10655,10 +11240,8 @@ type Firewall struct { DestinationRanges []string `json:"destinationRanges,omitempty"` // Direction: Direction of traffic to which this firewall applies, - // either `INGRESS` or `EGRESS`. The default is `INGRESS`. For `INGRESS` - // traffic, you cannot specify the destinationRanges field, and for - // `EGRESS` traffic, you cannot specify the sourceRanges or sourceTags - // fields. + // either `INGRESS` or `EGRESS`. The default is `INGRESS`. For `EGRESS` + // traffic, you cannot specify the sourceTags fields. // // Possible values: // "EGRESS" - Indicates that firewall should apply to outgoing @@ -10950,6 +11533,9 @@ type FirewallListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -10957,6 +11543,9 @@ type FirewallListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -11147,9 +11736,10 @@ type FirewallPolicy struct { // DisplayName: Deprecated, please use short name instead. User-provided // name of the Organization firewall policy. The name should be unique // in the organization in which the firewall policy is created. This - // name must be set on creation and cannot be changed. The name must be - // 1-63 characters long, and comply with RFC1035. Specifically, the name - // must be 1-63 characters long and match the regular expression + // field is not applicable to network firewall policies. This name must + // be set on creation and cannot be changed. The name must be 1-63 + // characters long, and comply with RFC1035. Specifically, the name must + // be 1-63 characters long and match the regular expression // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be // a lowercase letter, and all following characters must be a dash, // lowercase letter, or digit, except the last character, which cannot @@ -11174,13 +11764,21 @@ type FirewallPolicy struct { // compute#firewallPolicyfor firewall policies Kind string `json:"kind,omitempty"` - // Name: [Output Only] Name of the resource. It is a numeric ID - // allocated by GCP which uniquely identifies the Firewall Policy. + // Name: Name of the resource. For Organization Firewall Policies it's a + // [Output Only] numeric ID allocated by Google Cloud which uniquely + // identifies the Organization Firewall Policy. Name string `json:"name,omitempty"` - // Parent: [Output Only] The parent of the firewall policy. + // Parent: [Output Only] The parent of the firewall policy. This field + // is not applicable to network firewall policies. Parent string `json:"parent,omitempty"` + // Region: [Output Only] URL of the region where the regional firewall + // policy resides. This field is not applicable to global firewall + // policies. You must specify this field as part of the HTTP request + // URL. It is not settable as a field in the request body. + Region string `json:"region,omitempty"` + // RuleTupleCount: [Output Only] Total count of all firewall policy rule // tuples. A firewall policy can not exceed a set number of tuples. RuleTupleCount int64 `json:"ruleTupleCount,omitempty"` @@ -11198,15 +11796,16 @@ type FirewallPolicy struct { // with the resource id. SelfLinkWithId string `json:"selfLinkWithId,omitempty"` - // ShortName: User-provided name of the Organization firewall plicy. The - // name should be unique in the organization in which the firewall - // policy is created. This name must be set on creation and cannot be - // changed. 
The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // ShortName: User-provided name of the Organization firewall policy. + // The name should be unique in the organization in which the firewall + // policy is created. This field is not applicable to network firewall + // policies. This name must be set on creation and cannot be changed. + // The name must be 1-63 characters long, and comply with RFC1035. + // Specifically, the name must be 1-63 characters long and match the + // regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first + // character must be a lowercase letter, and all following characters + // must be a dash, lowercase letter, or digit, except the last + // character, which cannot be a dash. ShortName string `json:"shortName,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -11357,6 +11956,9 @@ type FirewallPolicyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -11364,6 +11966,9 @@ type FirewallPolicyListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -11508,6 +12113,10 @@ type FirewallPolicyRule struct { // priority and 2147483647 is the lowest prority. Priority int64 `json:"priority,omitempty"` + // RuleName: An optional name for the rule. This field is not a unique + // identifier and can be updated. + RuleName string `json:"ruleName,omitempty"` + // RuleTupleCount: [Output Only] Calculation of the complexity of a // single firewall policy rule. RuleTupleCount int64 `json:"ruleTupleCount,omitempty"` @@ -11518,6 +12127,18 @@ type FirewallPolicyRule struct { // organization will receive the rule. TargetResources []string `json:"targetResources,omitempty"` + // TargetSecureTags: A list of secure tags that controls which instances + // the firewall rule applies to. If targetSecureTag are specified, then + // the firewall rule applies only to instances in the VPC network that + // have one of those EFFECTIVE secure tags, if all the target_secure_tag + // are in INEFFECTIVE state, then this rule will be ignored. + // targetSecureTag may not be set at the same time as + // targetServiceAccounts. If neither targetServiceAccounts nor + // targetSecureTag are specified, the firewall rule applies to all + // instances on the specified network. 
Maximum number of target label + // tags allowed is 256. + TargetSecureTags []*FirewallPolicyRuleSecureTag `json:"targetSecureTags,omitempty"` + // TargetServiceAccounts: A list of service accounts indicating the sets // of instances that are applied with this rule. TargetServiceAccounts []string `json:"targetServiceAccounts,omitempty"` @@ -11564,6 +12185,12 @@ type FirewallPolicyRuleMatcher struct { // ranges allowed is 5000. SrcIpRanges []string `json:"srcIpRanges,omitempty"` + // SrcSecureTags: List of secure tag values, which should be matched at + // the source of the traffic. For INGRESS rule, if all the srcSecureTag + // are INEFFECTIVE, and there is no srcIpRange, this rule will be + // ignored. Maximum number of source tag values allowed is 256. + SrcSecureTags []*FirewallPolicyRuleSecureTag `json:"srcSecureTags,omitempty"` + // ForceSendFields is a list of field names (e.g. "DestIpRanges") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -11624,6 +12251,42 @@ func (s *FirewallPolicyRuleMatcherLayer4Config) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type FirewallPolicyRuleSecureTag struct { + // Name: Name of the secure tag, created with TagManager's TagValue API. + Name string `json:"name,omitempty"` + + // State: [Output Only] State of the secure tag, either `EFFECTIVE` or + // `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted or + // its network is deleted. + // + // Possible values: + // "EFFECTIVE" + // "INEFFECTIVE" + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FirewallPolicyRuleSecureTag) MarshalJSON() ([]byte, error) { + type NoMethod FirewallPolicyRuleSecureTag + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // FixedOrPercent: Encapsulates numeric value that can be either // absolute or relative. type FixedOrPercent struct { @@ -11679,24 +12342,32 @@ func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { // Classic gateways (targetVpnGateway). For more information, read // Forwarding rule concepts and Using protocol forwarding. type ForwardingRule struct { - // IPAddress: IP address that this forwarding rule serves. When a client - // sends traffic to this IP address, the forwarding rule directs the - // traffic to the target that you specify in the forwarding rule. If you - // don't specify a reserved IP address, an ephemeral IP address is - // assigned. 
Methods for specifying an IP address: * IPv4 dotted - // decimal, as in `100.1.2.3` * Full URL, as in - // https://www.googleapis.com/compute/v1/projects/project_id/regions/region - // /addresses/address-name * Partial URL or by name, as in: - + // IPAddress: IP address for which this forwarding rule accepts traffic. + // When a client sends traffic to this IP address, the forwarding rule + // directs the traffic to the referenced target or backendService. While + // creating a forwarding rule, specifying an IPAddress is required under + // the following circumstances: - When the target is set to + // targetGrpcProxy and validateForProxyless is set to true, the + // IPAddress should be set to 0.0.0.0. - When the target is a Private + // Service Connect Google APIs bundle, you must specify an IPAddress. + // Otherwise, you can optionally specify an IP address that references + // an existing static (reserved) IP address resource. When omitted, + // Google Cloud assigns an ephemeral IP address. Use one of the + // following formats to specify an IP address while creating a + // forwarding rule: * IP address number, as in `100.1.2.3` * IPv6 + // address range, as in `2600:1234::/96` * Full resource URL, as in + // https://www.googleapis.com/compute/v1/projects/ + // project_id/regions/region/addresses/address-name * Partial URL or by + // name, as in: - // projects/project_id/regions/region/addresses/address-name - // regions/region/addresses/address-name - global/addresses/address-name - // - address-name The loadBalancingScheme and the forwarding rule's - // target determine the type of IP address that you can use. For - // detailed information, see IP address specifications + // - address-name The forwarding rule's target or backendService, and in + // most cases, also the loadBalancingScheme, determine the type of IP + // address that you can use. For detailed information, see IP address + // specifications // (https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - // Must be set to `0.0.0.0` when the target is targetGrpcProxy that has - // validateForProxyless field set to true. For Private Service Connect - // forwarding rules that forward traffic to Google APIs, IP address must - // be provided. + // When reading an IPAddress, the API always returns the IP address + // number. IPAddress string `json:"IPAddress,omitempty"` // IPProtocol: The IP protocol to which this rule applies. For protocol @@ -11715,12 +12386,14 @@ type ForwardingRule struct { // "UDP" IPProtocol string `json:"IPProtocol,omitempty"` - // AllPorts: This field is used along with the backend_service field for - // Internal TCP/UDP Load Balancing or Network Load Balancing, or with - // the target field for internal and external TargetInstance. You can - // only use one of ports and port_range, or allPorts. The three are - // mutually exclusive. For TCP, UDP and SCTP traffic, packets addressed - // to any ports will be forwarded to the target or backendService. + // AllPorts: This field can only be used: - If IPProtocol is one of TCP, + // UDP, or SCTP. - By internal TCP/UDP load balancers, backend + // service-based network load balancers, and internal and external + // protocol forwarding. Set this field to true to allow packets + // addressed to any port or packets lacking destination port information + // (for example, UDP fragments after the first fragment) to be forwarded + // to the backends configured with this forwarding rule. 
The ports, + // port_range, and allPorts fields are mutually exclusive. AllPorts bool `json:"allPorts,omitempty"` // AllowGlobalAccess: This field is used along with the backend_service @@ -11758,8 +12431,7 @@ type ForwardingRule struct { Id uint64 `json:"id,omitempty,string"` // IpVersion: The IP Version that will be used by this forwarding rule. - // Valid options are IPV4 or IPV6. This can only be specified for an - // external global forwarding rule. + // Valid options are IPV4 or IPV6. // // Possible values: // "IPV4" @@ -11865,28 +12537,42 @@ type ForwardingRule struct { // configured. NetworkTier string `json:"networkTier,omitempty"` - // PortRange: This field can be used only if: - Load balancing scheme is - // one of EXTERNAL, INTERNAL_SELF_MANAGED or INTERNAL_MANAGED - - // IPProtocol is one of TCP, UDP, or SCTP. Packets addressed to ports in - // the specified range will be forwarded to target or backend_service. - // You can only use one of ports, port_range, or allPorts. The three are - // mutually exclusive. Forwarding rules with the same [IPAddress, - // IPProtocol] pair must have disjoint ports. Some types of forwarding - // target have constraints on the acceptable ports. For more - // information, see Port specifications - // (https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications). - // @pattern: \\d+(?:-\\d+)? + // NoAutomateDnsZone: This is used in PSC consumer ForwardingRule to + // control whether it should try to auto-generate a DNS zone or not. + // Non-PSC forwarding rules do not use this field. + NoAutomateDnsZone bool `json:"noAutomateDnsZone,omitempty"` + + // PortRange: This field can only be used: - If IPProtocol is one of + // TCP, UDP, or SCTP. - By backend service-based network load balancers, + // target pool-based network load balancers, internal proxy load + // balancers, external proxy load balancers, Traffic Director, external + // protocol forwarding, and Classic VPN. Some products have restrictions + // on what ports can be used. See port specifications for details. Only + // packets addressed to ports in the specified range will be forwarded + // to the backends configured with this forwarding rule. The ports, + // port_range, and allPorts fields are mutually exclusive. For external + // forwarding rules, two or more forwarding rules cannot use the same + // [IPAddress, IPProtocol] pair, and cannot have overlapping portRanges. + // For internal forwarding rules within the same VPC network, two or + // more forwarding rules cannot use the same [IPAddress, IPProtocol] + // pair, and cannot have overlapping portRanges. @pattern: + // \\d+(?:-\\d+)? PortRange string `json:"portRange,omitempty"` - // Ports: The ports field is only supported when the forwarding rule - // references a backend_service directly. Only packets addressed to the - // specified list of ports - // ((https://cloud.google.com/load-balancing/docs/forwarding-rule-concept - // s#port_specifications)) are forwarded to backends. You can only use - // one of ports and port_range, or allPorts. The three are mutually - // exclusive. You can specify a list of up to five ports, which can be - // non-contiguous. Forwarding rules with the same [IPAddress, - // IPProtocol] pair must have disjoint ports. @pattern: \\d+(?:-\\d+)? + // Ports: This field can only be used: - If IPProtocol is one of TCP, + // UDP, or SCTP. - By internal TCP/UDP load balancers, backend + // service-based network load balancers, and internal protocol + // forwarding. 
You can specify a list of up to five ports by number, + // separated by commas. The ports can be contiguous or discontiguous. + // Only packets addressed to these ports will be forwarded to the + // backends configured with this forwarding rule. For external + // forwarding rules, two or more forwarding rules cannot use the same + // [IPAddress, IPProtocol] pair, and cannot share any values defined in + // ports. For internal forwarding rules within the same VPC network, two + // or more forwarding rules cannot use the same [IPAddress, IPProtocol] + // pair, and cannot share any values defined in ports. The ports, + // port_range, and allPorts fields are mutually exclusive. @pattern: + // \\d+(?:-\\d+)? Ports []string `json:"ports,omitempty"` // PscConnectionId: [Output Only] The PSC connection id of the PSC @@ -11897,6 +12583,9 @@ type ForwardingRule struct { // "ACCEPTED" - The connection has been accepted by the producer. // "CLOSED" - The connection has been closed by the producer and will // not serve traffic going forward. + // "NEEDS_ATTENTION" - The connection has been accepted by the + // producer, but the producer needs to take further action before the + // forwarding rule can serve traffic. // "PENDING" - The connection is pending acceptance by the producer. // "REJECTED" - The connection has been rejected by the producer. // "STATUS_UNSPECIFIED" @@ -11940,6 +12629,17 @@ type ForwardingRule struct { // subnet mode or when creating external forwarding rule with IPv6. Subnetwork string `json:"subnetwork,omitempty"` + // Target: The URL of the target resource to receive the matched + // traffic. For regional forwarding rules, this target must be in the + // same region as the forwarding rule. For global forwarding rules, this + // target must be a global load balancing resource. The forwarded + // traffic must be of a type appropriate to the target object. For more + // information, see the "Target" column in Port specifications + // (https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). + // For Private Service Connect forwarding rules that forward traffic to + // Google APIs, provide the name of a supported Google API bundle: - + // vpc-sc - APIs that support VPC Service Controls. - all-apis - All + // supported Google APIs. Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -12049,6 +12749,9 @@ type ForwardingRuleAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -12056,6 +12759,9 @@ type ForwardingRuleAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. 
// "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -12232,6 +12938,9 @@ type ForwardingRuleListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -12239,6 +12948,9 @@ type ForwardingRuleListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -12464,6 +13176,9 @@ type ForwardingRulesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -12471,6 +13186,9 @@ type ForwardingRulesScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -12579,35 +13297,45 @@ type GRPCHealthCheck struct { // The grpc_service_name can only be ASCII. GrpcServiceName string `json:"grpcServiceName,omitempty"` - // Port: The port number for the health check request. Must be specified - // if port_name and port_specification are not set or if - // port_specification is USE_FIXED_PORT. Valid values are 1 through - // 65535. + // Port: The TCP port number to which the health check prober sends + // packets. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. The - // port_name should conform to RFC1035. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. 
For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, gRPC health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ForceSendFields is a list of field names (e.g. "GrpcServiceName") to @@ -12738,14 +13466,10 @@ type GlobalSetLabelsRequest struct { // resource to get the latest fingerprint. LabelFingerprint string `json:"labelFingerprint,omitempty"` - // Labels: A list of labels to apply for this resource. Each label key & - // value must comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. For example, "webserver-frontend": "images". A label value - // can also be empty (e.g. "my-label": ""). + // Labels: A list of labels to apply for this resource. 
Each label must + // comply with the requirements for labels. For example, + // "webserver-frontend": "images". A label value can also be empty (e.g. + // "my-label": ""). Labels map[string]string `json:"labels,omitempty"` // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to @@ -12926,9 +13650,9 @@ type GuestOsFeature struct { // Type: The ID of a supported feature. To add multiple values, use // commas to separate values. Set to one or more of the following // values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - - // UEFI_COMPATIBLE - SECURE_BOOT - GVNIC - SEV_CAPABLE - - // SUSPEND_RESUME_COMPATIBLE For more information, see Enabling guest - // operating system features. + // UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - + // SEV_SNP_CAPABLE For more information, see Enabling guest operating + // system features. // // Possible values: // "FEATURE_TYPE_UNSPECIFIED" @@ -12966,36 +13690,52 @@ func (s *GuestOsFeature) MarshalJSON() ([]byte, error) { type HTTP2HealthCheck struct { // Host: The value of the host header in the HTTP/2 health check - // request. If left empty (default value), the IP on behalf of which - // this health check is performed will be used. + // request. If left empty (default value), the host header is set to the + // destination IP address to which health check packets are sent. The + // destination IP address depends on the type of load balancer. For + // details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest Host string `json:"host,omitempty"` - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 443. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, HTTP2 health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. 
Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ProxyHeader: Specifies the type of proxy header to append before @@ -13011,9 +13751,12 @@ type HTTP2HealthCheck struct { // default value is /. RequestPath string `json:"requestPath,omitempty"` - // Response: The string to match anywhere in the first 1024 bytes of the - // response body. If left empty (the default value), the status code - // determines health. The response data can only be ASCII. + // Response: Creates a content-based HTTP/2 health check. In addition to + // the required HTTP 200 (OK) status code, you can configure the health + // check to pass only when the backend sends this specific ASCII + // response string within the first 1024 bytes of the HTTP response + // body. For details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http Response string `json:"response,omitempty"` // ForceSendFields is a list of field names (e.g. "Host") to @@ -13041,36 +13784,52 @@ func (s *HTTP2HealthCheck) MarshalJSON() ([]byte, error) { type HTTPHealthCheck struct { // Host: The value of the host header in the HTTP health check request. - // If left empty (default value), the IP on behalf of which this health - // check is performed will be used. + // If left empty (default value), the host header is set to the + // destination IP address to which health check packets are sent. The + // destination IP address depends on the type of load balancer. For + // details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest Host string `json:"host,omitempty"` - // Port: The TCP port number for the health check request. The default - // value is 80. Valid values are 1 through 65535. + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 80. Valid values are 1 through 65535. 
Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, HTTP health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Also supported in + // legacy HTTP health checks for target pools. The health check supports + // all backends supported by the backend service provided the backend + // can be health checked. For example, GCE_VM_IP network endpoint + // groups, GCE_VM_IP_PORT network endpoint groups, and instance group + // backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides + // an indirect method of specifying the health check port by referring + // to the backend service. Only supported by backend services for proxy + // load balancers. Not supported by target pools. Not supported by + // backend services for pass-through load balancers. Supports all + // backends that can be health checked; for example, GCE_VM_IP_PORT + // network endpoint groups and instance group backends. For + // GCE_VM_IP_PORT network endpoint group backends, the health check uses + // the port number specified for each endpoint in the network endpoint + // group. For instance group backends, the health check uses the port + // number determined by looking up the backend service's named port in + // the instance group's list of named ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ProxyHeader: Specifies the type of proxy header to append before @@ -13086,9 +13845,12 @@ type HTTPHealthCheck struct { // default value is /. 
RequestPath string `json:"requestPath,omitempty"` - // Response: The string to match anywhere in the first 1024 bytes of the - // response body. If left empty (the default value), the status code - // determines health. The response data can only be ASCII. + // Response: Creates a content-based HTTP health check. In addition to + // the required HTTP 200 (OK) status code, you can configure the health + // check to pass only when the backend sends this specific ASCII + // response string within the first 1024 bytes of the HTTP response + // body. For details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http Response string `json:"response,omitempty"` // ForceSendFields is a list of field names (e.g. "Host") to @@ -13116,36 +13878,52 @@ func (s *HTTPHealthCheck) MarshalJSON() ([]byte, error) { type HTTPSHealthCheck struct { // Host: The value of the host header in the HTTPS health check request. - // If left empty (default value), the IP on behalf of which this health - // check is performed will be used. + // If left empty (default value), the host header is set to the + // destination IP address to which health check packets are sent. The + // destination IP address depends on the type of load balancer. For + // details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest Host string `json:"host,omitempty"` - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 443. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, HTTPS health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. 
Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ProxyHeader: Specifies the type of proxy header to append before @@ -13161,9 +13939,12 @@ type HTTPSHealthCheck struct { // default value is /. RequestPath string `json:"requestPath,omitempty"` - // Response: The string to match anywhere in the first 1024 bytes of the - // response body. If left empty (the default value), the status code - // determines health. The response data can only be ASCII. + // Response: Creates a content-based HTTPS health check. In addition to + // the required HTTP 200 (OK) status code, you can configure the health + // check to pass only when the backend sends this specific ASCII + // response string within the first 1024 bytes of the HTTP response + // body. For details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http Response string `json:"response,omitempty"` // ForceSendFields is a list of field names (e.g. "Host") to @@ -13268,8 +14049,8 @@ type HealthCheck struct { TimeoutSec int64 `json:"timeoutSec,omitempty"` // Type: Specifies the type of the healthCheck, either TCP, SSL, HTTP, - // HTTPS or HTTP2. Exactly one of the protocol-specific health check - // field must be specified, which must match type field. + // HTTPS, HTTP2 or GRPC. Exactly one of the protocol-specific health + // check fields must be specified, which must match type field. // // Possible values: // "GRPC" @@ -13390,6 +14171,9 @@ type HealthCheckListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
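The Response field documented above enables content-based checking; a short sketch (same assumed compute import, hypothetical values) shows it together with a Type value that matches the protocol-specific field:

// import compute "google.golang.org/api/compute/v1" (assumed vendored path)

// newContentCheckedHTTPSHealthCheck passes only if the backend returns
// HTTP 200 and the first 1024 bytes of the body contain the ASCII string "ok".
func newContentCheckedHTTPSHealthCheck() *compute.HealthCheck {
	return &compute.HealthCheck{
		Name: "hc-content", // hypothetical name
		Type: "HTTPS",      // exactly one protocol-specific field, matching Type
		HttpsHealthCheck: &compute.HTTPSHealthCheck{
			Port:        443,
			RequestPath: "/healthz",
			Response:    "ok",
		},
	}
}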
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -13397,6 +14181,9 @@ type HealthCheckListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -13577,14 +14364,20 @@ type HealthCheckService struct { Fingerprint string `json:"fingerprint,omitempty"` // HealthChecks: A list of URLs to the HealthCheck resources. Must have - // at least one HealthCheck, and not more than 10. HealthCheck resources - // must have portSpecification=USE_SERVING_PORT or + // at least one HealthCheck, and not more than 10 for regional + // HealthCheckService, and not more than 1 for global + // HealthCheckService. HealthCheck resources must have + // portSpecification=USE_SERVING_PORT or // portSpecification=USE_FIXED_PORT. For regional HealthCheckService, // the HealthCheck must be regional and in the same region. For global // HealthCheckService, HealthCheck must be global. Mix of regional and // global HealthChecks is not supported. Multiple regional HealthChecks // must belong to the same region. Regional HealthChecks must belong to - // the same region as zones of NEGs. + // the same region as zones of NetworkEndpointGroups. For global + // HealthCheckService using global INTERNET_IP_PORT + // NetworkEndpointGroups, the global HealthChecks must specify + // sourceRegions, and HealthChecks that specify sourceRegions can only + // be used with global INTERNET_IP_PORT NetworkEndpointGroups. HealthChecks []string `json:"healthChecks,omitempty"` // HealthStatusAggregationPolicy: Optional. Policy for how the results @@ -13594,6 +14387,7 @@ type HealthCheckService struct { // service. - AND. If any health check of an endpoint reports UNHEALTHY, // then UNHEALTHY is the HealthState of the endpoint. If all health // checks report HEALTHY, the HealthState of the endpoint is HEALTHY. . + // This is only allowed with regional HealthCheckService. // // Possible values: // "AND" - If any backend's health check reports UNHEALTHY, then @@ -13624,7 +14418,8 @@ type HealthCheckService struct { // NetworkEndpointGroups: A list of URLs to the NetworkEndpointGroup // resources. Must not have more than 100. For regional // HealthCheckService, NEGs must be in zones in the region of the - // HealthCheckService. + // HealthCheckService. For global HealthCheckServices, the + // NetworkEndpointGroups must be global INTERNET_IP_PORT. NetworkEndpointGroups []string `json:"networkEndpointGroups,omitempty"` // NotificationEndpoints: A list of URLs to the NotificationEndpoint @@ -13782,6 +14577,9 @@ type HealthCheckServicesListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -13789,6 +14587,9 @@ type HealthCheckServicesListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -13967,6 +14768,9 @@ type HealthChecksAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -13974,6 +14778,9 @@ type HealthChecksAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -14128,6 +14935,9 @@ type HealthChecksScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -14135,6 +14945,9 @@ type HealthChecksScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -14361,6 +15174,70 @@ func (s *HealthStatusForNetworkEndpoint) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Help: Provides links to documentation or for performing an out of +// band action. For example, if a quota check failed with an error +// indicating the calling project hasn't enabled the accessed service, +// this can contain a URL pointing directly to the right place in the +// developer console to flip the bit. 
+type Help struct { + // Links: URL(s) pointing to additional information on handling the + // current error. + Links []*HelpLink `json:"links,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Links") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Links") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Help) MarshalJSON() ([]byte, error) { + type NoMethod Help + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HelpLink: Describes a URL link. +type HelpLink struct { + // Description: Describes what the link offers. + Description string `json:"description,omitempty"` + + // Url: The URL of the link. + Url string `json:"url,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HelpLink) MarshalJSON() ([]byte, error) { + type NoMethod HelpLink + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HostRule: UrlMaps A host-matching rule for a URL. If matched, will // use the named PathMatcher to select the BackendService. type HostRule struct { @@ -14371,10 +15248,10 @@ type HostRule struct { // Hosts: The list of host patterns to match. They must be valid // hostnames with optional port numbers in the format host:port. * // matches any string of ([a-z0-9-.]*). In that case, * must be the - // first character and must be followed in the pattern by either - or .. - // * based matching is not supported when the URL map is bound to a - // target gRPC proxy that has the validateForProxyless field set to - // true. + // first character, and if followed by anything, the immediate following + // character must be either - or .. * based matching is not supported + // when the URL map is bound to a target gRPC proxy that has the + // validateForProxyless field set to true. 
Hosts []string `json:"hosts,omitempty"` // PathMatcher: The name of the PathMatcher to use to match the path @@ -14644,8 +15521,8 @@ type HttpHeaderMatch struct { // in the HTTP request, use a headerMatch with headerName set to PORT // and a regular expression that satisfies the RFC2616 Host header's // port specifier. Only one of exactMatch, prefixMatch, suffixMatch, - // regexMatch, presentMatch or rangeMatch must be set. regexMatch only - // applies to load balancers that have loadBalancingScheme set to + // regexMatch, presentMatch or rangeMatch must be set. Regular + // expressions can only be used when the loadBalancingScheme is set to // INTERNAL_SELF_MANAGED. RegexMatch string `json:"regexMatch,omitempty"` @@ -14886,6 +15763,9 @@ type HttpHealthCheckListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -14893,6 +15773,9 @@ type HttpHealthCheckListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -15014,8 +15897,8 @@ type HttpQueryParameterMatch struct { // RegexMatch: The queryParameterMatch matches if the value of the // parameter matches the regular expression specified by regexMatch. For // more information about regular expression syntax, see Syntax. Only - // one of presentMatch, exactMatch, or regexMatch must be set. - // regexMatch only applies when the loadBalancingScheme is set to + // one of presentMatch, exactMatch, or regexMatch must be set. Regular + // expressions can only be used when the loadBalancingScheme is set to // INTERNAL_SELF_MANAGED. RegexMatch string `json:"regexMatch,omitempty"` @@ -15196,9 +16079,14 @@ type HttpRouteAction struct { // requests to a backend service, delays can be introduced by a load // balancer on a percentage of requests before sending those requests to // the backend service. Similarly requests from clients can be aborted - // by the load balancer for a percentage of requests. For the requests - // impacted by fault injection, timeout and retry_policy is ignored by - // clients that are configured with a fault_injection_policy. + // by the load balancer for a percentage of requests. timeout and + // retry_policy is ignored by clients that are configured with a + // fault_injection_policy if: 1. The traffic is generated by fault + // injection AND 2. The fault injection is not a delay fault injection. + // Fault injection is not supported with the global external HTTP(S) + // load balancer (classic). To see which load balancers support fault + // injection, see Load balancing: Routing and traffic management + // features. 
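The host pattern rules above ("*" first, then "-" or "." if anything follows) are easy to get wrong; a small sketch with the same assumed compute import:

// import compute "google.golang.org/api/compute/v1" (assumed vendored path)

// newHostRule matches example.com and any subdomain of example.com and hands
// matching requests to a named PathMatcher.
func newHostRule() *compute.HostRule {
	return &compute.HostRule{
		// "*" must be the first character; if anything follows it, the next
		// character must be "-" or ".", so "*.example.com" is valid.
		Hosts:       []string{"example.com", "*.example.com"},
		PathMatcher: "pm-default", // hypothetical PathMatcher name
	}
}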
FaultInjectionPolicy *HttpFaultInjection `json:"faultInjectionPolicy,omitempty"` // MaxStreamDuration: Specifies the maximum duration (timeout) for @@ -15325,9 +16213,9 @@ type HttpRouteRule struct { // backend. If routeAction specifies any weightedBackendServices, // service must not be set. Conversely if service is set, routeAction // cannot contain any weightedBackendServices. Only one of urlRedirect, - // service or routeAction.weightedBackendService must be set. UrlMaps - // for external HTTP(S) load balancers support only the urlRewrite - // action within a route rule's routeAction. + // service or routeAction.weightedBackendService must be set. URL maps + // for Classic external HTTP(S) load balancers only support the + // urlRewrite action within a route rule's routeAction. RouteAction *HttpRouteAction `json:"routeAction,omitempty"` // Service: The full or partial URL of the backend service resource to @@ -15428,8 +16316,8 @@ type HttpRouteRuleMatch struct { // after removing any query parameters and anchor supplied with the // original URL. For more information about regular expression syntax, // see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must - // be specified. regexMatch only applies to load balancers that have - // loadBalancingScheme set to INTERNAL_SELF_MANAGED. + // be specified. Regular expressions can only be used when the + // loadBalancingScheme is set to INTERNAL_SELF_MANAGED. RegexMatch string `json:"regexMatch,omitempty"` // ForceSendFields is a list of field names (e.g. "FullPathMatch") to @@ -15626,6 +16514,9 @@ type HttpsHealthCheckListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -15633,6 +16524,9 @@ type HttpsHealthCheckListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -15735,6 +16629,16 @@ func (s *HttpsHealthCheckListWarningData) MarshalJSON() ([]byte, error) { // Image: Represents an Image resource. You can use images to create // boot disks for your VM instances. For more information, read Images. type Image struct { + // Architecture: The architecture of the image. Valid values are ARM64 + // or X86_64. + // + // Possible values: + // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture + // is not set. + // "ARM64" - Machines with architecture ARM64 + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + // ArchiveSizeBytes: Size of the image tar.gz archive stored in Google // Cloud Storage (in bytes). 
ArchiveSizeBytes int64 `json:"archiveSizeBytes,omitempty,string"` @@ -15888,7 +16792,7 @@ type Image struct { SourceSnapshotId string `json:"sourceSnapshotId,omitempty"` // SourceType: The type of the image used to create this disk. The - // default and only value is RAW + // default and only valid value is RAW. // // Possible values: // "RAW" (default) @@ -15914,7 +16818,7 @@ type Image struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "ArchiveSizeBytes") to + // ForceSendFields is a list of field names (e.g. "Architecture") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -15922,13 +16826,12 @@ type Image struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ArchiveSizeBytes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Architecture") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -16094,6 +16997,9 @@ type ImageListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -16101,6 +17007,9 @@ type ImageListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -16300,6 +17209,18 @@ type Instance struct { // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` + // KeyRevocationActionType: KeyRevocationActionType of the instance. + // Supported options are "STOP" and "NONE". The default value is "NONE" + // if it is not specified. + // + // Possible values: + // "KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED" - Default value. This + // value is unused. + // "NONE" - Indicates user chose no operation. + // "STOP" - Indicates user chose to opt for VM shutdown on key + // revocation. 
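The ForceSendFields/NullFields boilerplate repeated through this file describes real behavior that matters for Patch requests; a short sketch using the Image type (same assumed compute import):

// import compute "google.golang.org/api/compute/v1" (assumed vendored path)

// Empty fields are normally dropped from the JSON body; listing a field in
// ForceSendFields keeps it, and listing it in NullFields sends JSON null.
func patchBodyClearingDescription() ([]byte, error) {
	img := &compute.Image{
		Description:     "",                      // would otherwise be omitted
		ForceSendFields: []string{"Description"}, // send "" explicitly
		// NullFields: []string{"Description"},   // or send null instead
	}
	return img.MarshalJSON()
}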
+ KeyRevocationActionType string `json:"keyRevocationActionType,omitempty"` + // Kind: [Output Only] Type of the resource. Always compute#instance for // instances. Kind string `json:"kind,omitempty"` @@ -16373,6 +17294,10 @@ type Instance struct { NetworkPerformanceConfig *NetworkPerformanceConfig `json:"networkPerformanceConfig,omitempty"` + // Params: Input only. [Input Only] Additional params passed with the + // request, but not persisted as part of resource payload. + Params *InstanceParams `json:"params,omitempty"` + // PrivateIpv6GoogleAccess: The private IPv6 google access type for the // VM. If not specified, use INHERIT_FROM_SUBNETWORK as default. // @@ -16397,6 +17322,11 @@ type Instance struct { // ResourcePolicies: Resource policies applied to this instance. ResourcePolicies []string `json:"resourcePolicies,omitempty"` + // ResourceStatus: [Output Only] Specifies values set for instance + // attributes as compared to the values requested by user in the + // corresponding input only field. + ResourceStatus *ResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -16579,6 +17509,9 @@ type InstanceAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -16586,6 +17519,9 @@ type InstanceAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -16685,6 +17621,77 @@ func (s *InstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstanceConsumptionData struct { + // ConsumptionInfo: Resources consumed by the instance. + ConsumptionInfo *InstanceConsumptionInfo `json:"consumptionInfo,omitempty"` + + // Instance: Server-defined URL for the instance. + Instance string `json:"instance,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConsumptionInfo") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumptionInfo") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. 
It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstanceConsumptionData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceConsumptionData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceConsumptionInfo struct { + // GuestCpus: The number of virtual CPUs that are available to the + // instance. + GuestCpus int64 `json:"guestCpus,omitempty"` + + // LocalSsdGb: The amount of local SSD storage available to the + // instance, defined in GiB. + LocalSsdGb int64 `json:"localSsdGb,omitempty"` + + // MemoryMb: The amount of physical memory available to the instance, + // defined in MiB. + MemoryMb int64 `json:"memoryMb,omitempty"` + + // MinNodeCpus: The minimal guaranteed number of virtual CPUs that are + // reserved. + MinNodeCpus int64 `json:"minNodeCpus,omitempty"` + + // ForceSendFields is a list of field names (e.g. "GuestCpus") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "GuestCpus") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceConsumptionInfo) MarshalJSON() ([]byte, error) { + type NoMethod InstanceConsumptionInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceGroup: Represents an Instance Group resource. Instance Groups // can be used to configure a target for load balancing. Instance groups // can either be managed or unmanaged. To create managed instance @@ -16723,9 +17730,9 @@ type InstanceGroup struct { // NamedPorts: Assigns a name to a port number. For example: {name: // "http", port: 80} This allows the system to reference ports by the // assigned name instead of a port number. Named ports can also contain - // multiple ports. For example: [{name: "http", port: 80},{name: "http", - // port: 8080}] Named ports apply to all instances in this instance - // group. + // multiple ports. For example: [{name: "app1", port: 8080}, {name: + // "app1", port: 8081}, {name: "app2", port: 8082}] Named ports apply to + // all instances in this instance group. NamedPorts []*NamedPort `json:"namedPorts,omitempty"` // Network: [Output Only] The URL of the network to which all instances @@ -16865,6 +17872,9 @@ type InstanceGroupAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
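The named-port example in the InstanceGroup comment above translates directly into the generated types; a sketch with the same assumed compute import:

// import compute "google.golang.org/api/compute/v1" (assumed vendored path)

// One name may map to several ports, and named ports apply to every instance
// in the group, mirroring the JSON example in the comment above.
func newInstanceGroupWithNamedPorts() *compute.InstanceGroup {
	return &compute.InstanceGroup{
		Name: "ig-example", // hypothetical name
		NamedPorts: []*compute.NamedPort{
			{Name: "app1", Port: 8080},
			{Name: "app1", Port: 8081},
			{Name: "app2", Port: 8082},
		},
	}
}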
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -16872,6 +17882,9 @@ type InstanceGroupAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -17049,6 +18062,9 @@ type InstanceGroupListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -17056,6 +18072,9 @@ type InstanceGroupListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -17215,6 +18234,19 @@ type InstanceGroupManager struct { // compute#instanceGroupManager for managed instance groups. Kind string `json:"kind,omitempty"` + // ListManagedInstancesResults: Pagination behavior of the + // listManagedInstances API method for this managed instance group. + // + // Possible values: + // "PAGELESS" - (Default) Pagination is disabled for the group's + // listManagedInstances API method. maxResults and pageToken query + // parameters are ignored and all instances are returned in a single + // response. + // "PAGINATED" - Pagination is enabled for the group's + // listManagedInstances API method. maxResults and pageToken query + // parameters are respected. + ListManagedInstancesResults string `json:"listManagedInstancesResults,omitempty"` + // Name: The name of the managed instance group. The name must be 1-63 // characters long, and comply with RFC1035. Name string `json:"name,omitempty"` @@ -17473,6 +18505,9 @@ type InstanceGroupManagerAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -17480,6 +18515,9 @@ type InstanceGroupManagerAggregatedListWarning struct { // not assigned to an instance on the network. 
// "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -17695,6 +18733,9 @@ type InstanceGroupManagerListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -17702,6 +18743,9 @@ type InstanceGroupManagerListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -17850,13 +18894,14 @@ type InstanceGroupManagerStatusStateful struct { // HasStatefulConfig: [Output Only] A bit indicating whether the managed // instance group has stateful configuration, that is, if you have // configured any items in a stateful policy or in per-instance configs. - // The group might report that it has no stateful config even when there - // is still some preserved state on a managed instance, for example, if - // you have deleted all PICs but not yet applied those deletions. + // The group might report that it has no stateful configuration even + // when there is still some preserved state on a managed instance, for + // example, if you have deleted all PICs but not yet applied those + // deletions. HasStatefulConfig bool `json:"hasStatefulConfig,omitempty"` - // PerInstanceConfigs: [Output Only] Status of per-instance configs on - // the instance. + // PerInstanceConfigs: [Output Only] Status of per-instance + // configurations on the instance. PerInstanceConfigs *InstanceGroupManagerStatusStatefulPerInstanceConfigs `json:"perInstanceConfigs,omitempty"` // ForceSendFields is a list of field names (e.g. "HasStatefulConfig") @@ -17885,8 +18930,8 @@ func (s *InstanceGroupManagerStatusStateful) MarshalJSON() ([]byte, error) { type InstanceGroupManagerStatusStatefulPerInstanceConfigs struct { // AllEffective: A bit indicating if all of the group's per-instance - // configs (listed in the output of a listPerInstanceConfigs API call) - // have status EFFECTIVE or there are no per-instance-configs. + // configurations (listed in the output of a listPerInstanceConfigs API + // call) have status EFFECTIVE or there are no per-instance-configs. AllEffective bool `json:"allEffective,omitempty"` // ForceSendFields is a list of field names (e.g. "AllEffective") to @@ -17982,13 +19027,17 @@ type InstanceGroupManagerUpdatePolicy struct { // maxUnavailable. 
MaxUnavailable *FixedOrPercent `json:"maxUnavailable,omitempty"` - // MinimalAction: Minimal action to be taken on an instance. You can - // specify either RESTART to restart existing instances or REPLACE to - // delete and create new instances from the target template. If you - // specify a RESTART, the Updater will attempt to perform that action - // only. However, if the Updater determines that the minimal action you - // specify is not enough to perform the update, it might perform a more - // disruptive action. + // MinimalAction: Minimal action to be taken on an instance. Use this + // option to minimize disruption as much as possible or to apply a more + // disruptive action than is necessary. - To limit disruption as much as + // possible, set the minimal action to REFRESH. If your update requires + // a more disruptive action, Compute Engine performs the necessary + // action to execute the update. - To apply a more disruptive action + // than is strictly necessary, set the minimal action to RESTART or + // REPLACE. For example, Compute Engine does not need to restart a VM to + // change its metadata. But if your application reads instance metadata + // only when a VM is restarted, you can set the minimal action to + // RESTART in order to pick up metadata changes. // // Possible values: // "NONE" - Do not perform any action. @@ -18462,6 +19511,9 @@ type InstanceGroupManagersListPerInstanceConfigsRespWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -18469,6 +19521,9 @@ type InstanceGroupManagersListPerInstanceConfigsRespWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -18571,8 +19626,8 @@ func (s *InstanceGroupManagersListPerInstanceConfigsRespWarningData) MarshalJSON // InstanceGroupManagersPatchPerInstanceConfigsReq: // InstanceGroupManagers.patchPerInstanceConfigs type InstanceGroupManagersPatchPerInstanceConfigsReq struct { - // PerInstanceConfigs: The list of per-instance configs to insert or - // patch on this managed instance group. + // PerInstanceConfigs: The list of per-instance configurations to insert + // or patch on this managed instance group. PerInstanceConfigs []*PerInstanceConfig `json:"perInstanceConfigs,omitempty"` // ForceSendFields is a list of field names (e.g. "PerInstanceConfigs") @@ -18687,6 +19742,9 @@ type InstanceGroupManagersScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
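The reworded MinimalAction guidance above pairs naturally with a small update-policy sketch (same assumed compute import; values are illustrative):

// import compute "google.golang.org/api/compute/v1" (assumed vendored path)

// REFRESH keeps disruption minimal; Compute Engine escalates to RESTART or
// REPLACE only when the requested update cannot be applied in place.
func newUpdatePolicy() *compute.InstanceGroupManagerUpdatePolicy {
	return &compute.InstanceGroupManagerUpdatePolicy{
		MinimalAction:  "REFRESH",
		MaxUnavailable: &compute.FixedOrPercent{Fixed: 1}, // at most one VM down at a time
	}
}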
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -18694,6 +19752,9 @@ type InstanceGroupManagersScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -18868,8 +19929,8 @@ func (s *InstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, erro // InstanceGroupManagersUpdatePerInstanceConfigsReq: // InstanceGroupManagers.updatePerInstanceConfigs type InstanceGroupManagersUpdatePerInstanceConfigsReq struct { - // PerInstanceConfigs: The list of per-instance configs to insert or - // patch on this managed instance group. + // PerInstanceConfigs: The list of per-instance configurations to insert + // or patch on this managed instance group. PerInstanceConfigs []*PerInstanceConfig `json:"perInstanceConfigs,omitempty"` // ForceSendFields is a list of field names (e.g. "PerInstanceConfigs") @@ -19001,6 +20062,9 @@ type InstanceGroupsListInstancesWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -19008,6 +20072,9 @@ type InstanceGroupsListInstancesWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -19228,6 +20295,9 @@ type InstanceGroupsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -19235,6 +20305,9 @@ type InstanceGroupsScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. 
+ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -19448,6 +20521,9 @@ type InstanceListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -19455,6 +20531,9 @@ type InstanceListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -19632,6 +20711,9 @@ type InstanceListReferrersWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -19639,6 +20721,9 @@ type InstanceListReferrersWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -19918,6 +21003,39 @@ func (s *InstanceMoveRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceParams: Additional instance params. +type InstanceParams struct { + // ResourceManagerTags: Resource manager tags to be bound to the + // instance. Tag keys and values have the same definition as resource + // manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and + // values are in the format `tagValues/456`. The field is ignored (both + // PUT & PATCH) when empty. + ResourceManagerTags map[string]string `json:"resourceManagerTags,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ResourceManagerTags") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ResourceManagerTags") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstanceParams) MarshalJSON() ([]byte, error) { + type NoMethod InstanceParams + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceProperties struct { // AdvancedMachineFeatures: Controls for advanced machine-related // behavior features. Note that for MachineImage, this is not supported @@ -19949,6 +21067,18 @@ type InstanceProperties struct { // to use for instances created from these properties. GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` + // KeyRevocationActionType: KeyRevocationActionType of the instance. + // Supported options are "STOP" and "NONE". The default value is "NONE" + // if it is not specified. + // + // Possible values: + // "KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED" - Default value. This + // value is unused. + // "NONE" - Indicates user chose no operation. + // "STOP" - Indicates user chose to opt for VM shutdown on key + // revocation. + KeyRevocationActionType string `json:"keyRevocationActionType,omitempty"` + // Labels: Labels to apply to instances that are created from these // properties. Labels map[string]string `json:"labels,omitempty"` @@ -20239,6 +21369,9 @@ type InstanceTemplateListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -20246,6 +21379,9 @@ type InstanceTemplateListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -20473,10 +21609,13 @@ type InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { // ShortName: [Output Only] The short name of the firewall policy. ShortName string `json:"shortName,omitempty"` - // Type: [Output Only] The type of the firewall policy. + // Type: [Output Only] The type of the firewall policy. Can be one of + // HIERARCHY, NETWORK, NETWORK_REGIONAL. 
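The new input-only Params field above carries resource manager tags that are applied at creation time but not stored on the resource payload; a sketch with the same assumed compute import and hypothetical tag IDs:

// import compute "google.golang.org/api/compute/v1" (assumed vendored path)

// Keys use the tagKeys/{tag_key_id} format and values the tagValues/{id}
// format; the map is ignored when empty on both PUT and PATCH.
func newInstanceWithResourceManagerTags() *compute.Instance {
	return &compute.Instance{
		Name: "vm-example", // hypothetical name
		Params: &compute.InstanceParams{
			ResourceManagerTags: map[string]string{
				"tagKeys/123456789": "tagValues/987654321", // hypothetical IDs
			},
		},
	}
}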
// // Possible values: // "HIERARCHY" + // "NETWORK" + // "NETWORK_REGIONAL" // "UNSPECIFIED" Type string `json:"type,omitempty"` @@ -20586,6 +21725,9 @@ type InstancesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -20593,6 +21735,9 @@ type InstancesScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -21125,14 +22270,7 @@ type InterconnectAttachment struct { // "BPS_5G" - 5 Gbit/s Bandwidth string `json:"bandwidth,omitempty"` - // CandidateIpv6Subnets: Up to 16 candidate prefixes that control the - // allocation of cloudRouterIpv6Address and customerRouterIpv6Address - // for this attachment. Each prefix must be in the Global Unique Address - // (GUA) space. It is highly recommended that it be in a range owned by - // the requestor. A GUA in a range owned by Google will cause the - // request to fail. Google will select an available prefix from the - // supplied candidates or fail the request. If not supplied, a /125 from - // a Google-owned GUA block will be selected. + // CandidateIpv6Subnets: This field is not available. CandidateIpv6Subnets []string `json:"candidateIpv6Subnets,omitempty"` // CandidateSubnets: Up to 16 candidate prefixes that can be used to @@ -21155,11 +22293,7 @@ type InterconnectAttachment struct { // attachment. CloudRouterIpv6Address string `json:"cloudRouterIpv6Address,omitempty"` - // CloudRouterIpv6InterfaceId: If supplied, the interface id (index - // within the subnet) to be used for the cloud router address. The id - // must be in the range of 1 to 6. If a subnet mask is supplied, it must - // be /125, and the subnet should either be 0 or match the selected - // subnet. + // CloudRouterIpv6InterfaceId: This field is not available. CloudRouterIpv6InterfaceId string `json:"cloudRouterIpv6InterfaceId,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -21176,15 +22310,10 @@ type InterconnectAttachment struct { // interconnect attachment. CustomerRouterIpv6Address string `json:"customerRouterIpv6Address,omitempty"` - // CustomerRouterIpv6InterfaceId: If supplied, the interface id (index - // within the subnet) to be used for the customer router address. The id - // must be in the range of 1 to 6. If a subnet mask is supplied, it must - // be /125, and the subnet should either be 0 or match the selected - // subnet. + // CustomerRouterIpv6InterfaceId: This field is not available. CustomerRouterIpv6InterfaceId string `json:"customerRouterIpv6InterfaceId,omitempty"` - // DataplaneVersion: [Output only for types PARTNER and DEDICATED. Not - // present for PARTNER_PROVIDER.] 
Dataplane version for this + // DataplaneVersion: [Output Only] Dataplane version for this // InterconnectAttachment. This field is only present for Dataplane // version 2 and higher. Absence of this field in the API output // indicates that the Dataplane is version 1. @@ -21218,16 +22347,16 @@ type InterconnectAttachment struct { // - The VLAN attachment carries only encrypted traffic that is // encrypted by an IPsec device, such as an HA VPN gateway or // third-party IPsec VPN. VMs cannot directly send traffic to, or - // receive traffic from, such a VLAN attachment. To use *IPsec-encrypted + // receive traffic from, such a VLAN attachment. To use *HA VPN over // Cloud Interconnect*, the VLAN attachment must be created with this - // option. Not currently available publicly. + // option. // // Possible values: // "IPSEC" - The interconnect attachment will carry only encrypted // traffic that is encrypted by an IPsec device such as HA VPN gateway; // VMs cannot directly send traffic to or receive traffic from such an - // interconnect attachment. To use IPsec-encrypted Cloud Interconnect, - // the interconnect attachment must be created with this option. + // interconnect attachment. To use HA VPN over Cloud Interconnect, the + // interconnect attachment must be created with this option. // "NONE" - This is the default value, which means the Interconnect // Attachment will carry unencrypted traffic. VMs will be able to send // traffic to or receive traffic from such interconnect attachment. @@ -21513,6 +22642,9 @@ type InterconnectAttachmentAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -21520,6 +22652,9 @@ type InterconnectAttachmentAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -21699,6 +22834,9 @@ type InterconnectAttachmentListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -21706,6 +22844,9 @@ type InterconnectAttachmentListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. 
+ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -21940,6 +23081,9 @@ type InterconnectAttachmentsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -21947,6 +23091,9 @@ type InterconnectAttachmentsScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -22095,6 +23242,27 @@ type InterconnectDiagnostics struct { // Interconnect is not bundled. ArpCaches []*InterconnectDiagnosticsARPEntry `json:"arpCaches,omitempty"` + // BundleAggregationType: The aggregation type of the bundle interface. + // + // Possible values: + // "BUNDLE_AGGREGATION_TYPE_LACP" - LACP is enabled. + // "BUNDLE_AGGREGATION_TYPE_STATIC" - LACP is disabled. + BundleAggregationType string `json:"bundleAggregationType,omitempty"` + + // BundleOperationalStatus: The operational status of the bundle + // interface. + // + // Possible values: + // "BUNDLE_OPERATIONAL_STATUS_DOWN" - If bundleAggregationType is + // LACP: LACP is not established and/or all links in the bundle have + // DOWN operational status. If bundleAggregationType is STATIC: one or + // more links in the bundle has DOWN operational status. + // "BUNDLE_OPERATIONAL_STATUS_UP" - If bundleAggregationType is LACP: + // LACP is established and at least one link in the bundle has UP + // operational status. If bundleAggregationType is STATIC: all links in + // the bundle (typically just one) have UP operational status. + BundleOperationalStatus string `json:"bundleOperationalStatus,omitempty"` + // Links: A list of InterconnectDiagnostics.LinkStatus objects, // describing the status for each link on the Interconnect. Links []*InterconnectDiagnosticsLinkStatus `json:"links,omitempty"` @@ -22284,6 +23452,15 @@ type InterconnectDiagnosticsLinkStatus struct { LacpStatus *InterconnectDiagnosticsLinkLACPStatus `json:"lacpStatus,omitempty"` + // OperationalStatus: The operational status of the link. + // + // Possible values: + // "LINK_OPERATIONAL_STATUS_DOWN" - The interface is unable to + // communicate with the remote end. + // "LINK_OPERATIONAL_STATUS_UP" - The interface has low level + // communication with the remote end. 
+ OperationalStatus string `json:"operationalStatus,omitempty"` + // ReceivingOpticalPower: An InterconnectDiagnostics.LinkOpticalPower // object, describing the current value and status of the received light // level. @@ -22395,6 +23572,9 @@ type InterconnectListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -22402,6 +23582,9 @@ type InterconnectListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -22700,6 +23883,9 @@ type InterconnectLocationListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -22707,6 +23893,9 @@ type InterconnectLocationListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -23308,6 +24497,9 @@ type LicensesListResponseWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -23315,6 +24507,9 @@ type LicensesListResponseWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. 
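The InterconnectDiagnostics additions above (BundleAggregationType, BundleOperationalStatus, and the per-link OperationalStatus) describe the health of the LACP or static bundle and of each member circuit. A minimal sketch of summarizing them, assuming these types live in the generated compute/v1 package (the import path, and obtaining the value via an Interconnects GetDiagnostics call, are assumptions not shown in this hunk):

package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path for these generated types
)

// summarizeBundle reports whether the interconnect bundle is up and how many
// member links are individually up, using only the fields added in this diff.
func summarizeBundle(diag *compute.InterconnectDiagnostics) string {
	up := 0
	for _, link := range diag.Links {
		if link.OperationalStatus == "LINK_OPERATIONAL_STATUS_UP" {
			up++
		}
	}
	return fmt.Sprintf("aggregation=%s bundle=%s links_up=%d/%d",
		diag.BundleAggregationType, diag.BundleOperationalStatus, up, len(diag.Links))
}

With BUNDLE_AGGREGATION_TYPE_STATIC a single DOWN link already drives the bundle status to DOWN, so the per-link count is the more useful signal in that mode.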
// "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -23450,6 +24645,40 @@ func (s *LocalDisk) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// LocalizedMessage: Provides a localized error message that is safe to +// return to the user which can be attached to an RPC error. +type LocalizedMessage struct { + // Locale: The locale used following the specification defined at + // https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", + // "fr-CH", "es-MX" + Locale string `json:"locale,omitempty"` + + // Message: The localized error message in the above locale. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Locale") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Locale") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LocalizedMessage) MarshalJSON() ([]byte, error) { + type NoMethod LocalizedMessage + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // LocationPolicy: Configuration for location policy among multiple // possible locations (e.g. preferences for zone selection among zones // in a single region). @@ -23459,6 +24688,24 @@ type LocationPolicy struct { // internal URLs, such as zones/us-central1-a. Locations map[string]LocationPolicyLocation `json:"locations,omitempty"` + // TargetShape: Strategy for distributing VMs across zones in a region. + // + // Possible values: + // "ANY" - GCE picks zones for creating VM instances to fulfill the + // requested number of VMs within present resource constraints and to + // maximize utilization of unused zonal reservations. Recommended for + // batch workloads that do not require high availability. + // "ANY_SINGLE_ZONE" - GCE always selects a single zone for all the + // VMs, optimizing for resource quotas, available reservations and + // general capacity. Recommended for batch workloads that cannot + // tollerate distribution over multiple zones. This the default shape in + // Bulk Insert and Capacity Advisor APIs. + // "BALANCED" - GCE prioritizes acquisition of resources, scheduling + // VMs in zones where resources are available while distributing VMs as + // evenly as possible across allowed zones to minimize the impact of + // zonal failure. Recommended for highly available serving workloads. + TargetShape string `json:"targetShape,omitempty"` + // ForceSendFields is a list of field names (e.g. "Locations") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any @@ -23483,7 +24730,12 @@ func (s *LocationPolicy) MarshalJSON() ([]byte, error) { } type LocationPolicyLocation struct { - // Preference: Preference for a given location. + // Constraints: Constraints that the caller requires on the result + // distribution in this zone. + Constraints *LocationPolicyLocationConstraints `json:"constraints,omitempty"` + + // Preference: Preference for a given location. Set to either ALLOW or + // DENY. // // Possible values: // "ALLOW" - Location is allowed for use. @@ -23491,7 +24743,7 @@ type LocationPolicyLocation struct { // "PREFERENCE_UNSPECIFIED" - Default value, unused. Preference string `json:"preference,omitempty"` - // ForceSendFields is a list of field names (e.g. "Preference") to + // ForceSendFields is a list of field names (e.g. "Constraints") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -23499,7 +24751,37 @@ type LocationPolicyLocation struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Preference") to include in + // NullFields is a list of field names (e.g. "Constraints") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LocationPolicyLocation) MarshalJSON() ([]byte, error) { + type NoMethod LocationPolicyLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LocationPolicyLocationConstraints: Per-zone constraints on location +// policy for this zone. +type LocationPolicyLocationConstraints struct { + // MaxCount: Maximum number of items that are allowed to be placed in + // this zone. The value must be non-negative. + MaxCount int64 `json:"maxCount,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxCount") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxCount") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -23508,8 +24790,8 @@ type LocationPolicyLocation struct { NullFields []string `json:"-"` } -func (s *LocationPolicyLocation) MarshalJSON() ([]byte, error) { - type NoMethod LocationPolicyLocation +func (s *LocationPolicyLocationConstraints) MarshalJSON() ([]byte, error) { + type NoMethod LocationPolicyLocationConstraints raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -23708,8 +24990,7 @@ type MachineImage struct { // GuestFlush: [Input Only] Whether to attempt an application consistent // machine image by informing the OS to prepare for the snapshot - // process. Currently only supported on Windows instances using the - // Volume Shadow Copy Service (VSS). + // process. GuestFlush bool `json:"guestFlush,omitempty"` // Id: [Output Only] A unique identifier for this machine image. The @@ -23897,6 +25178,9 @@ type MachineImageListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -23904,6 +25188,9 @@ type MachineImageListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -24239,6 +25526,9 @@ type MachineTypeAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -24246,6 +25536,9 @@ type MachineTypeAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -24422,6 +25715,9 @@ type MachineTypeListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
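The new LocationPolicy pieces above work together: TargetShape picks the spreading strategy while the per-zone Constraints.MaxCount caps how many instances may land in any one zone. A small sketch of building such a policy, assuming the generated compute/v1 package; attaching it to a bulk-insert request body is implied but outside this hunk:

package example

import compute "google.golang.org/api/compute/v1" // assumed import path

// anySpreadPolicy lets the service pick zones freely (TargetShape "ANY")
// while capping each listed zone (e.g. "zones/us-central1-a") at maxPerZone.
func anySpreadPolicy(zones []string, maxPerZone int64) *compute.LocationPolicy {
	locations := make(map[string]compute.LocationPolicyLocation, len(zones))
	for _, zone := range zones {
		locations[zone] = compute.LocationPolicyLocation{
			Preference:  "ALLOW",
			Constraints: &compute.LocationPolicyLocationConstraints{MaxCount: maxPerZone},
		}
	}
	return &compute.LocationPolicy{
		TargetShape: "ANY",
		Locations:   locations,
	}
}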
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -24429,6 +25725,9 @@ type MachineTypeListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -24584,6 +25883,9 @@ type MachineTypesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -24591,6 +25893,9 @@ type MachineTypesScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -24932,6 +26237,13 @@ type ManagedInstanceLastAttemptErrorsErrors struct { // Code: [Output Only] The error type identifier for this error. Code string `json:"code,omitempty"` + // ErrorDetails: [Output Only] An optional list of messages that contain + // the error details. There is a set of defined message types to use for + // providing details.The syntax depends on the error code. For example, + // QuotaExceededInfo will have details when the error code is + // QUOTA_EXCEEDED. + ErrorDetails []*ManagedInstanceLastAttemptErrorsErrorsErrorDetails `json:"errorDetails,omitempty"` + // Location: [Output Only] Indicates the field in the request that // caused the error. This property is optional. Location string `json:"location,omitempty"` @@ -24962,6 +26274,38 @@ func (s *ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ManagedInstanceLastAttemptErrorsErrorsErrorDetails struct { + ErrorInfo *ErrorInfo `json:"errorInfo,omitempty"` + + Help *Help `json:"help,omitempty"` + + LocalizedMessage *LocalizedMessage `json:"localizedMessage,omitempty"` + + QuotaInfo *QuotaExceededInfo `json:"quotaInfo,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ErrorInfo") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ErrorInfo") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedInstanceLastAttemptErrorsErrorsErrorDetails) MarshalJSON() ([]byte, error) { + type NoMethod ManagedInstanceLastAttemptErrorsErrorsErrorDetails + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ManagedInstanceVersion struct { // InstanceTemplate: [Output Only] The intended template of the // instance. This field is empty when current_action is one of { @@ -25230,6 +26574,15 @@ type Network struct { // field when you create the resource. Description string `json:"description,omitempty"` + // EnableUlaInternalIpv6: Enable ULA internal ipv6 on this network. + // Enabling this feature will assign a /48 from google defined ULA + // prefix fd20::/20. . + EnableUlaInternalIpv6 bool `json:"enableUlaInternalIpv6,omitempty"` + + // FirewallPolicy: [Output Only] URL of the firewall policy the network + // is associated with. + FirewallPolicy string `json:"firewallPolicy,omitempty"` + // GatewayIPv4: [Output Only] The gateway address for default routing // out of the network, selected by GCP. GatewayIPv4 string `json:"gatewayIPv4,omitempty"` @@ -25238,13 +26591,23 @@ type Network struct { // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` + // InternalIpv6Range: When enabling ula internal ipv6, caller optionally + // can specify the /48 range they want from the google defined ULA + // prefix fd20::/20. The input must be a valid /48 ULA IPv6 address and + // must be within the fd20::/20. Operation will fail if the speficied + // /48 is already in used by another resource. If the field is not + // speficied, then a /48 range will be randomly allocated from fd20::/20 + // and returned via this field. . + InternalIpv6Range string `json:"internalIpv6Range,omitempty"` + // Kind: [Output Only] Type of the resource. Always compute#network for // networks. Kind string `json:"kind,omitempty"` // Mtu: Maximum Transmission Unit in bytes. The minimum value for this - // field is 1460 and the maximum value is 1500 bytes. If unspecified, - // defaults to 1460. + // field is 1300 and the maximum value is 8896. The suggested value is + // 1500, which is the default MTU used on the Internet, or 8896 if you + // want to use Jumbo frames. If unspecified, the value defaults to 1460. Mtu int64 `json:"mtu,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -25256,6 +26619,16 @@ type Network struct { // digit. The last character must be a lowercase letter or digit. Name string `json:"name,omitempty"` + // NetworkFirewallPolicyEnforcementOrder: The network firewall policy + // enforcement order. Can be either AFTER_CLASSIC_FIREWALL or + // BEFORE_CLASSIC_FIREWALL. Defaults to AFTER_CLASSIC_FIREWALL if the + // field is not specified. 
+ // + // Possible values: + // "AFTER_CLASSIC_FIREWALL" + // "BEFORE_CLASSIC_FIREWALL" + NetworkFirewallPolicyEnforcementOrder string `json:"networkFirewallPolicyEnforcementOrder,omitempty"` + // Peerings: [Output Only] A list of network peerings for the resource. Peerings []*NetworkPeering `json:"peerings,omitempty"` @@ -25267,6 +26640,10 @@ type Network struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // Subnetworks: [Output Only] Server-defined fully-qualified URLs for // all subnetworks in this VPC network. Subnetworks []string `json:"subnetworks,omitempty"` @@ -25298,101 +26675,41 @@ func (s *Network) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkEndpoint: The network endpoint. -type NetworkEndpoint struct { - // Annotations: Metadata defined as annotations on the network endpoint. - Annotations map[string]string `json:"annotations,omitempty"` - - // Fqdn: Optional fully qualified domain name of network endpoint. This - // can only be specified when NetworkEndpointGroup.network_endpoint_type - // is NON_GCP_FQDN_PORT. - Fqdn string `json:"fqdn,omitempty"` - - // Instance: The name for a specific VM instance that the IP address - // belongs to. This is required for network endpoints of type - // GCE_VM_IP_PORT. The instance must be in the same zone of network - // endpoint group. The name must be 1-63 characters long, and comply - // with RFC1035. - Instance string `json:"instance,omitempty"` - - // IpAddress: Optional IPv4 address of network endpoint. The IP address - // must belong to a VM in Compute Engine (either the primary IP or as - // part of an aliased IP range). If the IP address is not specified, - // then the primary IP address for the VM instance in the network that - // the network endpoint group belongs to will be used. - IpAddress string `json:"ipAddress,omitempty"` - - // Port: Optional port number of network endpoint. If not specified, the - // defaultPort for the network endpoint group will be used. - Port int64 `json:"port,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Annotations") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` +// NetworkAttachment: NetworkAttachments A network attachment resource +// ... +type NetworkAttachment struct { + // ConnectionEndpoints: [Output Only] An array of connections for all + // the producers connected to this network attachment. + ConnectionEndpoints []*NetworkAttachmentConnectedEndpoint `json:"connectionEndpoints,omitempty"` - // NullFields is a list of field names (e.g. "Annotations") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
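The new Network fields above (EnableUlaInternalIpv6, InternalIpv6Range, the widened 1300-8896 MTU range, and NetworkFirewallPolicyEnforcementOrder) are all set when the network is created. A minimal sketch of a request body using them, assuming the compute/v1 package; the insert call that would send it is only noted in a comment because the client plumbing is outside this hunk:

package example

import compute "google.golang.org/api/compute/v1" // assumed import path

// jumboUlaNetwork builds a VPC body with ULA internal IPv6 enabled and a
// jumbo-frame MTU. InternalIpv6Range is left empty so the service allocates
// a /48 from fd20::/20 on its own.
func jumboUlaNetwork(name string) *compute.Network {
	return &compute.Network{
		Name:                                  name,
		Mtu:                                   8896, // allowed range is now 1300-8896
		EnableUlaInternalIpv6:                 true,
		NetworkFirewallPolicyEnforcementOrder: "AFTER_CLASSIC_FIREWALL", // the documented default
		// Typically passed to something like service.Networks.Insert(project, net);
		// that method name is assumed, not part of this hunk.
	}
}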
- NullFields []string `json:"-"` -} - -func (s *NetworkEndpoint) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpoint - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// NetworkEndpointGroup: Represents a collection of network endpoints. A -// network endpoint group (NEG) defines how a set of endpoints should be -// reached, whether they are reachable, and where they are located. For -// more information about using NEGs, see Setting up external HTTP(S) -// Load Balancing with internet NEGs, Setting up zonal NEGs, or Setting -// up external HTTP(S) Load Balancing with serverless NEGs. -type NetworkEndpointGroup struct { - // Annotations: Metadata defined as annotations on the network endpoint - // group. - Annotations map[string]string `json:"annotations,omitempty"` - - // AppEngine: Only valid when networkEndpointType is "SERVERLESS". Only - // one of cloudRun, appEngine or cloudFunction may be set. - AppEngine *NetworkEndpointGroupAppEngine `json:"appEngine,omitempty"` - - // CloudFunction: Only valid when networkEndpointType is "SERVERLESS". - // Only one of cloudRun, appEngine or cloudFunction may be set. - CloudFunction *NetworkEndpointGroupCloudFunction `json:"cloudFunction,omitempty"` - - // CloudRun: Only valid when networkEndpointType is "SERVERLESS". Only - // one of cloudRun, appEngine or cloudFunction may be set. - CloudRun *NetworkEndpointGroupCloudRun `json:"cloudRun,omitempty"` + // Possible values: + // "ACCEPT_AUTOMATIC" + // "ACCEPT_MANUAL" + // "INVALID" + ConnectionPreference string `json:"connectionPreference,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // DefaultPort: The default port used if the port number is not - // specified in the network endpoint. - DefaultPort int64 `json:"defaultPort,omitempty"` - // Description: An optional description of this resource. Provide this // property when you create the resource. Description string `json:"description,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. + // Fingerprint: [Output Only] Fingerprint of this resource. A hash of + // the contents stored in this object. This field is used in optimistic + // locking. An up-to-date fingerprint must be provided in order to + // patch. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource type. The + // server generates this identifier. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always - // compute#networkEndpointGroup for network endpoint group. + // Kind: [Output Only] Type of the resource. Kind string `json:"kind,omitempty"` - // Name: Name of the resource; provided by the client when the resource + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means @@ -25401,95 +26718,76 @@ type NetworkEndpointGroup struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Network: The URL of the network to which all network endpoints in the - // NEG belong. Uses "default" project network if unspecified. 
+ // Network: [Output Only] The URL of the network which the Network + // Attachment belongs to. Network string `json:"network,omitempty"` - // NetworkEndpointType: Type of network endpoints in this network - // endpoint group. Can be one of GCE_VM_IP_PORT, - // NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, INTERNET_IP_PORT, - // SERVERLESS, PRIVATE_SERVICE_CONNECT. - // - // Possible values: - // "GCE_VM_IP" - The network endpoint is represented by an IP address. - // "GCE_VM_IP_PORT" - The network endpoint is represented by IP - // address and port pair. - // "INTERNET_FQDN_PORT" - The network endpoint is represented by fully - // qualified domain name and port. - // "INTERNET_IP_PORT" - The network endpoint is represented by an - // internet IP address and port. - // "NON_GCP_PRIVATE_IP_PORT" - The network endpoint is represented by - // an IP address and port. The endpoint belongs to a VM or pod running - // in a customer's on-premises. - // "PRIVATE_SERVICE_CONNECT" - The network endpoint is either public - // Google APIs or services exposed by other GCP Project with a Service - // Attachment. The connection is set up by private service connect - // "SERVERLESS" - The network endpoint is handled by specified - // serverless infrastructure. - NetworkEndpointType string `json:"networkEndpointType,omitempty"` + // ProducerAcceptLists: Projects that are allowed to connect to this + // network attachment. The project can be specified using its id or + // number. + ProducerAcceptLists []string `json:"producerAcceptLists,omitempty"` - // PscTargetService: The target service url used to set up private - // service connection to a Google API. An example value is: - // "asia-northeast3-cloudkms.googleapis.com" - PscTargetService string `json:"pscTargetService,omitempty"` + // ProducerRejectLists: Projects that are not allowed to connect to this + // network attachment. The project can be specified using its id or + // number. + ProducerRejectLists []string `json:"producerRejectLists,omitempty"` - // Region: [Output Only] The URL of the region where the network - // endpoint group is located. + // Region: [Output Only] URL of the region where the network attachment + // resides. This field applies only to the region resource. You must + // specify this field as part of the HTTP request URL. It is not + // settable as a field in the request body. Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Size: [Output only] Number of network endpoints in the network - // endpoint group. - Size int64 `json:"size,omitempty"` - - // Subnetwork: Optional URL of the subnetwork to which all network - // endpoints in the NEG belong. - Subnetwork string `json:"subnetwork,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource's + // resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` - // Zone: [Output Only] The URL of the zone where the network endpoint - // group is located. - Zone string `json:"zone,omitempty"` + // Subnetworks: An array of URLs where each entry is the URL of a subnet + // provided by the service consumer to use for endpoints in the + // producers that connect to this network attachment. + Subnetworks []string `json:"subnetworks,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. 
"Annotations") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "ConnectionEndpoints") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Annotations") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "ConnectionEndpoints") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *NetworkEndpointGroup) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroup +func (s *NetworkAttachment) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachment raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEndpointGroupAggregatedList struct { +// NetworkAttachmentAggregatedList: Contains a list of +// NetworkAttachmentsScopedList. +type NetworkAttachmentAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NetworkEndpointGroupsScopedList resources. - Items map[string]NetworkEndpointGroupsScopedList `json:"items,omitempty"` + // Items: A list of NetworkAttachmentsScopedList resources. + Items map[string]NetworkAttachmentsScopedList `json:"items,omitempty"` - // Kind: [Output Only] The resource type, which is always - // compute#networkEndpointGroupAggregatedList for aggregated lists of - // network endpoint groups. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -25503,11 +26801,8 @@ type NetworkEndpointGroupAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *NetworkEndpointGroupAggregatedListWarning `json:"warning,omitempty"` + Warning *NetworkAttachmentAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. 
@@ -25530,15 +26825,15 @@ type NetworkEndpointGroupAggregatedList struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupAggregatedList +func (s *NetworkAttachmentAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkEndpointGroupAggregatedListWarning: [Output Only] -// Informational warning message. -type NetworkEndpointGroupAggregatedListWarning struct { +// NetworkAttachmentAggregatedListWarning: [Output Only] Informational +// warning message. +type NetworkAttachmentAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -25560,6 +26855,9 @@ type NetworkEndpointGroupAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -25567,6 +26865,9 @@ type NetworkEndpointGroupAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -25600,7 +26901,7 @@ type NetworkEndpointGroupAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NetworkEndpointGroupAggregatedListWarningData `json:"data,omitempty"` + Data []*NetworkAttachmentAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -25623,13 +26924,13 @@ type NetworkEndpointGroupAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupAggregatedListWarning +func (s *NetworkAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEndpointGroupAggregatedListWarningData struct { +type NetworkAttachmentAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -25660,132 +26961,47 @@ type NetworkEndpointGroupAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupAggregatedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// NetworkEndpointGroupAppEngine: Configuration for an App Engine -// network endpoint group (NEG). The service is optional, may be -// provided explicitly or in the URL mask. The version is optional and -// can only be provided explicitly or in the URL mask when service is -// present. Note: App Engine service must be in the same project and -// located in the same region as the Serverless NEG. -type NetworkEndpointGroupAppEngine struct { - // Service: Optional serving service. The service name is case-sensitive - // and must be 1-63 characters long. Example value: "default", - // "my-service". - Service string `json:"service,omitempty"` - - // UrlMask: A template to parse service and version fields from a - // request URL. URL mask allows for routing to multiple App Engine - // services without having to create multiple Network Endpoint Groups - // and backend services. For example, the request URLs - // "foo1-dot-appname.appspot.com/v1" and - // "foo1-dot-appname.appspot.com/v2" can be backed by the same - // Serverless NEG with URL mask "-dot-appname.appspot.com/". The URL - // mask will parse them to { service = "foo1", version = "v1" } and { - // service = "foo1", version = "v2" } respectively. - UrlMask string `json:"urlMask,omitempty"` - - // Version: Optional serving version. The version name is case-sensitive - // and must be 1-100 characters long. Example value: "v1", "v2". - Version string `json:"version,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Service") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Service") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *NetworkEndpointGroupAppEngine) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupAppEngine +func (s *NetworkAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkEndpointGroupCloudFunction: Configuration for a Cloud Function -// network endpoint group (NEG). The function must be provided -// explicitly or in the URL mask. Note: Cloud Function must be in the -// same project and located in the same region as the Serverless NEG. 
-type NetworkEndpointGroupCloudFunction struct { - // Function: A user-defined name of the Cloud Function. The function - // name is case-sensitive and must be 1-63 characters long. Example - // value: "func1". - Function string `json:"function,omitempty"` - - // UrlMask: A template to parse function field from a request URL. URL - // mask allows for routing to multiple Cloud Functions without having to - // create multiple Network Endpoint Groups and backend services. For - // example, request URLs " mydomain.com/function1" and - // "mydomain.com/function2" can be backed by the same Serverless NEG - // with URL mask "/". The URL mask will parse them to { function = - // "function1" } and { function = "function2" } respectively. - UrlMask string `json:"urlMask,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Function") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Function") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} +// NetworkAttachmentConnectedEndpoint: [Output Only] A connection +// connected to this network attachment. +type NetworkAttachmentConnectedEndpoint struct { + // IpAddress: The IP address assigned to the producer instance network + // interface. This value will be a range in case of Serverless. + IpAddress string `json:"ipAddress,omitempty"` -func (s *NetworkEndpointGroupCloudFunction) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupCloudFunction - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // ProjectIdOrNum: The project id or number of the interface to which + // the IP was assigned. + ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` -// NetworkEndpointGroupCloudRun: Configuration for a Cloud Run network -// endpoint group (NEG). The service must be provided explicitly or in -// the URL mask. The tag is optional, may be provided explicitly or in -// the URL mask. Note: Cloud Run service must be in the same project and -// located in the same region as the Serverless NEG. -type NetworkEndpointGroupCloudRun struct { - // Service: Cloud Run service is the main resource of Cloud Run. The - // service must be 1-63 characters long, and comply with RFC1035. - // Example value: "run-service". - Service string `json:"service,omitempty"` + // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork + SecondaryIpCidrRanges []string `json:"secondaryIpCidrRanges,omitempty"` - // Tag: Optional Cloud Run tag represents the "named-revision" to - // provide additional fine-grained traffic routing information. The tag - // must be 1-63 characters long, and comply with RFC1035. Example value: - // "revision-0010". - Tag string `json:"tag,omitempty"` + // Status: The status of a connected endpoint to this network + // attachment. 
+ // + // Possible values: + // "ACCEPTED" - The consumer allows traffic from the producer to reach + // its VPC. + // "CLOSED" - The consumer network attachment no longer exists. + // "NEEDS_ATTENTION" - The consumer needs to take further action + // before traffic can be served. + // "PENDING" - The consumer neither allows nor prohibits traffic from + // the producer to reach its VPC. + // "REJECTED" - The consumer prohibits traffic from the producer to + // reach its VPC. + // "STATUS_UNSPECIFIED" + Status string `json:"status,omitempty"` - // UrlMask: A template to parse service and tag fields from a request - // URL. URL mask allows for routing to multiple Run services without - // having to create multiple network endpoint groups and backend - // services. For example, request URLs "foo1.domain.com/bar1" and - // "foo1.domain.com/bar2" can be backed by the same Serverless Network - // Endpoint Group (NEG) with URL mask ".domain.com/". The URL mask will - // parse them to { service="bar1", tag="foo1" } and { service="bar2", - // tag="foo2" } respectively. - UrlMask string `json:"urlMask,omitempty"` + // Subnetwork: The subnetwork used to assign the IP to the producer + // instance network interface. + Subnetwork string `json:"subnetwork,omitempty"` - // ForceSendFields is a list of field names (e.g. "Service") to + // ForceSendFields is a list of field names (e.g. "IpAddress") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -25793,7 +27009,7 @@ type NetworkEndpointGroupCloudRun struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Service") to include in + // NullFields is a list of field names (e.g. "IpAddress") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -25802,22 +27018,20 @@ type NetworkEndpointGroupCloudRun struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupCloudRun) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupCloudRun +func (s *NetworkAttachmentConnectedEndpoint) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentConnectedEndpoint raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEndpointGroupList struct { +type NetworkAttachmentList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NetworkEndpointGroup resources. - Items []*NetworkEndpointGroup `json:"items,omitempty"` + // Items: A list of NetworkAttachment resources. + Items []*NetworkAttachment `json:"items,omitempty"` - // Kind: [Output Only] The resource type, which is always - // compute#networkEndpointGroupList for network endpoint group lists. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -25832,7 +27046,7 @@ type NetworkEndpointGroupList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. 
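Each producer connection surfaces as a NetworkAttachmentConnectedEndpoint whose Status says whether traffic is being admitted. A small sketch that picks out the connections still awaiting a decision, using only the status values listed above (compute/v1 package path assumed):

package example

import compute "google.golang.org/api/compute/v1" // assumed import path

// pendingEndpoints returns the producer connections that still need a
// decision or attention on the consumer side of the attachment.
func pendingEndpoints(att *compute.NetworkAttachment) []*compute.NetworkAttachmentConnectedEndpoint {
	var out []*compute.NetworkAttachmentConnectedEndpoint
	for _, ep := range att.ConnectionEndpoints {
		switch ep.Status {
		case "PENDING", "NEEDS_ATTENTION":
			out = append(out, ep)
		case "ACCEPTED", "REJECTED", "CLOSED":
			// already resolved; nothing to do
		}
	}
	return out
}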
- Warning *NetworkEndpointGroupListWarning `json:"warning,omitempty"` + Warning *NetworkAttachmentListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -25855,15 +27069,15 @@ type NetworkEndpointGroupList struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupList) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupList +func (s *NetworkAttachmentList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkEndpointGroupListWarning: [Output Only] Informational warning +// NetworkAttachmentListWarning: [Output Only] Informational warning // message. -type NetworkEndpointGroupListWarning struct { +type NetworkAttachmentListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -25885,6 +27099,9 @@ type NetworkEndpointGroupListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -25892,6 +27109,9 @@ type NetworkEndpointGroupListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -25925,7 +27145,7 @@ type NetworkEndpointGroupListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NetworkEndpointGroupListWarningData `json:"data,omitempty"` + Data []*NetworkAttachmentListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -25948,13 +27168,13 @@ type NetworkEndpointGroupListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupListWarning +func (s *NetworkAttachmentListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEndpointGroupListWarningData struct { +type NetworkAttachmentListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -25985,25 +27205,30 @@ type NetworkEndpointGroupListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupListWarningData +func (s *NetworkAttachmentListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEndpointGroupsAttachEndpointsRequest struct { - // NetworkEndpoints: The list of network endpoints to be attached. - NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` +type NetworkAttachmentsScopedList struct { + // NetworkAttachments: A list of NetworkAttachments contained in this + // scope. + NetworkAttachments []*NetworkAttachment `json:"networkAttachments,omitempty"` - // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to - // unconditionally include in API requests. By default, fields with + // Warning: Informational warning which replaces the list of network + // attachments when the list is empty. + Warning *NetworkAttachmentsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkAttachments") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NetworkEndpoints") to + // NullFields is a list of field names (e.g. "NetworkAttachments") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -26013,17 +27238,89 @@ type NetworkEndpointGroupsAttachEndpointsRequest struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupsAttachEndpointsRequest +func (s *NetworkAttachmentsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentsScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEndpointGroupsDetachEndpointsRequest struct { - // NetworkEndpoints: The list of network endpoints to be detached. - NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` +// NetworkAttachmentsScopedListWarning: Informational warning which +// replaces the list of network attachments when the list is empty. +type NetworkAttachmentsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. 
+ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*NetworkAttachmentsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -26031,35 +27328,36 @@ type NetworkEndpointGroupsDetachEndpointsRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NetworkEndpoints") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupsDetachEndpointsRequest +func (s *NetworkAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentsScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEndpointGroupsListEndpointsRequest struct { - // HealthStatus: Optional query parameter for showing the health status - // of each network endpoint. Valid options are SKIP or SHOW. If you - // don't specify this parameter, the health status of network endpoints - // will not be provided. - // - // Possible values: - // "SHOW" - Show the health status for each network endpoint. Impacts - // latency of the call. - // "SKIP" - Health status for network endpoints will not be provided. - HealthStatus string `json:"healthStatus,omitempty"` +type NetworkAttachmentsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -26067,32 +27365,115 @@ type NetworkEndpointGroupsListEndpointsRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthStatus") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. 
However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsListEndpointsRequest) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupsListEndpointsRequest +func (s *NetworkAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentsScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEndpointGroupsListNetworkEndpoints struct { +// NetworkEdgeSecurityService: Represents a Google Cloud Armor network +// edge security service resource. +type NetworkEdgeSecurityService struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a NetworkEdgeSecurityService. An + // up-to-date fingerprint must be provided in order to update the + // NetworkEdgeSecurityService, otherwise the request will fail with + // error 412 conditionNotMet. To see the latest fingerprint, make a + // get() request to retrieve a NetworkEdgeSecurityService. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output only] Type of the resource. Always + // compute#networkEdgeSecurityService for NetworkEdgeSecurityServices + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Region: [Output Only] URL of the region where the resource resides. + // You must specify this field as part of the HTTP request URL. It is + // not settable as a field in the request body. + Region string `json:"region,omitempty"` + + // SecurityPolicy: The resource URL for the network edge security + // service associated with this network edge security service. + SecurityPolicy string `json:"securityPolicy,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. 
+ SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEdgeSecurityService) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityService + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkEdgeSecurityServiceAggregatedList struct { + Etag string `json:"etag,omitempty"` + // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NetworkEndpointWithHealthStatus resources. - Items []*NetworkEndpointWithHealthStatus `json:"items,omitempty"` + // Items: A list of NetworkEdgeSecurityServicesScopedList resources. + Items map[string]NetworkEdgeSecurityServicesScopedList `json:"items,omitempty"` - // Kind: [Output Only] The resource type, which is always - // compute#networkEndpointGroupsListNetworkEndpoints for the list of - // network endpoints in the specified network endpoint group. + // Kind: [Output Only] Type of resource. Always + // compute#networkEdgeSecurityServiceAggregatedList for lists of Network + // Edge Security Services. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -26103,14 +27484,20 @@ type NetworkEndpointGroupsListNetworkEndpoints struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *NetworkEndpointGroupsListNetworkEndpointsWarning `json:"warning,omitempty"` + Warning *NetworkEdgeSecurityServiceAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -26118,7 +27505,7 @@ type NetworkEndpointGroupsListNetworkEndpoints struct { // This may be used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Etag") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -26127,15 +27514,15 @@ type NetworkEndpointGroupsListNetworkEndpoints struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsListNetworkEndpoints) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupsListNetworkEndpoints +func (s *NetworkEdgeSecurityServiceAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityServiceAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkEndpointGroupsListNetworkEndpointsWarning: [Output Only] +// NetworkEdgeSecurityServiceAggregatedListWarning: [Output Only] // Informational warning message. -type NetworkEndpointGroupsListNetworkEndpointsWarning struct { +type NetworkEdgeSecurityServiceAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -26157,6 +27544,9 @@ type NetworkEndpointGroupsListNetworkEndpointsWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -26164,6 +27554,9 @@ type NetworkEndpointGroupsListNetworkEndpointsWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -26197,7 +27590,7 @@ type NetworkEndpointGroupsListNetworkEndpointsWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NetworkEndpointGroupsListNetworkEndpointsWarningData `json:"data,omitempty"` + Data []*NetworkEdgeSecurityServiceAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
@@ -26220,13 +27613,13 @@ type NetworkEndpointGroupsListNetworkEndpointsWarning struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsListNetworkEndpointsWarning) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupsListNetworkEndpointsWarning +func (s *NetworkEdgeSecurityServiceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityServiceAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEndpointGroupsListNetworkEndpointsWarningData struct { +type NetworkEdgeSecurityServiceAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -26257,50 +27650,49 @@ type NetworkEndpointGroupsListNetworkEndpointsWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsListNetworkEndpointsWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupsListNetworkEndpointsWarningData +func (s *NetworkEdgeSecurityServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityServiceAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEndpointGroupsScopedList struct { - // NetworkEndpointGroups: [Output Only] The list of network endpoint - // groups that are contained in this scope. - NetworkEndpointGroups []*NetworkEndpointGroup `json:"networkEndpointGroups,omitempty"` +type NetworkEdgeSecurityServicesScopedList struct { + // NetworkEdgeSecurityServices: A list of NetworkEdgeSecurityServices + // contained in this scope. + NetworkEdgeSecurityServices []*NetworkEdgeSecurityService `json:"networkEdgeSecurityServices,omitempty"` - // Warning: [Output Only] An informational warning that replaces the - // list of network endpoint groups when the list is empty. - Warning *NetworkEndpointGroupsScopedListWarning `json:"warning,omitempty"` + // Warning: Informational warning which replaces the list of security + // policies when the list is empty. + Warning *NetworkEdgeSecurityServicesScopedListWarning `json:"warning,omitempty"` // ForceSendFields is a list of field names (e.g. - // "NetworkEndpointGroups") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // "NetworkEdgeSecurityServices") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NetworkEndpointGroups") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. 
It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. + // "NetworkEdgeSecurityServices") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupsScopedList +func (s *NetworkEdgeSecurityServicesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityServicesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkEndpointGroupsScopedListWarning: [Output Only] An -// informational warning that replaces the list of network endpoint -// groups when the list is empty. -type NetworkEndpointGroupsScopedListWarning struct { +// NetworkEdgeSecurityServicesScopedListWarning: Informational warning +// which replaces the list of security policies when the list is empty. +type NetworkEdgeSecurityServicesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -26322,6 +27714,9 @@ type NetworkEndpointGroupsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -26329,6 +27724,9 @@ type NetworkEndpointGroupsScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -26362,7 +27760,7 @@ type NetworkEndpointGroupsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NetworkEndpointGroupsScopedListWarningData `json:"data,omitempty"` + Data []*NetworkEdgeSecurityServicesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
@@ -26385,13 +27783,13 @@ type NetworkEndpointGroupsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupsScopedListWarning +func (s *NetworkEdgeSecurityServicesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityServicesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEndpointGroupsScopedListWarningData struct { +type NetworkEdgeSecurityServicesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -26422,20 +27820,41 @@ type NetworkEndpointGroupsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkEndpointGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointGroupsScopedListWarningData +func (s *NetworkEdgeSecurityServicesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityServicesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEndpointWithHealthStatus struct { - // Healths: [Output only] The health status of network endpoint; - Healths []*HealthStatusForNetworkEndpoint `json:"healths,omitempty"` +// NetworkEndpoint: The network endpoint. +type NetworkEndpoint struct { + // Annotations: Metadata defined as annotations on the network endpoint. + Annotations map[string]string `json:"annotations,omitempty"` - // NetworkEndpoint: [Output only] The network endpoint; - NetworkEndpoint *NetworkEndpoint `json:"networkEndpoint,omitempty"` + // Fqdn: Optional fully qualified domain name of network endpoint. This + // can only be specified when NetworkEndpointGroup.network_endpoint_type + // is NON_GCP_FQDN_PORT. + Fqdn string `json:"fqdn,omitempty"` - // ForceSendFields is a list of field names (e.g. "Healths") to + // Instance: The name for a specific VM instance that the IP address + // belongs to. This is required for network endpoints of type + // GCE_VM_IP_PORT. The instance must be in the same zone of network + // endpoint group. The name must be 1-63 characters long, and comply + // with RFC1035. + Instance string `json:"instance,omitempty"` + + // IpAddress: Optional IPv4 address of network endpoint. The IP address + // must belong to a VM in Compute Engine (either the primary IP or as + // part of an aliased IP range). If the IP address is not specified, + // then the primary IP address for the VM instance in the network that + // the network endpoint group belongs to will be used. + IpAddress string `json:"ipAddress,omitempty"` + + // Port: Optional port number of network endpoint. If not specified, the + // defaultPort for the network endpoint group will be used. + Port int64 `json:"port,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Annotations") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -26443,129 +27862,132 @@ type NetworkEndpointWithHealthStatus struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Healths") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Annotations") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworkEndpointWithHealthStatus) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEndpointWithHealthStatus +func (s *NetworkEndpoint) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpoint raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkInterface: A network interface resource attached to an -// instance. -type NetworkInterface struct { - // AccessConfigs: An array of configurations for this interface. - // Currently, only one access config, ONE_TO_ONE_NAT, is supported. If - // there are no accessConfigs specified, then this instance will have no - // external internet access. - AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"` +// NetworkEndpointGroup: Represents a collection of network endpoints. A +// network endpoint group (NEG) defines how a set of endpoints should be +// reached, whether they are reachable, and where they are located. For +// more information about using NEGs, see Setting up external HTTP(S) +// Load Balancing with internet NEGs, Setting up zonal NEGs, or Setting +// up external HTTP(S) Load Balancing with serverless NEGs. +type NetworkEndpointGroup struct { + // Annotations: Metadata defined as annotations on the network endpoint + // group. + Annotations map[string]string `json:"annotations,omitempty"` - // AliasIpRanges: An array of alias IP ranges for this network - // interface. You can only specify this field for network interfaces in - // VPC networks. - AliasIpRanges []*AliasIpRange `json:"aliasIpRanges,omitempty"` + // AppEngine: Only valid when networkEndpointType is "SERVERLESS". Only + // one of cloudRun, appEngine or cloudFunction may be set. + AppEngine *NetworkEndpointGroupAppEngine `json:"appEngine,omitempty"` - // Fingerprint: Fingerprint hash of contents stored in this network - // interface. This field will be ignored when inserting an Instance or - // adding a NetworkInterface. An up-to-date fingerprint must be provided - // in order to update the NetworkInterface. The request will fail with - // error 400 Bad Request if the fingerprint is not provided, or 412 - // Precondition Failed if the fingerprint is out of date. - Fingerprint string `json:"fingerprint,omitempty"` + // CloudFunction: Only valid when networkEndpointType is "SERVERLESS". + // Only one of cloudRun, appEngine or cloudFunction may be set. + CloudFunction *NetworkEndpointGroupCloudFunction `json:"cloudFunction,omitempty"` - // Ipv6AccessConfigs: An array of IPv6 access configurations for this - // interface. Currently, only one IPv6 access config, DIRECT_IPV6, is - // supported. If there is no ipv6AccessConfig specified, then this - // instance will have no external IPv6 Internet access. 
- Ipv6AccessConfigs []*AccessConfig `json:"ipv6AccessConfigs,omitempty"` + // CloudRun: Only valid when networkEndpointType is "SERVERLESS". Only + // one of cloudRun, appEngine or cloudFunction may be set. + CloudRun *NetworkEndpointGroupCloudRun `json:"cloudRun,omitempty"` - // Ipv6AccessType: [Output Only] One of EXTERNAL, INTERNAL to indicate - // whether the IP can be accessed from the Internet. This field is - // always inherited from its subnetwork. Valid only if stackType is - // IPV4_IPV6. - // - // Possible values: - // "EXTERNAL" - This network interface can have external IPv6. - // "UNSPECIFIED_IPV6_ACCESS_TYPE" - IPv6 access type not set. Means - // this network interface hasn't been turned on IPv6 yet. - Ipv6AccessType string `json:"ipv6AccessType,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Ipv6Address: An IPv6 internal network address for this network - // interface. - Ipv6Address string `json:"ipv6Address,omitempty"` + // DefaultPort: The default port used if the port number is not + // specified in the network endpoint. + DefaultPort int64 `json:"defaultPort,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` // Kind: [Output Only] Type of the resource. Always - // compute#networkInterface for network interfaces. + // compute#networkEndpointGroup for network endpoint group. Kind string `json:"kind,omitempty"` - // Name: [Output Only] The name of the network interface, which is - // generated by the server. For network devices, these are eth0, eth1, - // etc. + // Name: Name of the resource; provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Network: URL of the VPC network resource for this instance. When - // creating an instance, if neither the network nor the subnetwork is - // specified, the default network global/networks/default is used. If - // the selected project doesn't have the default network, you must - // specify a network or subnet. If the network is not specified but the - // subnetwork is specified, the network is inferred. If you specify this - // property, you can specify the network as a full or partial URL. For - // example, the following are all valid URLs: - - // https://www.googleapis.com/compute/v1/projects/project/global/networks/ - // network - projects/project/global/networks/network - - // global/networks/default + // Network: The URL of the network to which all network endpoints in the + // NEG belong. Uses "default" project network if unspecified. Network string `json:"network,omitempty"` - // NetworkIP: An IPv4 internal IP address to assign to the instance for - // this network interface. If not specified by the user, an unused - // internal IP is assigned by the system. 
- NetworkIP string `json:"networkIP,omitempty"` - - // NicType: The type of vNIC to be used on this interface. This may be - // gVNIC or VirtioNet. + // NetworkEndpointType: Type of network endpoints in this network + // endpoint group. Can be one of GCE_VM_IP, GCE_VM_IP_PORT, + // NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, INTERNET_IP_PORT, + // SERVERLESS, PRIVATE_SERVICE_CONNECT. // // Possible values: - // "GVNIC" - GVNIC - // "UNSPECIFIED_NIC_TYPE" - No type specified. - // "VIRTIO_NET" - VIRTIO - NicType string `json:"nicType,omitempty"` + // "GCE_VM_IP" - The network endpoint is represented by an IP address. + // "GCE_VM_IP_PORT" - The network endpoint is represented by IP + // address and port pair. + // "INTERNET_FQDN_PORT" - The network endpoint is represented by fully + // qualified domain name and port. + // "INTERNET_IP_PORT" - The network endpoint is represented by an + // internet IP address and port. + // "NON_GCP_PRIVATE_IP_PORT" - The network endpoint is represented by + // an IP address and port. The endpoint belongs to a VM or pod running + // in a customer's on-premises. + // "PRIVATE_SERVICE_CONNECT" - The network endpoint is either public + // Google APIs or services exposed by other GCP Project with a Service + // Attachment. The connection is set up by private service connect + // "SERVERLESS" - The network endpoint is handled by specified + // serverless infrastructure. + NetworkEndpointType string `json:"networkEndpointType,omitempty"` - // QueueCount: The networking queue count that's specified by users for - // the network interface. Both Rx and Tx queues will be set to this - // number. It'll be empty if not specified by the users. - QueueCount int64 `json:"queueCount,omitempty"` + PscData *NetworkEndpointGroupPscData `json:"pscData,omitempty"` - // StackType: The stack type for this network interface to identify - // whether the IPv6 feature is enabled or not. If not specified, - // IPV4_ONLY will be used. This field can be both set at instance - // creation and update network interface operations. - // - // Possible values: - // "IPV4_IPV6" - The network interface can have both IPv4 and IPv6 - // addresses. - // "IPV4_ONLY" - The network interface will be assigned IPv4 address. - // "UNSPECIFIED_STACK_TYPE" - StackType string `json:"stackType,omitempty"` + // PscTargetService: The target service url used to set up private + // service connection to a Google API or a PSC Producer Service + // Attachment. An example value is: + // "asia-northeast3-cloudkms.googleapis.com" + PscTargetService string `json:"pscTargetService,omitempty"` - // Subnetwork: The URL of the Subnetwork resource for this instance. If - // the network resource is in legacy mode, do not specify this field. If - // the network is in auto subnet mode, specifying the subnetwork is - // optional. If the network is in custom subnet mode, specifying the - // subnetwork is required. If you specify this field, you can specify - // the subnetwork as a full or partial URL. For example, the following - // are all valid URLs: - - // https://www.googleapis.com/compute/v1/projects/project/regions/region - // /subnetworks/subnetwork - regions/region/subnetworks/subnetwork + // Region: [Output Only] The URL of the region where the network + // endpoint group is located. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. 
+ SelfLink string `json:"selfLink,omitempty"` + + // Size: [Output only] Number of network endpoints in the network + // endpoint group. + Size int64 `json:"size,omitempty"` + + // Subnetwork: Optional URL of the subnetwork to which all network + // endpoints in the NEG belong. Subnetwork string `json:"subnetwork,omitempty"` - // ForceSendFields is a list of field names (e.g. "AccessConfigs") to + // Zone: [Output Only] The URL of the zone where the network endpoint + // group is located. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Annotations") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -26573,7 +27995,7 @@ type NetworkInterface struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AccessConfigs") to include + // NullFields is a list of field names (e.g. "Annotations") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -26582,23 +28004,23 @@ type NetworkInterface struct { NullFields []string `json:"-"` } -func (s *NetworkInterface) MarshalJSON() ([]byte, error) { - type NoMethod NetworkInterface +func (s *NetworkEndpointGroup) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroup raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkList: Contains a list of networks. -type NetworkList struct { +type NetworkEndpointGroupAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Network resources. - Items []*Network `json:"items,omitempty"` + // Items: A list of NetworkEndpointGroupsScopedList resources. + Items map[string]NetworkEndpointGroupsScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#networkList for - // lists of networks. + // Kind: [Output Only] The resource type, which is always + // compute#networkEndpointGroupAggregatedList for aggregated lists of + // network endpoint groups. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -26612,8 +28034,11 @@ type NetworkList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *NetworkListWarning `json:"warning,omitempty"` + Warning *NetworkEndpointGroupAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. 
@@ -26636,14 +28061,15 @@ type NetworkList struct { NullFields []string `json:"-"` } -func (s *NetworkList) MarshalJSON() ([]byte, error) { - type NoMethod NetworkList +func (s *NetworkEndpointGroupAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkListWarning: [Output Only] Informational warning message. -type NetworkListWarning struct { +// NetworkEndpointGroupAggregatedListWarning: [Output Only] +// Informational warning message. +type NetworkEndpointGroupAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -26665,6 +28091,9 @@ type NetworkListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -26672,6 +28101,9 @@ type NetworkListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -26705,7 +28137,7 @@ type NetworkListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NetworkListWarningData `json:"data,omitempty"` + Data []*NetworkEndpointGroupAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -26728,13 +28160,13 @@ type NetworkListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NetworkListWarning +func (s *NetworkEndpointGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkListWarningData struct { +type NetworkEndpointGroupAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -26765,84 +28197,41 @@ type NetworkListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NetworkListWarningData +func (s *NetworkEndpointGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkPeering: A network peering attached to a network resource. The -// message includes the peering name, peer network, peering state, and a -// flag indicating whether Google Compute Engine should automatically -// create routes for the peering. -type NetworkPeering struct { - // AutoCreateRoutes: This field will be deprecated soon. Use the - // exchange_subnet_routes field instead. Indicates whether full mesh - // connectivity is created and managed automatically between peered - // networks. Currently this field should always be true since Google - // Compute Engine will automatically create and manage subnetwork routes - // between two networks when peering state is ACTIVE. - AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - - // ExchangeSubnetRoutes: Indicates whether full mesh connectivity is - // created and managed automatically between peered networks. Currently - // this field should always be true since Google Compute Engine will - // automatically create and manage subnetwork routes between two - // networks when peering state is ACTIVE. - ExchangeSubnetRoutes bool `json:"exchangeSubnetRoutes,omitempty"` - - // ExportCustomRoutes: Whether to export the custom routes to peer - // network. The default value is false. - ExportCustomRoutes bool `json:"exportCustomRoutes,omitempty"` - - // ExportSubnetRoutesWithPublicIp: Whether subnet routes with public IP - // range are exported. The default value is true, all subnet routes are - // exported. IPv4 special-use ranges are always exported to peers and - // are not controlled by this field. - ExportSubnetRoutesWithPublicIp bool `json:"exportSubnetRoutesWithPublicIp,omitempty"` - - // ImportCustomRoutes: Whether to import the custom routes from peer - // network. The default value is false. - ImportCustomRoutes bool `json:"importCustomRoutes,omitempty"` - - // ImportSubnetRoutesWithPublicIp: Whether subnet routes with public IP - // range are imported. The default value is false. IPv4 special-use - // ranges are always imported from peers and are not controlled by this - // field. - ImportSubnetRoutesWithPublicIp bool `json:"importSubnetRoutesWithPublicIp,omitempty"` - - // Name: Name of this peering. Provided by the client when the peering - // is created. The name must comply with RFC1035. Specifically, the name - // must be 1-63 characters long and match regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase - // letter, and all the following characters must be a dash, lowercase - // letter, or digit, except the last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: The URL of the peer network. It can be either full URL or - // partial URL. The peer network may belong to a different project. If - // the partial URL does not contain project, it is assumed that the peer - // network is in the same project as the current network. 
- Network string `json:"network,omitempty"` - - // PeerMtu: Maximum Transmission Unit in bytes. - PeerMtu int64 `json:"peerMtu,omitempty"` +// NetworkEndpointGroupAppEngine: Configuration for an App Engine +// network endpoint group (NEG). The service is optional, may be +// provided explicitly or in the URL mask. The version is optional and +// can only be provided explicitly or in the URL mask when service is +// present. Note: App Engine service must be in the same project and +// located in the same region as the Serverless NEG. +type NetworkEndpointGroupAppEngine struct { + // Service: Optional serving service. The service name is case-sensitive + // and must be 1-63 characters long. Example value: "default", + // "my-service". + Service string `json:"service,omitempty"` - // State: [Output Only] State for the peering, either `ACTIVE` or - // `INACTIVE`. The peering is `ACTIVE` when there's a matching - // configuration in the peer network. - // - // Possible values: - // "ACTIVE" - Matching configuration exists on the peer. - // "INACTIVE" - There is no matching configuration on the peer, - // including the case when peer does not exist. - State string `json:"state,omitempty"` + // UrlMask: A template to parse service and version fields from a + // request URL. URL mask allows for routing to multiple App Engine + // services without having to create multiple Network Endpoint Groups + // and backend services. For example, the request URLs + // "foo1-dot-appname.appspot.com/v1" and + // "foo1-dot-appname.appspot.com/v2" can be backed by the same + // Serverless NEG with URL mask + // "<service>-dot-appname.appspot.com/<version>". The URL mask will + // parse them to { service = "foo1", version = "v1" } and { service = + // "foo1", version = "v2" } respectively. + UrlMask string `json:"urlMask,omitempty"` - // StateDetails: [Output Only] Details about the current state of the - // peering. - StateDetails string `json:"stateDetails,omitempty"` + // Version: Optional serving version. The version name is case-sensitive + // and must be 1-100 characters long. Example value: "v1", "v2". + Version string `json:"version,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to + // ForceSendFields is a list of field names (e.g. "Service") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -26850,70 +28239,41 @@ type NetworkPeering struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoCreateRoutes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests.
NullFields []string `json:"-"` } -func (s *NetworkPeering) MarshalJSON() ([]byte, error) { - type NoMethod NetworkPeering +func (s *NetworkEndpointGroupAppEngine) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupAppEngine raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkPerformanceConfig struct { - // Possible values: - // "DEFAULT" - // "TIER_1" - TotalEgressBandwidthTier string `json:"totalEgressBandwidthTier,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "TotalEgressBandwidthTier") to unconditionally include in API - // requests. By default, fields with empty or default values are omitted - // from API requests. However, any non-pointer, non-interface field - // appearing in ForceSendFields will be sent to the server regardless of - // whether the field is empty or not. This may be used to include empty - // fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "TotalEgressBandwidthTier") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *NetworkPerformanceConfig) MarshalJSON() ([]byte, error) { - type NoMethod NetworkPerformanceConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +// NetworkEndpointGroupCloudFunction: Configuration for a Cloud Function +// network endpoint group (NEG). The function must be provided +// explicitly or in the URL mask. Note: Cloud Function must be in the +// same project and located in the same region as the Serverless NEG. +type NetworkEndpointGroupCloudFunction struct { + // Function: A user-defined name of the Cloud Function. The function + // name is case-sensitive and must be 1-63 characters long. Example + // value: "func1". + Function string `json:"function,omitempty"` -// NetworkRoutingConfig: A routing configuration attached to a network -// resource. The message includes the list of routers associated with -// the network, and a flag indicating the type of routing behavior to -// enforce network-wide. -type NetworkRoutingConfig struct { - // RoutingMode: The network-wide routing mode to use. If set to - // REGIONAL, this network's Cloud Routers will only advertise routes - // with subnets of this network in the same region as the router. If set - // to GLOBAL, this network's Cloud Routers will advertise routes with - // all subnets of this network, across regions. - // - // Possible values: - // "GLOBAL" - // "REGIONAL" - RoutingMode string `json:"routingMode,omitempty"` + // UrlMask: A template to parse function field from a request URL. URL + // mask allows for routing to multiple Cloud Functions without having to + // create multiple Network Endpoint Groups and backend services. For + // example, request URLs " mydomain.com/function1" and + // "mydomain.com/function2" can be backed by the same Serverless NEG + // with URL mask "/<function>". The URL mask will parse them to { + // function = "function1" } and { function = "function2" } respectively. + UrlMask string `json:"urlMask,omitempty"` - // ForceSendFields is a list of field names (e.g.
"RoutingMode") to + // ForceSendFields is a list of field names (e.g. "Function") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -26921,48 +28281,49 @@ type NetworkRoutingConfig struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "RoutingMode") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Function") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworkRoutingConfig) MarshalJSON() ([]byte, error) { - type NoMethod NetworkRoutingConfig +func (s *NetworkEndpointGroupCloudFunction) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupCloudFunction raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworksAddPeeringRequest struct { - // AutoCreateRoutes: This field will be deprecated soon. Use - // exchange_subnet_routes in network_peering instead. Indicates whether - // full mesh connectivity is created and managed automatically between - // peered networks. Currently this field should always be true since - // Google Compute Engine will automatically create and manage subnetwork - // routes between two networks when peering state is ACTIVE. - AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - - // Name: Name of the peering, which should conform to RFC1035. - Name string `json:"name,omitempty"` +// NetworkEndpointGroupCloudRun: Configuration for a Cloud Run network +// endpoint group (NEG). The service must be provided explicitly or in +// the URL mask. The tag is optional, may be provided explicitly or in +// the URL mask. Note: Cloud Run service must be in the same project and +// located in the same region as the Serverless NEG. +type NetworkEndpointGroupCloudRun struct { + // Service: Cloud Run service is the main resource of Cloud Run. The + // service must be 1-63 characters long, and comply with RFC1035. + // Example value: "run-service". + Service string `json:"service,omitempty"` - // NetworkPeering: Network peering parameters. In order to specify route - // policies for peering using import and export custom routes, you must - // specify all peering related parameters (name, peer network, - // exchange_subnet_routes) in the network_peering field. The - // corresponding fields in NetworksAddPeeringRequest will be deprecated - // soon. - NetworkPeering *NetworkPeering `json:"networkPeering,omitempty"` + // Tag: Optional Cloud Run tag represents the "named-revision" to + // provide additional fine-grained traffic routing information. The tag + // must be 1-63 characters long, and comply with RFC1035. Example value: + // "revision-0010". + Tag string `json:"tag,omitempty"` - // PeerNetwork: URL of the peer network. It can be either full URL or - // partial URL. 
The peer network may belong to a different project. If - // the partial URL does not contain project, it is assumed that the peer - // network is in the same project as the current network. - PeerNetwork string `json:"peerNetwork,omitempty"` + // UrlMask: A template to parse and fields from a + // request URL. URL mask allows for routing to multiple Run services + // without having to create multiple network endpoint groups and backend + // services. For example, request URLs "foo1.domain.com/bar1" and + // "foo1.domain.com/bar2" can be backed by the same Serverless Network + // Endpoint Group (NEG) with URL mask ".domain.com/". The + // URL mask will parse them to { service="bar1", tag="foo1" } and { + // service="bar2", tag="foo2" } respectively. + UrlMask string `json:"urlMask,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to + // ForceSendFields is a list of field names (e.g. "Service") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -26970,34 +28331,52 @@ type NetworksAddPeeringRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoCreateRoutes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { - type NoMethod NetworksAddPeeringRequest +func (s *NetworkEndpointGroupCloudRun) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupCloudRun raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworksGetEffectiveFirewallsResponse struct { - // FirewallPolicys: Effective firewalls from firewall policy. - FirewallPolicys []*NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy `json:"firewallPolicys,omitempty"` +type NetworkEndpointGroupList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Firewalls: Effective firewalls on the network. - Firewalls []*Firewall `json:"firewalls,omitempty"` + // Items: A list of NetworkEndpointGroup resources. + Items []*NetworkEndpointGroup `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#networkEndpointGroupList for network endpoint group lists. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *NetworkEndpointGroupListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "FirewallPolicys") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -27005,45 +28384,98 @@ type NetworksGetEffectiveFirewallsResponse struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "FirewallPolicys") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworksGetEffectiveFirewallsResponse) MarshalJSON() ([]byte, error) { - type NoMethod NetworksGetEffectiveFirewallsResponse +func (s *NetworkEndpointGroupList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { - // DisplayName: [Output Only] Deprecated, please use short name instead. - // The display name of the firewall policy. - DisplayName string `json:"displayName,omitempty"` - - // Name: [Output Only] The name of the firewall policy. - Name string `json:"name,omitempty"` - - // Rules: The rules that apply to the network. - Rules []*FirewallPolicyRule `json:"rules,omitempty"` - - // ShortName: [Output Only] The short name of the firewall policy. - ShortName string `json:"shortName,omitempty"` - - // Type: [Output Only] The type of the firewall policy. +// NetworkEndpointGroupListWarning: [Output Only] Informational warning +// message. +type NetworkEndpointGroupListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
// // Possible values: - // "HIERARCHY" - // "NETWORK" - // "UNSPECIFIED" - Type string `json:"type,omitempty"` + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "DisplayName") to + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*NetworkEndpointGroupListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -27051,26 +28483,36 @@ type NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DisplayName") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy) MarshalJSON() ([]byte, error) { - type NoMethod NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy +func (s *NetworkEndpointGroupListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworksRemovePeeringRequest struct { - // Name: Name of the peering, which should conform to RFC1035. - Name string `json:"name,omitempty"` +type NetworkEndpointGroupListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ForceSendFields is a list of field names (e.g. "Name") to + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -27078,7 +28520,7 @@ type NetworksRemovePeeringRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Name") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -27087,24 +28529,48 @@ type NetworksRemovePeeringRequest struct { NullFields []string `json:"-"` } -func (s *NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { - type NoMethod NetworksRemovePeeringRequest +func (s *NetworkEndpointGroupListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworksUpdatePeeringRequest struct { - NetworkPeering *NetworkPeering `json:"networkPeering,omitempty"` +// NetworkEndpointGroupPscData: All data that is specifically relevant +// to only network endpoint groups of type PRIVATE_SERVICE_CONNECT. +type NetworkEndpointGroupPscData struct { + // ConsumerPscAddress: [Output Only] Address allocated from given + // subnetwork for PSC. This IP address acts as a VIP for a PSC NEG, + // allowing it to act as an endpoint in L7 PSC-XLB. + ConsumerPscAddress string `json:"consumerPscAddress,omitempty"` - // ForceSendFields is a list of field names (e.g. "NetworkPeering") to - // unconditionally include in API requests. By default, fields with + // PscConnectionId: [Output Only] The PSC connection id of the PSC + // Network Endpoint Group Consumer. + PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` + + // PscConnectionStatus: [Output Only] The connection status of the PSC + // Forwarding Rule. + // + // Possible values: + // "ACCEPTED" - The connection has been accepted by the producer. + // "CLOSED" - The connection has been closed by the producer and will + // not serve traffic going forward. + // "NEEDS_ATTENTION" - The connection has been accepted by the + // producer, but the producer needs to take further action before the + // forwarding rule can serve traffic. + // "PENDING" - The connection is pending acceptance by the producer. + // "REJECTED" - The connection has been rejected by the producer. + // "STATUS_UNSPECIFIED" + PscConnectionStatus string `json:"pscConnectionStatus,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConsumerPscAddress") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NetworkPeering") to + // NullFields is a list of field names (e.g. "ConsumerPscAddress") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -27114,112 +28580,53 @@ type NetworksUpdatePeeringRequest struct { NullFields []string `json:"-"` } -func (s *NetworksUpdatePeeringRequest) MarshalJSON() ([]byte, error) { - type NoMethod NetworksUpdatePeeringRequest +func (s *NetworkEndpointGroupPscData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupPscData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeGroup: Represents a sole-tenant Node Group resource. A -// sole-tenant node is a physical server that is dedicated to hosting VM -// instances only for your specific project. 
Use sole-tenant nodes to -// keep your instances physically separated from instances in other -// projects, or to group your instances together on the same host -// hardware. For more information, read Sole-tenant nodes. -type NodeGroup struct { - // AutoscalingPolicy: Specifies how autoscaling should behave. - AutoscalingPolicy *NodeGroupAutoscalingPolicy `json:"autoscalingPolicy,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - Fingerprint string `json:"fingerprint,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] The type of the resource. Always - // compute#nodeGroup for node group. - Kind string `json:"kind,omitempty"` - - // LocationHint: An opaque location hint used to place the Node close to - // other resources. This field is for use by internal tools that use the - // public API. The location hint here on the NodeGroup overrides any - // location_hint present in the NodeTemplate. - LocationHint string `json:"locationHint,omitempty"` - - // MaintenancePolicy: Specifies how to handle instances when a node in - // the group undergoes maintenance. Set to one of: DEFAULT, - // RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is - // DEFAULT. For more information, see Maintenance policies. - // - // Possible values: - // "DEFAULT" - Allow the node and corresponding instances to retain - // default maintenance behavior. - // "MAINTENANCE_POLICY_UNSPECIFIED" - // "MIGRATE_WITHIN_NODE_GROUP" - When maintenance must be done on a - // node, the instances on that node will be moved to other nodes in the - // group. Instances with onHostMaintenance = MIGRATE will live migrate - // to their destinations while instances with onHostMaintenance = - // TERMINATE will terminate and then restart on their destination nodes - // if automaticRestart = true. - // "RESTART_IN_PLACE" - Instances in this group will restart on the - // same node when maintenance has completed. Instances must have - // onHostMaintenance = TERMINATE, and they will only restart if - // automaticRestart = true. - MaintenancePolicy string `json:"maintenancePolicy,omitempty"` - - MaintenanceWindow *NodeGroupMaintenanceWindow `json:"maintenanceWindow,omitempty"` - - // Name: The name of the resource, provided by the client when initially - // creating the resource. The resource name must be 1-63 characters - // long, and comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` - - // NodeTemplate: URL of the node template to create the node group from. - NodeTemplate string `json:"nodeTemplate,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` +type NetworkEndpointGroupsAttachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be attached. 
+ NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` - // Size: [Output Only] The total number of nodes in the node group. - Size int64 `json:"size,omitempty"` + // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Possible values: - // "CREATING" - // "DELETING" - // "INVALID" - // "READY" - Status string `json:"status,omitempty"` + // NullFields is a list of field names (e.g. "NetworkEndpoints") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - // Zone: [Output Only] The name of the zone where the node group - // resides, such as us-central1-a. - Zone string `json:"zone,omitempty"` +func (s *NetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsAttachEndpointsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type NetworkEndpointGroupsDetachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be detached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoscalingPolicy") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoscalingPolicy") to + // NullFields is a list of field names (e.g. "NetworkEndpoints") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -27229,22 +28636,58 @@ type NodeGroup struct { NullFields []string `json:"-"` } -func (s *NodeGroup) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroup +func (s *NetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsDetachEndpointsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupAggregatedList struct { +type NetworkEndpointGroupsListEndpointsRequest struct { + // HealthStatus: Optional query parameter for showing the health status + // of each network endpoint. Valid options are SKIP or SHOW. 
If you + // don't specify this parameter, the health status of network endpoints + // will not be provided. + // + // Possible values: + // "SHOW" - Show the health status for each network endpoint. Impacts + // latency of the call. + // "SKIP" - Health status for network endpoints will not be provided. + HealthStatus string `json:"healthStatus,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthStatus") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEndpointGroupsListEndpointsRequest) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsListEndpointsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkEndpointGroupsListNetworkEndpoints struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NodeGroupsScopedList resources. - Items map[string]NodeGroupsScopedList `json:"items,omitempty"` + // Items: A list of NetworkEndpointWithHealthStatus resources. + Items []*NetworkEndpointWithHealthStatus `json:"items,omitempty"` - // Kind: [Output Only] Type of resource.Always - // compute#nodeGroupAggregatedList for aggregated lists of node groups. + // Kind: [Output Only] The resource type, which is always + // compute#networkEndpointGroupsListNetworkEndpoints for the list of + // network endpoints in the specified network endpoint group. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -27255,14 +28698,8 @@ type NodeGroupAggregatedList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *NodeGroupAggregatedListWarning `json:"warning,omitempty"` + Warning *NetworkEndpointGroupsListNetworkEndpointsWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. 
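The request and list types in this hunk all follow the ForceSendFields/NullFields pattern spelled out in their doc comments: an empty field is normally omitted from the request body, a field named in ForceSendFields is sent even when empty, and a field named in NullFields is sent as an explicit JSON null. Below is a minimal sketch of that behavior, assuming the vendored package is importable as google.golang.org/api/compute/v1 (adjust to the actual vendor path) and using NetworkEndpointGroupsListEndpointsRequest purely as an illustration:

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func main() {
	// By default an empty HealthStatus is omitted from the marshaled body.
	req := &compute.NetworkEndpointGroupsListEndpointsRequest{}
	b, _ := json.Marshal(req)
	fmt.Println(string(b)) // {}

	// Naming the field in ForceSendFields sends it even though it is empty.
	req.ForceSendFields = []string{"HealthStatus"}
	b, _ = json.Marshal(req)
	fmt.Println(string(b)) // {"healthStatus":""}

	// Naming it in NullFields instead sends an explicit JSON null.
	req = &compute.NetworkEndpointGroupsListEndpointsRequest{
		NullFields: []string{"HealthStatus"},
	}
	b, _ = json.Marshal(req)
	fmt.Println(string(b)) // {"healthStatus":null}
}

The same pattern applies to every generated type in this file, since each MarshalJSON delegates to gensupport.MarshalJSON with those two field lists.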
@@ -27285,15 +28722,15 @@ type NodeGroupAggregatedList struct { NullFields []string `json:"-"` } -func (s *NodeGroupAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupAggregatedList +func (s *NetworkEndpointGroupsListNetworkEndpoints) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsListNetworkEndpoints raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeGroupAggregatedListWarning: [Output Only] Informational warning -// message. -type NodeGroupAggregatedListWarning struct { +// NetworkEndpointGroupsListNetworkEndpointsWarning: [Output Only] +// Informational warning message. +type NetworkEndpointGroupsListNetworkEndpointsWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -27315,6 +28752,9 @@ type NodeGroupAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -27322,6 +28762,9 @@ type NodeGroupAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -27355,7 +28798,7 @@ type NodeGroupAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NodeGroupAggregatedListWarningData `json:"data,omitempty"` + Data []*NetworkEndpointGroupsListNetworkEndpointsWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -27378,13 +28821,13 @@ type NodeGroupAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupAggregatedListWarning +func (s *NetworkEndpointGroupsListNetworkEndpointsWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsListNetworkEndpointsWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupAggregatedListWarningData struct { +type NetworkEndpointGroupsListNetworkEndpointsWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -27415,110 +28858,50 @@ type NodeGroupAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupAggregatedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type NodeGroupAutoscalingPolicy struct { - // MaxNodes: The maximum number of nodes that the group should have. - // Must be set if autoscaling is enabled. Maximum value allowed is 100. - MaxNodes int64 `json:"maxNodes,omitempty"` - - // MinNodes: The minimum number of nodes that the group should have. - MinNodes int64 `json:"minNodes,omitempty"` - - // Mode: The autoscaling mode. Set to one of: ON, OFF, or - // ONLY_SCALE_OUT. For more information, see Autoscaler modes. - // - // Possible values: - // "MODE_UNSPECIFIED" - // "OFF" - Autoscaling is disabled. - // "ON" - Autocaling is fully enabled. - // "ONLY_SCALE_OUT" - Autoscaling will only scale out and will not - // remove nodes. - Mode string `json:"mode,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MaxNodes") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MaxNodes") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *NodeGroupAutoscalingPolicy) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupAutoscalingPolicy +func (s *NetworkEndpointGroupsListNetworkEndpointsWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsListNetworkEndpointsWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeGroupList: Contains a list of nodeGroups. -type NodeGroupList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of NodeGroup resources. - Items []*NodeGroup `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource.Always compute#nodeGroupList for - // lists of node groups. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. 
- Warning *NodeGroupListWarning `json:"warning,omitempty"` +type NetworkEndpointGroupsScopedList struct { + // NetworkEndpointGroups: [Output Only] The list of network endpoint + // groups that are contained in this scope. + NetworkEndpointGroups []*NetworkEndpointGroup `json:"networkEndpointGroups,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: [Output Only] An informational warning that replaces the + // list of network endpoint groups when the list is empty. + Warning *NetworkEndpointGroupsScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "NetworkEndpointGroups") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "NetworkEndpointGroups") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *NodeGroupList) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupList +func (s *NetworkEndpointGroupsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeGroupListWarning: [Output Only] Informational warning message. -type NodeGroupListWarning struct { +// NetworkEndpointGroupsScopedListWarning: [Output Only] An +// informational warning that replaces the list of network endpoint +// groups when the list is empty. +type NetworkEndpointGroupsScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -27540,6 +28923,9 @@ type NodeGroupListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -27547,6 +28933,9 @@ type NodeGroupListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -27580,7 +28969,7 @@ type NodeGroupListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NodeGroupListWarningData `json:"data,omitempty"` + Data []*NetworkEndpointGroupsScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -27603,13 +28992,13 @@ type NodeGroupListWarning struct { NullFields []string `json:"-"` } -func (s *NodeGroupListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupListWarning +func (s *NetworkEndpointGroupsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupListWarningData struct { +type NetworkEndpointGroupsScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -27640,92 +29029,153 @@ type NodeGroupListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeGroupListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupListWarningData +func (s *NetworkEndpointGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeGroupMaintenanceWindow: Time window specified for daily -// maintenance operations. GCE's internal maintenance will be performed -// within this window. -type NodeGroupMaintenanceWindow struct { - // MaintenanceDuration: [Output only] A predetermined duration for the - // window, automatically chosen to be the smallest possible in the given - // scenario. - MaintenanceDuration *Duration `json:"maintenanceDuration,omitempty"` +type NetworkEndpointWithHealthStatus struct { + // Healths: [Output only] The health status of network endpoint; + Healths []*HealthStatusForNetworkEndpoint `json:"healths,omitempty"` - // StartTime: Start time of the window. This must be in UTC format that - // resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For - // example, both 13:00-5 and 08:00 are valid. - StartTime string `json:"startTime,omitempty"` + // NetworkEndpoint: [Output only] The network endpoint; + NetworkEndpoint *NetworkEndpoint `json:"networkEndpoint,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"MaintenanceDuration") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Healths") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MaintenanceDuration") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Healths") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NodeGroupMaintenanceWindow) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupMaintenanceWindow +func (s *NetworkEndpointWithHealthStatus) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointWithHealthStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupNode struct { - // Accelerators: Accelerators for this node. - Accelerators []*AcceleratorConfig `json:"accelerators,omitempty"` +// NetworkInterface: A network interface resource attached to an +// instance. +type NetworkInterface struct { + // AccessConfigs: An array of configurations for this interface. + // Currently, only one access config, ONE_TO_ONE_NAT, is supported. If + // there are no accessConfigs specified, then this instance will have no + // external internet access. + AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"` - // CpuOvercommitType: CPU overcommit. + // AliasIpRanges: An array of alias IP ranges for this network + // interface. You can only specify this field for network interfaces in + // VPC networks. + AliasIpRanges []*AliasIpRange `json:"aliasIpRanges,omitempty"` + + // Fingerprint: Fingerprint hash of contents stored in this network + // interface. This field will be ignored when inserting an Instance or + // adding a NetworkInterface. An up-to-date fingerprint must be provided + // in order to update the NetworkInterface. The request will fail with + // error 400 Bad Request if the fingerprint is not provided, or 412 + // Precondition Failed if the fingerprint is out of date. + Fingerprint string `json:"fingerprint,omitempty"` + + // InternalIpv6PrefixLength: The prefix length of the primary internal + // IPv6 range. + InternalIpv6PrefixLength int64 `json:"internalIpv6PrefixLength,omitempty"` + + // Ipv6AccessConfigs: An array of IPv6 access configurations for this + // interface. Currently, only one IPv6 access config, DIRECT_IPV6, is + // supported. If there is no ipv6AccessConfig specified, then this + // instance will have no external IPv6 Internet access. 
+ Ipv6AccessConfigs []*AccessConfig `json:"ipv6AccessConfigs,omitempty"` + + // Ipv6AccessType: [Output Only] One of EXTERNAL, INTERNAL to indicate + // whether the IP can be accessed from the Internet. This field is + // always inherited from its subnetwork. Valid only if stackType is + // IPV4_IPV6. // // Possible values: - // "CPU_OVERCOMMIT_TYPE_UNSPECIFIED" - // "ENABLED" - // "NONE" - CpuOvercommitType string `json:"cpuOvercommitType,omitempty"` + // "EXTERNAL" - This network interface can have external IPv6. + // "INTERNAL" - This network interface can have internal IPv6. + Ipv6AccessType string `json:"ipv6AccessType,omitempty"` - // Disks: Local disk configurations. - Disks []*LocalDisk `json:"disks,omitempty"` + // Ipv6Address: An IPv6 internal network address for this network + // interface. + Ipv6Address string `json:"ipv6Address,omitempty"` - // Instances: Instances scheduled on this node. - Instances []string `json:"instances,omitempty"` + // Kind: [Output Only] Type of the resource. Always + // compute#networkInterface for network interfaces. + Kind string `json:"kind,omitempty"` - // Name: The name of the node. + // Name: [Output Only] The name of the network interface, which is + // generated by the server. For a VM, the network interface uses the + // nicN naming format. Where N is a value between 0 and 7. The default + // interface value is nic0. Name string `json:"name,omitempty"` - // NodeType: The type of this node. - NodeType string `json:"nodeType,omitempty"` + // Network: URL of the VPC network resource for this instance. When + // creating an instance, if neither the network nor the subnetwork is + // specified, the default network global/networks/default is used. If + // the selected project doesn't have the default network, you must + // specify a network or subnet. If the network is not specified but the + // subnetwork is specified, the network is inferred. If you specify this + // property, you can specify the network as a full or partial URL. For + // example, the following are all valid URLs: - + // https://www.googleapis.com/compute/v1/projects/project/global/networks/ + // network - projects/project/global/networks/network - + // global/networks/default + Network string `json:"network,omitempty"` - // SatisfiesPzs: [Output Only] Reserved for future use. - SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` + // NetworkIP: An IPv4 internal IP address to assign to the instance for + // this network interface. If not specified by the user, an unused + // internal IP is assigned by the system. + NetworkIP string `json:"networkIP,omitempty"` - // ServerBinding: Binding properties for the physical server. - ServerBinding *ServerBinding `json:"serverBinding,omitempty"` + // NicType: The type of vNIC to be used on this interface. This may be + // gVNIC or VirtioNet. + // + // Possible values: + // "GVNIC" - GVNIC + // "UNSPECIFIED_NIC_TYPE" - No type specified. + // "VIRTIO_NET" - VIRTIO + NicType string `json:"nicType,omitempty"` - // ServerId: Server ID associated with this node. - ServerId string `json:"serverId,omitempty"` + // QueueCount: The networking queue count that's specified by users for + // the network interface. Both Rx and Tx queues will be set to this + // number. It'll be empty if not specified by the users. + QueueCount int64 `json:"queueCount,omitempty"` + // StackType: The stack type for this network interface to identify + // whether the IPv6 feature is enabled or not. If not specified, + // IPV4_ONLY will be used. 
This field can be both set at instance + // creation and update network interface operations. + // // Possible values: - // "CREATING" - // "DELETING" - // "INVALID" - // "READY" - // "REPAIRING" - Status string `json:"status,omitempty"` + // "IPV4_IPV6" - The network interface can have both IPv4 and IPv6 + // addresses. + // "IPV4_ONLY" - The network interface will be assigned IPv4 address. + StackType string `json:"stackType,omitempty"` - // ForceSendFields is a list of field names (e.g. "Accelerators") to + // Subnetwork: The URL of the Subnetwork resource for this instance. If + // the network resource is in legacy mode, do not specify this field. If + // the network is in auto subnet mode, specifying the subnetwork is + // optional. If the network is in custom subnet mode, specifying the + // subnetwork is required. If you specify this field, you can specify + // the subnetwork as a full or partial URL. For example, the following + // are all valid URLs: - + // https://www.googleapis.com/compute/v1/projects/project/regions/region + // /subnetworks/subnetwork - regions/region/subnetworks/subnetwork + Subnetwork string `json:"subnetwork,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AccessConfigs") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -27733,7 +29183,7 @@ type NodeGroupNode struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Accelerators") to include + // NullFields is a list of field names (e.g. "AccessConfigs") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -27742,79 +29192,23 @@ type NodeGroupNode struct { NullFields []string `json:"-"` } -func (s *NodeGroupNode) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupNode - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type NodeGroupsAddNodesRequest struct { - // AdditionalNodeCount: Count of additional nodes to be added to the - // node group. - AdditionalNodeCount int64 `json:"additionalNodeCount,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AdditionalNodeCount") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AdditionalNodeCount") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *NodeGroupsAddNodesRequest) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsAddNodesRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type NodeGroupsDeleteNodesRequest struct { - // Nodes: Names of the nodes to delete. - Nodes []string `json:"nodes,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Nodes") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Nodes") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *NodeGroupsDeleteNodesRequest) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsDeleteNodesRequest +func (s *NetworkInterface) MarshalJSON() ([]byte, error) { + type NoMethod NetworkInterface raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupsListNodes struct { +// NetworkList: Contains a list of networks. +type NetworkList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Node resources. - Items []*NodeGroupNode `json:"items,omitempty"` + // Items: A list of Network resources. + Items []*Network `json:"items,omitempty"` - // Kind: [Output Only] The resource type, which is always - // compute.nodeGroupsListNodes for the list of nodes in the specified - // node group. + // Kind: [Output Only] Type of resource. Always compute#networkList for + // lists of networks. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -27829,7 +29223,7 @@ type NodeGroupsListNodes struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *NodeGroupsListNodesWarning `json:"warning,omitempty"` + Warning *NetworkListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -27852,15 +29246,14 @@ type NodeGroupsListNodes struct { NullFields []string `json:"-"` } -func (s *NodeGroupsListNodes) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsListNodes +func (s *NetworkList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeGroupsListNodesWarning: [Output Only] Informational warning -// message. -type NodeGroupsListNodesWarning struct { +// NetworkListWarning: [Output Only] Informational warning message. +type NetworkListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -27882,6 +29275,9 @@ type NodeGroupsListNodesWarning struct { // overridden. 
Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -27889,6 +29285,9 @@ type NodeGroupsListNodesWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -27922,7 +29321,7 @@ type NodeGroupsListNodesWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NodeGroupsListNodesWarningData `json:"data,omitempty"` + Data []*NetworkListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -27945,13 +29344,13 @@ type NodeGroupsListNodesWarning struct { NullFields []string `json:"-"` } -func (s *NodeGroupsListNodesWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsListNodesWarning +func (s *NetworkListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupsListNodesWarningData struct { +type NetworkListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -27982,22 +29381,96 @@ type NodeGroupsListNodesWarningData struct { NullFields []string `json:"-"` } -func (s *NodeGroupsListNodesWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsListNodesWarningData +func (s *NetworkListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupsScopedList struct { - // NodeGroups: [Output Only] A list of node groups contained in this - // scope. - NodeGroups []*NodeGroup `json:"nodeGroups,omitempty"` +// NetworkPeering: A network peering attached to a network resource. The +// message includes the peering name, peer network, peering state, and a +// flag indicating whether Google Compute Engine should automatically +// create routes for the peering. +type NetworkPeering struct { + // AutoCreateRoutes: This field will be deprecated soon. Use the + // exchange_subnet_routes field instead. Indicates whether full mesh + // connectivity is created and managed automatically between peered + // networks. Currently this field should always be true since Google + // Compute Engine will automatically create and manage subnetwork routes + // between two networks when peering state is ACTIVE. 
+ AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - // Warning: [Output Only] An informational warning that appears when the - // nodeGroup list is empty. - Warning *NodeGroupsScopedListWarning `json:"warning,omitempty"` + // ExchangeSubnetRoutes: Indicates whether full mesh connectivity is + // created and managed automatically between peered networks. Currently + // this field should always be true since Google Compute Engine will + // automatically create and manage subnetwork routes between two + // networks when peering state is ACTIVE. + ExchangeSubnetRoutes bool `json:"exchangeSubnetRoutes,omitempty"` - // ForceSendFields is a list of field names (e.g. "NodeGroups") to + // ExportCustomRoutes: Whether to export the custom routes to peer + // network. The default value is false. + ExportCustomRoutes bool `json:"exportCustomRoutes,omitempty"` + + // ExportSubnetRoutesWithPublicIp: Whether subnet routes with public IP + // range are exported. The default value is true, all subnet routes are + // exported. IPv4 special-use ranges are always exported to peers and + // are not controlled by this field. + ExportSubnetRoutesWithPublicIp bool `json:"exportSubnetRoutesWithPublicIp,omitempty"` + + // ImportCustomRoutes: Whether to import the custom routes from peer + // network. The default value is false. + ImportCustomRoutes bool `json:"importCustomRoutes,omitempty"` + + // ImportSubnetRoutesWithPublicIp: Whether subnet routes with public IP + // range are imported. The default value is false. IPv4 special-use + // ranges are always imported from peers and are not controlled by this + // field. + ImportSubnetRoutesWithPublicIp bool `json:"importSubnetRoutesWithPublicIp,omitempty"` + + // Name: Name of this peering. Provided by the client when the peering + // is created. The name must comply with RFC1035. Specifically, the name + // must be 1-63 characters long and match regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase + // letter, and all the following characters must be a dash, lowercase + // letter, or digit, except the last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: The URL of the peer network. It can be either full URL or + // partial URL. The peer network may belong to a different project. If + // the partial URL does not contain project, it is assumed that the peer + // network is in the same project as the current network. + Network string `json:"network,omitempty"` + + // PeerMtu: Maximum Transmission Unit in bytes. + PeerMtu int64 `json:"peerMtu,omitempty"` + + // StackType: Which IP version(s) of traffic and routes are allowed to + // be imported or exported between peer networks. The default value is + // IPV4_ONLY. + // + // Possible values: + // "IPV4_IPV6" - This Peering will allow IPv4 traffic and routes to be + // exchanged. Additionally if the matching peering is IPV4_IPV6, IPv6 + // traffic and routes will be exchanged as well. + // "IPV4_ONLY" - This Peering will only allow IPv4 traffic and routes + // to be exchanged, even if the matching peering is IPV4_IPV6. + StackType string `json:"stackType,omitempty"` + + // State: [Output Only] State for the peering, either `ACTIVE` or + // `INACTIVE`. The peering is `ACTIVE` when there's a matching + // configuration in the peer network. + // + // Possible values: + // "ACTIVE" - Matching configuration exists on the peer. 
+ // "INACTIVE" - There is no matching configuration on the peer, + // including the case when peer does not exist. + State string `json:"state,omitempty"` + + // StateDetails: [Output Only] Details about the current state of the + // peering. + StateDetails string `json:"stateDetails,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -28005,92 +29478,70 @@ type NodeGroupsScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NodeGroups") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AutoCreateRoutes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *NodeGroupsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsScopedList +func (s *NetworkPeering) MarshalJSON() ([]byte, error) { + type NoMethod NetworkPeering raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeGroupsScopedListWarning: [Output Only] An informational warning -// that appears when the nodeGroup list is empty. -type NodeGroupsScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // +type NetworkPerformanceConfig struct { // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. 
- // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` + // "DEFAULT" + // "TIER_1" + TotalEgressBandwidthTier string `json:"totalEgressBandwidthTier,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*NodeGroupsScopedListWarningData `json:"data,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "TotalEgressBandwidthTier") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // NullFields is a list of field names (e.g. "TotalEgressBandwidthTier") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - // ForceSendFields is a list of field names (e.g. "Code") to +func (s *NetworkPerformanceConfig) MarshalJSON() ([]byte, error) { + type NoMethod NetworkPerformanceConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkRoutingConfig: A routing configuration attached to a network +// resource. The message includes the list of routers associated with +// the network, and a flag indicating the type of routing behavior to +// enforce network-wide. +type NetworkRoutingConfig struct { + // RoutingMode: The network-wide routing mode to use. 
If set to + // REGIONAL, this network's Cloud Routers will only advertise routes + // with subnets of this network in the same region as the router. If set + // to GLOBAL, this network's Cloud Routers will advertise routes with + // all subnets of this network, across regions. + // + // Possible values: + // "GLOBAL" + // "REGIONAL" + RoutingMode string `json:"routingMode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RoutingMode") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -28098,36 +29549,48 @@ type NodeGroupsScopedListWarning struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "RoutingMode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NodeGroupsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsScopedListWarning +func (s *NetworkRoutingConfig) MarshalJSON() ([]byte, error) { + type NoMethod NetworkRoutingConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupsScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +type NetworksAddPeeringRequest struct { + // AutoCreateRoutes: This field will be deprecated soon. Use + // exchange_subnet_routes in network_peering instead. Indicates whether + // full mesh connectivity is created and managed automatically between + // peered networks. Currently this field should always be true since + // Google Compute Engine will automatically create and manage subnetwork + // routes between two networks when peering state is ACTIVE. + AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // Name: Name of the peering, which should conform to RFC1035. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // NetworkPeering: Network peering parameters. 
In order to specify route + // policies for peering using import and export custom routes, you must + // specify all peering related parameters (name, peer network, + // exchange_subnet_routes) in the network_peering field. The + // corresponding fields in NetworksAddPeeringRequest will be deprecated + // soon. + NetworkPeering *NetworkPeering `json:"networkPeering,omitempty"` + + // PeerNetwork: URL of the peer network. It can be either full URL or + // partial URL. The peer network may belong to a different project. If + // the partial URL does not contain project, it is assumed that the peer + // network is in the same project as the current network. + PeerNetwork string `json:"peerNetwork,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -28135,27 +29598,34 @@ type NodeGroupsScopedListWarningData struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AutoCreateRoutes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *NodeGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsScopedListWarningData +func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { + type NoMethod NetworksAddPeeringRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupsSetNodeTemplateRequest struct { - // NodeTemplate: Full or partial URL of the node template resource to be - // updated for this node group. - NodeTemplate string `json:"nodeTemplate,omitempty"` +type NetworksGetEffectiveFirewallsResponse struct { + // FirewallPolicys: Effective firewalls from firewall policy. + FirewallPolicys []*NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy `json:"firewallPolicys,omitempty"` - // ForceSendFields is a list of field names (e.g. "NodeTemplate") to + // Firewalls: Effective firewalls on the network. + Firewalls []*Firewall `json:"firewalls,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "FirewallPolicys") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -28163,7 +29633,53 @@ type NodeGroupsSetNodeTemplateRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NodeTemplate") to include + // NullFields is a list of field names (e.g. "FirewallPolicys") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NetworksGetEffectiveFirewallsResponse) MarshalJSON() ([]byte, error) { + type NoMethod NetworksGetEffectiveFirewallsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { + // DisplayName: [Output Only] Deprecated, please use short name instead. + // The display name of the firewall policy. + DisplayName string `json:"displayName,omitempty"` + + // Name: [Output Only] The name of the firewall policy. + Name string `json:"name,omitempty"` + + // Rules: The rules that apply to the network. + Rules []*FirewallPolicyRule `json:"rules,omitempty"` + + // ShortName: [Output Only] The short name of the firewall policy. + ShortName string `json:"shortName,omitempty"` + + // Type: [Output Only] The type of the firewall policy. + // + // Possible values: + // "HIERARCHY" + // "NETWORK" + // "UNSPECIFIED" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -28172,25 +29688,75 @@ type NodeGroupsSetNodeTemplateRequest struct { NullFields []string `json:"-"` } -func (s *NodeGroupsSetNodeTemplateRequest) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsSetNodeTemplateRequest +func (s *NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy) MarshalJSON() ([]byte, error) { + type NoMethod NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTemplate: Represent a sole-tenant Node Template resource. You can -// use a template to define properties for nodes in a node group. For -// more information, read Creating node groups and instances. -type NodeTemplate struct { - Accelerators []*AcceleratorConfig `json:"accelerators,omitempty"` +type NetworksRemovePeeringRequest struct { + // Name: Name of the peering, which should conform to RFC1035. + Name string `json:"name,omitempty"` - // CpuOvercommitType: CPU overcommit. 
- // - // Possible values: - // "CPU_OVERCOMMIT_TYPE_UNSPECIFIED" - // "ENABLED" - // "NONE" - CpuOvercommitType string `json:"cpuOvercommitType,omitempty"` + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { + type NoMethod NetworksRemovePeeringRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworksUpdatePeeringRequest struct { + NetworkPeering *NetworkPeering `json:"networkPeering,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkPeering") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NetworkPeering") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NetworksUpdatePeeringRequest) MarshalJSON() ([]byte, error) { + type NoMethod NetworksUpdatePeeringRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NodeGroup: Represents a sole-tenant Node Group resource. A +// sole-tenant node is a physical server that is dedicated to hosting VM +// instances only for your specific project. Use sole-tenant nodes to +// keep your instances physically separated from instances in other +// projects, or to group your instances together on the same host +// hardware. For more information, read Sole-tenant nodes. +type NodeGroup struct { + // AutoscalingPolicy: Specifies how autoscaling should behave. + AutoscalingPolicy *NodeGroupAutoscalingPolicy `json:"autoscalingPolicy,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -28200,16 +29766,45 @@ type NodeTemplate struct { // property when you create the resource. Description string `json:"description,omitempty"` - Disks []*LocalDisk `json:"disks,omitempty"` + Fingerprint string `json:"fingerprint,omitempty"` // Id: [Output Only] The unique identifier for the resource. 
This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` // Kind: [Output Only] The type of the resource. Always - // compute#nodeTemplate for node templates. + // compute#nodeGroup for node group. Kind string `json:"kind,omitempty"` + // LocationHint: An opaque location hint used to place the Node close to + // other resources. This field is for use by internal tools that use the + // public API. The location hint here on the NodeGroup overrides any + // location_hint present in the NodeTemplate. + LocationHint string `json:"locationHint,omitempty"` + + // MaintenancePolicy: Specifies how to handle instances when a node in + // the group undergoes maintenance. Set to one of: DEFAULT, + // RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is + // DEFAULT. For more information, see Maintenance policies. + // + // Possible values: + // "DEFAULT" - Allow the node and corresponding instances to retain + // default maintenance behavior. + // "MAINTENANCE_POLICY_UNSPECIFIED" + // "MIGRATE_WITHIN_NODE_GROUP" - When maintenance must be done on a + // node, the instances on that node will be moved to other nodes in the + // group. Instances with onHostMaintenance = MIGRATE will live migrate + // to their destinations while instances with onHostMaintenance = + // TERMINATE will terminate and then restart on their destination nodes + // if automaticRestart = true. + // "RESTART_IN_PLACE" - Instances in this group will restart on the + // same node when maintenance has completed. Instances must have + // onHostMaintenance = TERMINATE, and they will only restart if + // automaticRestart = true. + MaintenancePolicy string `json:"maintenancePolicy,omitempty"` + + MaintenanceWindow *NodeGroupMaintenanceWindow `json:"maintenanceWindow,omitempty"` + // Name: The name of the resource, provided by the client when initially // creating the resource. The resource name must be 1-63 characters // long, and comply with RFC1035. Specifically, the name must be 1-63 @@ -28220,88 +29815,67 @@ type NodeTemplate struct { // be a dash. Name string `json:"name,omitempty"` - // NodeAffinityLabels: Labels to use for node affinity, which will be - // used in instance scheduling. - NodeAffinityLabels map[string]string `json:"nodeAffinityLabels,omitempty"` - - // NodeType: The node type to use for nodes group that are created from - // this template. - NodeType string `json:"nodeType,omitempty"` - - // NodeTypeFlexibility: The flexible properties of the desired node - // type. Node groups that use this node template will create nodes of a - // type that matches these properties. This field is mutually exclusive - // with the node_type property; you can only define one or the other, - // but not both. - NodeTypeFlexibility *NodeTemplateNodeTypeFlexibility `json:"nodeTypeFlexibility,omitempty"` - - // Region: [Output Only] The name of the region where the node template - // resides, such as us-central1. - Region string `json:"region,omitempty"` + // NodeTemplate: URL of the node template to create the node group from. + NodeTemplate string `json:"nodeTemplate,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // ServerBinding: Sets the binding properties for the physical server. 
- // Valid values include: - *[Default]* RESTART_NODE_ON_ANY_SERVER: - // Restarts VMs on any available physical server - - // RESTART_NODE_ON_MINIMAL_SERVER: Restarts VMs on the same physical - // server whenever possible See Sole-tenant node options for more - // information. - ServerBinding *ServerBinding `json:"serverBinding,omitempty"` + // ShareSettings: Share-settings for the node group + ShareSettings *ShareSettings `json:"shareSettings,omitempty"` + + // Size: [Output Only] The total number of nodes in the node group. + Size int64 `json:"size,omitempty"` - // Status: [Output Only] The status of the node template. One of the - // following values: CREATING, READY, and DELETING. - // // Possible values: - // "CREATING" - Resources are being allocated. - // "DELETING" - The node template is currently being deleted. - // "INVALID" - Invalid status. - // "READY" - The node template is ready. + // "CREATING" + // "DELETING" + // "INVALID" + // "READY" Status string `json:"status,omitempty"` - // StatusMessage: [Output Only] An optional, human-readable explanation - // of the status. - StatusMessage string `json:"statusMessage,omitempty"` + // Zone: [Output Only] The name of the zone where the node group + // resides, such as us-central1-a. + Zone string `json:"zone,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Accelerators") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AutoscalingPolicy") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Accelerators") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AutoscalingPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *NodeTemplate) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplate +func (s *NodeGroup) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroup raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTemplateAggregatedList struct { +type NodeGroupAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NodeTemplatesScopedList resources. - Items map[string]NodeTemplatesScopedList `json:"items,omitempty"` + // Items: A list of NodeGroupsScopedList resources. 
+ Items map[string]NodeGroupsScopedList `json:"items,omitempty"` // Kind: [Output Only] Type of resource.Always - // compute#nodeTemplateAggregatedList for aggregated lists of node - // templates. + // compute#nodeGroupAggregatedList for aggregated lists of node groups. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -28319,7 +29893,7 @@ type NodeTemplateAggregatedList struct { Unreachables []string `json:"unreachables,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *NodeTemplateAggregatedListWarning `json:"warning,omitempty"` + Warning *NodeGroupAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -28342,15 +29916,15 @@ type NodeTemplateAggregatedList struct { NullFields []string `json:"-"` } -func (s *NodeTemplateAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateAggregatedList +func (s *NodeGroupAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTemplateAggregatedListWarning: [Output Only] Informational -// warning message. -type NodeTemplateAggregatedListWarning struct { +// NodeGroupAggregatedListWarning: [Output Only] Informational warning +// message. +type NodeGroupAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -28372,6 +29946,9 @@ type NodeTemplateAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -28379,6 +29956,9 @@ type NodeTemplateAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -28412,7 +29992,7 @@ type NodeTemplateAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NodeTemplateAggregatedListWarningData `json:"data,omitempty"` + Data []*NodeGroupAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
@@ -28435,13 +30015,13 @@ type NodeTemplateAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTemplateAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateAggregatedListWarning +func (s *NodeGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTemplateAggregatedListWarningData struct { +type NodeGroupAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -28472,23 +30052,65 @@ type NodeTemplateAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTemplateAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateAggregatedListWarningData +func (s *NodeGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTemplateList: Contains a list of node templates. -type NodeTemplateList struct { +type NodeGroupAutoscalingPolicy struct { + // MaxNodes: The maximum number of nodes that the group should have. + // Must be set if autoscaling is enabled. Maximum value allowed is 100. + MaxNodes int64 `json:"maxNodes,omitempty"` + + // MinNodes: The minimum number of nodes that the group should have. + MinNodes int64 `json:"minNodes,omitempty"` + + // Mode: The autoscaling mode. Set to one of: ON, OFF, or + // ONLY_SCALE_OUT. For more information, see Autoscaler modes. + // + // Possible values: + // "MODE_UNSPECIFIED" + // "OFF" - Autoscaling is disabled. + // "ON" - Autocaling is fully enabled. + // "ONLY_SCALE_OUT" - Autoscaling will only scale out and will not + // remove nodes. + Mode string `json:"mode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxNodes") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxNodes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NodeGroupAutoscalingPolicy) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupAutoscalingPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NodeGroupList: Contains a list of nodeGroups. +type NodeGroupList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NodeTemplate resources. - Items []*NodeTemplate `json:"items,omitempty"` + // Items: A list of NodeGroup resources. 
+ Items []*NodeGroup `json:"items,omitempty"` - // Kind: [Output Only] Type of resource.Always compute#nodeTemplateList - // for lists of node templates. + // Kind: [Output Only] Type of resource.Always compute#nodeGroupList for + // lists of node groups. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -28503,7 +30125,7 @@ type NodeTemplateList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *NodeTemplateListWarning `json:"warning,omitempty"` + Warning *NodeGroupListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -28526,14 +30148,14 @@ type NodeTemplateList struct { NullFields []string `json:"-"` } -func (s *NodeTemplateList) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateList +func (s *NodeGroupList) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTemplateListWarning: [Output Only] Informational warning message. -type NodeTemplateListWarning struct { +// NodeGroupListWarning: [Output Only] Informational warning message. +type NodeGroupListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -28555,6 +30177,9 @@ type NodeTemplateListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -28562,6 +30187,9 @@ type NodeTemplateListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -28595,7 +30223,7 @@ type NodeTemplateListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NodeTemplateListWarningData `json:"data,omitempty"` + Data []*NodeGroupListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -28618,13 +30246,13 @@ type NodeTemplateListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTemplateListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateListWarning +func (s *NodeGroupListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTemplateListWarningData struct { +type NodeGroupListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -28655,52 +30283,102 @@ type NodeTemplateListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTemplateListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateListWarningData +func (s *NodeGroupListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTemplateNodeTypeFlexibility struct { - Cpus string `json:"cpus,omitempty"` - - LocalSsd string `json:"localSsd,omitempty"` +// NodeGroupMaintenanceWindow: Time window specified for daily +// maintenance operations. GCE's internal maintenance will be performed +// within this window. +type NodeGroupMaintenanceWindow struct { + // MaintenanceDuration: [Output only] A predetermined duration for the + // window, automatically chosen to be the smallest possible in the given + // scenario. + MaintenanceDuration *Duration `json:"maintenanceDuration,omitempty"` - Memory string `json:"memory,omitempty"` + // StartTime: Start time of the window. This must be in UTC format that + // resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For + // example, both 13:00-5 and 08:00 are valid. + StartTime string `json:"startTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "Cpus") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "MaintenanceDuration") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Cpus") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "MaintenanceDuration") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *NodeTemplateNodeTypeFlexibility) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateNodeTypeFlexibility +func (s *NodeGroupMaintenanceWindow) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupMaintenanceWindow raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTemplatesScopedList struct { - // NodeTemplates: [Output Only] A list of node templates contained in - // this scope. - NodeTemplates []*NodeTemplate `json:"nodeTemplates,omitempty"` +type NodeGroupNode struct { + // Accelerators: Accelerators for this node. 
+ Accelerators []*AcceleratorConfig `json:"accelerators,omitempty"` - // Warning: [Output Only] An informational warning that appears when the - // node templates list is empty. - Warning *NodeTemplatesScopedListWarning `json:"warning,omitempty"` + // ConsumedResources: Node resources that are reserved by all instances. + ConsumedResources *InstanceConsumptionInfo `json:"consumedResources,omitempty"` - // ForceSendFields is a list of field names (e.g. "NodeTemplates") to + // CpuOvercommitType: CPU overcommit. + // + // Possible values: + // "CPU_OVERCOMMIT_TYPE_UNSPECIFIED" + // "ENABLED" + // "NONE" + CpuOvercommitType string `json:"cpuOvercommitType,omitempty"` + + // Disks: Local disk configurations. + Disks []*LocalDisk `json:"disks,omitempty"` + + // InstanceConsumptionData: Instance data that shows consumed resources + // on the node. + InstanceConsumptionData []*InstanceConsumptionData `json:"instanceConsumptionData,omitempty"` + + // Instances: Instances scheduled on this node. + Instances []string `json:"instances,omitempty"` + + // Name: The name of the node. + Name string `json:"name,omitempty"` + + // NodeType: The type of this node. + NodeType string `json:"nodeType,omitempty"` + + // SatisfiesPzs: [Output Only] Reserved for future use. + SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` + + // ServerBinding: Binding properties for the physical server. + ServerBinding *ServerBinding `json:"serverBinding,omitempty"` + + // ServerId: Server ID associated with this node. + ServerId string `json:"serverId,omitempty"` + + // Possible values: + // "CREATING" + // "DELETING" + // "INVALID" + // "READY" + // "REPAIRING" + Status string `json:"status,omitempty"` + + // TotalResources: Total amount of available resources on the node. + TotalResources *InstanceConsumptionInfo `json:"totalResources,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Accelerators") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -28708,7 +30386,7 @@ type NodeTemplatesScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NodeTemplates") to include + // NullFields is a list of field names (e.g. "Accelerators") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -28717,120 +30395,46 @@ type NodeTemplatesScopedList struct { NullFields []string `json:"-"` } -func (s *NodeTemplatesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplatesScopedList +func (s *NodeGroupNode) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupNode raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTemplatesScopedListWarning: [Output Only] An informational -// warning that appears when the node templates list is empty. -type NodeTemplatesScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. 
- // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*NodeTemplatesScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` +type NodeGroupsAddNodesRequest struct { + // AdditionalNodeCount: Count of additional nodes to be added to the + // node group. + AdditionalNodeCount int64 `json:"additionalNodeCount,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AdditionalNodeCount") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AdditionalNodeCount") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *NodeTemplatesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplatesScopedListWarning +func (s *NodeGroupsAddNodesRequest) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsAddNodesRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTemplatesScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` +type NodeGroupsDeleteNodesRequest struct { + // Nodes: Names of the nodes to delete. + Nodes []string `json:"nodes,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "Nodes") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -28838,7 +30442,7 @@ type NodeTemplatesScopedListWarningData struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API + // NullFields is a list of field names (e.g. "Nodes") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -28847,101 +30451,23 @@ type NodeTemplatesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTemplatesScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplatesScopedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// NodeType: Represent a sole-tenant Node Type resource. Each node -// within a node group must have a node type. A node type specifies the -// total amount of cores and memory for that node. Currently, the only -// available node type is n1-node-96-624 node type that has 96 vCPUs and -// 624 GB of memory, available in multiple zones. For more information -// read Node types. -type NodeType struct { - // CpuPlatform: [Output Only] The CPU platform used by this node type. - CpuPlatform string `json:"cpuPlatform,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Deprecated -- [Output Only] The deprecation status associated with - // this node type. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] An optional textual description of the - // resource. - Description string `json:"description,omitempty"` - - // GuestCpus: [Output Only] The number of virtual CPUs that are - // available to the node type. - GuestCpus int64 `json:"guestCpus,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] The type of the resource. Always compute#nodeType - // for node types. - Kind string `json:"kind,omitempty"` - - // LocalSsdGb: [Output Only] Local SSD available to the node type, - // defined in GB. - LocalSsdGb int64 `json:"localSsdGb,omitempty"` - - // MemoryMb: [Output Only] The amount of physical memory available to - // the node type, defined in MB. - MemoryMb int64 `json:"memoryMb,omitempty"` - - // Name: [Output Only] Name of the resource. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Zone: [Output Only] The name of the zone where the node type resides, - // such as us-central1-a. - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CpuPlatform") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CpuPlatform") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *NodeType) MarshalJSON() ([]byte, error) { - type NoMethod NodeType +func (s *NodeGroupsDeleteNodesRequest) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsDeleteNodesRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTypeAggregatedList struct { +type NodeGroupsListNodes struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NodeTypesScopedList resources. - Items map[string]NodeTypesScopedList `json:"items,omitempty"` + // Items: A list of Node resources. + Items []*NodeGroupNode `json:"items,omitempty"` - // Kind: [Output Only] Type of resource.Always - // compute#nodeTypeAggregatedList for aggregated lists of node types. + // Kind: [Output Only] The resource type, which is always + // compute.nodeGroupsListNodes for the list of nodes in the specified + // node group. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -28955,11 +30481,8 @@ type NodeTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *NodeTypeAggregatedListWarning `json:"warning,omitempty"` + Warning *NodeGroupsListNodesWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -28982,15 +30505,15 @@ type NodeTypeAggregatedList struct { NullFields []string `json:"-"` } -func (s *NodeTypeAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypeAggregatedList +func (s *NodeGroupsListNodes) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsListNodes raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTypeAggregatedListWarning: [Output Only] Informational warning +// NodeGroupsListNodesWarning: [Output Only] Informational warning // message. -type NodeTypeAggregatedListWarning struct { +type NodeGroupsListNodesWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -29012,6 +30535,9 @@ type NodeTypeAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -29019,6 +30545,9 @@ type NodeTypeAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. 
// "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -29052,7 +30581,7 @@ type NodeTypeAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NodeTypeAggregatedListWarningData `json:"data,omitempty"` + Data []*NodeGroupsListNodesWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -29075,13 +30604,13 @@ type NodeTypeAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypeAggregatedListWarning +func (s *NodeGroupsListNodesWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsListNodesWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTypeAggregatedListWarningData struct { +type NodeGroupsListNodesWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -29112,44 +30641,22 @@ type NodeTypeAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypeAggregatedListWarningData +func (s *NodeGroupsListNodesWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsListNodesWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTypeList: Contains a list of node types. -type NodeTypeList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of NodeType resources. - Items []*NodeType `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource.Always compute#nodeTypeList for - // lists of node types. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *NodeTypeListWarning `json:"warning,omitempty"` +type NodeGroupsScopedList struct { + // NodeGroups: [Output Only] A list of node groups contained in this + // scope. + NodeGroups []*NodeGroup `json:"nodeGroups,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: [Output Only] An informational warning that appears when the + // nodeGroup list is empty. + Warning *NodeGroupsScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "NodeGroups") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -29157,8 +30664,8 @@ type NodeTypeList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "NodeGroups") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -29166,14 +30673,15 @@ type NodeTypeList struct { NullFields []string `json:"-"` } -func (s *NodeTypeList) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypeList +func (s *NodeGroupsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTypeListWarning: [Output Only] Informational warning message. -type NodeTypeListWarning struct { +// NodeGroupsScopedListWarning: [Output Only] An informational warning +// that appears when the nodeGroup list is empty. +type NodeGroupsScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -29195,6 +30703,9 @@ type NodeTypeListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -29202,6 +30713,9 @@ type NodeTypeListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -29235,7 +30749,7 @@ type NodeTypeListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NodeTypeListWarningData `json:"data,omitempty"` + Data []*NodeGroupsScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
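Every generated MarshalJSON method in the hunks above follows the same NoMethod pattern. A minimal, self-contained sketch of why the alias type is used (Example is a hypothetical struct, not one from this file; the generated code additionally hands the alias to gensupport.MarshalJSON so ForceSendFields and NullFields are applied):

package main

import (
	"encoding/json"
	"fmt"
)

type Example struct {
	Name string `json:"name,omitempty"`
}

func (s *Example) MarshalJSON() ([]byte, error) {
	// The locally defined alias keeps Example's fields and JSON tags but has
	// an empty method set, so marshalling it does not re-enter this
	// MarshalJSON method (which would otherwise recurse indefinitely).
	type NoMethod Example
	return json.Marshal(NoMethod(*s))
}

func main() {
	b, _ := (&Example{Name: "n1"}).MarshalJSON()
	fmt.Println(string(b)) // {"name":"n1"}
}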
@@ -29258,13 +30772,13 @@ type NodeTypeListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTypeListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypeListWarning +func (s *NodeGroupsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTypeListWarningData struct { +type NodeGroupsScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -29295,22 +30809,131 @@ type NodeTypeListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTypeListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypeListWarningData +func (s *NodeGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTypesScopedList struct { - // NodeTypes: [Output Only] A list of node types contained in this - // scope. - NodeTypes []*NodeType `json:"nodeTypes,omitempty"` +type NodeGroupsSetNodeTemplateRequest struct { + // NodeTemplate: Full or partial URL of the node template resource to be + // updated for this node group. + NodeTemplate string `json:"nodeTemplate,omitempty"` - // Warning: [Output Only] An informational warning that appears when the - // node types list is empty. - Warning *NodeTypesScopedListWarning `json:"warning,omitempty"` + // ForceSendFields is a list of field names (e.g. "NodeTemplate") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // ForceSendFields is a list of field names (e.g. "NodeTypes") to + // NullFields is a list of field names (e.g. "NodeTemplate") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NodeGroupsSetNodeTemplateRequest) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsSetNodeTemplateRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NodeTemplate: Represent a sole-tenant Node Template resource. You can +// use a template to define properties for nodes in a node group. For +// more information, read Creating node groups and instances. +type NodeTemplate struct { + Accelerators []*AcceleratorConfig `json:"accelerators,omitempty"` + + // CpuOvercommitType: CPU overcommit. + // + // Possible values: + // "CPU_OVERCOMMIT_TYPE_UNSPECIFIED" + // "ENABLED" + // "NONE" + CpuOvercommitType string `json:"cpuOvercommitType,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. 
+ CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + Disks []*LocalDisk `json:"disks,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] The type of the resource. Always + // compute#nodeTemplate for node templates. + Kind string `json:"kind,omitempty"` + + // Name: The name of the resource, provided by the client when initially + // creating the resource. The resource name must be 1-63 characters + // long, and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + // NodeAffinityLabels: Labels to use for node affinity, which will be + // used in instance scheduling. + NodeAffinityLabels map[string]string `json:"nodeAffinityLabels,omitempty"` + + // NodeType: The node type to use for nodes group that are created from + // this template. + NodeType string `json:"nodeType,omitempty"` + + // NodeTypeFlexibility: The flexible properties of the desired node + // type. Node groups that use this node template will create nodes of a + // type that matches these properties. This field is mutually exclusive + // with the node_type property; you can only define one or the other, + // but not both. + NodeTypeFlexibility *NodeTemplateNodeTypeFlexibility `json:"nodeTypeFlexibility,omitempty"` + + // Region: [Output Only] The name of the region where the node template + // resides, such as us-central1. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerBinding: Sets the binding properties for the physical server. + // Valid values include: - *[Default]* RESTART_NODE_ON_ANY_SERVER: + // Restarts VMs on any available physical server - + // RESTART_NODE_ON_MINIMAL_SERVER: Restarts VMs on the same physical + // server whenever possible See Sole-tenant node options for more + // information. + ServerBinding *ServerBinding `json:"serverBinding,omitempty"` + + // Status: [Output Only] The status of the node template. One of the + // following values: CREATING, READY, and DELETING. + // + // Possible values: + // "CREATING" - Resources are being allocated. + // "DELETING" - The node template is currently being deleted. + // "INVALID" - Invalid status. + // "READY" - The node template is ready. + Status string `json:"status,omitempty"` + + // StatusMessage: [Output Only] An optional, human-readable explanation + // of the status. + StatusMessage string `json:"statusMessage,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Accelerators") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -29318,8 +30941,65 @@ type NodeTypesScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NodeTypes") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Accelerators") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NodeTemplate) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplate + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NodeTemplateAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of NodeTemplatesScopedList resources. + Items map[string]NodeTemplatesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource.Always + // compute#nodeTemplateAggregatedList for aggregated lists of node + // templates. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *NodeTemplateAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
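The ForceSendFields/NullFields comments repeated on these structs describe how otherwise-omitted values can still be put on the wire. A minimal sketch of the effect, using the NodeGroupsSetNodeTemplateRequest type added above and assuming this vendored file is the compute/v1 client imported as compute; the printed JSON reflects how gensupport.MarshalJSON is expected to behave, not output captured from this diff:

package main

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this package
)

func main() {
	req := &compute.NodeGroupsSetNodeTemplateRequest{}

	b, err := req.MarshalJSON()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // {} — the empty NodeTemplate field is omitted by default

	req.ForceSendFields = []string{"NodeTemplate"}
	b, _ = req.MarshalJSON()
	fmt.Println(string(b)) // {"nodeTemplate":""} — the empty value is sent anyway

	req.ForceSendFields = nil
	req.NullFields = []string{"NodeTemplate"}
	b, _ = req.MarshalJSON()
	fmt.Println(string(b)) // {"nodeTemplate":null} — explicit null, e.g. to clear a field in a Patch request
}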
@@ -29327,15 +31007,15 @@ type NodeTypesScopedList struct { NullFields []string `json:"-"` } -func (s *NodeTypesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypesScopedList +func (s *NodeTemplateAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTypesScopedListWarning: [Output Only] An informational warning -// that appears when the node types list is empty. -type NodeTypesScopedListWarning struct { +// NodeTemplateAggregatedListWarning: [Output Only] Informational +// warning message. +type NodeTemplateAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -29357,6 +31037,9 @@ type NodeTypesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -29364,6 +31047,9 @@ type NodeTypesScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -29397,7 +31083,7 @@ type NodeTypesScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NodeTypesScopedListWarningData `json:"data,omitempty"` + Data []*NodeTemplateAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -29420,13 +31106,13 @@ type NodeTypesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypesScopedListWarning +func (s *NodeTemplateAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTypesScopedListWarningData struct { +type NodeTemplateAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -29457,145 +31143,23 @@ type NodeTypesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypesScopedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// NotificationEndpoint: Represents a notification endpoint. 
A -// notification endpoint resource defines an endpoint to receive -// notifications when there are status changes detected by the -// associated health check service. For more information, see Health -// checks overview. -type NotificationEndpoint struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // GrpcSettings: Settings of the gRPC notification endpoint including - // the endpoint URL and the retry duration. - GrpcSettings *NotificationEndpointGrpcSettings `json:"grpcSettings,omitempty"` - - // Id: [Output Only] A unique identifier for this resource type. The - // server generates this identifier. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always - // compute#notificationEndpoint for notification endpoints. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Region: [Output Only] URL of the region where the notification - // endpoint resides. This field applies only to the regional resource. - // You must specify this field as part of the HTTP request URL. It is - // not settable as a field in the request body. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *NotificationEndpoint) MarshalJSON() ([]byte, error) { - type NoMethod NotificationEndpoint - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// NotificationEndpointGrpcSettings: Represents a gRPC setting that -// describes one gRPC notification endpoint and the retry duration -// attempting to send notification to this endpoint. 
-type NotificationEndpointGrpcSettings struct { - // Authority: Optional. If specified, this field is used to set the - // authority header by the sender of notifications. See - // https://tools.ietf.org/html/rfc7540#section-8.1.2.3 - Authority string `json:"authority,omitempty"` - - // Endpoint: Endpoint to which gRPC notifications are sent. This must be - // a valid gRPCLB DNS name. - Endpoint string `json:"endpoint,omitempty"` - - // PayloadName: Optional. If specified, this field is used to populate - // the "name" field in gRPC requests. - PayloadName string `json:"payloadName,omitempty"` - - // ResendInterval: Optional. This field is used to configure how often - // to send a full update of all non-healthy backends. If unspecified, - // full updates are not sent. If specified, must be in the range between - // 600 seconds to 3600 seconds. Nanos are disallowed. - ResendInterval *Duration `json:"resendInterval,omitempty"` - - // RetryDurationSec: How much time (in seconds) is spent attempting - // notification retries until a successful response is received. Default - // is 30s. Limit is 20m (1200s). Must be a positive number. - RetryDurationSec int64 `json:"retryDurationSec,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Authority") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Authority") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *NotificationEndpointGrpcSettings) MarshalJSON() ([]byte, error) { - type NoMethod NotificationEndpointGrpcSettings +func (s *NodeTemplateAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NotificationEndpointList struct { +// NodeTemplateList: Contains a list of node templates. +type NodeTemplateList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NotificationEndpoint resources. - Items []*NotificationEndpoint `json:"items,omitempty"` + // Items: A list of NodeTemplate resources. + Items []*NodeTemplate `json:"items,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#notificationEndpoint for notification endpoints. + // Kind: [Output Only] Type of resource.Always compute#nodeTemplateList + // for lists of node templates. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -29610,7 +31174,7 @@ type NotificationEndpointList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. 
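The NextPageToken comments on NodeTemplateList (and on the other *List types in this file) spell out the paging contract: feed each response's nextPageToken back as the pageToken of the next request until it comes back empty. A rough sketch of that loop, assuming the compute/v1 client and its generated NodeTemplates.List call; the project and region names are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this package
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	pageToken := ""
	for {
		call := svc.NodeTemplates.List("my-project", "us-central1") // placeholder project/region
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		list, err := call.Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, tpl := range list.Items {
			fmt.Println(tpl.Name, tpl.NodeType, tpl.Status)
		}
		if list.NextPageToken == "" {
			break // last page reached
		}
		pageToken = list.NextPageToken
	}
}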
- Warning *NotificationEndpointListWarning `json:"warning,omitempty"` + Warning *NodeTemplateListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -29633,15 +31197,14 @@ type NotificationEndpointList struct { NullFields []string `json:"-"` } -func (s *NotificationEndpointList) MarshalJSON() ([]byte, error) { - type NoMethod NotificationEndpointList +func (s *NodeTemplateList) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NotificationEndpointListWarning: [Output Only] Informational warning -// message. -type NotificationEndpointListWarning struct { +// NodeTemplateListWarning: [Output Only] Informational warning message. +type NodeTemplateListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -29663,6 +31226,9 @@ type NotificationEndpointListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -29670,6 +31236,9 @@ type NotificationEndpointListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -29703,7 +31272,7 @@ type NotificationEndpointListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*NotificationEndpointListWarningData `json:"data,omitempty"` + Data []*NodeTemplateListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -29726,13 +31295,13 @@ type NotificationEndpointListWarning struct { NullFields []string `json:"-"` } -func (s *NotificationEndpointListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NotificationEndpointListWarning +func (s *NodeTemplateListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NotificationEndpointListWarningData struct { +type NodeTemplateListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -29763,166 +31332,20 @@ type NotificationEndpointListWarningData struct { NullFields []string `json:"-"` } -func (s *NotificationEndpointListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NotificationEndpointListWarningData +func (s *NodeTemplateListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Operation: Represents an Operation resource. Google Compute Engine -// has three Operation resources: * Global -// (/compute/docs/reference/rest/v1/globalOperations) * Regional -// (/compute/docs/reference/rest/v1/regionOperations) * Zonal -// (/compute/docs/reference/rest/v1/zoneOperations) You can use an -// operation resource to manage asynchronous API requests. For more -// information, read Handling API responses. Operations can be global, -// regional or zonal. - For global operations, use the -// `globalOperations` resource. - For regional operations, use the -// `regionOperations` resource. - For zonal operations, use the -// `zonalOperations` resource. For more information, read Global, -// Regional, and Zonal Resources. -type Operation struct { - // ClientOperationId: [Output Only] The value of `requestId` if you - // provided it in the request. Not present otherwise. - ClientOperationId string `json:"clientOperationId,omitempty"` - - // CreationTimestamp: [Deprecated] This field is deprecated. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: [Output Only] A textual description of the operation, - // which is set when the operation is created. - Description string `json:"description,omitempty"` - - // EndTime: [Output Only] The time that this operation was completed. - // This value is in RFC3339 text format. - EndTime string `json:"endTime,omitempty"` - - // Error: [Output Only] If errors are generated during processing of the - // operation, this field will be populated. - Error *OperationError `json:"error,omitempty"` - - // HttpErrorMessage: [Output Only] If the operation fails, this field - // contains the HTTP error message that was returned, such as `NOT - // FOUND`. - HttpErrorMessage string `json:"httpErrorMessage,omitempty"` - - // HttpErrorStatusCode: [Output Only] If the operation fails, this field - // contains the HTTP error status code that was returned. For example, a - // `404` means the resource was not found. - HttpErrorStatusCode int64 `json:"httpErrorStatusCode,omitempty"` - - // Id: [Output Only] The unique identifier for the operation. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // InsertTime: [Output Only] The time that this operation was requested. - // This value is in RFC3339 text format. - InsertTime string `json:"insertTime,omitempty"` - - // Kind: [Output Only] Type of the resource. Always `compute#operation` - // for Operation resources. - Kind string `json:"kind,omitempty"` - - // Name: [Output Only] Name of the operation. - Name string `json:"name,omitempty"` - - // OperationGroupId: [Output Only] An ID that represents a group of - // operations, such as when a group of operations results from a - // `bulkInsert` API request. - OperationGroupId string `json:"operationGroupId,omitempty"` - - // OperationType: [Output Only] The type of operation, such as `insert`, - // `update`, or `delete`, and so on. 
- OperationType string `json:"operationType,omitempty"` - - // Progress: [Output Only] An optional progress indicator that ranges - // from 0 to 100. There is no requirement that this be linear or support - // any granularity of operations. This should not be used to guess when - // the operation will be complete. This number should monotonically - // increase as the operation progresses. - Progress int64 `json:"progress,omitempty"` - - // Region: [Output Only] The URL of the region where the operation - // resides. Only applicable when performing regional operations. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // StartTime: [Output Only] The time that this operation was started by - // the server. This value is in RFC3339 text format. - StartTime string `json:"startTime,omitempty"` - - // Status: [Output Only] The status of the operation, which can be one - // of the following: `PENDING`, `RUNNING`, or `DONE`. - // - // Possible values: - // "DONE" - // "PENDING" - // "RUNNING" - Status string `json:"status,omitempty"` - - // StatusMessage: [Output Only] An optional textual description of the - // current status of the operation. - StatusMessage string `json:"statusMessage,omitempty"` - - // TargetId: [Output Only] The unique target ID, which identifies a - // specific incarnation of the target resource. - TargetId uint64 `json:"targetId,omitempty,string"` - - // TargetLink: [Output Only] The URL of the resource that the operation - // modifies. For operations related to creating a snapshot, this points - // to the persistent disk that the snapshot was created from. - TargetLink string `json:"targetLink,omitempty"` - - // User: [Output Only] User who requested the operation, for example: - // `user@example.com`. - User string `json:"user,omitempty"` - - // Warnings: [Output Only] If warning messages are generated during - // processing of the operation, this field will be populated. - Warnings []*OperationWarnings `json:"warnings,omitempty"` - - // Zone: [Output Only] The URL of the zone where the operation resides. - // Only applicable when performing per-zone operations. - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ClientOperationId") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ClientOperationId") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} +type NodeTemplateNodeTypeFlexibility struct { + Cpus string `json:"cpus,omitempty"` -func (s *Operation) MarshalJSON() ([]byte, error) { - type NoMethod Operation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + LocalSsd string `json:"localSsd,omitempty"` -// OperationError: [Output Only] If errors are generated during -// processing of the operation, this field will be populated. -type OperationError struct { - // Errors: [Output Only] The array of errors encountered while - // processing this operation. - Errors []*OperationErrorErrors `json:"errors,omitempty"` + Memory string `json:"memory,omitempty"` - // ForceSendFields is a list of field names (e.g. "Errors") to + // ForceSendFields is a list of field names (e.g. "Cpus") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -29930,7 +31353,7 @@ type OperationError struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Errors") to include in API + // NullFields is a list of field names (e.g. "Cpus") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -29939,24 +31362,22 @@ type OperationError struct { NullFields []string `json:"-"` } -func (s *OperationError) MarshalJSON() ([]byte, error) { - type NoMethod OperationError +func (s *NodeTemplateNodeTypeFlexibility) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateNodeTypeFlexibility raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationErrorErrors struct { - // Code: [Output Only] The error type identifier for this error. - Code string `json:"code,omitempty"` - - // Location: [Output Only] Indicates the field in the request that - // caused the error. This property is optional. - Location string `json:"location,omitempty"` +type NodeTemplatesScopedList struct { + // NodeTemplates: [Output Only] A list of node templates contained in + // this scope. + NodeTemplates []*NodeTemplate `json:"nodeTemplates,omitempty"` - // Message: [Output Only] An optional, human-readable error message. - Message string `json:"message,omitempty"` + // Warning: [Output Only] An informational warning that appears when the + // node templates list is empty. + Warning *NodeTemplatesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "NodeTemplates") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -29964,22 +31385,24 @@ type OperationErrorErrors struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "NodeTemplates") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { - type NoMethod OperationErrorErrors +func (s *NodeTemplatesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplatesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationWarnings struct { +// NodeTemplatesScopedListWarning: [Output Only] An informational +// warning that appears when the node templates list is empty. +type NodeTemplatesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -30001,6 +31424,9 @@ type OperationWarnings struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -30008,6 +31434,9 @@ type OperationWarnings struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -30041,7 +31470,7 @@ type OperationWarnings struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*OperationWarningsData `json:"data,omitempty"` + Data []*NodeTemplatesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -30064,13 +31493,13 @@ type OperationWarnings struct { NullFields []string `json:"-"` } -func (s *OperationWarnings) MarshalJSON() ([]byte, error) { - type NoMethod OperationWarnings +func (s *NodeTemplatesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplatesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationWarningsData struct { +type NodeTemplatesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
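The Operation doc comments in the deleted hunks above describe how asynchronous requests are tracked: callers watch Status until it reaches DONE and then inspect Error (with its OperationErrorErrors detail entries) for failures. A rough polling sketch under those comments' description, assuming the compute/v1 client and its zonal NodeGroups.AddNodes and ZoneOperations.Get calls; the project, zone, and node-group names are placeholders:

package main

import (
	"context"
	"log"
	"time"

	compute "google.golang.org/api/compute/v1" // assumed import path for this package
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Kick off an asynchronous request; it returns an Operation immediately.
	op, err := svc.NodeGroups.AddNodes("my-project", "us-central1-a", "my-node-group",
		&compute.NodeGroupsAddNodesRequest{AdditionalNodeCount: 2}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	// Poll the zonal operation until Status reaches DONE.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.ZoneOperations.Get("my-project", "us-central1-a", op.Name).Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
	}

	// A DONE operation can still carry errors.
	if op.Error != nil {
		for _, e := range op.Error.Errors {
			log.Printf("operation failed: %s (%s): %s", e.Code, e.Location, e.Message)
		}
	}
}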
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -30101,30 +31530,109 @@ type OperationWarningsData struct { NullFields []string `json:"-"` } -func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { - type NoMethod OperationWarningsData +func (s *NodeTemplatesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplatesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationAggregatedList struct { +// NodeType: Represent a sole-tenant Node Type resource. Each node +// within a node group must have a node type. A node type specifies the +// total amount of cores and memory for that node. Currently, the only +// available node type is n1-node-96-624 node type that has 96 vCPUs and +// 624 GB of memory, available in multiple zones. For more information +// read Node types. +type NodeType struct { + // CpuPlatform: [Output Only] The CPU platform used by this node type. + CpuPlatform string `json:"cpuPlatform,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated -- [Output Only] The deprecation status associated with + // this node type. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] An optional textual description of the + // resource. + Description string `json:"description,omitempty"` + + // GuestCpus: [Output Only] The number of virtual CPUs that are + // available to the node type. + GuestCpus int64 `json:"guestCpus,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] The type of the resource. Always compute#nodeType + // for node types. + Kind string `json:"kind,omitempty"` + + // LocalSsdGb: [Output Only] Local SSD available to the node type, + // defined in GB. + LocalSsdGb int64 `json:"localSsdGb,omitempty"` + + // MemoryMb: [Output Only] The amount of physical memory available to + // the node type, defined in MB. + MemoryMb int64 `json:"memoryMb,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Zone: [Output Only] The name of the zone where the node type resides, + // such as us-central1-a. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CpuPlatform") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CpuPlatform") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NodeType) MarshalJSON() ([]byte, error) { + type NoMethod NodeType + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NodeTypeAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` - // Items: [Output Only] A map of scoped operation lists. - Items map[string]OperationsScopedList `json:"items,omitempty"` + // Items: A list of NodeTypesScopedList resources. + Items map[string]NodeTypesScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always - // `compute#operationAggregatedList` for aggregated lists of operations. + // Kind: [Output Only] Type of resource.Always + // compute#nodeTypeAggregatedList for aggregated lists of node types. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next // page of results for list requests. If the number of results is larger - // than `maxResults`, use the `nextPageToken` as a value for the query - // parameter `pageToken` in the next list request. Subsequent list - // requests will have their own `nextPageToken` to continue paging - // through the results. + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. NextPageToken string `json:"nextPageToken,omitempty"` // SelfLink: [Output Only] Server-defined URL for this resource. @@ -30134,7 +31642,7 @@ type OperationAggregatedList struct { Unreachables []string `json:"unreachables,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *OperationAggregatedListWarning `json:"warning,omitempty"` + Warning *NodeTypeAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -30157,15 +31665,15 @@ type OperationAggregatedList struct { NullFields []string `json:"-"` } -func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod OperationAggregatedList +func (s *NodeTypeAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypeAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationAggregatedListWarning: [Output Only] Informational warning +// NodeTypeAggregatedListWarning: [Output Only] Informational warning // message. -type OperationAggregatedListWarning struct { +type NodeTypeAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -30187,6 +31695,9 @@ type OperationAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -30194,6 +31705,9 @@ type OperationAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -30227,7 +31741,7 @@ type OperationAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*OperationAggregatedListWarningData `json:"data,omitempty"` + Data []*NodeTypeAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -30250,13 +31764,13 @@ type OperationAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *OperationAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod OperationAggregatedListWarning +func (s *NodeTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypeAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationAggregatedListWarningData struct { +type NodeTypeAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -30287,38 +31801,38 @@ type OperationAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *OperationAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod OperationAggregatedListWarningData +func (s *NodeTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypeAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationList: Contains a list of Operation resources. -type OperationList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. +// NodeTypeList: Contains a list of node types. +type NodeTypeList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` - // Items: [Output Only] A list of Operation resources. - Items []*Operation `json:"items,omitempty"` + // Items: A list of NodeType resources. + Items []*NodeType `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always `compute#operations` for - // Operations resource. + // Kind: [Output Only] Type of resource.Always compute#nodeTypeList for + // lists of node types. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next // page of results for list requests. If the number of results is larger - // than `maxResults`, use the `nextPageToken` as a value for the query - // parameter `pageToken` in the next list request. 
Subsequent list - // requests will have their own `nextPageToken` to continue paging - // through the results. + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. NextPageToken string `json:"nextPageToken,omitempty"` // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *OperationListWarning `json:"warning,omitempty"` + Warning *NodeTypeListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -30341,14 +31855,14 @@ type OperationList struct { NullFields []string `json:"-"` } -func (s *OperationList) MarshalJSON() ([]byte, error) { - type NoMethod OperationList +func (s *NodeTypeList) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypeList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationListWarning: [Output Only] Informational warning message. -type OperationListWarning struct { +// NodeTypeListWarning: [Output Only] Informational warning message. +type NodeTypeListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -30370,6 +31884,9 @@ type OperationListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -30377,6 +31894,9 @@ type OperationListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -30410,7 +31930,7 @@ type OperationListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*OperationListWarningData `json:"data,omitempty"` + Data []*NodeTypeListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -30433,13 +31953,13 @@ type OperationListWarning struct { NullFields []string `json:"-"` } -func (s *OperationListWarning) MarshalJSON() ([]byte, error) { - type NoMethod OperationListWarning +func (s *NodeTypeListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypeListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationListWarningData struct { +type NodeTypeListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -30470,22 +31990,22 @@ type OperationListWarningData struct { NullFields []string `json:"-"` } -func (s *OperationListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod OperationListWarningData +func (s *NodeTypeListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypeListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationsScopedList struct { - // Operations: [Output Only] A list of operations contained in this +type NodeTypesScopedList struct { + // NodeTypes: [Output Only] A list of node types contained in this // scope. - Operations []*Operation `json:"operations,omitempty"` + NodeTypes []*NodeType `json:"nodeTypes,omitempty"` - // Warning: [Output Only] Informational warning which replaces the list - // of operations when the list is empty. - Warning *OperationsScopedListWarning `json:"warning,omitempty"` + // Warning: [Output Only] An informational warning that appears when the + // node types list is empty. + Warning *NodeTypesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Operations") to + // ForceSendFields is a list of field names (e.g. "NodeTypes") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -30493,7 +32013,7 @@ type OperationsScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Operations") to include in + // NullFields is a list of field names (e.g. "NodeTypes") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -30502,15 +32022,15 @@ type OperationsScopedList struct { NullFields []string `json:"-"` } -func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod OperationsScopedList +func (s *NodeTypesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationsScopedListWarning: [Output Only] Informational warning -// which replaces the list of operations when the list is empty. -type OperationsScopedListWarning struct { +// NodeTypesScopedListWarning: [Output Only] An informational warning +// that appears when the node types list is empty. +type NodeTypesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -30532,6 +32052,9 @@ type OperationsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -30539,6 +32062,9 @@ type OperationsScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -30572,7 +32098,7 @@ type OperationsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*OperationsScopedListWarningData `json:"data,omitempty"` + Data []*NodeTypesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -30595,13 +32121,13 @@ type OperationsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod OperationsScopedListWarning +func (s *NodeTypesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationsScopedListWarningData struct { +type NodeTypesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -30632,92 +32158,69 @@ type OperationsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod OperationsScopedListWarningData +func (s *NodeTypesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OutlierDetection: Settings controlling the eviction of unhealthy -// hosts from the load balancing pool for the backend service. -type OutlierDetection struct { - // BaseEjectionTime: The base time that a host is ejected for. The real - // ejection time is equal to the base ejection time multiplied by the - // number of times the host has been ejected. Defaults to 30000ms or - // 30s. - BaseEjectionTime *Duration `json:"baseEjectionTime,omitempty"` - - // ConsecutiveErrors: Number of errors before a host is ejected from the - // connection pool. When the backend host is accessed over HTTP, a 5xx - // return code qualifies as an error. Defaults to 5. - ConsecutiveErrors int64 `json:"consecutiveErrors,omitempty"` - - // ConsecutiveGatewayFailure: The number of consecutive gateway failures - // (502, 503, 504 status or connection errors that are mapped to one of - // those status codes) before a consecutive gateway failure ejection - // occurs. Defaults to 3. - ConsecutiveGatewayFailure int64 `json:"consecutiveGatewayFailure,omitempty"` +// NotificationEndpoint: Represents a notification endpoint. 
A +// notification endpoint resource defines an endpoint to receive +// notifications when there are status changes detected by the +// associated health check service. For more information, see Health +// checks overview. +type NotificationEndpoint struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // EnforcingConsecutiveErrors: The percentage chance that a host will be - // actually ejected when an outlier status is detected through - // consecutive 5xx. This setting can be used to disable ejection or to - // ramp it up slowly. Defaults to 0. - EnforcingConsecutiveErrors int64 `json:"enforcingConsecutiveErrors,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // EnforcingConsecutiveGatewayFailure: The percentage chance that a host - // will be actually ejected when an outlier status is detected through - // consecutive gateway failures. This setting can be used to disable - // ejection or to ramp it up slowly. Defaults to 100. - EnforcingConsecutiveGatewayFailure int64 `json:"enforcingConsecutiveGatewayFailure,omitempty"` + // GrpcSettings: Settings of the gRPC notification endpoint including + // the endpoint URL and the retry duration. + GrpcSettings *NotificationEndpointGrpcSettings `json:"grpcSettings,omitempty"` - // EnforcingSuccessRate: The percentage chance that a host will be - // actually ejected when an outlier status is detected through success - // rate statistics. This setting can be used to disable ejection or to - // ramp it up slowly. Defaults to 100. - EnforcingSuccessRate int64 `json:"enforcingSuccessRate,omitempty"` + // Id: [Output Only] A unique identifier for this resource type. The + // server generates this identifier. + Id uint64 `json:"id,omitempty,string"` - // Interval: Time interval between ejection analysis sweeps. This can - // result in both new ejections as well as hosts being returned to - // service. Defaults to 1 second. - Interval *Duration `json:"interval,omitempty"` + // Kind: [Output Only] Type of the resource. Always + // compute#notificationEndpoint for notification endpoints. + Kind string `json:"kind,omitempty"` - // MaxEjectionPercent: Maximum percentage of hosts in the load balancing - // pool for the backend service that can be ejected. Defaults to 50%. - MaxEjectionPercent int64 `json:"maxEjectionPercent,omitempty"` + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` - // SuccessRateMinimumHosts: The number of hosts in a cluster that must - // have enough request volume to detect success rate outliers. If the - // number of hosts is less than this setting, outlier detection via - // success rate statistics is not performed for any host in the cluster. - // Defaults to 5. - SuccessRateMinimumHosts int64 `json:"successRateMinimumHosts,omitempty"` + // Region: [Output Only] URL of the region where the notification + // endpoint resides. 
This field applies only to the regional resource. + // You must specify this field as part of the HTTP request URL. It is + // not settable as a field in the request body. + Region string `json:"region,omitempty"` - // SuccessRateRequestVolume: The minimum number of total requests that - // must be collected in one interval (as defined by the interval - // duration above) to include this host in success rate based outlier - // detection. If the volume is lower than this setting, outlier - // detection via success rate statistics is not performed for that host. - // Defaults to 100. - SuccessRateRequestVolume int64 `json:"successRateRequestVolume,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` - // SuccessRateStdevFactor: This factor is used to determine the ejection - // threshold for success rate outlier ejection. The ejection threshold - // is the difference between the mean success rate, and the product of - // this factor and the standard deviation of the mean success rate: mean - // - (stdev * success_rate_stdev_factor). This factor is divided by a - // thousand to get a double. That is, if the desired factor is 1.9, the - // runtime value should be 1900. Defaults to 1900. - SuccessRateStdevFactor int64 `json:"successRateStdevFactor,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "BaseEjectionTime") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BaseEjectionTime") to + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -30727,49 +32230,42 @@ type OutlierDetection struct { NullFields []string `json:"-"` } -func (s *OutlierDetection) MarshalJSON() ([]byte, error) { - type NoMethod OutlierDetection +func (s *NotificationEndpoint) MarshalJSON() ([]byte, error) { + type NoMethod NotificationEndpoint raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PacketIntervals: Next free: 7 -type PacketIntervals struct { - // AvgMs: Average observed inter-packet interval in milliseconds. - AvgMs int64 `json:"avgMs,omitempty,string"` - - // Duration: From how long ago in the past these intervals were - // observed. - // - // Possible values: - // "DURATION_UNSPECIFIED" - // "HOUR" - // "MAX" - From BfdSession object creation time. - // "MINUTE" - Duration string `json:"duration,omitempty"` +// NotificationEndpointGrpcSettings: Represents a gRPC setting that +// describes one gRPC notification endpoint and the retry duration +// attempting to send notification to this endpoint. +type NotificationEndpointGrpcSettings struct { + // Authority: Optional. 
If specified, this field is used to set the + // authority header by the sender of notifications. See + // https://tools.ietf.org/html/rfc7540#section-8.1.2.3 + Authority string `json:"authority,omitempty"` - // MaxMs: Maximum observed inter-packet interval in milliseconds. - MaxMs int64 `json:"maxMs,omitempty,string"` + // Endpoint: Endpoint to which gRPC notifications are sent. This must be + // a valid gRPCLB DNS name. + Endpoint string `json:"endpoint,omitempty"` - // MinMs: Minimum observed inter-packet interval in milliseconds. - MinMs int64 `json:"minMs,omitempty,string"` + // PayloadName: Optional. If specified, this field is used to populate + // the "name" field in gRPC requests. + PayloadName string `json:"payloadName,omitempty"` - // NumIntervals: Number of inter-packet intervals from which these - // statistics were derived. - NumIntervals int64 `json:"numIntervals,omitempty,string"` + // ResendInterval: Optional. This field is used to configure how often + // to send a full update of all non-healthy backends. If unspecified, + // full updates are not sent. If specified, must be in the range between + // 600 seconds to 3600 seconds. Nanos are disallowed. Can only be set + // for regional notification endpoints. + ResendInterval *Duration `json:"resendInterval,omitempty"` - // Type: The type of packets for which inter-packet intervals were - // computed. - // - // Possible values: - // "LOOPBACK" - Only applies to Echo packets. This shows the intervals - // between sending and receiving the same packet. - // "RECEIVE" - Intervals between received packets. - // "TRANSMIT" - Intervals between transmitted packets. - // "TYPE_UNSPECIFIED" - Type string `json:"type,omitempty"` + // RetryDurationSec: How much time (in seconds) is spent attempting + // notification retries until a successful response is received. Default + // is 30s. Limit is 20m (1200s). Must be a positive number. + RetryDurationSec int64 `json:"retryDurationSec,omitempty"` - // ForceSendFields is a list of field names (e.g. "AvgMs") to + // ForceSendFields is a list of field names (e.g. "Authority") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -30777,8 +32273,8 @@ type PacketIntervals struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AvgMs") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Authority") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -30786,126 +32282,22 @@ type PacketIntervals struct { NullFields []string `json:"-"` } -func (s *PacketIntervals) MarshalJSON() ([]byte, error) { - type NoMethod PacketIntervals - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// PacketMirroring: Represents a Packet Mirroring resource. 
Packet -// Mirroring clones the traffic of specified instances in your Virtual -// Private Cloud (VPC) network and forwards it to a collector -// destination, such as an instance group of an internal TCP/UDP load -// balancer, for analysis or examination. For more information about -// setting up Packet Mirroring, see Using Packet Mirroring. -type PacketMirroring struct { - // CollectorIlb: The Forwarding Rule resource of type - // loadBalancingScheme=INTERNAL that will be used as collector for - // mirrored traffic. The specified forwarding rule must have - // isMirroringCollector set to true. - CollectorIlb *PacketMirroringForwardingRuleInfo `json:"collectorIlb,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Enable: Indicates whether or not this packet mirroring takes effect. - // If set to FALSE, this packet mirroring policy will not be enforced on - // the network. The default is TRUE. - // - // Possible values: - // "FALSE" - // "TRUE" - Enable string `json:"enable,omitempty"` - - // Filter: Filter for mirrored traffic. If unspecified, all traffic is - // mirrored. - Filter *PacketMirroringFilter `json:"filter,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always - // compute#packetMirroring for packet mirrorings. - Kind string `json:"kind,omitempty"` - - // MirroredResources: PacketMirroring mirroredResourceInfos. - // MirroredResourceInfo specifies a set of mirrored VM instances, - // subnetworks and/or tags for which traffic from/to all VM instances - // will be mirrored. - MirroredResources *PacketMirroringMirroredResourceInfo `json:"mirroredResources,omitempty"` - - // Name: Name of the resource; provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: Specifies the mirrored VPC network. Only packets in this - // network will be mirrored. All mirrored VMs should have a NIC in the - // given network. All mirrored subnetworks should belong to the given - // network. - Network *PacketMirroringNetworkInfo `json:"network,omitempty"` - - // Priority: The priority of applying this configuration. Priority is - // used to break ties in cases where there is more than one matching - // rule. In the case of two rules that apply for a given Instance, the - // one with the lowest-numbered priority value wins. Default value is - // 1000. Valid range is 0 through 65535. - Priority int64 `json:"priority,omitempty"` - - // Region: [Output Only] URI of the region where the packetMirroring - // resides. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. 
- SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CollectorIlb") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CollectorIlb") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PacketMirroring) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroring +func (s *NotificationEndpointGrpcSettings) MarshalJSON() ([]byte, error) { + type NoMethod NotificationEndpointGrpcSettings raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PacketMirroringAggregatedList: Contains a list of packetMirrorings. -type PacketMirroringAggregatedList struct { +type NotificationEndpointList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of PacketMirroring resources. - Items map[string]PacketMirroringsScopedList `json:"items,omitempty"` + // Items: A list of NotificationEndpoint resources. + Items []*NotificationEndpoint `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of the resource. Always + // compute#notificationEndpoint for notification endpoints. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -30919,11 +32311,8 @@ type PacketMirroringAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *PacketMirroringAggregatedListWarning `json:"warning,omitempty"` + Warning *NotificationEndpointListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -30946,15 +32335,15 @@ type PacketMirroringAggregatedList struct { NullFields []string `json:"-"` } -func (s *PacketMirroringAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringAggregatedList +func (s *NotificationEndpointList) MarshalJSON() ([]byte, error) { + type NoMethod NotificationEndpointList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PacketMirroringAggregatedListWarning: [Output Only] Informational -// warning message. -type PacketMirroringAggregatedListWarning struct { +// NotificationEndpointListWarning: [Output Only] Informational warning +// message. +type NotificationEndpointListWarning struct { // Code: [Output Only] A warning code, if applicable. 
For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -30976,6 +32365,9 @@ type PacketMirroringAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -30983,6 +32375,9 @@ type PacketMirroringAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -31016,7 +32411,7 @@ type PacketMirroringAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*PacketMirroringAggregatedListWarningData `json:"data,omitempty"` + Data []*NotificationEndpointListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -31039,13 +32434,13 @@ type PacketMirroringAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *PacketMirroringAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringAggregatedListWarning +func (s *NotificationEndpointListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NotificationEndpointListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PacketMirroringAggregatedListWarningData struct { +type NotificationEndpointListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -31076,36 +32471,166 @@ type PacketMirroringAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *PacketMirroringAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringAggregatedListWarningData +func (s *NotificationEndpointListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NotificationEndpointListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PacketMirroringFilter struct { - // IPProtocols: Protocols that apply as filter on mirrored traffic. If - // no protocols are specified, all traffic that matches the specified - // CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is - // specified, all traffic is mirrored. - IPProtocols []string `json:"IPProtocols,omitempty"` +// Operation: Represents an Operation resource. 
Google Compute Engine +// has three Operation resources: * Global +// (/compute/docs/reference/rest/v1/globalOperations) * Regional +// (/compute/docs/reference/rest/v1/regionOperations) * Zonal +// (/compute/docs/reference/rest/v1/zoneOperations) You can use an +// operation resource to manage asynchronous API requests. For more +// information, read Handling API responses. Operations can be global, +// regional or zonal. - For global operations, use the +// `globalOperations` resource. - For regional operations, use the +// `regionOperations` resource. - For zonal operations, use the +// `zonalOperations` resource. For more information, read Global, +// Regional, and Zonal Resources. +type Operation struct { + // ClientOperationId: [Output Only] The value of `requestId` if you + // provided it in the request. Not present otherwise. + ClientOperationId string `json:"clientOperationId,omitempty"` - // CidrRanges: IP CIDR ranges that apply as filter on the source - // (ingress) or destination (egress) IP in the IP header. Only IPv4 is - // supported. If no ranges are specified, all traffic that matches the - // specified IPProtocols is mirrored. If neither cidrRanges nor - // IPProtocols is specified, all traffic is mirrored. - CidrRanges []string `json:"cidrRanges,omitempty"` + // CreationTimestamp: [Deprecated] This field is deprecated. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Direction: Direction of traffic to mirror, either INGRESS, EGRESS, or - // BOTH. The default is BOTH. + // Description: [Output Only] A textual description of the operation, + // which is set when the operation is created. + Description string `json:"description,omitempty"` + + // EndTime: [Output Only] The time that this operation was completed. + // This value is in RFC3339 text format. + EndTime string `json:"endTime,omitempty"` + + // Error: [Output Only] If errors are generated during processing of the + // operation, this field will be populated. + Error *OperationError `json:"error,omitempty"` + + // HttpErrorMessage: [Output Only] If the operation fails, this field + // contains the HTTP error message that was returned, such as `NOT + // FOUND`. + HttpErrorMessage string `json:"httpErrorMessage,omitempty"` + + // HttpErrorStatusCode: [Output Only] If the operation fails, this field + // contains the HTTP error status code that was returned. For example, a + // `404` means the resource was not found. + HttpErrorStatusCode int64 `json:"httpErrorStatusCode,omitempty"` + + // Id: [Output Only] The unique identifier for the operation. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // InsertTime: [Output Only] The time that this operation was requested. + // This value is in RFC3339 text format. + InsertTime string `json:"insertTime,omitempty"` + + // Kind: [Output Only] Type of the resource. Always `compute#operation` + // for Operation resources. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the operation. + Name string `json:"name,omitempty"` + + // OperationGroupId: [Output Only] An ID that represents a group of + // operations, such as when a group of operations results from a + // `bulkInsert` API request. + OperationGroupId string `json:"operationGroupId,omitempty"` + + // OperationType: [Output Only] The type of operation, such as `insert`, + // `update`, or `delete`, and so on. 
+ OperationType string `json:"operationType,omitempty"` + + // Progress: [Output Only] An optional progress indicator that ranges + // from 0 to 100. There is no requirement that this be linear or support + // any granularity of operations. This should not be used to guess when + // the operation will be complete. This number should monotonically + // increase as the operation progresses. + Progress int64 `json:"progress,omitempty"` + + // Region: [Output Only] The URL of the region where the operation + // resides. Only applicable when performing regional operations. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // StartTime: [Output Only] The time that this operation was started by + // the server. This value is in RFC3339 text format. + StartTime string `json:"startTime,omitempty"` + + // Status: [Output Only] The status of the operation, which can be one + // of the following: `PENDING`, `RUNNING`, or `DONE`. // // Possible values: - // "BOTH" - Default, both directions are mirrored. - // "EGRESS" - Only egress traffic is mirrored. - // "INGRESS" - Only ingress traffic is mirrored. - Direction string `json:"direction,omitempty"` + // "DONE" + // "PENDING" + // "RUNNING" + Status string `json:"status,omitempty"` - // ForceSendFields is a list of field names (e.g. "IPProtocols") to + // StatusMessage: [Output Only] An optional textual description of the + // current status of the operation. + StatusMessage string `json:"statusMessage,omitempty"` + + // TargetId: [Output Only] The unique target ID, which identifies a + // specific incarnation of the target resource. + TargetId uint64 `json:"targetId,omitempty,string"` + + // TargetLink: [Output Only] The URL of the resource that the operation + // modifies. For operations related to creating a snapshot, this points + // to the persistent disk that the snapshot was created from. + TargetLink string `json:"targetLink,omitempty"` + + // User: [Output Only] User who requested the operation, for example: + // `user@example.com`. + User string `json:"user,omitempty"` + + // Warnings: [Output Only] If warning messages are generated during + // processing of the operation, this field will be populated. + Warnings []*OperationWarnings `json:"warnings,omitempty"` + + // Zone: [Output Only] The URL of the zone where the operation resides. + // Only applicable when performing per-zone operations. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ClientOperationId") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClientOperationId") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Operation) MarshalJSON() ([]byte, error) { + type NoMethod Operation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationError: [Output Only] If errors are generated during +// processing of the operation, this field will be populated. +type OperationError struct { + // Errors: [Output Only] The array of errors encountered while + // processing this operation. + Errors []*OperationErrorErrors `json:"errors,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Errors") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -31113,31 +32638,40 @@ type PacketMirroringFilter struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IPProtocols") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Errors") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *PacketMirroringFilter) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringFilter +func (s *OperationError) MarshalJSON() ([]byte, error) { + type NoMethod OperationError raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PacketMirroringForwardingRuleInfo struct { - // CanonicalUrl: [Output Only] Unique identifier for the forwarding - // rule; defined by the server. - CanonicalUrl string `json:"canonicalUrl,omitempty"` +type OperationErrorErrors struct { + // Code: [Output Only] The error type identifier for this error. + Code string `json:"code,omitempty"` - // Url: Resource URL to the forwarding rule representing the ILB - // configured as destination of the mirrored traffic. - Url string `json:"url,omitempty"` + // ErrorDetails: [Output Only] An optional list of messages that contain + // the error details. There is a set of defined message types to use for + // providing details.The syntax depends on the error code. For example, + // QuotaExceededInfo will have details when the error code is + // QUOTA_EXCEEDED. + ErrorDetails []*OperationErrorErrorsErrorDetails `json:"errorDetails,omitempty"` - // ForceSendFields is a list of field names (e.g. "CanonicalUrl") to + // Location: [Output Only] Indicates the field in the request that + // caused the error. This property is optional. + Location string `json:"location,omitempty"` + + // Message: [Output Only] An optional, human-readable error message. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -31145,53 +32679,31 @@ type PacketMirroringForwardingRuleInfo struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CanonicalUrl") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *PacketMirroringForwardingRuleInfo) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringForwardingRuleInfo +func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { + type NoMethod OperationErrorErrors raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PacketMirroringList: Contains a list of PacketMirroring resources. -type PacketMirroringList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of PacketMirroring resources. - Items []*PacketMirroring `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#packetMirroring - // for packetMirrorings. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` +type OperationErrorErrorsErrorDetails struct { + ErrorInfo *ErrorInfo `json:"errorInfo,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + Help *Help `json:"help,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *PacketMirroringListWarning `json:"warning,omitempty"` + LocalizedMessage *LocalizedMessage `json:"localizedMessage,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + QuotaInfo *QuotaExceededInfo `json:"quotaInfo,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "ErrorInfo") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -31199,8 +32711,8 @@ type PacketMirroringList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. 
"ErrorInfo") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -31208,15 +32720,13 @@ type PacketMirroringList struct { NullFields []string `json:"-"` } -func (s *PacketMirroringList) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringList +func (s *OperationErrorErrorsErrorDetails) MarshalJSON() ([]byte, error) { + type NoMethod OperationErrorErrorsErrorDetails raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PacketMirroringListWarning: [Output Only] Informational warning -// message. -type PacketMirroringListWarning struct { +type OperationWarnings struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -31238,6 +32748,9 @@ type PacketMirroringListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -31245,6 +32758,9 @@ type PacketMirroringListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -31278,7 +32794,7 @@ type PacketMirroringListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*PacketMirroringListWarningData `json:"data,omitempty"` + Data []*OperationWarningsData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -31301,13 +32817,13 @@ type PacketMirroringListWarning struct { NullFields []string `json:"-"` } -func (s *PacketMirroringListWarning) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringListWarning +func (s *OperationWarnings) MarshalJSON() ([]byte, error) { + type NoMethod OperationWarnings raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PacketMirroringListWarningData struct { +type OperationWarningsData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -31338,31 +32854,46 @@ type PacketMirroringListWarningData struct { NullFields []string `json:"-"` } -func (s *PacketMirroringListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringListWarningData +func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { + type NoMethod OperationWarningsData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PacketMirroringMirroredResourceInfo struct { - // Instances: A set of virtual machine instances that are being - // mirrored. They must live in zones contained in the same region as - // this packetMirroring. Note that this config will apply only to those - // network interfaces of the Instances that belong to the network - // specified in this packetMirroring. You may specify a maximum of 50 - // Instances. - Instances []*PacketMirroringMirroredResourceInfoInstanceInfo `json:"instances,omitempty"` +type OperationAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` - // Subnetworks: A set of subnetworks for which traffic from/to all VM - // instances will be mirrored. They must live in the same region as this - // packetMirroring. You may specify a maximum of 5 subnetworks. - Subnetworks []*PacketMirroringMirroredResourceInfoSubnetInfo `json:"subnetworks,omitempty"` + // Items: [Output Only] A map of scoped operation lists. + Items map[string]OperationsScopedList `json:"items,omitempty"` - // Tags: A set of mirrored tags. Traffic from/to all VM instances that - // have one or more of these tags will be mirrored. - Tags []string `json:"tags,omitempty"` + // Kind: [Output Only] Type of resource. Always + // `compute#operationAggregatedList` for aggregated lists of operations. + Kind string `json:"kind,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than `maxResults`, use the `nextPageToken` as a value for the query + // parameter `pageToken` in the next list request. Subsequent list + // requests will have their own `nextPageToken` to continue paging + // through the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *OperationAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -31370,8 +32901,8 @@ type PacketMirroringMirroredResourceInfo struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Instances") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -31379,22 +32910,89 @@ type PacketMirroringMirroredResourceInfo struct { NullFields []string `json:"-"` } -func (s *PacketMirroringMirroredResourceInfo) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringMirroredResourceInfo +func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod OperationAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PacketMirroringMirroredResourceInfoInstanceInfo struct { - // CanonicalUrl: [Output Only] Unique identifier for the instance; - // defined by the server. - CanonicalUrl string `json:"canonicalUrl,omitempty"` +// OperationAggregatedListWarning: [Output Only] Informational warning +// message. +type OperationAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. 
+ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` - // Url: Resource URL to the virtual machine instance which is being - // mirrored. - Url string `json:"url,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*OperationAggregatedListWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. "CanonicalUrl") to + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -31402,31 +33000,36 @@ type PacketMirroringMirroredResourceInfoInstanceInfo struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CanonicalUrl") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *PacketMirroringMirroredResourceInfoInstanceInfo) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringMirroredResourceInfoInstanceInfo +func (s *OperationAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod OperationAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PacketMirroringMirroredResourceInfoSubnetInfo struct { - // CanonicalUrl: [Output Only] Unique identifier for the subnetwork; - // defined by the server. - CanonicalUrl string `json:"canonicalUrl,omitempty"` +type OperationAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Url: Resource URL to the subnetwork for which traffic from/to all VM - // instances will be mirrored. - Url string `json:"url,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "CanonicalUrl") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -31434,61 +33037,53 @@ type PacketMirroringMirroredResourceInfoSubnetInfo struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CanonicalUrl") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *PacketMirroringMirroredResourceInfoSubnetInfo) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringMirroredResourceInfoSubnetInfo +func (s *OperationAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod OperationAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PacketMirroringNetworkInfo struct { - // CanonicalUrl: [Output Only] Unique identifier for the network; - // defined by the server. - CanonicalUrl string `json:"canonicalUrl,omitempty"` +// OperationList: Contains a list of Operation resources. +type OperationList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` - // Url: URL of the network resource. - Url string `json:"url,omitempty"` + // Items: [Output Only] A list of Operation resources. + Items []*Operation `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "CanonicalUrl") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Kind: [Output Only] Type of resource. Always `compute#operations` for + // Operations resource. 
+ Kind string `json:"kind,omitempty"` - // NullFields is a list of field names (e.g. "CanonicalUrl") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than `maxResults`, use the `nextPageToken` as a value for the query + // parameter `pageToken` in the next list request. Subsequent list + // requests will have their own `nextPageToken` to continue paging + // through the results. + NextPageToken string `json:"nextPageToken,omitempty"` -func (s *PacketMirroringNetworkInfo) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringNetworkInfo - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` -type PacketMirroringsScopedList struct { - // PacketMirrorings: A list of packetMirrorings contained in this scope. - PacketMirrorings []*PacketMirroring `json:"packetMirrorings,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *OperationListWarning `json:"warning,omitempty"` - // Warning: Informational warning which replaces the list of - // packetMirrorings when the list is empty. - Warning *PacketMirroringsScopedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "PacketMirrorings") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -31496,25 +33091,23 @@ type PacketMirroringsScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "PacketMirrorings") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *PacketMirroringsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringsScopedList +func (s *OperationList) MarshalJSON() ([]byte, error) { + type NoMethod OperationList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PacketMirroringsScopedListWarning: Informational warning which -// replaces the list of packetMirrorings when the list is empty. -type PacketMirroringsScopedListWarning struct { +// OperationListWarning: [Output Only] Informational warning message. +type OperationListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -31536,6 +33129,9 @@ type PacketMirroringsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -31543,6 +33139,9 @@ type PacketMirroringsScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -31576,7 +33175,7 @@ type PacketMirroringsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*PacketMirroringsScopedListWarningData `json:"data,omitempty"` + Data []*OperationListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -31599,13 +33198,13 @@ type PacketMirroringsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *PacketMirroringsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringsScopedListWarning +func (s *OperationListWarning) MarshalJSON() ([]byte, error) { + type NoMethod OperationListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PacketMirroringsScopedListWarningData struct { +type OperationListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -31636,150 +33235,22 @@ type PacketMirroringsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *PacketMirroringsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod PacketMirroringsScopedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// PathMatcher: A matcher for the path portion of the URL. The -// BackendService from the longest-matched rule will serve the URL. 
If -// no rule was matched, the default service is used. -type PathMatcher struct { - // DefaultRouteAction: defaultRouteAction takes effect when none of the - // pathRules or routeRules match. The load balancer performs advanced - // routing actions, such as URL rewrites and header transformations, - // before forwarding the request to the selected backend. If - // defaultRouteAction specifies any weightedBackendServices, - // defaultService must not be set. Conversely if defaultService is set, - // defaultRouteAction cannot contain any weightedBackendServices. Only - // one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps - // for external HTTP(S) load balancers support only the urlRewrite - // action within a path matcher's defaultRouteAction. - DefaultRouteAction *HttpRouteAction `json:"defaultRouteAction,omitempty"` - - // DefaultService: The full or partial URL to the BackendService - // resource. This URL is used if none of the pathRules or routeRules - // defined by this PathMatcher are matched. For example, the following - // are all valid URLs to a BackendService resource: - - // https://www.googleapis.com/compute/v1/projects/project - // /global/backendServices/backendService - - // compute/v1/projects/project/global/backendServices/backendService - - // global/backendServices/backendService If defaultRouteAction is also - // specified, advanced routing actions, such as URL rewrites, take - // effect before sending the request to the backend. However, if - // defaultService is specified, defaultRouteAction cannot contain any - // weightedBackendServices. Conversely, if defaultRouteAction specifies - // any weightedBackendServices, defaultService must not be specified. - // Only one of defaultService, defaultUrlRedirect , or - // defaultRouteAction.weightedBackendService must be set. Authorization - // requires one or more of the following Google IAM permissions on the - // specified resource default_service: - compute.backendBuckets.use - - // compute.backendServices.use - DefaultService string `json:"defaultService,omitempty"` - - // DefaultUrlRedirect: When none of the specified pathRules or - // routeRules match, the request is redirected to a URL specified by - // defaultUrlRedirect. If defaultUrlRedirect is specified, - // defaultService or defaultRouteAction must not be set. Not supported - // when the URL map is bound to a target gRPC proxy. - DefaultUrlRedirect *HttpRedirectAction `json:"defaultUrlRedirect,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // HeaderAction: Specifies changes to request and response headers that - // need to take effect for the selected backend service. HeaderAction - // specified here are applied after the matching HttpRouteRule - // HeaderAction and before the HeaderAction in the UrlMap HeaderAction - // is not supported for load balancers that have their - // loadBalancingScheme set to EXTERNAL. Not supported when the URL map - // is bound to a target gRPC proxy that has validateForProxyless field - // set to true. - HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` - - // Name: The name to which this PathMatcher is referred by the HostRule. - Name string `json:"name,omitempty"` - - // PathRules: The list of path rules. Use this list instead of - // routeRules when routing based on simple path matching is all that's - // required. 
The order by which path rules are specified does not - // matter. Matches are always done on the longest-path-first basis. For - // example: a pathRule with a path /a/b/c/* will match before /a/b/* - // irrespective of the order in which those paths appear in this list. - // Within a given pathMatcher, only one of pathRules or routeRules must - // be set. - PathRules []*PathRule `json:"pathRules,omitempty"` - - // RouteRules: The list of HTTP route rules. Use this list instead of - // pathRules when advanced route matching and routing actions are - // desired. routeRules are evaluated in order of priority, from the - // lowest to highest number. Within a given pathMatcher, you can set - // only one of pathRules or routeRules. - RouteRules []*HttpRouteRule `json:"routeRules,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DefaultRouteAction") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DefaultRouteAction") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *PathMatcher) MarshalJSON() ([]byte, error) { - type NoMethod PathMatcher +func (s *OperationListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod OperationListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PathRule: A path-matching rule for a URL. If matched, will use the -// specified BackendService to handle the traffic arriving at this URL. -type PathRule struct { - // Paths: The list of path patterns to match. Each must start with / and - // the only place a * is allowed is at the end following a /. The string - // fed to the path matcher does not include any text after the first ? - // or #, and those chars are not allowed here. - Paths []string `json:"paths,omitempty"` - - // RouteAction: In response to a matching path, the load balancer - // performs advanced routing actions, such as URL rewrites and header - // transformations, before forwarding the request to the selected - // backend. If routeAction specifies any weightedBackendServices, - // service must not be set. Conversely if service is set, routeAction - // cannot contain any weightedBackendServices. Only one of routeAction - // or urlRedirect must be set. URL maps for external HTTP(S) load - // balancers support only the urlRewrite action within a path rule's - // routeAction. - RouteAction *HttpRouteAction `json:"routeAction,omitempty"` - - // Service: The full or partial URL of the backend service resource to - // which traffic is directed if this rule is matched. If routeAction is - // also specified, advanced routing actions, such as URL rewrites, take - // effect before sending the request to the backend. However, if service - // is specified, routeAction cannot contain any weightedBackendServices. 
- // Conversely, if routeAction specifies any weightedBackendServices, - // service must not be specified. Only one of urlRedirect, service or - // routeAction.weightedBackendService must be set. - Service string `json:"service,omitempty"` +type OperationsScopedList struct { + // Operations: [Output Only] A list of operations contained in this + // scope. + Operations []*Operation `json:"operations,omitempty"` - // UrlRedirect: When a path pattern is matched, the request is - // redirected to a URL specified by urlRedirect. If urlRedirect is - // specified, service or routeAction must not be set. Not supported when - // the URL map is bound to a target gRPC proxy. - UrlRedirect *HttpRedirectAction `json:"urlRedirect,omitempty"` + // Warning: [Output Only] Informational warning which replaces the list + // of operations when the list is empty. + Warning *OperationsScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Paths") to + // ForceSendFields is a list of field names (e.g. "Operations") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -31787,8 +33258,8 @@ type PathRule struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Paths") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Operations") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -31796,229 +33267,89 @@ type PathRule struct { NullFields []string `json:"-"` } -func (s *PathRule) MarshalJSON() ([]byte, error) { - type NoMethod PathRule +func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod OperationsScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PerInstanceConfig struct { - // Fingerprint: Fingerprint of this per-instance config. This field can - // be used in optimistic locking. It is ignored when inserting a - // per-instance config. An up-to-date fingerprint must be provided in - // order to update an existing per-instance config or the field needs to - // be unset. - Fingerprint string `json:"fingerprint,omitempty"` - - // Name: The name of a per-instance config and its corresponding - // instance. Serves as a merge key during UpdatePerInstanceConfigs - // operations, that is, if a per-instance config with the same name - // exists then it will be updated, otherwise a new one will be created - // for the VM instance with the same name. An attempt to create a - // per-instance config for a VM instance that either doesn't exist or is - // not part of the group will result in an error. - Name string `json:"name,omitempty"` - - // PreservedState: The intended preserved state for the given instance. - // Does not contain preserved state generated from a stateful policy. - PreservedState *PreservedState `json:"preservedState,omitempty"` - - // Status: The status of applying this per-instance config on the - // corresponding managed instance. 
+// OperationsScopedListWarning: [Output Only] Informational warning +// which replaces the list of operations when the list is empty. +type OperationsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. // // Possible values: - // "APPLYING" - The per-instance config is being applied to the - // instance, but is not yet effective, possibly waiting for the instance - // to, for example, REFRESH. - // "DELETING" - The per-instance config deletion is being applied on - // the instance, possibly waiting for the instance to, for example, - // REFRESH. - // "EFFECTIVE" - The per-instance config is effective on the instance, - // meaning that all disks, ips and metadata specified in this config are - // attached or set on the instance. - // "NONE" - *[Default]* The default status, when no per-instance - // config exists. - // "UNAPPLIED" - The per-instance config is set on an instance but not - // been applied yet. - // "UNAPPLIED_DELETION" - The per-instance config has been deleted, - // but the deletion is not yet applied. - Status string `json:"status,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Fingerprint") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PerInstanceConfig) MarshalJSON() ([]byte, error) { - type NoMethod PerInstanceConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Policy: An Identity and Access Management (IAM) policy, which -// specifies access controls for Google Cloud resources. A `Policy` is a -// collection of `bindings`. A `binding` binds one or more `members`, or -// principals, to a single `role`. Principals can be user accounts, -// service accounts, Google groups, and domains (such as G Suite). A -// `role` is a named list of permissions; each `role` can be an IAM -// predefined role or a user-created custom role. For some types of -// Google Cloud resources, a `binding` can also specify a `condition`, -// which is a logical expression that allows access to a resource only -// if the expression evaluates to `true`. A condition can add -// constraints based on attributes of the request, the resource, or -// both. To learn which resources support conditions in their IAM -// policies, see the IAM documentation -// (https://cloud.google.com/iam/help/conditions/resource-policies). 
-// **JSON example:** { "bindings": [ { "role": -// "roles/resourcemanager.organizationAdmin", "members": [ -// "user:mike@example.com", "group:admins@example.com", -// "domain:google.com", -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { -// "role": "roles/resourcemanager.organizationViewer", "members": [ -// "user:eve@example.com" ], "condition": { "title": "expirable access", -// "description": "Does not grant access after Sep 2020", "expression": -// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], -// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - -// members: - user:mike@example.com - group:admins@example.com - -// domain:google.com - -// serviceAccount:my-project-id@appspot.gserviceaccount.com role: -// roles/resourcemanager.organizationAdmin - members: - -// user:eve@example.com role: roles/resourcemanager.organizationViewer -// condition: title: expirable access description: Does not grant access -// after Sep 2020 expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 -// For a description of IAM and its features, see the IAM documentation -// (https://cloud.google.com/iam/docs/). -type Policy struct { - // AuditConfigs: Specifies cloud audit logging configuration for this - // policy. - AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` - - // Bindings: Associates a list of `members`, or principals, with a - // `role`. Optionally, may specify a `condition` that determines how and - // when the `bindings` are applied. Each of the `bindings` must contain - // at least one principal. The `bindings` in a `Policy` can refer to up - // to 1,500 principals; up to 250 of these principals can be Google - // groups. Each occurrence of a principal counts towards these limits. - // For example, if the `bindings` grant 50 different roles to - // `user:alice@example.com`, and not to any other principal, then you - // can add another 1,450 principals to the `bindings` in the `Policy`. - Bindings []*Binding `json:"bindings,omitempty"` - - // Etag: `etag` is used for optimistic concurrency control as a way to - // help prevent simultaneous updates of a policy from overwriting each - // other. It is strongly suggested that systems make use of the `etag` - // in the read-modify-write cycle to perform policy updates in order to - // avoid race conditions: An `etag` is returned in the response to - // `getIamPolicy`, and systems are expected to put that etag in the - // request to `setIamPolicy` to ensure that their change will be applied - // to the same version of the policy. **Important:** If you use IAM - // Conditions, you must include the `etag` field whenever you call - // `setIamPolicy`. If you omit this field, then IAM allows you to - // overwrite a version `3` policy with a version `1` policy, and all of - // the conditions in the version `3` policy are lost. - Etag string `json:"etag,omitempty"` - - // Rules: This is deprecated and has no effect. Do not use. - Rules []*Rule `json:"rules,omitempty"` - - // Version: Specifies the format of the policy. Valid values are `0`, - // `1`, and `3`. Requests that specify an invalid value are rejected. - // Any operation that affects conditional role bindings must specify - // version `3`. 
This requirement applies to the following operations: * - // Getting a policy that includes a conditional role binding * Adding a - // conditional role binding to a policy * Changing a conditional role - // binding in a policy * Removing any role binding, with or without a - // condition, from a policy that includes conditions **Important:** If - // you use IAM Conditions, you must include the `etag` field whenever - // you call `setIamPolicy`. If you omit this field, then IAM allows you - // to overwrite a version `3` policy with a version `1` policy, and all - // of the conditions in the version `3` policy are lost. If a policy - // does not include any conditions, operations on that policy may - // specify any valid version or leave the field unset. To learn which - // resources support conditions in their IAM policies, see the IAM - // documentation - // (https://cloud.google.com/iam/help/conditions/resource-policies). - Version int64 `json:"version,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "AuditConfigs") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AuditConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Policy) MarshalJSON() ([]byte, error) { - type NoMethod Policy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type PreconfiguredWafSet struct { - // ExpressionSets: List of entities that are currently supported for WAF - // rules. - ExpressionSets []*WafExpressionSet `json:"expressionSets,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ExpressionSets") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ExpressionSets") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *PreconfiguredWafSet) MarshalJSON() ([]byte, error) { - type NoMethod PreconfiguredWafSet - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` -// PreservedState: Preserved state for a given instance. -type PreservedState struct { - // Disks: Preserved disks defined for this instance. This map is keyed - // with the device names of the disks. 
- Disks map[string]PreservedStatePreservedDisk `json:"disks,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*OperationsScopedListWarningData `json:"data,omitempty"` - // Metadata: Preserved metadata defined for this instance. - Metadata map[string]string `json:"metadata,omitempty"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Disks") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -32026,7 +33357,7 @@ type PreservedState struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Disks") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -32035,42 +33366,27 @@ type PreservedState struct { NullFields []string `json:"-"` } -func (s *PreservedState) MarshalJSON() ([]byte, error) { - type NoMethod PreservedState +func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod OperationsScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PreservedStatePreservedDisk struct { - // AutoDelete: These stateful disks will never be deleted during - // autohealing, update, instance recreate operations. This flag is used - // to configure if the disk should be deleted after it is no longer used - // by the group, e.g. when the given instance or the whole MIG is - // deleted. Note: disks attached in READ_ONLY mode cannot be - // auto-deleted. - // - // Possible values: - // "NEVER" - // "ON_PERMANENT_INSTANCE_DELETION" - AutoDelete string `json:"autoDelete,omitempty"` - - // Mode: The mode in which to attach this disk, either READ_WRITE or - // READ_ONLY. If not specified, the default is to attach the disk in - // READ_WRITE mode. - // - // Possible values: - // "READ_ONLY" - Attaches this disk in read-only mode. Multiple VM - // instances can use a disk in READ_ONLY mode at a time. - // "READ_WRITE" - *[Default]* Attaches this disk in READ_WRITE mode. - // Only one VM instance at a time can be attached to a disk in - // READ_WRITE mode. - Mode string `json:"mode,omitempty"` +type OperationsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Source: The URL of the disk resource that is stateful and should be - // attached to the VM instance. 
- Source string `json:"source,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoDelete") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -32078,8 +33394,8 @@ type PreservedStatePreservedDisk struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoDelete") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -32087,117 +33403,92 @@ type PreservedStatePreservedDisk struct { NullFields []string `json:"-"` } -func (s *PreservedStatePreservedDisk) MarshalJSON() ([]byte, error) { - type NoMethod PreservedStatePreservedDisk +func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod OperationsScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Project: Represents a Project resource. A project is used to organize -// resources in a Google Cloud Platform environment. For more -// information, read about the Resource Hierarchy. -type Project struct { - // CommonInstanceMetadata: Metadata key/value pairs available to all - // instances contained in this project. See Custom metadata for more - // information. - CommonInstanceMetadata *Metadata `json:"commonInstanceMetadata,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // DefaultNetworkTier: This signifies the default network tier used for - // configuring resources of the project and can only take the following - // values: PREMIUM, STANDARD. Initially the default network tier is - // PREMIUM. - // - // Possible values: - // "FIXED_STANDARD" - Public internet quality with fixed bandwidth. - // "PREMIUM" - High quality, Google-grade network tier, support for - // all networking products. - // "STANDARD" - Public internet quality, only limited support for - // other networking products. - // "STANDARD_OVERRIDES_FIXED_STANDARD" - (Output only) Temporary tier - // for FIXED_STANDARD when fixed standard tier is expired or not - // configured. - DefaultNetworkTier string `json:"defaultNetworkTier,omitempty"` - - // DefaultServiceAccount: [Output Only] Default service account used by - // VMs running in this project. - DefaultServiceAccount string `json:"defaultServiceAccount,omitempty"` - - // Description: An optional textual description of the resource. - Description string `json:"description,omitempty"` - - // EnabledFeatures: Restricted features enabled for use on this project. - EnabledFeatures []string `json:"enabledFeatures,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. 
This is *not* the project ID, - // and is just a unique ID used by Compute Engine to identify resources. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#project for - // projects. - Kind string `json:"kind,omitempty"` +// OutlierDetection: Settings controlling the eviction of unhealthy +// hosts from the load balancing pool for the backend service. +type OutlierDetection struct { + // BaseEjectionTime: The base time that a host is ejected for. The real + // ejection time is equal to the base ejection time multiplied by the + // number of times the host has been ejected. Defaults to 30000ms or + // 30s. + BaseEjectionTime *Duration `json:"baseEjectionTime,omitempty"` - // Name: The project ID. For example: my-example-project. Use the - // project ID to make requests to Compute Engine. - Name string `json:"name,omitempty"` + // ConsecutiveErrors: Number of errors before a host is ejected from the + // connection pool. When the backend host is accessed over HTTP, a 5xx + // return code qualifies as an error. Defaults to 5. Not supported when + // the backend service is referenced by a URL map that is bound to + // target gRPC proxy that has validateForProxyless field set to true. + ConsecutiveErrors int64 `json:"consecutiveErrors,omitempty"` - // Quotas: [Output Only] Quotas assigned to this project. - Quotas []*Quota `json:"quotas,omitempty"` + // ConsecutiveGatewayFailure: The number of consecutive gateway failures + // (502, 503, 504 status or connection errors that are mapped to one of + // those status codes) before a consecutive gateway failure ejection + // occurs. Defaults to 3. Not supported when the backend service is + // referenced by a URL map that is bound to target gRPC proxy that has + // validateForProxyless field set to true. + ConsecutiveGatewayFailure int64 `json:"consecutiveGatewayFailure,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // EnforcingConsecutiveErrors: The percentage chance that a host will be + // actually ejected when an outlier status is detected through + // consecutive 5xx. This setting can be used to disable ejection or to + // ramp it up slowly. Defaults to 0. Not supported when the backend + // service is referenced by a URL map that is bound to target gRPC proxy + // that has validateForProxyless field set to true. + EnforcingConsecutiveErrors int64 `json:"enforcingConsecutiveErrors,omitempty"` - // UsageExportLocation: The naming prefix for daily usage reports and - // the Google Cloud Storage bucket where they are stored. - UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` + // EnforcingConsecutiveGatewayFailure: The percentage chance that a host + // will be actually ejected when an outlier status is detected through + // consecutive gateway failures. This setting can be used to disable + // ejection or to ramp it up slowly. Defaults to 100. Not supported when + // the backend service is referenced by a URL map that is bound to + // target gRPC proxy that has validateForProxyless field set to true. + EnforcingConsecutiveGatewayFailure int64 `json:"enforcingConsecutiveGatewayFailure,omitempty"` - // XpnProjectStatus: [Output Only] The role this project has in a shared - // VPC configuration. Currently, only projects with the host role, which - // is specified by the value HOST, are differentiated. 
- // - // Possible values: - // "HOST" - // "UNSPECIFIED_XPN_PROJECT_STATUS" - XpnProjectStatus string `json:"xpnProjectStatus,omitempty"` + // EnforcingSuccessRate: The percentage chance that a host will be + // actually ejected when an outlier status is detected through success + // rate statistics. This setting can be used to disable ejection or to + // ramp it up slowly. Defaults to 100. + EnforcingSuccessRate int64 `json:"enforcingSuccessRate,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Interval: Time interval between ejection analysis sweeps. This can + // result in both new ejections as well as hosts being returned to + // service. Defaults to 1 second. + Interval *Duration `json:"interval,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "CommonInstanceMetadata") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` + // MaxEjectionPercent: Maximum percentage of hosts in the load balancing + // pool for the backend service that can be ejected. Defaults to 50%. + MaxEjectionPercent int64 `json:"maxEjectionPercent,omitempty"` - // NullFields is a list of field names (e.g. "CommonInstanceMetadata") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} + // SuccessRateMinimumHosts: The number of hosts in a cluster that must + // have enough request volume to detect success rate outliers. If the + // number of hosts is less than this setting, outlier detection via + // success rate statistics is not performed for any host in the cluster. + // Defaults to 5. + SuccessRateMinimumHosts int64 `json:"successRateMinimumHosts,omitempty"` -func (s *Project) MarshalJSON() ([]byte, error) { - type NoMethod Project - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // SuccessRateRequestVolume: The minimum number of total requests that + // must be collected in one interval (as defined by the interval + // duration above) to include this host in success rate based outlier + // detection. If the volume is lower than this setting, outlier + // detection via success rate statistics is not performed for that host. + // Defaults to 100. + SuccessRateRequestVolume int64 `json:"successRateRequestVolume,omitempty"` -type ProjectsDisableXpnResourceRequest struct { - // XpnResource: Service resource (a.k.a service project) ID. - XpnResource *XpnResourceId `json:"xpnResource,omitempty"` + // SuccessRateStdevFactor: This factor is used to determine the ejection + // threshold for success rate outlier ejection. The ejection threshold + // is the difference between the mean success rate, and the product of + // this factor and the standard deviation of the mean success rate: mean + // - (stdev * success_rate_stdev_factor). 
This factor is divided by a + // thousand to get a double. That is, if the desired factor is 1.9, the + // runtime value should be 1900. Defaults to 1900. + SuccessRateStdevFactor int64 `json:"successRateStdevFactor,omitempty"` - // ForceSendFields is a list of field names (e.g. "XpnResource") to + // ForceSendFields is a list of field names (e.g. "BaseEjectionTime") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -32205,71 +33496,59 @@ type ProjectsDisableXpnResourceRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "XpnResource") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "BaseEjectionTime") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type NoMethod ProjectsDisableXpnResourceRequest +func (s *OutlierDetection) MarshalJSON() ([]byte, error) { + type NoMethod OutlierDetection raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ProjectsEnableXpnResourceRequest struct { - // XpnResource: Service resource (a.k.a service project) ID. - XpnResource *XpnResourceId `json:"xpnResource,omitempty"` - - // ForceSendFields is a list of field names (e.g. "XpnResource") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "XpnResource") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} +// PacketIntervals: Next free: 7 +type PacketIntervals struct { + // AvgMs: Average observed inter-packet interval in milliseconds. + AvgMs int64 `json:"avgMs,omitempty,string"` -func (s *ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type NoMethod ProjectsEnableXpnResourceRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Duration: From how long ago in the past these intervals were + // observed. 
+ // + // Possible values: + // "DURATION_UNSPECIFIED" + // "HOUR" + // "MAX" - From BfdSession object creation time. + // "MINUTE" + Duration string `json:"duration,omitempty"` -type ProjectsGetXpnResources struct { - // Kind: [Output Only] Type of resource. Always - // compute#projectsGetXpnResources for lists of service resources (a.k.a - // service projects) - Kind string `json:"kind,omitempty"` + // MaxMs: Maximum observed inter-packet interval in milliseconds. + MaxMs int64 `json:"maxMs,omitempty,string"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // MinMs: Minimum observed inter-packet interval in milliseconds. + MinMs int64 `json:"minMs,omitempty,string"` - // Resources: Service resources (a.k.a service projects) attached to - // this project as their shared VPC host. - Resources []*XpnResourceId `json:"resources,omitempty"` + // NumIntervals: Number of inter-packet intervals from which these + // statistics were derived. + NumIntervals int64 `json:"numIntervals,omitempty,string"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Type: The type of packets for which inter-packet intervals were + // computed. + // + // Possible values: + // "LOOPBACK" - Only applies to Echo packets. This shows the intervals + // between sending and receiving the same packet. + // "RECEIVE" - Intervals between received packets. + // "TRANSMIT" - Intervals between transmitted packets. + // "TYPE_UNSPECIFIED" + Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // ForceSendFields is a list of field names (e.g. "AvgMs") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -32277,7 +33556,7 @@ type ProjectsGetXpnResources struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API + // NullFields is a list of field names (e.g. "AvgMs") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -32286,83 +33565,25 @@ type ProjectsGetXpnResources struct { NullFields []string `json:"-"` } -func (s *ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { - type NoMethod ProjectsGetXpnResources - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ProjectsListXpnHostsRequest struct { - // Organization: Optional organization ID managed by Cloud Resource - // Manager, for which to list shared VPC host projects. If not - // specified, the organization will be inferred from the project. - Organization string `json:"organization,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Organization") to - // unconditionally include in API requests. 
By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Organization") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { - type NoMethod ProjectsListXpnHostsRequest +func (s *PacketIntervals) MarshalJSON() ([]byte, error) { + type NoMethod PacketIntervals raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ProjectsSetDefaultNetworkTierRequest struct { - // NetworkTier: Default network tier to be set. - // - // Possible values: - // "FIXED_STANDARD" - Public internet quality with fixed bandwidth. - // "PREMIUM" - High quality, Google-grade network tier, support for - // all networking products. - // "STANDARD" - Public internet quality, only limited support for - // other networking products. - // "STANDARD_OVERRIDES_FIXED_STANDARD" - (Output only) Temporary tier - // for FIXED_STANDARD when fixed standard tier is expired or not - // configured. - NetworkTier string `json:"networkTier,omitempty"` - - // ForceSendFields is a list of field names (e.g. "NetworkTier") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "NetworkTier") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ProjectsSetDefaultNetworkTierRequest) MarshalJSON() ([]byte, error) { - type NoMethod ProjectsSetDefaultNetworkTierRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +// PacketMirroring: Represents a Packet Mirroring resource. Packet +// Mirroring clones the traffic of specified instances in your Virtual +// Private Cloud (VPC) network and forwards it to a collector +// destination, such as an instance group of an internal TCP/UDP load +// balancer, for analysis or examination. For more information about +// setting up Packet Mirroring, see Using Packet Mirroring. +type PacketMirroring struct { + // CollectorIlb: The Forwarding Rule resource of type + // loadBalancingScheme=INTERNAL that will be used as collector for + // mirrored traffic. The specified forwarding rule must have + // isMirroringCollector set to true. 
+ CollectorIlb *PacketMirroringForwardingRuleInfo `json:"collectorIlb,omitempty"` -// PublicAdvertisedPrefix: A public advertised prefix represents an -// aggregated IP prefix or netblock which customers bring to cloud. The -// IP prefix is a single unit of route advertisement and is announced -// globally to the internet. -type PublicAdvertisedPrefix struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -32371,32 +33592,34 @@ type PublicAdvertisedPrefix struct { // property when you create the resource. Description string `json:"description,omitempty"` - // DnsVerificationIp: The IPv4 address to be used for reverse DNS - // verification. - DnsVerificationIp string `json:"dnsVerificationIp,omitempty"` + // Enable: Indicates whether or not this packet mirroring takes effect. + // If set to FALSE, this packet mirroring policy will not be enforced on + // the network. The default is TRUE. + // + // Possible values: + // "FALSE" + // "TRUE" + Enable string `json:"enable,omitempty"` - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a new PublicAdvertisedPrefix. An - // up-to-date fingerprint must be provided in order to update the - // PublicAdvertisedPrefix, otherwise the request will fail with error - // 412 conditionNotMet. To see the latest fingerprint, make a get() - // request to retrieve a PublicAdvertisedPrefix. - Fingerprint string `json:"fingerprint,omitempty"` + // Filter: Filter for mirrored traffic. If unspecified, all traffic is + // mirrored. + Filter *PacketMirroringFilter `json:"filter,omitempty"` - // Id: [Output Only] The unique identifier for the resource type. The - // server generates this identifier. + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // IpCidrRange: The IPv4 address range, in CIDR format, represented by - // this public advertised prefix. - IpCidrRange string `json:"ipCidrRange,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#publicAdvertisedPrefix for public advertised prefixes. + // compute#packetMirroring for packet mirrorings. Kind string `json:"kind,omitempty"` - // Name: Name of the resource. Provided by the client when the resource + // MirroredResources: PacketMirroring mirroredResourceInfos. + // MirroredResourceInfo specifies a set of mirrored VM instances, + // subnetworks and/or tags for which traffic from/to all VM instances + // will be mirrored. + MirroredResources *PacketMirroringMirroredResourceInfo `json:"mirroredResources,omitempty"` + + // Name: Name of the resource; provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means @@ -32405,75 +33628,63 @@ type PublicAdvertisedPrefix struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // PublicDelegatedPrefixs: [Output Only] The list of public delegated - // prefixes that exist for this public advertised prefix. - PublicDelegatedPrefixs []*PublicAdvertisedPrefixPublicDelegatedPrefix `json:"publicDelegatedPrefixs,omitempty"` + // Network: Specifies the mirrored VPC network. 
Only packets in this + // network will be mirrored. All mirrored VMs should have a NIC in the + // given network. All mirrored subnetworks should belong to the given + // network. + Network *PacketMirroringNetworkInfo `json:"network,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // Priority: The priority of applying this configuration. Priority is + // used to break ties in cases where there is more than one matching + // rule. In the case of two rules that apply for a given Instance, the + // one with the lowest-numbered priority value wins. Default value is + // 1000. Valid range is 0 through 65535. + Priority int64 `json:"priority,omitempty"` - // SharedSecret: [Output Only] The shared secret to be used for reverse - // DNS verification. - SharedSecret string `json:"sharedSecret,omitempty"` + // Region: [Output Only] URI of the region where the packetMirroring + // resides. + Region string `json:"region,omitempty"` - // Status: The status of the public advertised prefix. Possible values - // include: - `INITIAL`: RPKI validation is complete. - - // `PTR_CONFIGURED`: User has configured the PTR. - `VALIDATED`: Reverse - // DNS lookup is successful. - `REVERSE_DNS_LOOKUP_FAILED`: Reverse DNS - // lookup failed. - `PREFIX_CONFIGURATION_IN_PROGRESS`: The prefix is - // being configured. - `PREFIX_CONFIGURATION_COMPLETE`: The prefix is - // fully configured. - `PREFIX_REMOVAL_IN_PROGRESS`: The prefix is being - // removed. - // - // Possible values: - // "INITIAL" - RPKI validation is complete. - // "PREFIX_CONFIGURATION_COMPLETE" - The prefix is fully configured. - // "PREFIX_CONFIGURATION_IN_PROGRESS" - The prefix is being - // configured. - // "PREFIX_REMOVAL_IN_PROGRESS" - The prefix is being removed. - // "PTR_CONFIGURED" - User has configured the PTR. - // "REVERSE_DNS_LOOKUP_FAILED" - Reverse DNS lookup failed. - // "VALIDATED" - Reverse DNS lookup is successful. - Status string `json:"status,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CollectorIlb") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "CollectorIlb") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *PublicAdvertisedPrefix) MarshalJSON() ([]byte, error) { - type NoMethod PublicAdvertisedPrefix +func (s *PacketMirroring) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroring raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PublicAdvertisedPrefixList struct { +// PacketMirroringAggregatedList: Contains a list of packetMirrorings. +type PacketMirroringAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of PublicAdvertisedPrefix resources. - Items []*PublicAdvertisedPrefix `json:"items,omitempty"` + // Items: A list of PacketMirroring resources. + Items map[string]PacketMirroringsScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#publicAdvertisedPrefix for public advertised prefixes. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -32487,8 +33698,11 @@ type PublicAdvertisedPrefixList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *PublicAdvertisedPrefixListWarning `json:"warning,omitempty"` + Warning *PacketMirroringAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -32511,15 +33725,15 @@ type PublicAdvertisedPrefixList struct { NullFields []string `json:"-"` } -func (s *PublicAdvertisedPrefixList) MarshalJSON() ([]byte, error) { - type NoMethod PublicAdvertisedPrefixList +func (s *PacketMirroringAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PublicAdvertisedPrefixListWarning: [Output Only] Informational +// PacketMirroringAggregatedListWarning: [Output Only] Informational // warning message. -type PublicAdvertisedPrefixListWarning struct { +type PacketMirroringAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -32541,6 +33755,9 @@ type PublicAdvertisedPrefixListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -32548,6 +33765,9 @@ type PublicAdvertisedPrefixListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. 
+ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -32581,7 +33801,7 @@ type PublicAdvertisedPrefixListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*PublicAdvertisedPrefixListWarningData `json:"data,omitempty"` + Data []*PacketMirroringAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -32604,13 +33824,13 @@ type PublicAdvertisedPrefixListWarning struct { NullFields []string `json:"-"` } -func (s *PublicAdvertisedPrefixListWarning) MarshalJSON() ([]byte, error) { - type NoMethod PublicAdvertisedPrefixListWarning +func (s *PacketMirroringAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PublicAdvertisedPrefixListWarningData struct { +type PacketMirroringAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -32641,35 +33861,36 @@ type PublicAdvertisedPrefixListWarningData struct { NullFields []string `json:"-"` } -func (s *PublicAdvertisedPrefixListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod PublicAdvertisedPrefixListWarningData +func (s *PacketMirroringAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PublicAdvertisedPrefixPublicDelegatedPrefix: Represents a CIDR range -// which can be used to assign addresses. -type PublicAdvertisedPrefixPublicDelegatedPrefix struct { - // IpRange: The IP address range of the public delegated prefix - IpRange string `json:"ipRange,omitempty"` - - // Name: The name of the public delegated prefix - Name string `json:"name,omitempty"` - - // Project: The project number of the public delegated prefix - Project string `json:"project,omitempty"` +type PacketMirroringFilter struct { + // IPProtocols: Protocols that apply as filter on mirrored traffic. If + // no protocols are specified, all traffic that matches the specified + // CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is + // specified, all traffic is mirrored. + IPProtocols []string `json:"IPProtocols,omitempty"` - // Region: The region of the public delegated prefix if it is regional. - // If absent, the prefix is global. - Region string `json:"region,omitempty"` + // CidrRanges: IP CIDR ranges that apply as filter on the source + // (ingress) or destination (egress) IP in the IP header. Only IPv4 is + // supported. If no ranges are specified, all traffic that matches the + // specified IPProtocols is mirrored. If neither cidrRanges nor + // IPProtocols is specified, all traffic is mirrored. + CidrRanges []string `json:"cidrRanges,omitempty"` - // Status: The status of the public delegated prefix. 
Possible values - // are: INITIALIZING: The public delegated prefix is being initialized - // and addresses cannot be created yet. ANNOUNCED: The public delegated - // prefix is active. - Status string `json:"status,omitempty"` + // Direction: Direction of traffic to mirror, either INGRESS, EGRESS, or + // BOTH. The default is BOTH. + // + // Possible values: + // "BOTH" - Default, both directions are mirrored. + // "EGRESS" - Only egress traffic is mirrored. + // "INGRESS" - Only ingress traffic is mirrored. + Direction string `json:"direction,omitempty"` - // ForceSendFields is a list of field names (e.g. "IpRange") to + // ForceSendFields is a list of field names (e.g. "IPProtocols") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -32677,142 +33898,64 @@ type PublicAdvertisedPrefixPublicDelegatedPrefix struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpRange") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "IPProtocols") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *PublicAdvertisedPrefixPublicDelegatedPrefix) MarshalJSON() ([]byte, error) { - type NoMethod PublicAdvertisedPrefixPublicDelegatedPrefix +func (s *PacketMirroringFilter) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringFilter raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PublicDelegatedPrefix: A PublicDelegatedPrefix resource represents an -// IP block within a PublicAdvertisedPrefix that is configured within a -// single cloud scope (global or region). IPs in the block can be -// allocated to resources within that scope. Public delegated prefixes -// may be further broken up into smaller IP blocks in the same scope as -// the parent block. -type PublicDelegatedPrefix struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a new PublicDelegatedPrefix. An - // up-to-date fingerprint must be provided in order to update the - // PublicDelegatedPrefix, otherwise the request will fail with error 412 - // conditionNotMet. To see the latest fingerprint, make a get() request - // to retrieve a PublicDelegatedPrefix. - Fingerprint string `json:"fingerprint,omitempty"` - - // Id: [Output Only] The unique identifier for the resource type. 
The - // server generates this identifier. - Id uint64 `json:"id,omitempty,string"` - - // IpCidrRange: The IPv4 address range, in CIDR format, represented by - // this public delegated prefix. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // IsLiveMigration: If true, the prefix will be live migrated. - IsLiveMigration bool `json:"isLiveMigration,omitempty"` - - // Kind: [Output Only] Type of the resource. Always - // compute#publicDelegatedPrefix for public delegated prefixes. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // ParentPrefix: The URL of parent prefix. Either PublicAdvertisedPrefix - // or PublicDelegatedPrefix. - ParentPrefix string `json:"parentPrefix,omitempty"` - - // PublicDelegatedSubPrefixs: The list of sub public delegated prefixes - // that exist for this public delegated prefix. - PublicDelegatedSubPrefixs []*PublicDelegatedPrefixPublicDelegatedSubPrefix `json:"publicDelegatedSubPrefixs,omitempty"` - - // Region: [Output Only] URL of the region where the public delegated - // prefix resides. This field applies only to the region resource. You - // must specify this field as part of the HTTP request URL. It is not - // settable as a field in the request body. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Status: [Output Only] The status of the public delegated prefix, - // which can be one of following values: - `INITIALIZING` The public - // delegated prefix is being initialized and addresses cannot be created - // yet. - `READY_TO_ANNOUNCE` The public delegated prefix is a live - // migration prefix and is active. - `ANNOUNCED` The public delegated - // prefix is active. - `DELETING` The public delegated prefix is being - // deprovsioned. - // - // Possible values: - // "ANNOUNCED" - The public delegated prefix is active. - // "DELETING" - The public delegated prefix is being deprovsioned. - // "INITIALIZING" - The public delegated prefix is being initialized - // and addresses cannot be created yet. - // "READY_TO_ANNOUNCE" - The public delegated prefix is currently - // withdrawn but ready to be announced. - Status string `json:"status,omitempty"` +type PacketMirroringForwardingRuleInfo struct { + // CanonicalUrl: [Output Only] Unique identifier for the forwarding + // rule; defined by the server. + CanonicalUrl string `json:"canonicalUrl,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Url: Resource URL to the forwarding rule representing the ILB + // configured as destination of the mirrored traffic. + Url string `json:"url,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CanonicalUrl") to + // unconditionally include in API requests. 
By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "CanonicalUrl") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefix) MarshalJSON() ([]byte, error) { - type NoMethod PublicDelegatedPrefix +func (s *PacketMirroringForwardingRuleInfo) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringForwardingRuleInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PublicDelegatedPrefixAggregatedList struct { +// PacketMirroringList: Contains a list of PacketMirroring resources. +type PacketMirroringList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of PublicDelegatedPrefixesScopedList resources. - Items map[string]PublicDelegatedPrefixesScopedList `json:"items,omitempty"` + // Items: A list of PacketMirroring resources. + Items []*PacketMirroring `json:"items,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#publicDelegatedPrefixAggregatedList for aggregated lists of - // public delegated prefixes. + // Kind: [Output Only] Type of resource. Always compute#packetMirroring + // for packetMirrorings. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -32826,11 +33969,8 @@ type PublicDelegatedPrefixAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *PublicDelegatedPrefixAggregatedListWarning `json:"warning,omitempty"` + Warning *PacketMirroringListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -32853,15 +33993,15 @@ type PublicDelegatedPrefixAggregatedList struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod PublicDelegatedPrefixAggregatedList +func (s *PacketMirroringList) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PublicDelegatedPrefixAggregatedListWarning: [Output Only] -// Informational warning message. 
-type PublicDelegatedPrefixAggregatedListWarning struct { +// PacketMirroringListWarning: [Output Only] Informational warning +// message. +type PacketMirroringListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -32883,6 +34023,9 @@ type PublicDelegatedPrefixAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -32890,6 +34033,9 @@ type PublicDelegatedPrefixAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -32923,7 +34069,7 @@ type PublicDelegatedPrefixAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*PublicDelegatedPrefixAggregatedListWarningData `json:"data,omitempty"` + Data []*PacketMirroringListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -32946,13 +34092,13 @@ type PublicDelegatedPrefixAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod PublicDelegatedPrefixAggregatedListWarning +func (s *PacketMirroringListWarning) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PublicDelegatedPrefixAggregatedListWarningData struct { +type PacketMirroringListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -32983,43 +34129,31 @@ type PublicDelegatedPrefixAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod PublicDelegatedPrefixAggregatedListWarningData +func (s *PacketMirroringListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PublicDelegatedPrefixList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of PublicDelegatedPrefix resources. - Items []*PublicDelegatedPrefix `json:"items,omitempty"` - - // Kind: [Output Only] Type of the resource. 
Always - // compute#publicDelegatedPrefixList for public delegated prefixes. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +type PacketMirroringMirroredResourceInfo struct { + // Instances: A set of virtual machine instances that are being + // mirrored. They must live in zones contained in the same region as + // this packetMirroring. Note that this config will apply only to those + // network interfaces of the Instances that belong to the network + // specified in this packetMirroring. You may specify a maximum of 50 + // Instances. + Instances []*PacketMirroringMirroredResourceInfoInstanceInfo `json:"instances,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *PublicDelegatedPrefixListWarning `json:"warning,omitempty"` + // Subnetworks: A set of subnetworks for which traffic from/to all VM + // instances will be mirrored. They must live in the same region as this + // packetMirroring. You may specify a maximum of 5 subnetworks. + Subnetworks []*PacketMirroringMirroredResourceInfoSubnetInfo `json:"subnetworks,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Tags: A set of mirrored tags. Traffic from/to all VM instances that + // have one or more of these tags will be mirrored. + Tags []string `json:"tags,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Instances") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -33027,8 +34161,8 @@ type PublicDelegatedPrefixList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -33036,83 +34170,22 @@ type PublicDelegatedPrefixList struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixList) MarshalJSON() ([]byte, error) { - type NoMethod PublicDelegatedPrefixList +func (s *PacketMirroringMirroredResourceInfo) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringMirroredResourceInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PublicDelegatedPrefixListWarning: [Output Only] Informational warning -// message. -type PublicDelegatedPrefixListWarning struct { - // Code: [Output Only] A warning code, if applicable. 
For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*PublicDelegatedPrefixListWarningData `json:"data,omitempty"` +type PacketMirroringMirroredResourceInfoInstanceInfo struct { + // CanonicalUrl: [Output Only] Unique identifier for the instance; + // defined by the server. + CanonicalUrl string `json:"canonicalUrl,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // Url: Resource URL to the virtual machine instance which is being + // mirrored. 
+ Url string `json:"url,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "CanonicalUrl") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -33120,36 +34193,31 @@ type PublicDelegatedPrefixListWarning struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "CanonicalUrl") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixListWarning) MarshalJSON() ([]byte, error) { - type NoMethod PublicDelegatedPrefixListWarning +func (s *PacketMirroringMirroredResourceInfoInstanceInfo) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringMirroredResourceInfoInstanceInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PublicDelegatedPrefixListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +type PacketMirroringMirroredResourceInfoSubnetInfo struct { + // CanonicalUrl: [Output Only] Unique identifier for the subnetwork; + // defined by the server. + CanonicalUrl string `json:"canonicalUrl,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // Url: Resource URL to the subnetwork for which traffic from/to all VM + // instances will be mirrored. + Url string `json:"url,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "CanonicalUrl") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -33157,55 +34225,30 @@ type PublicDelegatedPrefixListWarningData struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "CanonicalUrl") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod PublicDelegatedPrefixListWarningData +func (s *PacketMirroringMirroredResourceInfoSubnetInfo) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringMirroredResourceInfoSubnetInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PublicDelegatedPrefixPublicDelegatedSubPrefix: Represents a sub -// PublicDelegatedPrefix. -type PublicDelegatedPrefixPublicDelegatedSubPrefix struct { - // DelegateeProject: Name of the project scoping this - // PublicDelegatedSubPrefix. - DelegateeProject string `json:"delegateeProject,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // IpCidrRange: The IPv4 address range, in CIDR format, represented by - // this sub public delegated prefix. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // IsAddress: Whether the sub prefix is delegated to create Address - // resources in the delegatee project. - IsAddress bool `json:"isAddress,omitempty"` - - // Name: The name of the sub public delegated prefix. - Name string `json:"name,omitempty"` - - // Region: [Output Only] The region of the sub public delegated prefix - // if it is regional. If absent, the sub prefix is global. - Region string `json:"region,omitempty"` +type PacketMirroringNetworkInfo struct { + // CanonicalUrl: [Output Only] Unique identifier for the network; + // defined by the server. + CanonicalUrl string `json:"canonicalUrl,omitempty"` - // Status: [Output Only] The status of the sub public delegated prefix. - // - // Possible values: - // "ACTIVE" - // "INACTIVE" - Status string `json:"status,omitempty"` + // Url: URL of the network resource. + Url string `json:"url,omitempty"` - // ForceSendFields is a list of field names (e.g. "DelegateeProject") to + // ForceSendFields is a list of field names (e.g. "CanonicalUrl") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -33213,60 +34256,56 @@ type PublicDelegatedPrefixPublicDelegatedSubPrefix struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DelegateeProject") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "CanonicalUrl") to include + // in API requests with the JSON null value. 
By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixPublicDelegatedSubPrefix) MarshalJSON() ([]byte, error) { - type NoMethod PublicDelegatedPrefixPublicDelegatedSubPrefix +func (s *PacketMirroringNetworkInfo) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringNetworkInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PublicDelegatedPrefixesScopedList struct { - // PublicDelegatedPrefixes: [Output Only] A list of - // PublicDelegatedPrefixes contained in this scope. - PublicDelegatedPrefixes []*PublicDelegatedPrefix `json:"publicDelegatedPrefixes,omitempty"` +type PacketMirroringsScopedList struct { + // PacketMirrorings: A list of packetMirrorings contained in this scope. + PacketMirrorings []*PacketMirroring `json:"packetMirrorings,omitempty"` - // Warning: [Output Only] Informational warning which replaces the list - // of public delegated prefixes when the list is empty. - Warning *PublicDelegatedPrefixesScopedListWarning `json:"warning,omitempty"` + // Warning: Informational warning which replaces the list of + // packetMirrorings when the list is empty. + Warning *PacketMirroringsScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "PublicDelegatedPrefixes") to unconditionally include in API - // requests. By default, fields with empty or default values are omitted - // from API requests. However, any non-pointer, non-interface field - // appearing in ForceSendFields will be sent to the server regardless of - // whether the field is empty or not. This may be used to include empty - // fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "PacketMirrorings") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "PublicDelegatedPrefixes") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the + // NullFields is a list of field names (e.g. "PacketMirrorings") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. 
NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod PublicDelegatedPrefixesScopedList +func (s *PacketMirroringsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringsScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PublicDelegatedPrefixesScopedListWarning: [Output Only] Informational -// warning which replaces the list of public delegated prefixes when the -// list is empty. -type PublicDelegatedPrefixesScopedListWarning struct { +// PacketMirroringsScopedListWarning: Informational warning which +// replaces the list of packetMirrorings when the list is empty. +type PacketMirroringsScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -33288,6 +34327,9 @@ type PublicDelegatedPrefixesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -33295,6 +34337,9 @@ type PublicDelegatedPrefixesScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -33328,7 +34373,7 @@ type PublicDelegatedPrefixesScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*PublicDelegatedPrefixesScopedListWarningData `json:"data,omitempty"` + Data []*PacketMirroringsScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -33351,13 +34396,13 @@ type PublicDelegatedPrefixesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod PublicDelegatedPrefixesScopedListWarning +func (s *PacketMirroringsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringsScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PublicDelegatedPrefixesScopedListWarningData struct { +type PacketMirroringsScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -33388,214 +34433,150 @@ type PublicDelegatedPrefixesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *PublicDelegatedPrefixesScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod PublicDelegatedPrefixesScopedListWarningData +func (s *PacketMirroringsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod PacketMirroringsScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Quota: A quotas entry. -type Quota struct { - // Limit: [Output Only] Quota limit for this metric. - Limit float64 `json:"limit,omitempty"` +// PathMatcher: A matcher for the path portion of the URL. The +// BackendService from the longest-matched rule will serve the URL. If +// no rule was matched, the default service is used. +type PathMatcher struct { + // DefaultRouteAction: defaultRouteAction takes effect when none of the + // pathRules or routeRules match. The load balancer performs advanced + // routing actions, such as URL rewrites and header transformations, + // before forwarding the request to the selected backend. If + // defaultRouteAction specifies any weightedBackendServices, + // defaultService must not be set. Conversely if defaultService is set, + // defaultRouteAction cannot contain any weightedBackendServices. Only + // one of defaultRouteAction or defaultUrlRedirect must be set. URL maps + // for Classic external HTTP(S) load balancers only support the + // urlRewrite action within a path matcher's defaultRouteAction. + DefaultRouteAction *HttpRouteAction `json:"defaultRouteAction,omitempty"` - // Metric: [Output Only] Name of the quota metric. 
- // - // Possible values: - // "A2_CPUS" - // "AFFINITY_GROUPS" - // "AUTOSCALERS" - // "BACKEND_BUCKETS" - // "BACKEND_SERVICES" - // "C2D_CPUS" - // "C2_CPUS" - // "C3_CPUS" - // "COMMITMENTS" - // "COMMITTED_A2_CPUS" - // "COMMITTED_C2D_CPUS" - // "COMMITTED_C2_CPUS" - // "COMMITTED_C3_CPUS" - // "COMMITTED_CPUS" - // "COMMITTED_E2_CPUS" - // "COMMITTED_LICENSES" - // "COMMITTED_LOCAL_SSD_TOTAL_GB" - // "COMMITTED_M3_CPUS" - // "COMMITTED_MEMORY_OPTIMIZED_CPUS" - // "COMMITTED_N2A_CPUS" - // "COMMITTED_N2D_CPUS" - // "COMMITTED_N2_CPUS" - // "COMMITTED_NVIDIA_A100_GPUS" - // "COMMITTED_NVIDIA_K80_GPUS" - // "COMMITTED_NVIDIA_P100_GPUS" - // "COMMITTED_NVIDIA_P4_GPUS" - // "COMMITTED_NVIDIA_T4_GPUS" - // "COMMITTED_NVIDIA_V100_GPUS" - // "COMMITTED_T2A_CPUS" - // "COMMITTED_T2D_CPUS" - // "CPUS" - Guest CPUs - // "CPUS_ALL_REGIONS" - // "DISKS_TOTAL_GB" - // "E2_CPUS" - // "EXTERNAL_NETWORK_LB_FORWARDING_RULES" - // "EXTERNAL_PROTOCOL_FORWARDING_RULES" - // "EXTERNAL_VPN_GATEWAYS" - // "FIREWALLS" - // "FORWARDING_RULES" - // "GLOBAL_EXTERNAL_MANAGED_FORWARDING_RULES" - // "GLOBAL_INTERNAL_ADDRESSES" - // "GPUS_ALL_REGIONS" - // "HEALTH_CHECKS" - // "IMAGES" - // "INSTANCES" - // "INSTANCE_GROUPS" - // "INSTANCE_GROUP_MANAGERS" - // "INSTANCE_TEMPLATES" - // "INTERCONNECTS" - // "INTERCONNECT_ATTACHMENTS_PER_REGION" - // "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS" - // "INTERCONNECT_TOTAL_GBPS" - // "INTERNAL_ADDRESSES" - // "INTERNAL_TRAFFIC_DIRECTOR_FORWARDING_RULES" - // "IN_PLACE_SNAPSHOTS" - // "IN_USE_ADDRESSES" - // "IN_USE_BACKUP_SCHEDULES" - // "IN_USE_SNAPSHOT_SCHEDULES" - // "LOCAL_SSD_TOTAL_GB" - // "M1_CPUS" - // "M2_CPUS" - // "M3_CPUS" - // "MACHINE_IMAGES" - // "N2A_CPUS" - // "N2D_CPUS" - // "N2_CPUS" - // "NETWORKS" - // "NETWORK_ENDPOINT_GROUPS" - // "NETWORK_FIREWALL_POLICIES" - // "NODE_GROUPS" - // "NODE_TEMPLATES" - // "NVIDIA_A100_GPUS" - // "NVIDIA_K80_GPUS" - // "NVIDIA_P100_GPUS" - // "NVIDIA_P100_VWS_GPUS" - // "NVIDIA_P4_GPUS" - // "NVIDIA_P4_VWS_GPUS" - // "NVIDIA_T4_GPUS" - // "NVIDIA_T4_VWS_GPUS" - // "NVIDIA_V100_GPUS" - // "PACKET_MIRRORINGS" - // "PD_EXTREME_TOTAL_PROVISIONED_IOPS" - // "PREEMPTIBLE_CPUS" - // "PREEMPTIBLE_LOCAL_SSD_GB" - // "PREEMPTIBLE_NVIDIA_A100_GPUS" - // "PREEMPTIBLE_NVIDIA_K80_GPUS" - // "PREEMPTIBLE_NVIDIA_P100_GPUS" - // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" - // "PREEMPTIBLE_NVIDIA_P4_GPUS" - // "PREEMPTIBLE_NVIDIA_P4_VWS_GPUS" - // "PREEMPTIBLE_NVIDIA_T4_GPUS" - // "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS" - // "PREEMPTIBLE_NVIDIA_V100_GPUS" - // "PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK" - // "PSC_INTERNAL_LB_FORWARDING_RULES" - // "PUBLIC_ADVERTISED_PREFIXES" - // "PUBLIC_DELEGATED_PREFIXES" - // "REGIONAL_AUTOSCALERS" - // "REGIONAL_INSTANCE_GROUP_MANAGERS" - // "RESERVATIONS" - // "RESOURCE_POLICIES" - // "ROUTERS" - // "ROUTES" - // "SECURITY_POLICIES" - // "SECURITY_POLICIES_PER_REGION" - // "SECURITY_POLICY_CEVAL_RULES" - // "SECURITY_POLICY_RULES" - // "SECURITY_POLICY_RULES_PER_REGION" - // "SERVICE_ATTACHMENTS" - // "SNAPSHOTS" - The total number of snapshots allowed for a single - // project. 
- // "SSD_TOTAL_GB" - // "SSL_CERTIFICATES" - // "STATIC_ADDRESSES" - // "STATIC_BYOIP_ADDRESSES" - // "SUBNETWORKS" - // "T2A_CPUS" - // "T2D_CPUS" - // "TARGET_HTTPS_PROXIES" - // "TARGET_HTTP_PROXIES" - // "TARGET_INSTANCES" - // "TARGET_POOLS" - // "TARGET_SSL_PROXIES" - // "TARGET_TCP_PROXIES" - // "TARGET_VPN_GATEWAYS" - // "URL_MAPS" - // "VPN_GATEWAYS" - // "VPN_TUNNELS" - // "XPN_SERVICE_PROJECTS" - Metric string `json:"metric,omitempty"` + // DefaultService: The full or partial URL to the BackendService + // resource. This URL is used if none of the pathRules or routeRules + // defined by this PathMatcher are matched. For example, the following + // are all valid URLs to a BackendService resource: - + // https://www.googleapis.com/compute/v1/projects/project + // /global/backendServices/backendService - + // compute/v1/projects/project/global/backendServices/backendService - + // global/backendServices/backendService If defaultRouteAction is also + // specified, advanced routing actions, such as URL rewrites, take + // effect before sending the request to the backend. However, if + // defaultService is specified, defaultRouteAction cannot contain any + // weightedBackendServices. Conversely, if defaultRouteAction specifies + // any weightedBackendServices, defaultService must not be specified. + // Only one of defaultService, defaultUrlRedirect , or + // defaultRouteAction.weightedBackendService must be set. Authorization + // requires one or more of the following Google IAM permissions on the + // specified resource default_service: - compute.backendBuckets.use - + // compute.backendServices.use + DefaultService string `json:"defaultService,omitempty"` - // Owner: [Output Only] Owning resource. This is the resource on which - // this quota is applied. - Owner string `json:"owner,omitempty"` + // DefaultUrlRedirect: When none of the specified pathRules or + // routeRules match, the request is redirected to a URL specified by + // defaultUrlRedirect. If defaultUrlRedirect is specified, + // defaultService or defaultRouteAction must not be set. Not supported + // when the URL map is bound to a target gRPC proxy. + DefaultUrlRedirect *HttpRedirectAction `json:"defaultUrlRedirect,omitempty"` - // Usage: [Output Only] Current usage of this metric. - Usage float64 `json:"usage,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // ForceSendFields is a list of field names (e.g. "Limit") to - // unconditionally include in API requests. By default, fields with + // HeaderAction: Specifies changes to request and response headers that + // need to take effect for the selected backend service. HeaderAction + // specified here are applied after the matching HttpRouteRule + // HeaderAction and before the HeaderAction in the UrlMap HeaderAction + // is not supported for load balancers that have their + // loadBalancingScheme set to EXTERNAL. Not supported when the URL map + // is bound to a target gRPC proxy that has validateForProxyless field + // set to true. + HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` + + // Name: The name to which this PathMatcher is referred by the HostRule. + Name string `json:"name,omitempty"` + + // PathRules: The list of path rules. Use this list instead of + // routeRules when routing based on simple path matching is all that's + // required. The order by which path rules are specified does not + // matter. 
Matches are always done on the longest-path-first basis. For + // example: a pathRule with a path /a/b/c/* will match before /a/b/* + // irrespective of the order in which those paths appear in this list. + // Within a given pathMatcher, only one of pathRules or routeRules must + // be set. + PathRules []*PathRule `json:"pathRules,omitempty"` + + // RouteRules: The list of HTTP route rules. Use this list instead of + // pathRules when advanced route matching and routing actions are + // desired. routeRules are evaluated in order of priority, from the + // lowest to highest number. Within a given pathMatcher, you can set + // only one of pathRules or routeRules. + RouteRules []*HttpRouteRule `json:"routeRules,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DefaultRouteAction") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Limit") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "DefaultRouteAction") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *Quota) MarshalJSON() ([]byte, error) { - type NoMethod Quota +func (s *PathMatcher) MarshalJSON() ([]byte, error) { + type NoMethod PathMatcher raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -func (s *Quota) UnmarshalJSON(data []byte) error { - type NoMethod Quota - var s1 struct { - Limit gensupport.JSONFloat64 `json:"limit"` - Usage gensupport.JSONFloat64 `json:"usage"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Limit = float64(s1.Limit) - s.Usage = float64(s1.Usage) - return nil -} - -// Reference: Represents a reference to a resource. -type Reference struct { - // Kind: [Output Only] Type of the resource. Always compute#reference - // for references. - Kind string `json:"kind,omitempty"` +// PathRule: A path-matching rule for a URL. If matched, will use the +// specified BackendService to handle the traffic arriving at this URL. +type PathRule struct { + // Paths: The list of path patterns to match. Each must start with / and + // the only place a * is allowed is at the end following a /. The string + // fed to the path matcher does not include any text after the first ? + // or #, and those chars are not allowed here. + Paths []string `json:"paths,omitempty"` - // ReferenceType: A description of the reference type with no implied - // semantics. Possible values include: 1. 
MEMBER_OF - ReferenceType string `json:"referenceType,omitempty"` + // RouteAction: In response to a matching path, the load balancer + // performs advanced routing actions, such as URL rewrites and header + // transformations, before forwarding the request to the selected + // backend. If routeAction specifies any weightedBackendServices, + // service must not be set. Conversely if service is set, routeAction + // cannot contain any weightedBackendServices. Only one of routeAction + // or urlRedirect must be set. URL maps for Classic external HTTP(S) + // load balancers only support the urlRewrite action within a path + // rule's routeAction. + RouteAction *HttpRouteAction `json:"routeAction,omitempty"` - // Referrer: URL of the resource which refers to the target. - Referrer string `json:"referrer,omitempty"` + // Service: The full or partial URL of the backend service resource to + // which traffic is directed if this rule is matched. If routeAction is + // also specified, advanced routing actions, such as URL rewrites, take + // effect before sending the request to the backend. However, if service + // is specified, routeAction cannot contain any weightedBackendServices. + // Conversely, if routeAction specifies any weightedBackendServices, + // service must not be specified. Only one of urlRedirect, service or + // routeAction.weightedBackendService must be set. + Service string `json:"service,omitempty"` - // Target: URL of the resource to which this reference points. - Target string `json:"target,omitempty"` + // UrlRedirect: When a path pattern is matched, the request is + // redirected to a URL specified by urlRedirect. If urlRedirect is + // specified, service or routeAction must not be set. Not supported when + // the URL map is bound to a target gRPC proxy. + UrlRedirect *HttpRedirectAction `json:"urlRedirect,omitempty"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // ForceSendFields is a list of field names (e.g. "Paths") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -33603,7 +34584,7 @@ type Reference struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API + // NullFields is a list of field names (e.g. "Paths") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -33612,117 +34593,168 @@ type Reference struct { NullFields []string `json:"-"` } -func (s *Reference) MarshalJSON() ([]byte, error) { - type NoMethod Reference +func (s *PathRule) MarshalJSON() ([]byte, error) { + type NoMethod PathRule raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Region: Represents a Region resource. A region is a geographical area -// where a resource is located. For more information, read Regions and -// Zones. -type Region struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Deprecated -- [Output Only] The deprecation status associated with - // this region. 
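As an aside for readers of this generated file, here is a minimal sketch of how the PathMatcher and PathRule types above are typically wired into a UrlMap. It assumes the UrlMap and HostRule types defined elsewhere in this package; every name, host, and backend-service URL below is a hypothetical placeholder, not something taken from this patch.

package examples

import "google.golang.org/api/compute/v1"

// buildExampleUrlMap shows PathMatcher and PathRule in context. Only one of
// Service, RouteAction, or UrlRedirect may be set on a PathRule, and only one
// of DefaultService, DefaultRouteAction weightedBackendServices, or
// DefaultUrlRedirect on a PathMatcher, per the field comments above.
func buildExampleUrlMap() *compute.UrlMap {
	return &compute.UrlMap{
		Name:           "example-url-map",
		DefaultService: "global/backendServices/default-backend",
		HostRules: []*compute.HostRule{{
			Hosts:       []string{"example.com"},
			PathMatcher: "api-paths",
		}},
		PathMatchers: []*compute.PathMatcher{{
			Name:           "api-paths",
			DefaultService: "global/backendServices/default-backend",
			PathRules: []*compute.PathRule{{
				// Paths must start with "/"; "*" is allowed only at the end, after a "/".
				Paths:   []string{"/api/*"},
				Service: "global/backendServices/api-backend",
			}},
		}},
	}
}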
- Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] Textual description of the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#region for - // regions. - Kind string `json:"kind,omitempty"` +type PerInstanceConfig struct { + // Fingerprint: Fingerprint of this per-instance config. This field can + // be used in optimistic locking. It is ignored when inserting a + // per-instance config. An up-to-date fingerprint must be provided in + // order to update an existing per-instance configuration or the field + // needs to be unset. + Fingerprint string `json:"fingerprint,omitempty"` - // Name: [Output Only] Name of the resource. + // Name: The name of a per-instance configuration and its corresponding + // instance. Serves as a merge key during UpdatePerInstanceConfigs + // operations, that is, if a per-instance configuration with the same + // name exists then it will be updated, otherwise a new one will be + // created for the VM instance with the same name. An attempt to create + // a per-instance configconfiguration for a VM instance that either + // doesn't exist or is not part of the group will result in an error. Name string `json:"name,omitempty"` - // Quotas: [Output Only] Quotas assigned to this region. - Quotas []*Quota `json:"quotas,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // PreservedState: The intended preserved state for the given instance. + // Does not contain preserved state generated from a stateful policy. + PreservedState *PreservedState `json:"preservedState,omitempty"` - // Status: [Output Only] Status of the region, either UP or DOWN. + // Status: The status of applying this per-instance configuration on the + // corresponding managed instance. // // Possible values: - // "DOWN" - // "UP" + // "APPLYING" - The per-instance configuration is being applied to the + // instance, but is not yet effective, possibly waiting for the instance + // to, for example, REFRESH. + // "DELETING" - The per-instance configuration deletion is being + // applied on the instance, possibly waiting for the instance to, for + // example, REFRESH. + // "EFFECTIVE" - The per-instance configuration is effective on the + // instance, meaning that all disks, ips and metadata specified in this + // configuration are attached or set on the instance. + // "NONE" - *[Default]* The default status, when no per-instance + // configuration exists. + // "UNAPPLIED" - The per-instance configuration is set on an instance + // but not been applied yet. + // "UNAPPLIED_DELETION" - The per-instance configuration has been + // deleted, but the deletion is not yet applied. Status string `json:"status,omitempty"` - // SupportsPzs: [Output Only] Reserved for future use. - SupportsPzs bool `json:"supportsPzs,omitempty"` - - // Zones: [Output Only] A list of zones available in this region, in the - // form of resource URLs. - Zones []string `json:"zones,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. 
By default, fields with + // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Region) MarshalJSON() ([]byte, error) { - type NoMethod Region +func (s *PerInstanceConfig) MarshalJSON() ([]byte, error) { + type NoMethod PerInstanceConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionAutoscalerList: Contains a list of autoscalers. -type RegionAutoscalerList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Autoscaler resources. - Items []*Autoscaler `json:"items,omitempty"` +// Policy: An Identity and Access Management (IAM) policy, which +// specifies access controls for Google Cloud resources. A `Policy` is a +// collection of `bindings`. A `binding` binds one or more `members`, or +// principals, to a single `role`. Principals can be user accounts, +// service accounts, Google groups, and domains (such as G Suite). A +// `role` is a named list of permissions; each `role` can be an IAM +// predefined role or a user-created custom role. For some types of +// Google Cloud resources, a `binding` can also specify a `condition`, +// which is a logical expression that allows access to a resource only +// if the expression evaluates to `true`. A condition can add +// constraints based on attributes of the request, the resource, or +// both. To learn which resources support conditions in their IAM +// policies, see the IAM documentation +// (https://cloud.google.com/iam/help/conditions/resource-policies). 
+// **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 +// For a description of IAM and its features, see the IAM documentation +// (https://cloud.google.com/iam/docs/). +type Policy struct { + // AuditConfigs: Specifies cloud audit logging configuration for this + // policy. + AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` + // Bindings: Associates a list of `members`, or principals, with a + // `role`. Optionally, may specify a `condition` that determines how and + // when the `bindings` are applied. Each of the `bindings` must contain + // at least one principal. The `bindings` in a `Policy` can refer to up + // to 1,500 principals; up to 250 of these principals can be Google + // groups. Each occurrence of a principal counts towards these limits. + // For example, if the `bindings` grant 50 different roles to + // `user:alice@example.com`, and not to any other principal, then you + // can add another 1,450 principals to the `bindings` in the `Policy`. + Bindings []*Binding `json:"bindings,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // Etag: `etag` is used for optimistic concurrency control as a way to + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of + // the conditions in the version `3` policy are lost. + Etag string `json:"etag,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. 
- SelfLink string `json:"selfLink,omitempty"` + // Rules: This is deprecated and has no effect. Do not use. + Rules []*Rule `json:"rules,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *RegionAutoscalerListWarning `json:"warning,omitempty"` + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. + // Any operation that affects conditional role bindings must specify + // version `3`. This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the IAM + // documentation + // (https://cloud.google.com/iam/help/conditions/resource-policies). + Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "AuditConfigs") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -33730,92 +34762,60 @@ type RegionAutoscalerList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AuditConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { - type NoMethod RegionAutoscalerList +func (s *Policy) MarshalJSON() ([]byte, error) { + type NoMethod Policy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionAutoscalerListWarning: [Output Only] Informational warning -// message. -type RegionAutoscalerListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. 
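The Policy type above relies on etag-based optimistic concurrency and requires version 3 whenever a binding carries a condition. Below is a hedged sketch of the read-modify-write step those comments describe, assuming a policy previously returned by a getIamPolicy call and the Binding and Expr types defined elsewhere in this package; the role, member, and condition values are placeholders.

package examples

import "google.golang.org/api/compute/v1"

// addConditionalBinding appends a conditional role binding to a policy read
// earlier via getIamPolicy. The Etag from that read is kept unchanged so a
// concurrent update causes the subsequent setIamPolicy call to fail instead
// of silently overwriting the other change.
func addConditionalBinding(p *compute.Policy) *compute.Policy {
	p.Bindings = append(p.Bindings, &compute.Binding{
		Role:    "roles/compute.instanceAdmin.v1",
		Members: []string{"user:example-user@example.com"},
		Condition: &compute.Expr{
			Title:      "expirable access",
			Expression: "request.time < timestamp('2030-01-01T00:00:00Z')",
		},
	})
	p.Version = 3 // conditional bindings require policy version 3
	return p
}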
- // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` +type PreconfiguredWafSet struct { + // ExpressionSets: List of entities that are currently supported for WAF + // rules. + ExpressionSets []*WafExpressionSet `json:"expressionSets,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*RegionAutoscalerListWarningData `json:"data,omitempty"` + // ForceSendFields is a list of field names (e.g. "ExpressionSets") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // NullFields is a list of field names (e.g. "ExpressionSets") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - // ForceSendFields is a list of field names (e.g. "Code") to +func (s *PreconfiguredWafSet) MarshalJSON() ([]byte, error) { + type NoMethod PreconfiguredWafSet + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PreservedState: Preserved state for a given instance. +type PreservedState struct { + // Disks: Preserved disks defined for this instance. This map is keyed + // with the device names of the disks. + Disks map[string]PreservedStatePreservedDisk `json:"disks,omitempty"` + + // Metadata: Preserved metadata defined for this instance. + Metadata map[string]string `json:"metadata,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Disks") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -33823,7 +34823,7 @@ type RegionAutoscalerListWarning struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API + // NullFields is a list of field names (e.g. "Disks") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -33832,27 +34832,42 @@ type RegionAutoscalerListWarning struct { NullFields []string `json:"-"` } -func (s *RegionAutoscalerListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionAutoscalerListWarning +func (s *PreservedState) MarshalJSON() ([]byte, error) { + type NoMethod PreservedState raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionAutoscalerListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +type PreservedStatePreservedDisk struct { + // AutoDelete: These stateful disks will never be deleted during + // autohealing, update, instance recreate operations. This flag is used + // to configure if the disk should be deleted after it is no longer used + // by the group, e.g. when the given instance or the whole MIG is + // deleted. Note: disks attached in READ_ONLY mode cannot be + // auto-deleted. 
+ // + // Possible values: + // "NEVER" + // "ON_PERMANENT_INSTANCE_DELETION" + AutoDelete string `json:"autoDelete,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // Mode: The mode in which to attach this disk, either READ_WRITE or + // READ_ONLY. If not specified, the default is to attach the disk in + // READ_WRITE mode. + // + // Possible values: + // "READ_ONLY" - Attaches this disk in read-only mode. Multiple VM + // instances can use a disk in READ_ONLY mode at a time. + // "READ_WRITE" - *[Default]* Attaches this disk in READ_WRITE mode. + // Only one VM instance at a time can be attached to a disk in + // READ_WRITE mode. + Mode string `json:"mode,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // Source: The URL of the disk resource that is stateful and should be + // attached to the VM instance. + Source string `json:"source,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoDelete") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -33860,8 +34875,8 @@ type RegionAutoscalerListWarningData struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "AutoDelete") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -33869,43 +34884,127 @@ type RegionAutoscalerListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionAutoscalerListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionAutoscalerListWarningData +func (s *PreservedStatePreservedDisk) MarshalJSON() ([]byte, error) { + type NoMethod PreservedStatePreservedDisk raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionDiskTypeList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +// Project: Represents a Project resource. A project is used to organize +// resources in a Google Cloud Platform environment. For more +// information, read about the Resource Hierarchy. +type Project struct { + // CommonInstanceMetadata: Metadata key/value pairs available to all + // instances contained in this project. See Custom metadata for more + // information. + CommonInstanceMetadata *Metadata `json:"commonInstanceMetadata,omitempty"` - // Items: A list of DiskType resources. - Items []*DiskType `json:"items,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#regionDiskTypeList for region disk types. + // DefaultNetworkTier: This signifies the default network tier used for + // configuring resources of the project and can only take the following + // values: PREMIUM, STANDARD. Initially the default network tier is + // PREMIUM. 
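For context, the PerInstanceConfig, PreservedState, and PreservedStatePreservedDisk types above make up a stateful per-instance configuration for a managed instance group. A small illustrative sketch follows; the instance name, device name, disk URL, and metadata entry are placeholders and not taken from this patch.

package examples

import "google.golang.org/api/compute/v1"

// examplePerInstanceConfig preserves one READ_WRITE disk and a metadata entry
// for a named VM in the group. Note that Disks is keyed by device name and
// holds PreservedStatePreservedDisk values, not pointers.
func examplePerInstanceConfig() *compute.PerInstanceConfig {
	return &compute.PerInstanceConfig{
		Name: "example-instance-1", // must match the VM the config applies to
		PreservedState: &compute.PreservedState{
			Disks: map[string]compute.PreservedStatePreservedDisk{
				"data-disk": {
					AutoDelete: "NEVER",
					Mode:       "READ_WRITE",
					Source:     "zones/us-central1-a/disks/example-data-disk",
				},
			},
			Metadata: map[string]string{"role": "worker"},
		},
	}
}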
+ // + // Possible values: + // "FIXED_STANDARD" - Public internet quality with fixed bandwidth. + // "PREMIUM" - High quality, Google-grade network tier, support for + // all networking products. + // "STANDARD" - Public internet quality, only limited support for + // other networking products. + // "STANDARD_OVERRIDES_FIXED_STANDARD" - (Output only) Temporary tier + // for FIXED_STANDARD when fixed standard tier is expired or not + // configured. + DefaultNetworkTier string `json:"defaultNetworkTier,omitempty"` + + // DefaultServiceAccount: [Output Only] Default service account used by + // VMs running in this project. + DefaultServiceAccount string `json:"defaultServiceAccount,omitempty"` + + // Description: An optional textual description of the resource. + Description string `json:"description,omitempty"` + + // EnabledFeatures: Restricted features enabled for use on this project. + EnabledFeatures []string `json:"enabledFeatures,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. This is *not* the project ID, + // and is just a unique ID used by Compute Engine to identify resources. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#project for + // projects. Kind string `json:"kind,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // Name: The project ID. For example: my-example-project. Use the + // project ID to make requests to Compute Engine. + Name string `json:"name,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. + // Quotas: [Output Only] Quotas assigned to this project. + Quotas []*Quota `json:"quotas,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *RegionDiskTypeListWarning `json:"warning,omitempty"` + // UsageExportLocation: The naming prefix for daily usage reports and + // the Google Cloud Storage bucket where they are stored. + UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` + + // VmDnsSetting: [Output Only] Default internal DNS setting used by VMs + // running in this project. + // + // Possible values: + // "GLOBAL_DEFAULT" + // "UNSPECIFIED_VM_DNS_SETTING" + // "ZONAL_DEFAULT" + // "ZONAL_ONLY" + VmDnsSetting string `json:"vmDnsSetting,omitempty"` + + // XpnProjectStatus: [Output Only] The role this project has in a shared + // VPC configuration. Currently, only projects with the host role, which + // is specified by the value HOST, are differentiated. + // + // Possible values: + // "HOST" + // "UNSPECIFIED_XPN_PROJECT_STATUS" + XpnProjectStatus string `json:"xpnProjectStatus,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. + // "CommonInstanceMetadata") to unconditionally include in API requests. 
+ // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CommonInstanceMetadata") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Project) MarshalJSON() ([]byte, error) { + type NoMethod Project + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ProjectsDisableXpnResourceRequest struct { + // XpnResource: Service resource (a.k.a service project) ID. + XpnResource *XpnResourceId `json:"xpnResource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "XpnResource") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -33913,92 +35012,26 @@ type RegionDiskTypeList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "XpnResource") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionDiskTypeList) MarshalJSON() ([]byte, error) { - type NoMethod RegionDiskTypeList +func (s *ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { + type NoMethod ProjectsDisableXpnResourceRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionDiskTypeListWarning: [Output Only] Informational warning -// message. -type RegionDiskTypeListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. 
- // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*RegionDiskTypeListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` +type ProjectsEnableXpnResourceRequest struct { + // XpnResource: Service resource (a.k.a service project) ID. + XpnResource *XpnResourceId `json:"xpnResource,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "XpnResource") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -34006,36 +35039,44 @@ type RegionDiskTypeListWarning struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "XpnResource") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionDiskTypeListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionDiskTypeListWarning +func (s *ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { + type NoMethod ProjectsEnableXpnResourceRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionDiskTypeListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +type ProjectsGetXpnResources struct { + // Kind: [Output Only] Type of resource. Always + // compute#projectsGetXpnResources for lists of service resources (a.k.a + // service projects) + Kind string `json:"kind,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // Resources: Service resources (a.k.a service projects) attached to + // this project as their shared VPC host. + Resources []*XpnResourceId `json:"resources,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -34043,7 +35084,7 @@ type RegionDiskTypeListWarningData struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API + // NullFields is a list of field names (e.g. "Kind") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -34052,17 +35093,19 @@ type RegionDiskTypeListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionDiskTypeListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionDiskTypeListWarningData +func (s *ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { + type NoMethod ProjectsGetXpnResources raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionDisksAddResourcePoliciesRequest struct { - // ResourcePolicies: Resource policies to be added to this disk. - ResourcePolicies []string `json:"resourcePolicies,omitempty"` +type ProjectsListXpnHostsRequest struct { + // Organization: Optional organization ID managed by Cloud Resource + // Manager, for which to list shared VPC host projects. If not + // specified, the organization will be inferred from the project. + Organization string `json:"organization,omitempty"` - // ForceSendFields is a list of field names (e.g. "ResourcePolicies") to + // ForceSendFields is a list of field names (e.g. "Organization") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -34070,27 +35113,36 @@ type RegionDisksAddResourcePoliciesRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ResourcePolicies") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Organization") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionDisksAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionDisksAddResourcePoliciesRequest +func (s *ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { + type NoMethod ProjectsListXpnHostsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionDisksRemoveResourcePoliciesRequest struct { - // ResourcePolicies: Resource policies to be removed from this disk. - ResourcePolicies []string `json:"resourcePolicies,omitempty"` +type ProjectsSetDefaultNetworkTierRequest struct { + // NetworkTier: Default network tier to be set. + // + // Possible values: + // "FIXED_STANDARD" - Public internet quality with fixed bandwidth. + // "PREMIUM" - High quality, Google-grade network tier, support for + // all networking products. + // "STANDARD" - Public internet quality, only limited support for + // other networking products. + // "STANDARD_OVERRIDES_FIXED_STANDARD" - (Output only) Temporary tier + // for FIXED_STANDARD when fixed standard tier is expired or not + // configured. 
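The ProjectsSetDefaultNetworkTierRequest type above is the request body for changing a project's default network tier. Here is a hedged sketch of submitting it through the generated Projects service; it assumes the compute.NewService constructor and the Projects.SetDefaultNetworkTier call defined elsewhere in this package, and the project ID is a placeholder.

package examples

import (
	"context"

	"google.golang.org/api/compute/v1"
)

// setDefaultNetworkTier switches the project-wide default tier to PREMIUM.
// The call returns a long-running Operation, ignored here for brevity.
func setDefaultNetworkTier(ctx context.Context, projectID string) error {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return err
	}
	req := &compute.ProjectsSetDefaultNetworkTierRequest{NetworkTier: "PREMIUM"}
	_, err = svc.Projects.SetDefaultNetworkTier(projectID, req).Context(ctx).Do()
	return err
}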
+ NetworkTier string `json:"networkTier,omitempty"` - // ForceSendFields is a list of field names (e.g. "ResourcePolicies") to + // ForceSendFields is a list of field names (e.g. "NetworkTier") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -34098,60 +35150,137 @@ type RegionDisksRemoveResourcePoliciesRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ResourcePolicies") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "NetworkTier") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionDisksRemoveResourcePoliciesRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionDisksRemoveResourcePoliciesRequest +func (s *ProjectsSetDefaultNetworkTierRequest) MarshalJSON() ([]byte, error) { + type NoMethod ProjectsSetDefaultNetworkTierRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionDisksResizeRequest struct { - // SizeGb: The new size of the regional persistent disk, which is - // specified in GB. - SizeGb int64 `json:"sizeGb,omitempty,string"` +// PublicAdvertisedPrefix: A public advertised prefix represents an +// aggregated IP prefix or netblock which customers bring to cloud. The +// IP prefix is a single unit of route advertisement and is announced +// globally to the internet. +type PublicAdvertisedPrefix struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // ForceSendFields is a list of field names (e.g. "SizeGb") to - // unconditionally include in API requests. By default, fields with + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // DnsVerificationIp: The IPv4 address to be used for reverse DNS + // verification. + DnsVerificationIp string `json:"dnsVerificationIp,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a new PublicAdvertisedPrefix. An + // up-to-date fingerprint must be provided in order to update the + // PublicAdvertisedPrefix, otherwise the request will fail with error + // 412 conditionNotMet. To see the latest fingerprint, make a get() + // request to retrieve a PublicAdvertisedPrefix. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource type. 
The + // server generates this identifier. + Id uint64 `json:"id,omitempty,string"` + + // IpCidrRange: The IPv4 address range, in CIDR format, represented by + // this public advertised prefix. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#publicAdvertisedPrefix for public advertised prefixes. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PublicDelegatedPrefixs: [Output Only] The list of public delegated + // prefixes that exist for this public advertised prefix. + PublicDelegatedPrefixs []*PublicAdvertisedPrefixPublicDelegatedPrefix `json:"publicDelegatedPrefixs,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SharedSecret: [Output Only] The shared secret to be used for reverse + // DNS verification. + SharedSecret string `json:"sharedSecret,omitempty"` + + // Status: The status of the public advertised prefix. Possible values + // include: - `INITIAL`: RPKI validation is complete. - + // `PTR_CONFIGURED`: User has configured the PTR. - `VALIDATED`: Reverse + // DNS lookup is successful. - `REVERSE_DNS_LOOKUP_FAILED`: Reverse DNS + // lookup failed. - `PREFIX_CONFIGURATION_IN_PROGRESS`: The prefix is + // being configured. - `PREFIX_CONFIGURATION_COMPLETE`: The prefix is + // fully configured. - `PREFIX_REMOVAL_IN_PROGRESS`: The prefix is being + // removed. + // + // Possible values: + // "INITIAL" - RPKI validation is complete. + // "PREFIX_CONFIGURATION_COMPLETE" - The prefix is fully configured. + // "PREFIX_CONFIGURATION_IN_PROGRESS" - The prefix is being + // configured. + // "PREFIX_REMOVAL_IN_PROGRESS" - The prefix is being removed. + // "PTR_CONFIGURED" - User has configured the PTR. + // "REVERSE_DNS_LOOKUP_FAILED" - Reverse DNS lookup failed. + // "VALIDATED" - Reverse DNS lookup is successful. + Status string `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SizeGb") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. 
"CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RegionDisksResizeRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionDisksResizeRequest +func (s *PublicAdvertisedPrefix) MarshalJSON() ([]byte, error) { + type NoMethod PublicAdvertisedPrefix raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupList: Contains a list of InstanceGroup resources. -type RegionInstanceGroupList struct { +type PublicAdvertisedPrefixList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of InstanceGroup resources. - Items []*InstanceGroup `json:"items,omitempty"` + // Items: A list of PublicAdvertisedPrefix resources. + Items []*PublicAdvertisedPrefix `json:"items,omitempty"` - // Kind: The resource type. + // Kind: [Output Only] Type of the resource. Always + // compute#publicAdvertisedPrefix for public advertised prefixes. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -34166,7 +35295,7 @@ type RegionInstanceGroupList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` + Warning *PublicAdvertisedPrefixListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -34189,15 +35318,15 @@ type RegionInstanceGroupList struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupList +func (s *PublicAdvertisedPrefixList) MarshalJSON() ([]byte, error) { + type NoMethod PublicAdvertisedPrefixList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupListWarning: [Output Only] Informational warning -// message. -type RegionInstanceGroupListWarning struct { +// PublicAdvertisedPrefixListWarning: [Output Only] Informational +// warning message. +type PublicAdvertisedPrefixListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -34219,6 +35348,9 @@ type RegionInstanceGroupListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -34226,6 +35358,9 @@ type RegionInstanceGroupListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. 
+ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -34259,7 +35394,7 @@ type RegionInstanceGroupListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` + Data []*PublicAdvertisedPrefixListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -34282,13 +35417,13 @@ type RegionInstanceGroupListWarning struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupListWarning +func (s *PublicAdvertisedPrefixListWarning) MarshalJSON() ([]byte, error) { + type NoMethod PublicAdvertisedPrefixListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupListWarningData struct { +type PublicAdvertisedPrefixListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -34319,20 +35454,35 @@ type RegionInstanceGroupListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupListWarningData +func (s *PublicAdvertisedPrefixListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod PublicAdvertisedPrefixListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupManagerDeleteInstanceConfigReq: -// RegionInstanceGroupManagers.deletePerInstanceConfigs -type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { - // Names: The list of instance names for which we want to delete - // per-instance configs on this managed instance group. - Names []string `json:"names,omitempty"` +// PublicAdvertisedPrefixPublicDelegatedPrefix: Represents a CIDR range +// which can be used to assign addresses. +type PublicAdvertisedPrefixPublicDelegatedPrefix struct { + // IpRange: The IP address range of the public delegated prefix + IpRange string `json:"ipRange,omitempty"` - // ForceSendFields is a list of field names (e.g. "Names") to + // Name: The name of the public delegated prefix + Name string `json:"name,omitempty"` + + // Project: The project number of the public delegated prefix + Project string `json:"project,omitempty"` + + // Region: The region of the public delegated prefix if it is regional. + // If absent, the prefix is global. + Region string `json:"region,omitempty"` + + // Status: The status of the public delegated prefix. Possible values + // are: INITIALIZING: The public delegated prefix is being initialized + // and addresses cannot be created yet. ANNOUNCED: The public delegated + // prefix is active. + Status string `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpRange") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -34340,8 +35490,8 @@ type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Names") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "IpRange") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -34349,25 +35499,133 @@ type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerDeleteInstanceConfigReq) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagerDeleteInstanceConfigReq +func (s *PublicAdvertisedPrefixPublicDelegatedPrefix) MarshalJSON() ([]byte, error) { + type NoMethod PublicAdvertisedPrefixPublicDelegatedPrefix raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupManagerList: Contains a list of managed instance -// groups. -type RegionInstanceGroupManagerList struct { +// PublicDelegatedPrefix: A PublicDelegatedPrefix resource represents an +// IP block within a PublicAdvertisedPrefix that is configured within a +// single cloud scope (global or region). IPs in the block can be +// allocated to resources within that scope. Public delegated prefixes +// may be further broken up into smaller IP blocks in the same scope as +// the parent block. +type PublicDelegatedPrefix struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a new PublicDelegatedPrefix. An + // up-to-date fingerprint must be provided in order to update the + // PublicDelegatedPrefix, otherwise the request will fail with error 412 + // conditionNotMet. To see the latest fingerprint, make a get() request + // to retrieve a PublicDelegatedPrefix. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource type. The + // server generates this identifier. + Id uint64 `json:"id,omitempty,string"` + + // IpCidrRange: The IPv4 address range, in CIDR format, represented by + // this public delegated prefix. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // IsLiveMigration: If true, the prefix will be live migrated. + IsLiveMigration bool `json:"isLiveMigration,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#publicDelegatedPrefix for public delegated prefixes. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. 
Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // ParentPrefix: The URL of parent prefix. Either PublicAdvertisedPrefix + // or PublicDelegatedPrefix. + ParentPrefix string `json:"parentPrefix,omitempty"` + + // PublicDelegatedSubPrefixs: The list of sub public delegated prefixes + // that exist for this public delegated prefix. + PublicDelegatedSubPrefixs []*PublicDelegatedPrefixPublicDelegatedSubPrefix `json:"publicDelegatedSubPrefixs,omitempty"` + + // Region: [Output Only] URL of the region where the public delegated + // prefix resides. This field applies only to the region resource. You + // must specify this field as part of the HTTP request URL. It is not + // settable as a field in the request body. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] The status of the public delegated prefix, + // which can be one of following values: - `INITIALIZING` The public + // delegated prefix is being initialized and addresses cannot be created + // yet. - `READY_TO_ANNOUNCE` The public delegated prefix is a live + // migration prefix and is active. - `ANNOUNCED` The public delegated + // prefix is active. - `DELETING` The public delegated prefix is being + // deprovsioned. + // + // Possible values: + // "ANNOUNCED" - The public delegated prefix is active. + // "DELETING" - The public delegated prefix is being deprovsioned. + // "INITIALIZING" - The public delegated prefix is being initialized + // and addresses cannot be created yet. + // "READY_TO_ANNOUNCE" - The public delegated prefix is currently + // withdrawn but ready to be announced. + Status string `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *PublicDelegatedPrefix) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefix + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PublicDelegatedPrefixAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of InstanceGroupManager resources. 
- Items []*InstanceGroupManager `json:"items,omitempty"` + // Items: A list of PublicDelegatedPrefixesScopedList resources. + Items map[string]PublicDelegatedPrefixesScopedList `json:"items,omitempty"` - // Kind: [Output Only] The resource type, which is always - // compute#instanceGroupManagerList for a list of managed instance - // groups that exist in th regional scope. + // Kind: [Output Only] Type of the resource. Always + // compute#publicDelegatedPrefixAggregatedList for aggregated lists of + // public delegated prefixes. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -34381,8 +35639,11 @@ type RegionInstanceGroupManagerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"` + Warning *PublicDelegatedPrefixAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -34405,15 +35666,15 @@ type RegionInstanceGroupManagerList struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagerList +func (s *PublicDelegatedPrefixAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupManagerListWarning: [Output Only] Informational -// warning message. -type RegionInstanceGroupManagerListWarning struct { +// PublicDelegatedPrefixAggregatedListWarning: [Output Only] +// Informational warning message. +type PublicDelegatedPrefixAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -34435,6 +35696,9 @@ type RegionInstanceGroupManagerListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -34442,6 +35706,9 @@ type RegionInstanceGroupManagerListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -34475,7 +35742,7 @@ type RegionInstanceGroupManagerListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. 
// For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*RegionInstanceGroupManagerListWarningData `json:"data,omitempty"` + Data []*PublicDelegatedPrefixAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -34498,13 +35765,13 @@ type RegionInstanceGroupManagerListWarning struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagerListWarning +func (s *PublicDelegatedPrefixAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagerListWarningData struct { +type PublicDelegatedPrefixAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -34535,284 +35802,23 @@ type RegionInstanceGroupManagerListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagerListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionInstanceGroupManagerPatchInstanceConfigReq: -// RegionInstanceGroupManagers.patchPerInstanceConfigs -type RegionInstanceGroupManagerPatchInstanceConfigReq struct { - // PerInstanceConfigs: The list of per-instance configs to insert or - // patch on this managed instance group. - PerInstanceConfigs []*PerInstanceConfig `json:"perInstanceConfigs,omitempty"` - - // ForceSendFields is a list of field names (e.g. "PerInstanceConfigs") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "PerInstanceConfigs") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagerPatchInstanceConfigReq) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagerPatchInstanceConfigReq - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionInstanceGroupManagerUpdateInstanceConfigReq: -// RegionInstanceGroupManagers.updatePerInstanceConfigs -type RegionInstanceGroupManagerUpdateInstanceConfigReq struct { - // PerInstanceConfigs: The list of per-instance configs to insert or - // patch on this managed instance group. - PerInstanceConfigs []*PerInstanceConfig `json:"perInstanceConfigs,omitempty"` - - // ForceSendFields is a list of field names (e.g. "PerInstanceConfigs") - // to unconditionally include in API requests. 
By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "PerInstanceConfigs") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagerUpdateInstanceConfigReq) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagerUpdateInstanceConfigReq - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupManagersAbandonInstancesRequest struct { - // Instances: The URLs of one or more instances to abandon. This can be - // a full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersAbandonInstancesRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionInstanceGroupManagersApplyUpdatesRequest: -// RegionInstanceGroupManagers.applyUpdatesToInstances -type RegionInstanceGroupManagersApplyUpdatesRequest struct { - // AllInstances: Flag to update all instances instead of specified list - // of “instances”. If the flag is set to true then the instances may - // not be specified in the request. - AllInstances bool `json:"allInstances,omitempty"` - - // Instances: The list of URLs of one or more instances for which you - // want to apply updates. Each URL can be a full URL or a partial URL, - // such as zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` - - // MinimalAction: The minimal action that you want to perform on each - // instance during the update: - REPLACE: At minimum, delete the - // instance and create it again. - RESTART: Stop the instance and start - // it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt - // the instance at all. 
By default, the minimum action is NONE. If your - // update requires a more disruptive action than you set with this flag, - // the necessary action is performed to execute the update. - // - // Possible values: - // "NONE" - Do not perform any action. - // "REFRESH" - Updates applied in runtime, instances will not be - // disrupted. - // "REPLACE" - Old instances will be deleted. New instances will be - // created from the target template. - // "RESTART" - Every instance will be restarted. - MinimalAction string `json:"minimalAction,omitempty"` - - // MostDisruptiveAllowedAction: The most disruptive action that you want - // to perform on each instance during the update: - REPLACE: Delete the - // instance and create it again. - RESTART: Stop the instance and start - // it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt - // the instance at all. By default, the most disruptive allowed action - // is REPLACE. If your update requires a more disruptive action than you - // set with this flag, the update request will fail. - // - // Possible values: - // "NONE" - Do not perform any action. - // "REFRESH" - Updates applied in runtime, instances will not be - // disrupted. - // "REPLACE" - Old instances will be deleted. New instances will be - // created from the target template. - // "RESTART" - Every instance will be restarted. - MostDisruptiveAllowedAction string `json:"mostDisruptiveAllowedAction,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AllInstances") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AllInstances") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersApplyUpdatesRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersApplyUpdatesRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionInstanceGroupManagersCreateInstancesRequest: -// RegionInstanceGroupManagers.createInstances -type RegionInstanceGroupManagersCreateInstancesRequest struct { - // Instances: [Required] List of specifications of per-instance configs. - Instances []*PerInstanceConfig `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersCreateInstancesRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupManagersDeleteInstancesRequest struct { - // Instances: The URLs of one or more instances to delete. This can be a - // full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` - - // SkipInstancesOnValidationError: Specifies whether the request should - // proceed despite the inclusion of instances that are not members of - // the group or that are already in the process of being deleted or - // abandoned. If this field is set to `false` and such an instance is - // specified in the request, the operation fails. The operation always - // fails if the request contains a malformed instance URL or a reference - // to an instance that exists in a zone or region other than the group's - // zone or region. - SkipInstancesOnValidationError bool `json:"skipInstancesOnValidationError,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersDeleteInstancesRequest +func (s *PublicDelegatedPrefixAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersListErrorsResponse struct { - // Items: [Output Only] The list of errors of the managed instance - // group. - Items []*InstanceManagedByIgmError `json:"items,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. 
- NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the +type PublicDelegatedPrefixList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + Id string `json:"id,omitempty"` -func (s *RegionInstanceGroupManagersListErrorsResponse) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersListErrorsResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Items: A list of PublicDelegatedPrefix resources. + Items []*PublicDelegatedPrefix `json:"items,omitempty"` -type RegionInstanceGroupManagersListInstanceConfigsResp struct { - // Items: [Output Only] The list of PerInstanceConfig. - Items []*PerInstanceConfig `json:"items,omitempty"` + // Kind: [Output Only] Type of the resource. Always + // compute#publicDelegatedPrefixList for public delegated prefixes. + Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next // page of results for list requests. If the number of results is larger @@ -34822,14 +35828,17 @@ type RegionInstanceGroupManagersListInstanceConfigsResp struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *RegionInstanceGroupManagersListInstanceConfigsRespWarning `json:"warning,omitempty"` + Warning *PublicDelegatedPrefixListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Items") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -34837,7 +35846,7 @@ type RegionInstanceGroupManagersListInstanceConfigsResp struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Items") to include in API + // NullFields is a list of field names (e.g. "Id") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -34846,15 +35855,15 @@ type RegionInstanceGroupManagersListInstanceConfigsResp struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersListInstanceConfigsResp) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersListInstanceConfigsResp +func (s *PublicDelegatedPrefixList) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupManagersListInstanceConfigsRespWarning: [Output -// Only] Informational warning message. -type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { +// PublicDelegatedPrefixListWarning: [Output Only] Informational warning +// message. +type PublicDelegatedPrefixListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -34876,6 +35885,9 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -34883,6 +35895,9 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -34916,7 +35931,7 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*RegionInstanceGroupManagersListInstanceConfigsRespWarningData `json:"data,omitempty"` + Data []*PublicDelegatedPrefixListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -34939,13 +35954,13 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersListInstanceConfigsRespWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersListInstanceConfigsRespWarning +func (s *PublicDelegatedPrefixListWarning) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersListInstanceConfigsRespWarningData struct { +type PublicDelegatedPrefixListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -34976,121 +35991,46 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarningData struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersListInstanceConfigsRespWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersListInstanceConfigsRespWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupManagersListInstancesResponse struct { - // ManagedInstances: A list of managed instances. - ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ManagedInstances") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ManagedInstances") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersListInstancesResponse +func (s *PublicDelegatedPrefixListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersRecreateRequest struct { - // Instances: The URLs of one or more instances to recreate. This can be - // a full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersRecreateRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +// PublicDelegatedPrefixPublicDelegatedSubPrefix: Represents a sub +// PublicDelegatedPrefix. +type PublicDelegatedPrefixPublicDelegatedSubPrefix struct { + // DelegateeProject: Name of the project scoping this + // PublicDelegatedSubPrefix. + DelegateeProject string `json:"delegateeProject,omitempty"` -type RegionInstanceGroupManagersSetTargetPoolsRequest struct { - // Fingerprint: Fingerprint of the target pools information, which is a - // hash of the contents. This field is used for optimistic locking when - // you update the target pool entries. This field is optional. - Fingerprint string `json:"fingerprint,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // TargetPools: The URL of all TargetPool resources to which instances - // in the instanceGroup field are added. The target pools automatically - // apply to all of the instances in the managed instance group. - TargetPools []string `json:"targetPools,omitempty"` + // IpCidrRange: The IPv4 address range, in CIDR format, represented by + // this sub public delegated prefix. + IpCidrRange string `json:"ipCidrRange,omitempty"` - // ForceSendFields is a list of field names (e.g. "Fingerprint") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // IsAddress: Whether the sub prefix is delegated to create Address + // resources in the delegatee project. + IsAddress bool `json:"isAddress,omitempty"` - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // Name: The name of the sub public delegated prefix. + Name string `json:"name,omitempty"` -func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersSetTargetPoolsRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Region: [Output Only] The region of the sub public delegated prefix + // if it is regional. If absent, the sub prefix is global. + Region string `json:"region,omitempty"` -type RegionInstanceGroupManagersSetTemplateRequest struct { - // InstanceTemplate: URL of the InstanceTemplate resource from which all - // new instances will be created. 
- InstanceTemplate string `json:"instanceTemplate,omitempty"` + // Status: [Output Only] The status of the sub public delegated prefix. + // + // Possible values: + // "ACTIVE" + // "INACTIVE" + Status string `json:"status,omitempty"` - // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to + // ForceSendFields is a list of field names (e.g. "DelegateeProject") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -35098,7 +36038,7 @@ type RegionInstanceGroupManagersSetTemplateRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "InstanceTemplate") to + // NullFields is a list of field names (e.g. "DelegateeProject") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -35108,67 +36048,50 @@ type RegionInstanceGroupManagersSetTemplateRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersSetTemplateRequest +func (s *PublicDelegatedPrefixPublicDelegatedSubPrefix) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixPublicDelegatedSubPrefix raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupsListInstances struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of InstanceWithNamedPorts resources. - Items []*InstanceWithNamedPorts `json:"items,omitempty"` - - // Kind: The resource type. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *RegionInstanceGroupsListInstancesWarning `json:"warning,omitempty"` +type PublicDelegatedPrefixesScopedList struct { + // PublicDelegatedPrefixes: [Output Only] A list of + // PublicDelegatedPrefixes contained in this scope. + PublicDelegatedPrefixes []*PublicDelegatedPrefix `json:"publicDelegatedPrefixes,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: [Output Only] Informational warning which replaces the list + // of public delegated prefixes when the list is empty. + Warning *PublicDelegatedPrefixesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. 
However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "PublicDelegatedPrefixes") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "PublicDelegatedPrefixes") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupsListInstances +func (s *PublicDelegatedPrefixesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupsListInstancesWarning: [Output Only] Informational -// warning message. -type RegionInstanceGroupsListInstancesWarning struct { +// PublicDelegatedPrefixesScopedListWarning: [Output Only] Informational +// warning which replaces the list of public delegated prefixes when the +// list is empty. +type PublicDelegatedPrefixesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -35190,6 +36113,9 @@ type RegionInstanceGroupsListInstancesWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -35197,6 +36123,9 @@ type RegionInstanceGroupsListInstancesWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. 
// "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -35230,7 +36159,7 @@ type RegionInstanceGroupsListInstancesWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*RegionInstanceGroupsListInstancesWarningData `json:"data,omitempty"` + Data []*PublicDelegatedPrefixesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -35253,13 +36182,13 @@ type RegionInstanceGroupsListInstancesWarning struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupsListInstancesWarning +func (s *PublicDelegatedPrefixesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupsListInstancesWarningData struct { +type PublicDelegatedPrefixesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -35290,247 +36219,172 @@ type RegionInstanceGroupsListInstancesWarningData struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupsListInstancesWarningData +func (s *PublicDelegatedPrefixesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod PublicDelegatedPrefixesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupsListInstancesRequest struct { - // InstanceState: Instances in which state should be returned. Valid - // options are: 'ALL', 'RUNNING'. By default, it lists all instances. +// Quota: A quotas entry. +type Quota struct { + // Limit: [Output Only] Quota limit for this metric. + Limit float64 `json:"limit,omitempty"` + + // Metric: [Output Only] Name of the quota metric. // // Possible values: - // "ALL" - Matches any status of the instances, running, non-running - // and others. - // "RUNNING" - Instance is in RUNNING state if it is running. - InstanceState string `json:"instanceState,omitempty"` - - // PortName: Name of port user is interested in. It is optional. If it - // is set, only information about this ports will be returned. If it is - // not set, all the named ports will be returned. Always lists all - // instances. - PortName string `json:"portName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "InstanceState") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InstanceState") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. 
However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupsListInstancesRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupsSetNamedPortsRequest struct { - // Fingerprint: The fingerprint of the named ports information for this - // instance group. Use this optional property to prevent conflicts when - // multiple users change the named ports settings concurrently. Obtain - // the fingerprint with the instanceGroups.get method. Then, include the - // fingerprint in your request to ensure that you do not overwrite - // changes that were applied from another concurrent request. - Fingerprint string `json:"fingerprint,omitempty"` - - // NamedPorts: The list of named ports to set for this instance group. - NamedPorts []*NamedPort `json:"namedPorts,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Fingerprint") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupsSetNamedPortsRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionList: Contains a list of region resources. -type RegionList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Region resources. - Items []*Region `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#regionList for - // lists of regions. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *RegionListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionList) MarshalJSON() ([]byte, error) { - type NoMethod RegionList - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionListWarning: [Output Only] Informational warning message. -type RegionListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. 
- // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*RegionListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *RegionListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionListWarning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // "A2_CPUS" + // "AFFINITY_GROUPS" + // "AUTOSCALERS" + // "BACKEND_BUCKETS" + // "BACKEND_SERVICES" + // "C2D_CPUS" + // "C2_CPUS" + // "C3_CPUS" + // "COMMITMENTS" + // "COMMITTED_A2_CPUS" + // "COMMITTED_C2D_CPUS" + // "COMMITTED_C2_CPUS" + // "COMMITTED_C3_CPUS" + // "COMMITTED_CPUS" + // "COMMITTED_E2_CPUS" + // "COMMITTED_LICENSES" + // "COMMITTED_LOCAL_SSD_TOTAL_GB" + // "COMMITTED_M3_CPUS" + // "COMMITTED_MEMORY_OPTIMIZED_CPUS" + // "COMMITTED_N2A_CPUS" + // "COMMITTED_N2D_CPUS" + // "COMMITTED_N2_CPUS" + // "COMMITTED_NVIDIA_A100_80GB_GPUS" + // "COMMITTED_NVIDIA_A100_GPUS" + // "COMMITTED_NVIDIA_K80_GPUS" + // "COMMITTED_NVIDIA_P100_GPUS" + // "COMMITTED_NVIDIA_P4_GPUS" + // "COMMITTED_NVIDIA_T4_GPUS" + // "COMMITTED_NVIDIA_V100_GPUS" + // "COMMITTED_T2A_CPUS" + // "COMMITTED_T2D_CPUS" + // "CPUS" - Guest CPUs + // "CPUS_ALL_REGIONS" + // "DISKS_TOTAL_GB" + // "E2_CPUS" + // "EXTERNAL_MANAGED_FORWARDING_RULES" + // "EXTERNAL_NETWORK_LB_FORWARDING_RULES" + // "EXTERNAL_PROTOCOL_FORWARDING_RULES" + // "EXTERNAL_VPN_GATEWAYS" + // "FIREWALLS" + // "FORWARDING_RULES" + // "GLOBAL_EXTERNAL_MANAGED_BACKEND_SERVICES" + // "GLOBAL_EXTERNAL_MANAGED_FORWARDING_RULES" + // "GLOBAL_EXTERNAL_PROXY_LB_BACKEND_SERVICES" + // "GLOBAL_INTERNAL_ADDRESSES" + // "GLOBAL_INTERNAL_MANAGED_BACKEND_SERVICES" + // "GLOBAL_INTERNAL_TRAFFIC_DIRECTOR_BACKEND_SERVICES" + // "GPUS_ALL_REGIONS" + // "HEALTH_CHECKS" + // "IMAGES" + // "INSTANCES" + // "INSTANCE_GROUPS" + // "INSTANCE_GROUP_MANAGERS" + // "INSTANCE_TEMPLATES" + // "INTERCONNECTS" + // "INTERCONNECT_ATTACHMENTS_PER_REGION" + // "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS" + // "INTERCONNECT_TOTAL_GBPS" + // "INTERNAL_ADDRESSES" + // "INTERNAL_TRAFFIC_DIRECTOR_FORWARDING_RULES" + // "IN_PLACE_SNAPSHOTS" + // "IN_USE_ADDRESSES" + // "IN_USE_BACKUP_SCHEDULES" + // "IN_USE_SNAPSHOT_SCHEDULES" + // "LOCAL_SSD_TOTAL_GB" + // "M1_CPUS" + // "M2_CPUS" + // "M3_CPUS" + // "MACHINE_IMAGES" + // "N2A_CPUS" + // "N2D_CPUS" + // "N2_CPUS" + // "NETWORKS" + // "NETWORK_ENDPOINT_GROUPS" + // "NETWORK_FIREWALL_POLICIES" + // "NODE_GROUPS" + // "NODE_TEMPLATES" + // "NVIDIA_A100_80GB_GPUS" + // "NVIDIA_A100_GPUS" + // "NVIDIA_K80_GPUS" + // "NVIDIA_P100_GPUS" + // "NVIDIA_P100_VWS_GPUS" + // "NVIDIA_P4_GPUS" + // "NVIDIA_P4_VWS_GPUS" + // "NVIDIA_T4_GPUS" + // "NVIDIA_T4_VWS_GPUS" + // "NVIDIA_V100_GPUS" + // "PACKET_MIRRORINGS" + // "PD_EXTREME_TOTAL_PROVISIONED_IOPS" + // "PREEMPTIBLE_CPUS" + // "PREEMPTIBLE_LOCAL_SSD_GB" + // "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS" + // "PREEMPTIBLE_NVIDIA_A100_GPUS" + // "PREEMPTIBLE_NVIDIA_K80_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_P4_GPUS" + // "PREEMPTIBLE_NVIDIA_P4_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_T4_GPUS" + // "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_V100_GPUS" + // "PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK" + // "PSC_INTERNAL_LB_FORWARDING_RULES" + // "PUBLIC_ADVERTISED_PREFIXES" + // "PUBLIC_DELEGATED_PREFIXES" + // "REGIONAL_AUTOSCALERS" + // "REGIONAL_EXTERNAL_MANAGED_BACKEND_SERVICES" + // "REGIONAL_EXTERNAL_NETWORK_LB_BACKEND_SERVICES" + // "REGIONAL_INSTANCE_GROUP_MANAGERS" + // "REGIONAL_INTERNAL_LB_BACKEND_SERVICES" + // "REGIONAL_INTERNAL_MANAGED_BACKEND_SERVICES" + // "RESERVATIONS" + // 
"RESOURCE_POLICIES" + // "ROUTERS" + // "ROUTES" + // "SECURITY_POLICIES" + // "SECURITY_POLICIES_PER_REGION" + // "SECURITY_POLICY_CEVAL_RULES" + // "SECURITY_POLICY_RULES" + // "SECURITY_POLICY_RULES_PER_REGION" + // "SERVICE_ATTACHMENTS" + // "SNAPSHOTS" - The total number of snapshots allowed for a single + // project. + // "SSD_TOTAL_GB" + // "SSL_CERTIFICATES" + // "STATIC_ADDRESSES" + // "STATIC_BYOIP_ADDRESSES" + // "STATIC_EXTERNAL_IPV6_ADDRESS_RANGES" + // "SUBNETWORKS" + // "T2A_CPUS" + // "T2D_CPUS" + // "TARGET_HTTPS_PROXIES" + // "TARGET_HTTP_PROXIES" + // "TARGET_INSTANCES" + // "TARGET_POOLS" + // "TARGET_SSL_PROXIES" + // "TARGET_TCP_PROXIES" + // "TARGET_VPN_GATEWAYS" + // "URL_MAPS" + // "VPN_GATEWAYS" + // "VPN_TUNNELS" + // "XPN_SERVICE_PROJECTS" + Metric string `json:"metric,omitempty"` -type RegionListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` + // Owner: [Output Only] Owning resource. This is the resource on which + // this quota is applied. + Owner string `json:"owner,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // Usage: [Output Only] Current usage of this metric. + Usage float64 `json:"usage,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "Limit") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -35538,7 +36392,7 @@ type RegionListWarningData struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API + // NullFields is a list of field names (e.g. "Limit") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -35547,64 +36401,45 @@ type RegionListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionListWarningData +func (s *Quota) MarshalJSON() ([]byte, error) { + type NoMethod Quota raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionSetLabelsRequest struct { - // LabelFingerprint: The fingerprint of the previous set of labels for - // this resource, used to detect conflicts. The fingerprint is initially - // generated by Compute Engine and changes after every request to modify - // or update labels. You must always provide an up-to-date fingerprint - // hash in order to update or change labels. Make a get() request to the - // resource to get the latest fingerprint. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: The labels to set for this resource. 
- Labels map[string]string `json:"labels,omitempty"` - - // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "LabelFingerprint") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` +func (s *Quota) UnmarshalJSON(data []byte) error { + type NoMethod Quota + var s1 struct { + Limit gensupport.JSONFloat64 `json:"limit"` + Usage gensupport.JSONFloat64 `json:"usage"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Limit = float64(s1.Limit) + s.Usage = float64(s1.Usage) + return nil } -func (s *RegionSetLabelsRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionSetLabelsRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +// QuotaExceededInfo: Additional details for quota exceeded error for +// resource quota. +type QuotaExceededInfo struct { + // Dimensions: The map holding related quota dimensions. + Dimensions map[string]string `json:"dimensions,omitempty"` -type RegionSetPolicyRequest struct { - // Bindings: Flatten Policy to create a backwacd compatible wire-format. - // Deprecated. Use 'policy' to specify bindings. - Bindings []*Binding `json:"bindings,omitempty"` + // Limit: Current effective quota limit. The limit's unit depends on the + // quota type or metric. + Limit float64 `json:"limit,omitempty"` - // Etag: Flatten Policy to create a backward compatible wire-format. - // Deprecated. Use 'policy' to specify the etag. - Etag string `json:"etag,omitempty"` + // LimitName: The name of the quota limit. + LimitName string `json:"limitName,omitempty"` - // Policy: REQUIRED: The complete policy to be applied to the - // 'resource'. The size of the policy is limited to a few 10s of KB. An - // empty policy is in general a valid policy but certain services (like - // Projects) might reject them. - Policy *Policy `json:"policy,omitempty"` + // MetricName: The Compute Engine quota metric name. + MetricName string `json:"metricName,omitempty"` - // ForceSendFields is a list of field names (e.g. "Bindings") to + // ForceSendFields is a list of field names (e.g. "Dimensions") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -35612,7 +36447,7 @@ type RegionSetPolicyRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bindings") to include in + // NullFields is a list of field names (e.g. "Dimensions") to include in // API requests with the JSON null value. 
By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -35621,47 +36456,43 @@ type RegionSetPolicyRequest struct { NullFields []string `json:"-"` } -func (s *RegionSetPolicyRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionSetPolicyRequest +func (s *QuotaExceededInfo) MarshalJSON() ([]byte, error) { + type NoMethod QuotaExceededInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionTargetHttpsProxiesSetSslCertificatesRequest struct { - // SslCertificates: New set of SslCertificate resources to associate - // with this TargetHttpsProxy resource. Currently exactly one - // SslCertificate resource must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` +func (s *QuotaExceededInfo) UnmarshalJSON(data []byte) error { + type NoMethod QuotaExceededInfo + var s1 struct { + Limit gensupport.JSONFloat64 `json:"limit"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Limit = float64(s1.Limit) + return nil +} - // ForceSendFields is a list of field names (e.g. "SslCertificates") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` +// Reference: Represents a reference to a resource. +type Reference struct { + // Kind: [Output Only] Type of the resource. Always compute#reference + // for references. + Kind string `json:"kind,omitempty"` - // NullFields is a list of field names (e.g. "SslCertificates") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} + // ReferenceType: A description of the reference type with no implied + // semantics. Possible values include: 1. MEMBER_OF + ReferenceType string `json:"referenceType,omitempty"` -func (s *RegionTargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionTargetHttpsProxiesSetSslCertificatesRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Referrer: URL of the resource which refers to the target. + Referrer string `json:"referrer,omitempty"` -type RegionUrlMapsValidateRequest struct { - // Resource: Content of the UrlMap to be validated. - Resource *UrlMap `json:"resource,omitempty"` + // Target: URL of the resource to which this reference points. + Target string `json:"target,omitempty"` - // ForceSendFields is a list of field names (e.g. "Resource") to + // ForceSendFields is a list of field names (e.g. "Kind") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
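The new Quota and QuotaExceededInfo hunks above both add an UnmarshalJSON that routes float64 fields through gensupport.JSONFloat64 before copying them back onto the struct via a NoMethod alias. The pattern does two things: the alias keeps the custom UnmarshalJSON from recursing into itself, and the wrapper type tolerates quota limits that arrive either as JSON numbers or as quoted numeric strings. The standalone sketch below reproduces the same technique without the internal gensupport package; flexFloat, quota, and the sample payload are illustrative stand-ins, not code from the vendored tree.

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

// flexFloat plays the role gensupport.JSONFloat64 plays in the generated
// code: it decodes either a JSON number or a quoted numeric string.
type flexFloat float64

func (f *flexFloat) UnmarshalJSON(data []byte) error {
	s := strings.Trim(string(data), `"`) // accept 24 as well as "24"
	v, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return err
	}
	*f = flexFloat(v)
	return nil
}

// quota is a trimmed stand-in for the generated Quota struct.
type quota struct {
	Limit  float64 `json:"limit,omitempty"`
	Metric string  `json:"metric,omitempty"`
	Usage  float64 `json:"usage,omitempty"`
}

func (q *quota) UnmarshalJSON(data []byte) error {
	// The alias has no UnmarshalJSON of its own, so decoding into it does
	// not recurse back into this method.
	type noMethod quota
	var aux struct {
		Limit flexFloat `json:"limit"`
		Usage flexFloat `json:"usage"`
		*noMethod
	}
	aux.noMethod = (*noMethod)(q)
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	q.Limit = float64(aux.Limit)
	q.Usage = float64(aux.Usage)
	return nil
}

func main() {
	var q quota
	// The limit arrives as a quoted string here; the usage as a plain number.
	if err := json.Unmarshal([]byte(`{"metric":"CPUS","limit":"24","usage":3}`), &q); err != nil {
		panic(err)
	}
	fmt.Println(q.Metric, q.Limit, q.Usage) // CPUS 24 3
}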
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -35669,8 +36500,8 @@ type RegionUrlMapsValidateRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Resource") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -35678,203 +36509,94 @@ type RegionUrlMapsValidateRequest struct { NullFields []string `json:"-"` } -func (s *RegionUrlMapsValidateRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionUrlMapsValidateRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RequestMirrorPolicy: A policy that specifies how requests intended -// for the route's backends are shadowed to a separate mirrored backend -// service. The load balancer doesn't wait for responses from the shadow -// service. Before sending traffic to the shadow service, the host or -// authority header is suffixed with -shadow. -type RequestMirrorPolicy struct { - // BackendService: The full or partial URL to the BackendService - // resource being mirrored to. - BackendService string `json:"backendService,omitempty"` - - // ForceSendFields is a list of field names (e.g. "BackendService") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BackendService") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *RequestMirrorPolicy) MarshalJSON() ([]byte, error) { - type NoMethod RequestMirrorPolicy +func (s *Reference) MarshalJSON() ([]byte, error) { + type NoMethod Reference raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Reservation: Represents a reservation resource. A reservation ensures -// that capacity is held in a specific zone even if the reserved VMs are -// not running. For more information, read Reserving zonal resources. -type Reservation struct { - // Commitment: [Output Only] Full or partial URL to a parent commitment. - // This field displays for reservations that are tied to a commitment. - Commitment string `json:"commitment,omitempty"` - +// Region: Represents a Region resource. A region is a geographical area +// where a resource is located. For more information, read Regions and +// Zones. +type Region struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. + // Deprecated -- [Output Only] The deprecation status associated with + // this region. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] Textual description of the resource. Description string `json:"description,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always compute#reservations - // for reservations. + // Kind: [Output Only] Type of the resource. Always compute#region for + // regions. Kind string `json:"kind,omitempty"` - // Name: The name of the resource, provided by the client when initially - // creating the resource. The resource name must be 1-63 characters - // long, and comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. + // Name: [Output Only] Name of the resource. Name string `json:"name,omitempty"` - // SatisfiesPzs: [Output Only] Reserved for future use. - SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` + // Quotas: [Output Only] Quotas assigned to this region. + Quotas []*Quota `json:"quotas,omitempty"` - // SelfLink: [Output Only] Server-defined fully-qualified URL for this - // resource. + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // ShareSettings: Share-settings for shared-reservation - ShareSettings *ShareSettings `json:"shareSettings,omitempty"` - - // SpecificReservation: Reservation for instances with specific machine - // shapes. - SpecificReservation *AllocationSpecificSKUReservation `json:"specificReservation,omitempty"` - - // SpecificReservationRequired: Indicates whether the reservation can be - // consumed by VMs with affinity for "any" reservation. If the field is - // set, then only VMs that target the reservation by name can consume - // from this reservation. - SpecificReservationRequired bool `json:"specificReservationRequired,omitempty"` - - // Status: [Output Only] The status of the reservation. + // Status: [Output Only] Status of the region, either UP or DOWN. // // Possible values: - // "CREATING" - Resources are being allocated for the reservation. - // "DELETING" - Reservation is currently being deleted. - // "INVALID" - // "READY" - Reservation has allocated all its resources. - // "UPDATING" - Reservation is currently being resized. + // "DOWN" + // "UP" Status string `json:"status,omitempty"` - // Zone: Zone in which the reservation resides. A zone must be provided - // if the reservation is created within a commitment. - Zone string `json:"zone,omitempty"` + // SupportsPzs: [Output Only] Reserved for future use. + SupportsPzs bool `json:"supportsPzs,omitempty"` + + // Zones: [Output Only] A list of zones available in this region, in the + // form of resource URLs. + Zones []string `json:"zones,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. 
"Commitment") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Commitment") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Reservation) MarshalJSON() ([]byte, error) { - type NoMethod Reservation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ReservationAffinity: Specifies the reservations that this instance -// can consume from. -type ReservationAffinity struct { - // ConsumeReservationType: Specifies the type of reservation from which - // this instance can consume resources: ANY_RESERVATION (default), - // SPECIFIC_RESERVATION, or NO_RESERVATION. See Consuming reserved - // instances for examples. - // - // Possible values: - // "ANY_RESERVATION" - Consume any allocation available. - // "NO_RESERVATION" - Do not consume from any allocated capacity. - // "SPECIFIC_RESERVATION" - Must consume from a specific reservation. - // Must specify key value fields for specifying the reservations. - // "UNSPECIFIED" - ConsumeReservationType string `json:"consumeReservationType,omitempty"` - - // Key: Corresponds to the label key of a reservation resource. To - // target a SPECIFIC_RESERVATION by name, specify - // googleapis.com/reservation-name as the key and specify the name of - // your reservation as its value. - Key string `json:"key,omitempty"` - - // Values: Corresponds to the label values of a reservation resource. - // This can be either a name to a reservation in the same project or - // "projects/different-project/reservations/some-reservation-name" to - // target a shared reservation in the same zone but in a different - // project. - Values []string `json:"values,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "ConsumeReservationType") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ConsumeReservationType") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } -func (s *ReservationAffinity) MarshalJSON() ([]byte, error) { - type NoMethod ReservationAffinity +func (s *Region) MarshalJSON() ([]byte, error) { + type NoMethod Region raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ReservationAggregatedList: Contains a list of reservations. -type ReservationAggregatedList struct { +// RegionAutoscalerList: Contains a list of autoscalers. +type RegionAutoscalerList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Allocation resources. - Items map[string]ReservationsScopedList `json:"items,omitempty"` + // Items: A list of Autoscaler resources. + Items []*Autoscaler `json:"items,omitempty"` // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -35890,11 +36612,8 @@ type ReservationAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *ReservationAggregatedListWarning `json:"warning,omitempty"` + Warning *RegionAutoscalerListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -35917,15 +36636,15 @@ type ReservationAggregatedList struct { NullFields []string `json:"-"` } -func (s *ReservationAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod ReservationAggregatedList +func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { + type NoMethod RegionAutoscalerList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ReservationAggregatedListWarning: [Output Only] Informational warning +// RegionAutoscalerListWarning: [Output Only] Informational warning // message. -type ReservationAggregatedListWarning struct { +type RegionAutoscalerListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -35947,6 +36666,9 @@ type ReservationAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -35954,6 +36676,9 @@ type ReservationAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. 
// "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -35987,7 +36712,7 @@ type ReservationAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*ReservationAggregatedListWarningData `json:"data,omitempty"` + Data []*RegionAutoscalerListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -36010,13 +36735,13 @@ type ReservationAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *ReservationAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod ReservationAggregatedListWarning +func (s *RegionAutoscalerListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionAutoscalerListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ReservationAggregatedListWarningData struct { +type RegionAutoscalerListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -36047,22 +36772,22 @@ type ReservationAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *ReservationAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod ReservationAggregatedListWarningData +func (s *RegionAutoscalerListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionAutoscalerListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ReservationList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. +type RegionDiskTypeList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` - // Items: [Output Only] A list of Allocation resources. - Items []*Reservation `json:"items,omitempty"` + // Items: A list of DiskType resources. + Items []*DiskType `json:"items,omitempty"` - // Kind: [Output Only] Type of resource.Always compute#reservationsList - // for listsof reservations + // Kind: [Output Only] Type of resource. Always + // compute#regionDiskTypeList for region disk types. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -36077,7 +36802,7 @@ type ReservationList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *ReservationListWarning `json:"warning,omitempty"` + Warning *RegionDiskTypeListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -36100,14 +36825,15 @@ type ReservationList struct { NullFields []string `json:"-"` } -func (s *ReservationList) MarshalJSON() ([]byte, error) { - type NoMethod ReservationList +func (s *RegionDiskTypeList) MarshalJSON() ([]byte, error) { + type NoMethod RegionDiskTypeList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ReservationListWarning: [Output Only] Informational warning message. -type ReservationListWarning struct { +// RegionDiskTypeListWarning: [Output Only] Informational warning +// message. +type RegionDiskTypeListWarning struct { // Code: [Output Only] A warning code, if applicable. 
For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -36129,6 +36855,9 @@ type ReservationListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -36136,6 +36865,9 @@ type ReservationListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -36169,7 +36901,7 @@ type ReservationListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*ReservationListWarningData `json:"data,omitempty"` + Data []*RegionDiskTypeListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -36192,13 +36924,13 @@ type ReservationListWarning struct { NullFields []string `json:"-"` } -func (s *ReservationListWarning) MarshalJSON() ([]byte, error) { - type NoMethod ReservationListWarning +func (s *RegionDiskTypeListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionDiskTypeListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ReservationListWarningData struct { +type RegionDiskTypeListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -36229,18 +36961,17 @@ type ReservationListWarningData struct { NullFields []string `json:"-"` } -func (s *ReservationListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod ReservationListWarningData +func (s *RegionDiskTypeListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionDiskTypeListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ReservationsResizeRequest struct { - // SpecificSkuCount: Number of allocated resources can be resized with - // minimum = 1 and maximum = 1000. - SpecificSkuCount int64 `json:"specificSkuCount,omitempty,string"` +type RegionDisksAddResourcePoliciesRequest struct { + // ResourcePolicies: Resource policies to be added to this disk. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` - // ForceSendFields is a list of field names (e.g. "SpecificSkuCount") to + // ForceSendFields is a list of field names (e.g. "ResourcePolicies") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -36248,7 +36979,7 @@ type ReservationsResizeRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SpecificSkuCount") to + // NullFields is a list of field names (e.g. "ResourcePolicies") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -36258,21 +36989,46 @@ type ReservationsResizeRequest struct { NullFields []string `json:"-"` } -func (s *ReservationsResizeRequest) MarshalJSON() ([]byte, error) { - type NoMethod ReservationsResizeRequest +func (s *RegionDisksAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionDisksAddResourcePoliciesRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ReservationsScopedList struct { - // Reservations: A list of reservations contained in this scope. - Reservations []*Reservation `json:"reservations,omitempty"` +type RegionDisksRemoveResourcePoliciesRequest struct { + // ResourcePolicies: Resource policies to be removed from this disk. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` - // Warning: Informational warning which replaces the list of - // reservations when the list is empty. - Warning *ReservationsScopedListWarning `json:"warning,omitempty"` + // ForceSendFields is a list of field names (e.g. "ResourcePolicies") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // ForceSendFields is a list of field names (e.g. "Reservations") to + // NullFields is a list of field names (e.g. "ResourcePolicies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RegionDisksRemoveResourcePoliciesRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionDisksRemoveResourcePoliciesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionDisksResizeRequest struct { + // SizeGb: The new size of the regional persistent disk, which is + // specified in GB. + SizeGb int64 `json:"sizeGb,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "SizeGb") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -36280,24 +37036,77 @@ type ReservationsScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Reservations") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "SizeGb") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ReservationsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod ReservationsScopedList +func (s *RegionDisksResizeRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionDisksResizeRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ReservationsScopedListWarning: Informational warning which replaces -// the list of reservations when the list is empty. -type ReservationsScopedListWarning struct { +// RegionInstanceGroupList: Contains a list of InstanceGroup resources. +type RegionInstanceGroupList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of InstanceGroup resources. + Items []*InstanceGroup `json:"items,omitempty"` + + // Kind: The resource type. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RegionInstanceGroupListWarning: [Output Only] Informational warning +// message. +type RegionInstanceGroupListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -36319,6 +37128,9 @@ type ReservationsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -36326,6 +37138,9 @@ type ReservationsScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -36359,7 +37174,7 @@ type ReservationsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*ReservationsScopedListWarningData `json:"data,omitempty"` + Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -36382,13 +37197,13 @@ type ReservationsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *ReservationsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod ReservationsScopedListWarning +func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ReservationsScopedListWarningData struct { +type RegionInstanceGroupListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -36419,66 +37234,20 @@ type ReservationsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *ReservationsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod ReservationsScopedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ResourceCommitment: Commitment for a particular resource (a -// Commitment is composed of one or more of these). -type ResourceCommitment struct { - // AcceleratorType: Name of the accelerator type resource. Applicable - // only when the type is ACCELERATOR. 
- AcceleratorType string `json:"acceleratorType,omitempty"` - - // Amount: The amount of the resource purchased (in a type-dependent - // unit, such as bytes). For vCPUs, this can just be an integer. For - // memory, this must be provided in MB. Memory must be a multiple of 256 - // MB, with up to 6.5GB of memory per every vCPU. - Amount int64 `json:"amount,omitempty,string"` - - // Type: Type of resource for which this commitment applies. Possible - // values are VCPU and MEMORY - // - // Possible values: - // "ACCELERATOR" - // "LOCAL_SSD" - // "MEMORY" - // "UNSPECIFIED" - // "VCPU" - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AcceleratorType") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AcceleratorType") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ResourceCommitment) MarshalJSON() ([]byte, error) { - type NoMethod ResourceCommitment +func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ResourceGroupReference struct { - // Group: A URI referencing one of the instance groups or network - // endpoint groups listed in the backend service. - Group string `json:"group,omitempty"` +// RegionInstanceGroupManagerDeleteInstanceConfigReq: +// RegionInstanceGroupManagers.deletePerInstanceConfigs +type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { + // Names: The list of instance names for which we want to delete + // per-instance configs on this managed instance group. + Names []string `json:"names,omitempty"` - // ForceSendFields is a list of field names (e.g. "Group") to + // ForceSendFields is a list of field names (e.g. "Names") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -36486,7 +37255,7 @@ type ResourceGroupReference struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Group") to include in API + // NullFields is a list of field names (e.g. "Names") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
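Nearly every struct touched in these hunks carries the same ForceSendFields/NullFields boilerplate, and its MarshalJSON simply forwards those two slices to gensupport.MarshalJSON. Read together, the comments spell out the contract: omitempty normally drops zero values, a field named in ForceSendFields is sent even when empty, and a field named in NullFields is sent as an explicit JSON null. The sketch below imitates that contract with a small standalone helper so the effect is visible without the internal gensupport package; setLabelsRequest, marshalWithOverrides, and the sample fingerprint are illustrative only and do not claim to match the library's implementation.

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

// setLabelsRequest is a trimmed stand-in for the generated request types,
// keeping only the parts relevant to ForceSendFields and NullFields.
type setLabelsRequest struct {
	LabelFingerprint string            `json:"labelFingerprint,omitempty"`
	Labels           map[string]string `json:"labels,omitempty"`

	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}

// marshalWithOverrides mimics the behaviour the generated comments describe:
// start from the usual omitempty encoding, then add back any empty fields
// listed in force and emit explicit nulls for fields listed in null.
func marshalWithOverrides(v interface{}, force, null []string) ([]byte, error) {
	base, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	m := map[string]interface{}{}
	if err := json.Unmarshal(base, &m); err != nil {
		return nil, err
	}
	rv := reflect.ValueOf(v).Elem()
	jsonName := func(goName string) string {
		f, _ := rv.Type().FieldByName(goName)
		return strings.Split(f.Tag.Get("json"), ",")[0]
	}
	for _, name := range force {
		m[jsonName(name)] = rv.FieldByName(name).Interface() // sent even if empty
	}
	for _, name := range null {
		m[jsonName(name)] = nil // explicit JSON null
	}
	return json.Marshal(m)
}

func main() {
	req := &setLabelsRequest{
		LabelFingerprint: "42WmSpB8rSM=", // made-up fingerprint
		Labels:           map[string]string{},
	}

	plain, _ := json.Marshal(req)
	fmt.Println(string(plain)) // {"labelFingerprint":"42WmSpB8rSM="} (empty map dropped by omitempty)

	req.ForceSendFields = []string{"Labels"}
	forced, _ := marshalWithOverrides(req, req.ForceSendFields, req.NullFields)
	fmt.Println(string(forced)) // {"labelFingerprint":"42WmSpB8rSM=","labels":{}} (empty map sent, clearing labels)
}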
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -36495,21 +37264,46 @@ type ResourceGroupReference struct { NullFields []string `json:"-"` } -func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { - type NoMethod ResourceGroupReference +func (s *RegionInstanceGroupManagerDeleteInstanceConfigReq) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagerDeleteInstanceConfigReq raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ResourcePoliciesScopedList struct { - // ResourcePolicies: A list of resourcePolicies contained in this scope. - ResourcePolicies []*ResourcePolicy `json:"resourcePolicies,omitempty"` +// RegionInstanceGroupManagerList: Contains a list of managed instance +// groups. +type RegionInstanceGroupManagerList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Warning: Informational warning which replaces the list of - // resourcePolicies when the list is empty. - Warning *ResourcePoliciesScopedListWarning `json:"warning,omitempty"` + // Items: A list of InstanceGroupManager resources. + Items []*InstanceGroupManager `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "ResourcePolicies") to + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupManagerList for a list of managed instance + // groups that exist in th regional scope. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -36517,25 +37311,24 @@ type ResourcePoliciesScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ResourcePolicies") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ResourcePoliciesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePoliciesScopedList +func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagerList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourcePoliciesScopedListWarning: Informational warning which -// replaces the list of resourcePolicies when the list is empty. -type ResourcePoliciesScopedListWarning struct { +// RegionInstanceGroupManagerListWarning: [Output Only] Informational +// warning message. +type RegionInstanceGroupManagerListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -36557,6 +37350,9 @@ type ResourcePoliciesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -36564,6 +37360,9 @@ type ResourcePoliciesScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -36597,7 +37396,7 @@ type ResourcePoliciesScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*ResourcePoliciesScopedListWarningData `json:"data,omitempty"` + Data []*RegionInstanceGroupManagerListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -36620,13 +37419,13 @@ type ResourcePoliciesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *ResourcePoliciesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePoliciesScopedListWarning +func (s *RegionInstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagerListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ResourcePoliciesScopedListWarningData struct { +type RegionInstanceGroupManagerListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -36657,114 +37456,245 @@ type ResourcePoliciesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *ResourcePoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePoliciesScopedListWarningData +func (s *RegionInstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagerListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourcePolicy: Represents a Resource Policy resource. You can use -// resource policies to schedule actions for some Compute Engine -// resources. For example, you can use them to schedule persistent disk -// snapshots. -type ResourcePolicy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` +// RegionInstanceGroupManagerPatchInstanceConfigReq: +// RegionInstanceGroupManagers.patchPerInstanceConfigs +type RegionInstanceGroupManagerPatchInstanceConfigReq struct { + // PerInstanceConfigs: The list of per-instance configurations to insert + // or patch on this managed instance group. + PerInstanceConfigs []*PerInstanceConfig `json:"perInstanceConfigs,omitempty"` - Description string `json:"description,omitempty"` + // ForceSendFields is a list of field names (e.g. "PerInstanceConfigs") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // GroupPlacementPolicy: Resource policy for instances for placement - // configuration. - GroupPlacementPolicy *ResourcePolicyGroupPlacementPolicy `json:"groupPlacementPolicy,omitempty"` + // NullFields is a list of field names (e.g. "PerInstanceConfigs") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` +func (s *RegionInstanceGroupManagerPatchInstanceConfigReq) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagerPatchInstanceConfigReq + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // InstanceSchedulePolicy: Resource policy for scheduling instance - // operations. - InstanceSchedulePolicy *ResourcePolicyInstanceSchedulePolicy `json:"instanceSchedulePolicy,omitempty"` +// RegionInstanceGroupManagerUpdateInstanceConfigReq: +// RegionInstanceGroupManagers.updatePerInstanceConfigs +type RegionInstanceGroupManagerUpdateInstanceConfigReq struct { + // PerInstanceConfigs: The list of per-instance configurations to insert + // or patch on this managed instance group. 
+ PerInstanceConfigs []*PerInstanceConfig `json:"perInstanceConfigs,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#resource_policies for resource policies. - Kind string `json:"kind,omitempty"` + // ForceSendFields is a list of field names (e.g. "PerInstanceConfigs") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Name: The name of the resource, provided by the client when initially - // creating the resource. The resource name must be 1-63 characters - // long, and comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` + // NullFields is a list of field names (e.g. "PerInstanceConfigs") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - Region string `json:"region,omitempty"` +func (s *RegionInstanceGroupManagerUpdateInstanceConfigReq) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagerUpdateInstanceConfigReq + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ResourceStatus: [Output Only] The system status of the resource - // policy. - ResourceStatus *ResourcePolicyResourceStatus `json:"resourceStatus,omitempty"` +type RegionInstanceGroupManagersAbandonInstancesRequest struct { + // Instances: The URLs of one or more instances to abandon. This can be + // a full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` - // SelfLink: [Output Only] Server-defined fully-qualified URL for this - // resource. - SelfLink string `json:"selfLink,omitempty"` + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // SnapshotSchedulePolicy: Resource policy for persistent disks for - // creating snapshots. - SnapshotSchedulePolicy *ResourcePolicySnapshotSchedulePolicy `json:"snapshotSchedulePolicy,omitempty"` + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // Status: [Output Only] The status of resource policy creation. +func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersAbandonInstancesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RegionInstanceGroupManagersApplyUpdatesRequest: +// RegionInstanceGroupManagers.applyUpdatesToInstances +type RegionInstanceGroupManagersApplyUpdatesRequest struct { + // AllInstances: Flag to update all instances instead of specified list + // of “instances”. If the flag is set to true then the instances may + // not be specified in the request. + AllInstances bool `json:"allInstances,omitempty"` + + // Instances: The list of URLs of one or more instances for which you + // want to apply updates. Each URL can be a full URL or a partial URL, + // such as zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` + + // MinimalAction: The minimal action that you want to perform on each + // instance during the update: - REPLACE: At minimum, delete the + // instance and create it again. - RESTART: Stop the instance and start + // it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt + // the instance at all. By default, the minimum action is NONE. If your + // update requires a more disruptive action than you set with this flag, + // the necessary action is performed to execute the update. // // Possible values: - // "CREATING" - Resource policy is being created. - // "DELETING" - Resource policy is being deleted. - // "EXPIRED" - Resource policy is expired and will not run again. - // "INVALID" - // "READY" - Resource policy is ready to be used. - Status string `json:"status,omitempty"` + // "NONE" - Do not perform any action. + // "REFRESH" - Updates applied in runtime, instances will not be + // disrupted. + // "REPLACE" - Old instances will be deleted. New instances will be + // created from the target template. + // "RESTART" - Every instance will be restarted. + MinimalAction string `json:"minimalAction,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // MostDisruptiveAllowedAction: The most disruptive action that you want + // to perform on each instance during the update: - REPLACE: Delete the + // instance and create it again. - RESTART: Stop the instance and start + // it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt + // the instance at all. By default, the most disruptive allowed action + // is REPLACE. If your update requires a more disruptive action than you + // set with this flag, the update request will fail. + // + // Possible values: + // "NONE" - Do not perform any action. + // "REFRESH" - Updates applied in runtime, instances will not be + // disrupted. + // "REPLACE" - Old instances will be deleted. New instances will be + // created from the target template. + // "RESTART" - Every instance will be restarted. + MostDisruptiveAllowedAction string `json:"mostDisruptiveAllowedAction,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. 
"AllInstances") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "AllInstances") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ResourcePolicy) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicy +func (s *RegionInstanceGroupManagersApplyUpdatesRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersApplyUpdatesRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourcePolicyAggregatedList: Contains a list of resourcePolicies. -type ResourcePolicyAggregatedList struct { - Etag string `json:"etag,omitempty"` +// RegionInstanceGroupManagersCreateInstancesRequest: +// RegionInstanceGroupManagers.createInstances +type RegionInstanceGroupManagersCreateInstancesRequest struct { + // Instances: [Required] List of specifications of per-instance configs. + Instances []*PerInstanceConfig `json:"instances,omitempty"` - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Items: A list of ResourcePolicy resources. - Items map[string]ResourcePoliciesScopedList `json:"items,omitempty"` + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // Kind: Type of resource. 
- Kind string `json:"kind,omitempty"` +func (s *RegionInstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersCreateInstancesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The URLs of one or more instances to delete. This can be a + // full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` + + // SkipInstancesOnValidationError: Specifies whether the request should + // proceed despite the inclusion of instances that are not members of + // the group or that are already in the process of being deleted or + // abandoned. If this field is set to `false` and such an instance is + // specified in the request, the operation fails. The operation always + // fails if the request contains a malformed instance URL or a reference + // to an instance that exists in a zone or region other than the group's + // zone or region. + SkipInstancesOnValidationError bool `json:"skipInstancesOnValidationError,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersDeleteInstancesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagersListErrorsResponse struct { + // Items: [Output Only] The list of errors of the managed instance + // group. + Items []*InstanceManagedByIgmError `json:"items,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next // page of results for list requests. If the number of results is larger @@ -36774,20 +37704,53 @@ type ResourcePolicyAggregatedList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagersListErrorsResponse) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersListErrorsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagersListInstanceConfigsResp struct { + // Items: [Output Only] The list of PerInstanceConfig. + Items []*PerInstanceConfig `json:"items,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *ResourcePolicyAggregatedListWarning `json:"warning,omitempty"` + Warning *RegionInstanceGroupManagersListInstanceConfigsRespWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Etag") to + // ForceSendFields is a list of field names (e.g. "Items") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -36795,7 +37758,7 @@ type ResourcePolicyAggregatedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Etag") to include in API + // NullFields is a list of field names (e.g. "Items") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -36804,15 +37767,15 @@ type ResourcePolicyAggregatedList struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyAggregatedList +func (s *RegionInstanceGroupManagersListInstanceConfigsResp) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersListInstanceConfigsResp raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourcePolicyAggregatedListWarning: [Output Only] Informational -// warning message. -type ResourcePolicyAggregatedListWarning struct { +// RegionInstanceGroupManagersListInstanceConfigsRespWarning: [Output +// Only] Informational warning message. 
+type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -36834,6 +37797,9 @@ type ResourcePolicyAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -36841,6 +37807,9 @@ type ResourcePolicyAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -36874,7 +37843,7 @@ type ResourcePolicyAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*ResourcePolicyAggregatedListWarningData `json:"data,omitempty"` + Data []*RegionInstanceGroupManagersListInstanceConfigsRespWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -36897,13 +37866,13 @@ type ResourcePolicyAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyAggregatedListWarning +func (s *RegionInstanceGroupManagersListInstanceConfigsRespWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersListInstanceConfigsRespWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ResourcePolicyAggregatedListWarningData struct { +type RegionInstanceGroupManagersListInstanceConfigsRespWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -36934,30 +37903,29 @@ type ResourcePolicyAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyAggregatedListWarningData +func (s *RegionInstanceGroupManagersListInstanceConfigsRespWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersListInstanceConfigsRespWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourcePolicyDailyCycle: Time window specified for daily operations. -type ResourcePolicyDailyCycle struct { - // DaysInCycle: Defines a schedule with units measured in months. The - // value determines how many months pass between the start of each - // cycle. 
- DaysInCycle int64 `json:"daysInCycle,omitempty"` +type RegionInstanceGroupManagersListInstancesResponse struct { + // ManagedInstances: A list of managed instances. + ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` - // Duration: [Output only] A predetermined duration for the window, - // automatically chosen to be the smallest possible in the given - // scenario. - Duration string `json:"duration,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // StartTime: Start time of the window. This must be in UTC format that - // resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For - // example, both 13:00-5 and 08:00 are valid. - StartTime string `json:"startTime,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "DaysInCycle") to + // ForceSendFields is a list of field names (e.g. "ManagedInstances") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -36965,81 +37933,29 @@ type ResourcePolicyDailyCycle struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DaysInCycle") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ResourcePolicyDailyCycle) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyDailyCycle - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ResourcePolicyGroupPlacementPolicy: A GroupPlacementPolicy specifies -// resource placement configuration. It specifies the failure bucket -// separation as well as network locality -type ResourcePolicyGroupPlacementPolicy struct { - // AvailabilityDomainCount: The number of availability domains instances - // will be spread across. If two instances are in different availability - // domain, they will not be put in the same low latency network - AvailabilityDomainCount int64 `json:"availabilityDomainCount,omitempty"` - - // Collocation: Specifies network collocation - // - // Possible values: - // "COLLOCATED" - // "UNSPECIFIED_COLLOCATION" - Collocation string `json:"collocation,omitempty"` - - // VmCount: Number of vms in this placement group - VmCount int64 `json:"vmCount,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "AvailabilityDomainCount") to unconditionally include in API - // requests. By default, fields with empty or default values are omitted - // from API requests. 
However, any non-pointer, non-interface field - // appearing in ForceSendFields will be sent to the server regardless of - // whether the field is empty or not. This may be used to include empty - // fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AvailabilityDomainCount") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the + // NullFields is a list of field names (e.g. "ManagedInstances") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } -func (s *ResourcePolicyGroupPlacementPolicy) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyGroupPlacementPolicy +func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersListInstancesResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourcePolicyHourlyCycle: Time window specified for hourly -// operations. -type ResourcePolicyHourlyCycle struct { - // Duration: [Output only] Duration of the time window, automatically - // chosen to be smallest possible in the given scenario. - Duration string `json:"duration,omitempty"` - - // HoursInCycle: Defines a schedule with units measured in hours. The - // value determines how many hours pass between the start of each cycle. - HoursInCycle int64 `json:"hoursInCycle,omitempty"` - - // StartTime: Time within the window to start the operations. It must be - // in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. - StartTime string `json:"startTime,omitempty"` +type RegionInstanceGroupManagersRecreateRequest struct { + // Instances: The URLs of one or more instances to recreate. This can be + // a full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` - // ForceSendFields is a list of field names (e.g. "Duration") to + // ForceSendFields is a list of field names (e.g. "Instances") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -37047,7 +37963,7 @@ type ResourcePolicyHourlyCycle struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Duration") to include in + // NullFields is a list of field names (e.g. "Instances") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -37056,36 +37972,24 @@ type ResourcePolicyHourlyCycle struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyHourlyCycle) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyHourlyCycle +func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersRecreateRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourcePolicyInstanceSchedulePolicy: An InstanceSchedulePolicy -// specifies when and how frequent certain operations are performed on -// the instance. -type ResourcePolicyInstanceSchedulePolicy struct { - // ExpirationTime: The expiration time of the schedule. The timestamp is - // an RFC3339 string. - ExpirationTime string `json:"expirationTime,omitempty"` - - // StartTime: The start time of the schedule. The timestamp is an - // RFC3339 string. - StartTime string `json:"startTime,omitempty"` - - // TimeZone: Specifies the time zone to be used in interpreting - // Schedule.schedule. The value of this field must be a time zone name - // from the tz database: http://en.wikipedia.org/wiki/Tz_database. - TimeZone string `json:"timeZone,omitempty"` - - // VmStartSchedule: Specifies the schedule for starting instances. - VmStartSchedule *ResourcePolicyInstanceSchedulePolicySchedule `json:"vmStartSchedule,omitempty"` +type RegionInstanceGroupManagersSetTargetPoolsRequest struct { + // Fingerprint: Fingerprint of the target pools information, which is a + // hash of the contents. This field is used for optimistic locking when + // you update the target pool entries. This field is optional. + Fingerprint string `json:"fingerprint,omitempty"` - // VmStopSchedule: Specifies the schedule for stopping instances. - VmStopSchedule *ResourcePolicyInstanceSchedulePolicySchedule `json:"vmStopSchedule,omitempty"` + // TargetPools: The URL of all TargetPool resources to which instances + // in the instanceGroup field are added. The target pools automatically + // apply to all of the instances in the managed instance group. + TargetPools []string `json:"targetPools,omitempty"` - // ForceSendFields is a list of field names (e.g. "ExpirationTime") to + // ForceSendFields is a list of field names (e.g. "Fingerprint") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -37093,30 +37997,27 @@ type ResourcePolicyInstanceSchedulePolicy struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ExpirationTime") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ResourcePolicyInstanceSchedulePolicy) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyInstanceSchedulePolicy +func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersSetTargetPoolsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourcePolicyInstanceSchedulePolicySchedule: Schedule for an -// instance operation. -type ResourcePolicyInstanceSchedulePolicySchedule struct { - // Schedule: Specifies the frequency for the operation, using the - // unix-cron format. - Schedule string `json:"schedule,omitempty"` +type RegionInstanceGroupManagersSetTemplateRequest struct { + // InstanceTemplate: URL of the InstanceTemplate resource from which all + // new instances will be created. + InstanceTemplate string `json:"instanceTemplate,omitempty"` - // ForceSendFields is a list of field names (e.g. "Schedule") to + // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -37124,33 +38025,31 @@ type ResourcePolicyInstanceSchedulePolicySchedule struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Schedule") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "InstanceTemplate") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *ResourcePolicyInstanceSchedulePolicySchedule) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyInstanceSchedulePolicySchedule +func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersSetTemplateRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ResourcePolicyList struct { - Etag string `json:"etag,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. +type RegionInstanceGroupsListInstances struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` - // Items: [Output Only] A list of ResourcePolicy resources. - Items []*ResourcePolicy `json:"items,omitempty"` + // Items: A list of InstanceWithNamedPorts resources. 
+ Items []*InstanceWithNamedPorts `json:"items,omitempty"` - // Kind: [Output Only] Type of resource.Always - // compute#resourcePoliciesList for listsof resourcePolicies + // Kind: The resource type. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -37165,13 +38064,13 @@ type ResourcePolicyList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *ResourcePolicyListWarning `json:"warning,omitempty"` + Warning *RegionInstanceGroupsListInstancesWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Etag") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -37179,7 +38078,7 @@ type ResourcePolicyList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Etag") to include in API + // NullFields is a list of field names (e.g. "Id") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -37188,15 +38087,15 @@ type ResourcePolicyList struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyList) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyList +func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupsListInstances raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourcePolicyListWarning: [Output Only] Informational warning -// message. -type ResourcePolicyListWarning struct { +// RegionInstanceGroupsListInstancesWarning: [Output Only] Informational +// warning message. +type RegionInstanceGroupsListInstancesWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -37218,6 +38117,9 @@ type ResourcePolicyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -37225,6 +38127,9 @@ type ResourcePolicyListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. 
// "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -37258,7 +38163,7 @@ type ResourcePolicyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*ResourcePolicyListWarningData `json:"data,omitempty"` + Data []*RegionInstanceGroupsListInstancesWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -37281,13 +38186,13 @@ type ResourcePolicyListWarning struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyListWarning +func (s *RegionInstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupsListInstancesWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ResourcePolicyListWarningData struct { +type RegionInstanceGroupsListInstancesWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -37318,173 +38223,29 @@ type ResourcePolicyListWarningData struct { NullFields []string `json:"-"` } -func (s *ResourcePolicyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ResourcePolicyResourceStatus: Contains output only fields. Use this -// sub-message for all output fields set on ResourcePolicy. The internal -// structure of this "status" field should mimic the structure of -// ResourcePolicy proto specification. -type ResourcePolicyResourceStatus struct { - // InstanceSchedulePolicy: [Output Only] Specifies a set of output - // values reffering to the instance_schedule_policy system status. This - // field should have the same name as corresponding policy field. - InstanceSchedulePolicy *ResourcePolicyResourceStatusInstanceSchedulePolicyStatus `json:"instanceSchedulePolicy,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "InstanceSchedulePolicy") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InstanceSchedulePolicy") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ResourcePolicyResourceStatus) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyResourceStatus - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ResourcePolicyResourceStatusInstanceSchedulePolicyStatus struct { - // LastRunStartTime: [Output Only] The last time the schedule - // successfully ran. 
The timestamp is an RFC3339 string. - LastRunStartTime string `json:"lastRunStartTime,omitempty"` - - // NextRunStartTime: [Output Only] The next time the schedule is planned - // to run. The actual time might be slightly different. The timestamp is - // an RFC3339 string. - NextRunStartTime string `json:"nextRunStartTime,omitempty"` - - // ForceSendFields is a list of field names (e.g. "LastRunStartTime") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "LastRunStartTime") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ResourcePolicyResourceStatusInstanceSchedulePolicyStatus) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyResourceStatusInstanceSchedulePolicyStatus - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ResourcePolicySnapshotSchedulePolicy: A snapshot schedule policy -// specifies when and how frequently snapshots are to be created for the -// target disk. Also specifies how many and how long these scheduled -// snapshots should be retained. -type ResourcePolicySnapshotSchedulePolicy struct { - // RetentionPolicy: Retention policy applied to snapshots created by - // this resource policy. - RetentionPolicy *ResourcePolicySnapshotSchedulePolicyRetentionPolicy `json:"retentionPolicy,omitempty"` - - // Schedule: A Vm Maintenance Policy specifies what kind of - // infrastructure maintenance we are allowed to perform on this VM and - // when. Schedule that is applied to disks covered by this policy. - Schedule *ResourcePolicySnapshotSchedulePolicySchedule `json:"schedule,omitempty"` - - // SnapshotProperties: Properties with which snapshots are created such - // as labels, encryption keys. - SnapshotProperties *ResourcePolicySnapshotSchedulePolicySnapshotProperties `json:"snapshotProperties,omitempty"` - - // ForceSendFields is a list of field names (e.g. "RetentionPolicy") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "RetentionPolicy") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *ResourcePolicySnapshotSchedulePolicy) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicySnapshotSchedulePolicy +func (s *RegionInstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupsListInstancesWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourcePolicySnapshotSchedulePolicyRetentionPolicy: Policy for -// retention of scheduled snapshots. -type ResourcePolicySnapshotSchedulePolicyRetentionPolicy struct { - // MaxRetentionDays: Maximum age of the snapshot that is allowed to be - // kept. - MaxRetentionDays int64 `json:"maxRetentionDays,omitempty"` - - // OnSourceDiskDelete: Specifies the behavior to apply to scheduled - // snapshots when the source disk is deleted. +type RegionInstanceGroupsListInstancesRequest struct { + // InstanceState: Instances in which state should be returned. Valid + // options are: 'ALL', 'RUNNING'. By default, it lists all instances. // // Possible values: - // "APPLY_RETENTION_POLICY" - // "KEEP_AUTO_SNAPSHOTS" - // "UNSPECIFIED_ON_SOURCE_DISK_DELETE" - OnSourceDiskDelete string `json:"onSourceDiskDelete,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MaxRetentionDays") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MaxRetentionDays") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ResourcePolicySnapshotSchedulePolicyRetentionPolicy) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicySnapshotSchedulePolicyRetentionPolicy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ResourcePolicySnapshotSchedulePolicySchedule: A schedule for disks -// where the schedueled operations are performed. -type ResourcePolicySnapshotSchedulePolicySchedule struct { - DailySchedule *ResourcePolicyDailyCycle `json:"dailySchedule,omitempty"` - - HourlySchedule *ResourcePolicyHourlyCycle `json:"hourlySchedule,omitempty"` + // "ALL" - Matches any status of the instances, running, non-running + // and others. + // "RUNNING" - Instance is in RUNNING state if it is running. + InstanceState string `json:"instanceState,omitempty"` - WeeklySchedule *ResourcePolicyWeeklyCycle `json:"weeklySchedule,omitempty"` + // PortName: Name of port user is interested in. It is optional. If it + // is set, only information about this ports will be returned. If it is + // not set, all the named ports will be returned. Always lists all + // instances. + PortName string `json:"portName,omitempty"` - // ForceSendFields is a list of field names (e.g. "DailySchedule") to + // ForceSendFields is a list of field names (e.g. "InstanceState") to // unconditionally include in API requests. 
By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -37492,7 +38253,7 @@ type ResourcePolicySnapshotSchedulePolicySchedule struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DailySchedule") to include + // NullFields is a list of field names (e.g. "InstanceState") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -37501,106 +38262,25 @@ type ResourcePolicySnapshotSchedulePolicySchedule struct { NullFields []string `json:"-"` } -func (s *ResourcePolicySnapshotSchedulePolicySchedule) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicySnapshotSchedulePolicySchedule - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ResourcePolicySnapshotSchedulePolicySnapshotProperties: Specified -// snapshot properties for scheduled snapshots created by this policy. -type ResourcePolicySnapshotSchedulePolicySnapshotProperties struct { - // ChainName: Chain name that the snapshot is created in. - ChainName string `json:"chainName,omitempty"` - - // GuestFlush: Indication to perform a 'guest aware' snapshot. - GuestFlush bool `json:"guestFlush,omitempty"` - - // Labels: Labels to apply to scheduled snapshots. These can be later - // modified by the setLabels method. Label values may be empty. - Labels map[string]string `json:"labels,omitempty"` - - // StorageLocations: Cloud Storage bucket storage location of the auto - // snapshot (regional or multi-regional). - StorageLocations []string `json:"storageLocations,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ChainName") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ChainName") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ResourcePolicySnapshotSchedulePolicySnapshotProperties) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicySnapshotSchedulePolicySnapshotProperties - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ResourcePolicyWeeklyCycle: Time window specified for weekly -// operations. -type ResourcePolicyWeeklyCycle struct { - // DayOfWeeks: Up to 7 intervals/windows, one for each day of the week. - DayOfWeeks []*ResourcePolicyWeeklyCycleDayOfWeek `json:"dayOfWeeks,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DayOfWeeks") to - // unconditionally include in API requests. 
By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DayOfWeeks") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ResourcePolicyWeeklyCycle) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyWeeklyCycle +func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupsListInstancesRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ResourcePolicyWeeklyCycleDayOfWeek struct { - // Day: Defines a schedule that runs on specific days of the week. - // Specify one or more days. The following options are available: - // MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY. - // - // Possible values: - // "FRIDAY" - // "INVALID" - // "MONDAY" - // "SATURDAY" - // "SUNDAY" - // "THURSDAY" - // "TUESDAY" - // "WEDNESDAY" - Day string `json:"day,omitempty"` - - // Duration: [Output only] Duration of the time window, automatically - // chosen to be smallest possible in the given scenario. - Duration string `json:"duration,omitempty"` +type RegionInstanceGroupsSetNamedPortsRequest struct { + // Fingerprint: The fingerprint of the named ports information for this + // instance group. Use this optional property to prevent conflicts when + // multiple users change the named ports settings concurrently. Obtain + // the fingerprint with the instanceGroups.get method. Then, include the + // fingerprint in your request to ensure that you do not overwrite + // changes that were applied from another concurrent request. + Fingerprint string `json:"fingerprint,omitempty"` - // StartTime: Time within the window to start the operations. It must be - // in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. - StartTime string `json:"startTime,omitempty"` + // NamedPorts: The list of named ports to set for this instance group. + NamedPorts []*NamedPort `json:"namedPorts,omitempty"` - // ForceSendFields is a list of field names (e.g. "Day") to + // ForceSendFields is a list of field names (e.g. "Fingerprint") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -37608,135 +38288,53 @@ type ResourcePolicyWeeklyCycleDayOfWeek struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Day") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. 
By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ResourcePolicyWeeklyCycleDayOfWeek) MarshalJSON() ([]byte, error) { - type NoMethod ResourcePolicyWeeklyCycleDayOfWeek +func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupsSetNamedPortsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Route: Represents a Route resource. A route defines a path from VM -// instances in the VPC network to a specific destination. This -// destination can be inside or outside the VPC network. For more -// information, read the Routes overview. -type Route struct { - // AsPaths: [Output Only] AS path. - AsPaths []*RouteAsPath `json:"asPaths,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // field when you create the resource. - Description string `json:"description,omitempty"` - - // DestRange: The destination range of outgoing packets that this route - // applies to. Both IPv4 and IPv6 are supported. - DestRange string `json:"destRange,omitempty"` +// RegionList: Contains a list of region resources. +type RegionList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // Items: A list of Region resources. + Items []*Region `json:"items,omitempty"` - // Kind: [Output Only] Type of this resource. Always compute#routes for - // Route resources. + // Kind: [Output Only] Type of resource. Always compute#regionList for + // lists of regions. Kind string `json:"kind,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first - // character must be a lowercase letter, and all following characters - // (except for the last character) must be a dash, lowercase letter, or - // digit. The last character must be a lowercase letter or digit. - Name string `json:"name,omitempty"` - - // Network: Fully-qualified URL of the network that this route applies - // to. - Network string `json:"network,omitempty"` - - // NextHopGateway: The URL to a gateway that should handle matching - // packets. You can only specify the internet gateway using a full or - // partial valid URL: projects/ - // project/global/gateways/default-internet-gateway - NextHopGateway string `json:"nextHopGateway,omitempty"` - - // NextHopIlb: The URL to a forwarding rule of type - // loadBalancingScheme=INTERNAL that should handle matching packets or - // the IP address of the forwarding Rule. 
For example, the following are - // all valid URLs: - 10.128.0.56 - - // https://www.googleapis.com/compute/v1/projects/project/regions/region - // /forwardingRules/forwardingRule - - // regions/region/forwardingRules/forwardingRule - NextHopIlb string `json:"nextHopIlb,omitempty"` - - // NextHopInstance: The URL to an instance that should handle matching - // packets. You can specify this as a full or partial URL. For example: - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/ - NextHopInstance string `json:"nextHopInstance,omitempty"` - - // NextHopIp: The network IP address of an instance that should handle - // matching packets. Only IPv4 is supported. - NextHopIp string `json:"nextHopIp,omitempty"` - - // NextHopNetwork: The URL of the local network if it should handle - // matching packets. - NextHopNetwork string `json:"nextHopNetwork,omitempty"` - - // NextHopPeering: [Output Only] The network peering name that should - // handle matching packets, which should conform to RFC1035. - NextHopPeering string `json:"nextHopPeering,omitempty"` - - // NextHopVpnTunnel: The URL to a VpnTunnel that should handle matching - // packets. - NextHopVpnTunnel string `json:"nextHopVpnTunnel,omitempty"` - - // Priority: The priority of this route. Priority is used to break ties - // in cases where there is more than one matching route of equal prefix - // length. In cases where multiple routes have equal prefix length, the - // one with the lowest-numbered priority value wins. The default value - // is `1000`. The priority value must be from `0` to `65535`, inclusive. - Priority int64 `json:"priority,omitempty"` - - // RouteType: [Output Only] The type of this route, which can be one of - // the following values: - 'TRANSIT' for a transit route that this - // router learned from another Cloud Router and will readvertise to one - // of its BGP peers - 'SUBNET' for a route from a subnet of the VPC - - // 'BGP' for a route learned from a BGP peer of this router - 'STATIC' - // for a static route - // - // Possible values: - // "BGP" - // "STATIC" - // "SUBNET" - // "TRANSIT" - RouteType string `json:"routeType,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined fully-qualified URL for this - // resource. + // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Tags: A list of instance tags to which this route applies. - Tags []string `json:"tags,omitempty"` - - // Warnings: [Output Only] If potential misconfigurations are detected - // for this route, this field will be populated with warning messages. - Warnings []*RouteWarnings `json:"warnings,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "AsPaths") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. 
By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -37744,8 +38342,8 @@ type Route struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AsPaths") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -37753,13 +38351,14 @@ type Route struct { NullFields []string `json:"-"` } -func (s *Route) MarshalJSON() ([]byte, error) { - type NoMethod Route +func (s *RegionList) MarshalJSON() ([]byte, error) { + type NoMethod RegionList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouteWarnings struct { +// RegionListWarning: [Output Only] Informational warning message. +type RegionListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -37781,6 +38380,9 @@ type RouteWarnings struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -37788,6 +38390,9 @@ type RouteWarnings struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -37821,7 +38426,7 @@ type RouteWarnings struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*RouteWarningsData `json:"data,omitempty"` + Data []*RegionListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -37844,13 +38449,13 @@ type RouteWarnings struct { NullFields []string `json:"-"` } -func (s *RouteWarnings) MarshalJSON() ([]byte, error) { - type NoMethod RouteWarnings +func (s *RegionListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouteWarningsData struct { +type RegionListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -37881,33 +38486,24 @@ type RouteWarningsData struct { NullFields []string `json:"-"` } -func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { - type NoMethod RouteWarningsData +func (s *RegionListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouteAsPath struct { - // AsLists: [Output Only] The AS numbers of the AS Path. - AsLists []int64 `json:"asLists,omitempty"` +type RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse struct { + // FirewallPolicys: Effective firewalls from firewall policy. + FirewallPolicys []*RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy `json:"firewallPolicys,omitempty"` - // PathSegmentType: [Output Only] The type of the AS Path, which can be - // one of the following values: - 'AS_SET': unordered set of autonomous - // systems that the route in has traversed - 'AS_SEQUENCE': ordered set - // of autonomous systems that the route has traversed - - // 'AS_CONFED_SEQUENCE': ordered set of Member Autonomous Systems in the - // local confederation that the route has traversed - 'AS_CONFED_SET': - // unordered set of Member Autonomous Systems in the local confederation - // that the route has traversed - // - // Possible values: - // "AS_CONFED_SEQUENCE" - // "AS_CONFED_SET" - // "AS_SEQUENCE" - // "AS_SET" - PathSegmentType string `json:"pathSegmentType,omitempty"` + // Firewalls: Effective firewalls on the network. + Firewalls []*Firewall `json:"firewalls,omitempty"` - // ForceSendFields is a list of field names (e.g. "AsLists") to + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "FirewallPolicys") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -37915,52 +38511,78 @@ type RouteAsPath struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AsLists") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "FirewallPolicys") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
NullFields []string `json:"-"` } -func (s *RouteAsPath) MarshalJSON() ([]byte, error) { - type NoMethod RouteAsPath +func (s *RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse) MarshalJSON() ([]byte, error) { + type NoMethod RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouteList: Contains a list of Route resources. -type RouteList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +type RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { + // DisplayName: [Output Only] The display name of the firewall policy. + DisplayName string `json:"displayName,omitempty"` - // Items: A list of Route resources. - Items []*Route `json:"items,omitempty"` + // Name: [Output Only] The name of the firewall policy. + Name string `json:"name,omitempty"` - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` + // Rules: The rules that apply to the network. + Rules []*FirewallPolicyRule `json:"rules,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // Type: [Output Only] The type of the firewall policy. Can be one of + // HIERARCHY, NETWORK, NETWORK_REGIONAL. + // + // Possible values: + // "HIERARCHY" + // "NETWORK" + // "NETWORK_REGIONAL" + // "UNSPECIFIED" + Type string `json:"type,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Warning: [Output Only] Informational warning message. - Warning *RouteListWarning `json:"warning,omitempty"` + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +func (s *RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy) MarshalJSON() ([]byte, error) { + type NoMethod RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ForceSendFields is a list of field names (e.g. 
"Id") to +type RegionSetLabelsRequest struct { + // LabelFingerprint: The fingerprint of the previous set of labels for + // this resource, used to detect conflicts. The fingerprint is initially + // generated by Compute Engine and changes after every request to modify + // or update labels. You must always provide an up-to-date fingerprint + // hash in order to update or change labels. Make a get() request to the + // resource to get the latest fingerprint. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: The labels to set for this resource. + Labels map[string]string `json:"labels,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -37968,91 +38590,38 @@ type RouteList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "LabelFingerprint") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RouteList) MarshalJSON() ([]byte, error) { - type NoMethod RouteList +func (s *RegionSetLabelsRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionSetLabelsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouteListWarning: [Output Only] Informational warning message. -type RouteListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. 
- // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` +type RegionSetPolicyRequest struct { + // Bindings: Flatten Policy to create a backwacd compatible wire-format. + // Deprecated. Use 'policy' to specify bindings. + Bindings []*Binding `json:"bindings,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*RouteListWarningData `json:"data,omitempty"` + // Etag: Flatten Policy to create a backward compatible wire-format. + // Deprecated. Use 'policy' to specify the etag. + Etag string `json:"etag,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // Policy: REQUIRED: The complete policy to be applied to the + // 'resource'. The size of the policy is limited to a few 10s of KB. An + // empty policy is in general a valid policy but certain services (like + // Projects) might reject them. + Policy *Policy `json:"policy,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "Bindings") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -38060,8 +38629,8 @@ type RouteListWarning struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. 
By default, fields with empty + // NullFields is a list of field names (e.g. "Bindings") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -38069,27 +38638,46 @@ type RouteListWarning struct { NullFields []string `json:"-"` } -func (s *RouteListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RouteListWarning +func (s *RegionSetPolicyRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionSetPolicyRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouteListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +type RegionTargetHttpsProxiesSetSslCertificatesRequest struct { + // SslCertificates: New set of SslCertificate resources to associate + // with this TargetHttpsProxy resource. + SslCertificates []string `json:"sslCertificates,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // ForceSendFields is a list of field names (e.g. "Key") to + // NullFields is a list of field names (e.g. "SslCertificates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RegionTargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionTargetHttpsProxiesSetSslCertificatesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionUrlMapsValidateRequest struct { + // Resource: Content of the UrlMap to be validated. + Resource *UrlMap `json:"resource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Resource") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -38097,8 +38685,8 @@ type RouteListWarningData struct { // This may be used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Resource") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -38106,23 +38694,56 @@ type RouteListWarningData struct { NullFields []string `json:"-"` } -func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RouteListWarningData +func (s *RegionUrlMapsValidateRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionUrlMapsValidateRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Router: Represents a Cloud Router resource. For more information -// about Cloud Router, read the Cloud Router overview. -type Router struct { - // Bgp: BGP information specific to this router. - Bgp *RouterBgp `json:"bgp,omitempty"` +// RequestMirrorPolicy: A policy that specifies how requests intended +// for the route's backends are shadowed to a separate mirrored backend +// service. The load balancer doesn't wait for responses from the shadow +// service. Before sending traffic to the shadow service, the host or +// authority header is suffixed with -shadow. +type RequestMirrorPolicy struct { + // BackendService: The full or partial URL to the BackendService + // resource being mirrored to. The backend service configured for a + // mirroring policy must reference backends that are of the same type as + // the original backend service matched in the URL map. Serverless NEG + // backends are not currently supported as a mirrored backend service. + BackendService string `json:"backendService,omitempty"` - // BgpPeers: BGP information that must be configured into the routing - // stack to establish BGP peering. This information must specify the - // peer ASN and either the interface name, IP address, or peer IP - // address. Please refer to RFC4273. - BgpPeers []*RouterBgpPeer `json:"bgpPeers,omitempty"` + // ForceSendFields is a list of field names (e.g. "BackendService") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackendService") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RequestMirrorPolicy) MarshalJSON() ([]byte, error) { + type NoMethod RequestMirrorPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Reservation: Represents a reservation resource. 
A reservation ensures +// that capacity is held in a specific zone even if the reserved VMs are +// not running. For more information, read Reserving zonal resources. +type Reservation struct { + // Commitment: [Output Only] Full or partial URL to a parent commitment. + // This field displays for reservations that are tied to a commitment. + Commitment string `json:"commitment,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -38132,52 +38753,71 @@ type Router struct { // property when you create the resource. Description string `json:"description,omitempty"` - // EncryptedInterconnectRouter: Indicates if a router is dedicated for - // use with encrypted VLAN attachments (interconnectAttachments). Not - // currently available publicly. - EncryptedInterconnectRouter bool `json:"encryptedInterconnectRouter,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Interfaces: Router interfaces. Each interface requires either one - // linked resource, (for example, linkedVpnTunnel), or IP address and IP - // address range (for example, ipRange), or both. - Interfaces []*RouterInterface `json:"interfaces,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#router for - // routers. + // Kind: [Output Only] Type of the resource. Always compute#reservations + // for reservations. Kind string `json:"kind,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // Name: The name of the resource, provided by the client when initially + // creating the resource. The resource name must be 1-63 characters + // long, and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. Name string `json:"name,omitempty"` - // Nats: A list of NAT services created in this router. - Nats []*RouterNat `json:"nats,omitempty"` - - // Network: URI of the network to which this router belongs. - Network string `json:"network,omitempty"` + // ResourcePolicies: Resource policies to be added to this reservation. + // The key is defined by user, and the value is resource policy url. + // This is to define placement policy with reservation. + ResourcePolicies map[string]string `json:"resourcePolicies,omitempty"` - // Region: [Output Only] URI of the region where the router resides. You - // must specify this field as part of the HTTP request URL. It is not - // settable as a field in the request body. - Region string `json:"region,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. + SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. 
SelfLink string `json:"selfLink,omitempty"` + // ShareSettings: Specify share-settings to create a shared reservation. + // This property is optional. For more information about the syntax and + // options for this field and its subfields, see the guide for creating + // a shared reservation. + ShareSettings *ShareSettings `json:"shareSettings,omitempty"` + + // SpecificReservation: Reservation for instances with specific machine + // shapes. + SpecificReservation *AllocationSpecificSKUReservation `json:"specificReservation,omitempty"` + + // SpecificReservationRequired: Indicates whether the reservation can be + // consumed by VMs with affinity for "any" reservation. If the field is + // set, then only VMs that target the reservation by name can consume + // from this reservation. + SpecificReservationRequired bool `json:"specificReservationRequired,omitempty"` + + // Status: [Output Only] The status of the reservation. + // + // Possible values: + // "CREATING" - Resources are being allocated for the reservation. + // "DELETING" - Reservation is currently being deleted. + // "INVALID" + // "READY" - Reservation has allocated all its resources. + // "UPDATING" - Reservation is currently being resized. + Status string `json:"status,omitempty"` + + // Zone: Zone in which the reservation resides. A zone must be provided + // if the reservation is created within a commitment. + Zone string `json:"zone,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Bgp") to + // ForceSendFields is a list of field names (e.g. "Commitment") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -38185,8 +38825,8 @@ type Router struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bgp") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Commitment") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -38194,53 +38834,74 @@ type Router struct { NullFields []string `json:"-"` } -func (s *Router) MarshalJSON() ([]byte, error) { - type NoMethod Router +func (s *Reservation) MarshalJSON() ([]byte, error) { + type NoMethod Reservation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterAdvertisedIpRange: Description-tagged IP ranges for the router -// to advertise. -type RouterAdvertisedIpRange struct { - // Description: User-specified description for the IP range. - Description string `json:"description,omitempty"` +// ReservationAffinity: Specifies the reservations that this instance +// can consume from. +type ReservationAffinity struct { + // ConsumeReservationType: Specifies the type of reservation from which + // this instance can consume resources: ANY_RESERVATION (default), + // SPECIFIC_RESERVATION, or NO_RESERVATION. See Consuming reserved + // instances for examples. 
+ // + // Possible values: + // "ANY_RESERVATION" - Consume any allocation available. + // "NO_RESERVATION" - Do not consume from any allocated capacity. + // "SPECIFIC_RESERVATION" - Must consume from a specific reservation. + // Must specify key value fields for specifying the reservations. + // "UNSPECIFIED" + ConsumeReservationType string `json:"consumeReservationType,omitempty"` - // Range: The IP range to advertise. The value must be a CIDR-formatted - // string. - Range string `json:"range,omitempty"` + // Key: Corresponds to the label key of a reservation resource. To + // target a SPECIFIC_RESERVATION by name, specify + // googleapis.com/reservation-name as the key and specify the name of + // your reservation as its value. + Key string `json:"key,omitempty"` - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // Values: Corresponds to the label values of a reservation resource. + // This can be either a name to a reservation in the same project or + // "projects/different-project/reservations/some-reservation-name" to + // target a shared reservation in the same zone but in a different + // project. + Values []string `json:"values,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "ConsumeReservationType") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "ConsumeReservationType") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RouterAdvertisedIpRange) MarshalJSON() ([]byte, error) { - type NoMethod RouterAdvertisedIpRange +func (s *ReservationAffinity) MarshalJSON() ([]byte, error) { + type NoMethod ReservationAffinity raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterAggregatedList: Contains a list of routers. -type RouterAggregatedList struct { +// ReservationAggregatedList: Contains a list of reservations. +type ReservationAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. 
Id string `json:"id,omitempty"` - // Items: A list of Router resources. - Items map[string]RoutersScopedList `json:"items,omitempty"` + // Items: A list of Allocation resources. + Items map[string]ReservationsScopedList `json:"items,omitempty"` // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -38260,7 +38921,7 @@ type RouterAggregatedList struct { Unreachables []string `json:"unreachables,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *RouterAggregatedListWarning `json:"warning,omitempty"` + Warning *ReservationAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -38283,15 +38944,15 @@ type RouterAggregatedList struct { NullFields []string `json:"-"` } -func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod RouterAggregatedList +func (s *ReservationAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod ReservationAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterAggregatedListWarning: [Output Only] Informational warning +// ReservationAggregatedListWarning: [Output Only] Informational warning // message. -type RouterAggregatedListWarning struct { +type ReservationAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -38313,6 +38974,9 @@ type RouterAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -38320,6 +38984,9 @@ type RouterAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -38353,7 +39020,7 @@ type RouterAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*RouterAggregatedListWarningData `json:"data,omitempty"` + Data []*ReservationAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
@@ -38376,13 +39043,13 @@ type RouterAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RouterAggregatedListWarning +func (s *ReservationAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ReservationAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterAggregatedListWarningData struct { +type ReservationAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -38413,57 +39080,43 @@ type RouterAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RouterAggregatedListWarningData +func (s *ReservationAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ReservationAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterBgp struct { - // AdvertiseMode: User-specified flag to indicate which mode to use for - // advertisement. The options are DEFAULT or CUSTOM. - // - // Possible values: - // "CUSTOM" - // "DEFAULT" - AdvertiseMode string `json:"advertiseMode,omitempty"` +type ReservationList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` - // AdvertisedGroups: User-specified list of prefix groups to advertise - // in custom mode. This field can only be populated if advertise_mode is - // CUSTOM and is advertised to all peers of the router. These groups - // will be advertised in addition to any specified prefixes. Leave this - // field blank to advertise no custom groups. - // - // Possible values: - // "ALL_SUBNETS" - Advertise all available subnets (including peer VPC - // subnets). - AdvertisedGroups []string `json:"advertisedGroups,omitempty"` + // Items: [Output Only] A list of Allocation resources. + Items []*Reservation `json:"items,omitempty"` - // AdvertisedIpRanges: User-specified list of individual IP ranges to - // advertise in custom mode. This field can only be populated if - // advertise_mode is CUSTOM and is advertised to all peers of the - // router. These IP ranges will be advertised in addition to any - // specified groups. Leave this field blank to advertise no custom IP - // ranges. - AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` + // Kind: [Output Only] Type of resource.Always compute#reservationsList + // for listsof reservations + Kind string `json:"kind,omitempty"` - // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 - // private ASN, either 16-bit or 32-bit. The value will be fixed for - // this router resource. All VPN tunnels that link to this router will - // have the same local ASN. - Asn int64 `json:"asn,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. 
+ NextPageToken string `json:"nextPageToken,omitempty"` - // KeepaliveInterval: The interval in seconds between BGP keepalive - // messages that are sent to the peer. Hold time is three times the - // interval at which keepalive messages are sent, and the hold time is - // the maximum number of seconds allowed to elapse between successive - // keepalive messages that BGP receives from a peer. BGP will use the - // smaller of either the local hold time value or the peer's hold time - // value as the hold time for the BGP connection between the two peers. - // If set, this value must be between 20 and 60. The default is 20. - KeepaliveInterval int64 `json:"keepaliveInterval,omitempty"` + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` - // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to + // Warning: [Output Only] Informational warning message. + Warning *ReservationListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -38471,135 +39124,97 @@ type RouterBgp struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AdvertiseMode") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterBgp) MarshalJSON() ([]byte, error) { - type NoMethod RouterBgp +func (s *ReservationList) MarshalJSON() ([]byte, error) { + type NoMethod ReservationList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterBgpPeer struct { - // AdvertiseMode: User-specified flag to indicate which mode to use for - // advertisement. - // - // Possible values: - // "CUSTOM" - // "DEFAULT" - AdvertiseMode string `json:"advertiseMode,omitempty"` - - // AdvertisedGroups: User-specified list of prefix groups to advertise - // in custom mode, which can take one of the following options: - - // ALL_SUBNETS: Advertises all available subnets, including peer VPC - // subnets. - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. - // Note that this field can only be populated if advertise_mode is - // CUSTOM and overrides the list defined for the router (in the "bgp" - // message). These groups are advertised in addition to any specified - // prefixes. Leave this field blank to advertise no custom groups. - // - // Possible values: - // "ALL_SUBNETS" - Advertise all available subnets (including peer VPC - // subnets). 
- AdvertisedGroups []string `json:"advertisedGroups,omitempty"` - - // AdvertisedIpRanges: User-specified list of individual IP ranges to - // advertise in custom mode. This field can only be populated if - // advertise_mode is CUSTOM and overrides the list defined for the - // router (in the "bgp" message). These IP ranges are advertised in - // addition to any specified groups. Leave this field blank to advertise - // no custom IP ranges. - AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` - - // AdvertisedRoutePriority: The priority of routes advertised to this - // BGP peer. Where there is more than one matching route of maximum - // length, the routes with the lowest priority value win. - AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` - - // Bfd: BFD configuration for the BGP peering. - Bfd *RouterBgpPeerBfd `json:"bfd,omitempty"` - - // Enable: The status of the BGP peer connection. If set to FALSE, any - // active session with the peer is terminated and all associated routing - // information is removed. If set to TRUE, the peer connection can be - // established with routing information. The default is TRUE. - // - // Possible values: - // "FALSE" - // "TRUE" - Enable string `json:"enable,omitempty"` - - // EnableIpv6: Enable IPv6 traffic over BGP Peer. If not specified, it - // is disabled by default. - EnableIpv6 bool `json:"enableIpv6,omitempty"` - - // InterfaceName: Name of the interface the BGP peer is associated with. - InterfaceName string `json:"interfaceName,omitempty"` - - // IpAddress: IP address of the interface inside Google Cloud Platform. - // Only IPv4 is supported. - IpAddress string `json:"ipAddress,omitempty"` - - // Ipv6NexthopAddress: IPv6 address of the interface inside Google Cloud - // Platform. - Ipv6NexthopAddress string `json:"ipv6NexthopAddress,omitempty"` - - // ManagementType: [Output Only] The resource that configures and - // manages this BGP peer. - MANAGED_BY_USER is the default value and can - // be managed by you or other users - MANAGED_BY_ATTACHMENT is a BGP - // peer that is configured and managed by Cloud Interconnect, - // specifically by an InterconnectAttachment of type PARTNER. Google - // automatically creates, updates, and deletes this type of BGP peer - // when the PARTNER InterconnectAttachment is created, updated, or - // deleted. +// ReservationListWarning: [Output Only] Informational warning message. +type ReservationListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. // // Possible values: - // "MANAGED_BY_ATTACHMENT" - The BGP peer is automatically created for - // PARTNER type InterconnectAttachment; Google will automatically - // create/delete this BGP peer when the PARTNER InterconnectAttachment - // is created/deleted, and Google will update the ipAddress and - // peerIpAddress when the PARTNER InterconnectAttachment is provisioned. - // This type of BGP peer cannot be created or deleted, but can be - // modified for all fields except for name, ipAddress and peerIpAddress. - // "MANAGED_BY_USER" - Default value, the BGP peer is manually created - // and managed by user. - ManagementType string `json:"managementType,omitempty"` - - // Name: Name of this BGP peer. The name must be 1-63 characters long, - // and comply with RFC1035. 
Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` - - // PeerAsn: Peer BGP Autonomous System Number (ASN). Each BGP interface - // may use a different value. - PeerAsn int64 `json:"peerAsn,omitempty"` - - // PeerIpAddress: IP address of the BGP interface outside Google Cloud - // Platform. Only IPv4 is supported. - PeerIpAddress string `json:"peerIpAddress,omitempty"` + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. 
+ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` - // PeerIpv6NexthopAddress: IPv6 address of the BGP interface outside - // Google Cloud Platform. - PeerIpv6NexthopAddress string `json:"peerIpv6NexthopAddress,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*ReservationListWarningData `json:"data,omitempty"` - // RouterApplianceInstance: URI of the VM instance that is used as - // third-party router appliances such as Next Gen Firewalls, Virtual - // Routers, or Router Appliances. The VM instance must be located in - // zones contained in the same region as this Cloud Router. The VM - // instance is the peer side of the BGP session. - RouterApplianceInstance string `json:"routerApplianceInstance,omitempty"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -38607,155 +39222,64 @@ type RouterBgpPeer struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AdvertiseMode") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { - type NoMethod RouterBgpPeer +func (s *ReservationListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ReservationListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterBgpPeerBfd struct { - // MinReceiveInterval: The minimum interval, in milliseconds, between - // BFD control packets received from the peer router. The actual value - // is negotiated between the two routers and is equal to the greater of - // this value and the transmit interval of the other router. If set, - // this value must be between 1000 and 30000. The default is 1000. - MinReceiveInterval int64 `json:"minReceiveInterval,omitempty"` - - // MinTransmitInterval: The minimum interval, in milliseconds, between - // BFD control packets transmitted to the peer router. 
The actual value - // is negotiated between the two routers and is equal to the greater of - // this value and the corresponding receive interval of the other - // router. If set, this value must be between 1000 and 30000. The - // default is 1000. - MinTransmitInterval int64 `json:"minTransmitInterval,omitempty"` - - // Multiplier: The number of consecutive BFD packets that must be missed - // before BFD declares that a peer is unavailable. If set, the value - // must be a value between 5 and 16. The default is 5. - Multiplier int64 `json:"multiplier,omitempty"` +type ReservationListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // SessionInitializationMode: The BFD session initialization mode for - // this BGP peer. If set to ACTIVE, the Cloud Router will initiate the - // BFD session for this BGP peer. If set to PASSIVE, the Cloud Router - // will wait for the peer router to initiate the BFD session for this - // BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The - // default is PASSIVE. - // - // Possible values: - // "ACTIVE" - // "DISABLED" - // "PASSIVE" - SessionInitializationMode string `json:"sessionInitializationMode,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "MinReceiveInterval") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MinReceiveInterval") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *RouterBgpPeerBfd) MarshalJSON() ([]byte, error) { - type NoMethod RouterBgpPeerBfd +func (s *ReservationListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ReservationListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterInterface struct { - // IpRange: IP address and range of the interface. The IP range must be - // in the RFC3927 link-local IP address space. The value must be a - // CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not - // truncate the address as it represents the IP address of the - // interface. - IpRange string `json:"ipRange,omitempty"` - - // LinkedInterconnectAttachment: URI of the linked Interconnect - // attachment. It must be in the same region as the router. Each - // interface can have one linked resource, which can be a VPN tunnel, an - // Interconnect attachment, or a virtual machine instance. - LinkedInterconnectAttachment string `json:"linkedInterconnectAttachment,omitempty"` - - // LinkedVpnTunnel: URI of the linked VPN tunnel, which must be in the - // same region as the router. Each interface can have one linked - // resource, which can be a VPN tunnel, an Interconnect attachment, or a - // virtual machine instance. - LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` - - // ManagementType: [Output Only] The resource that configures and - // manages this interface. - MANAGED_BY_USER is the default value and - // can be managed directly by users. - MANAGED_BY_ATTACHMENT is an - // interface that is configured and managed by Cloud Interconnect, - // specifically, by an InterconnectAttachment of type PARTNER. Google - // automatically creates, updates, and deletes this type of interface - // when the PARTNER InterconnectAttachment is created, updated, or - // deleted. - // - // Possible values: - // "MANAGED_BY_ATTACHMENT" - The interface is automatically created - // for PARTNER type InterconnectAttachment, Google will automatically - // create/update/delete this interface when the PARTNER - // InterconnectAttachment is created/provisioned/deleted. This type of - // interface cannot be manually managed by user. - // "MANAGED_BY_USER" - Default value, the interface is manually - // created and managed by user. - ManagementType string `json:"managementType,omitempty"` - - // Name: Name of this interface entry. The name must be 1-63 characters - // long, and comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` - - // PrivateIpAddress: The regional private internal IP address that is - // used to establish BGP sessions to a VM instance acting as a - // third-party Router Appliance, such as a Next Gen Firewall, a Virtual - // Router, or an SD-WAN VM. - PrivateIpAddress string `json:"privateIpAddress,omitempty"` - - // RedundantInterface: Name of the interface that will be redundant with - // the current interface you are creating. The redundantInterface must - // belong to the same Cloud Router as the interface here. To establish - // the BGP session to a Router Appliance VM, you must create two BGP - // peers. 
The two BGP peers must be attached to two separate interfaces - // that are redundant with each other. The redundant_interface must be - // 1-63 characters long, and comply with RFC1035. Specifically, the - // redundant_interface must be 1-63 characters long and match the - // regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first - // character must be a lowercase letter, and all following characters - // must be a dash, lowercase letter, or digit, except the last - // character, which cannot be a dash. - RedundantInterface string `json:"redundantInterface,omitempty"` - - // Subnetwork: The URI of the subnetwork resource that this interface - // belongs to, which must be in the same region as the Cloud Router. - // When you establish a BGP session to a VM instance using this - // interface, the VM instance must belong to the same subnetwork as the - // subnetwork specified here. - Subnetwork string `json:"subnetwork,omitempty"` +type ReservationsResizeRequest struct { + // SpecificSkuCount: Number of allocated resources can be resized with + // minimum = 1 and maximum = 1000. + SpecificSkuCount int64 `json:"specificSkuCount,omitempty,string"` - // ForceSendFields is a list of field names (e.g. "IpRange") to + // ForceSendFields is a list of field names (e.g. "SpecificSkuCount") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -38763,53 +39287,31 @@ type RouterInterface struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpRange") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "SpecificSkuCount") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RouterInterface) MarshalJSON() ([]byte, error) { - type NoMethod RouterInterface +func (s *ReservationsResizeRequest) MarshalJSON() ([]byte, error) { + type NoMethod ReservationsResizeRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterList: Contains a list of Router resources. -type RouterList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Router resources. - Items []*Router `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#router for - // routers. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. 
Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *RouterListWarning `json:"warning,omitempty"` +type ReservationsScopedList struct { + // Reservations: A list of reservations contained in this scope. + Reservations []*Reservation `json:"reservations,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: Informational warning which replaces the list of + // reservations when the list is empty. + Warning *ReservationsScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Reservations") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -38817,23 +39319,24 @@ type RouterList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Reservations") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterList) MarshalJSON() ([]byte, error) { - type NoMethod RouterList +func (s *ReservationsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod ReservationsScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterListWarning: [Output Only] Informational warning message. -type RouterListWarning struct { +// ReservationsScopedListWarning: Informational warning which replaces +// the list of reservations when the list is empty. +type ReservationsScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -38855,6 +39358,9 @@ type RouterListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -38862,6 +39368,9 @@ type RouterListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. 
+ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -38895,7 +39404,7 @@ type RouterListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*RouterListWarningData `json:"data,omitempty"` + Data []*ReservationsScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -38918,13 +39427,13 @@ type RouterListWarning struct { NullFields []string `json:"-"` } -func (s *RouterListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RouterListWarning +func (s *ReservationsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ReservationsScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterListWarningData struct { +type ReservationsScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -38955,128 +39464,97 @@ type RouterListWarningData struct { NullFields []string `json:"-"` } -func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RouterListWarningData +func (s *ReservationsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ReservationsScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterNat: Represents a Nat resource. It enables the VMs within the -// specified subnetworks to access Internet without external IP -// addresses. It specifies a list of subnetworks (and the ranges within) -// that want to use NAT. Customers can also provide the external IPs -// that would be used for NAT. GCP would auto-allocate ephemeral IPs if -// no external IPs are provided. -type RouterNat struct { - // DrainNatIps: A list of URLs of the IP resources to be drained. These - // IPs must be valid static external IPs that have been assigned to the - // NAT. These IPs should be used for updating/patching a NAT only. - DrainNatIps []string `json:"drainNatIps,omitempty"` - - // EnableDynamicPortAllocation: Enable Dynamic Port Allocation. If not - // specified, it is disabled by default. If set to true, - Dynamic Port - // Allocation will be enabled on this NAT config. - - // enableEndpointIndependentMapping cannot be set to true. - If minPorts - // is set, minPortsPerVm must be set to a power of two greater than or - // equal to 32. If minPortsPerVm is not set, a minimum of 32 ports will - // be allocated to a VM from this NAT config. - EnableDynamicPortAllocation bool `json:"enableDynamicPortAllocation,omitempty"` +// ResourceCommitment: Commitment for a particular resource (a +// Commitment is composed of one or more of these). +type ResourceCommitment struct { + // AcceleratorType: Name of the accelerator type resource. Applicable + // only when the type is ACCELERATOR. 
+ AcceleratorType string `json:"acceleratorType,omitempty"` - EnableEndpointIndependentMapping bool `json:"enableEndpointIndependentMapping,omitempty"` + // Amount: The amount of the resource purchased (in a type-dependent + // unit, such as bytes). For vCPUs, this can just be an integer. For + // memory, this must be provided in MB. Memory must be a multiple of 256 + // MB, with up to 6.5GB of memory per every vCPU. + Amount int64 `json:"amount,omitempty,string"` - // IcmpIdleTimeoutSec: Timeout (in seconds) for ICMP connections. - // Defaults to 30s if not set. - IcmpIdleTimeoutSec int64 `json:"icmpIdleTimeoutSec,omitempty"` + // Type: Type of resource for which this commitment applies. Possible + // values are VCPU, MEMORY, LOCAL_SSD, and ACCELERATOR. + // + // Possible values: + // "ACCELERATOR" + // "LOCAL_SSD" + // "MEMORY" + // "UNSPECIFIED" + // "VCPU" + Type string `json:"type,omitempty"` - // LogConfig: Configure logging on this NAT. - LogConfig *RouterNatLogConfig `json:"logConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "AcceleratorType") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // MaxPortsPerVm: Maximum number of ports allocated to a VM from this - // NAT config when Dynamic Port Allocation is enabled. If Dynamic Port - // Allocation is not enabled, this field has no effect. If Dynamic Port - // Allocation is enabled, and this field is set, it must be set to a - // power of two greater than minPortsPerVm, or 64 if minPortsPerVm is - // not set. If Dynamic Port Allocation is enabled and this field is not - // set, a maximum of 65536 ports will be allocated to a VM from this NAT - // config. - MaxPortsPerVm int64 `json:"maxPortsPerVm,omitempty"` + // NullFields is a list of field names (e.g. "AcceleratorType") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - // MinPortsPerVm: Minimum number of ports allocated to a VM from this - // NAT config. If not set, a default number of ports is allocated to a - // VM. This is rounded up to the nearest power of 2. For example, if the - // value of this field is 50, at least 64 ports are allocated to a VM. - MinPortsPerVm int64 `json:"minPortsPerVm,omitempty"` +func (s *ResourceCommitment) MarshalJSON() ([]byte, error) { + type NoMethod ResourceCommitment + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // Name: Unique name of this Nat service. The name must be 1-63 - // characters long and comply with RFC1035. - Name string `json:"name,omitempty"` +type ResourceGroupReference struct { + // Group: A URI referencing one of the instance groups or network + // endpoint groups listed in the backend service. 
+ Group string `json:"group,omitempty"` - // NatIpAllocateOption: Specify the NatIpAllocateOption, which can take - // one of the following values: - MANUAL_ONLY: Uses only Nat IP - // addresses provided by customers. When there are not enough specified - // Nat IPs, the Nat service fails for new VMs. - AUTO_ONLY: Nat IPs are - // allocated by Google Cloud Platform; customers can't specify any Nat - // IPs. When choosing AUTO_ONLY, then nat_ip should be empty. - // - // Possible values: - // "AUTO_ONLY" - Nat IPs are allocated by GCP; customers can not - // specify any Nat IPs. - // "MANUAL_ONLY" - Only use Nat IPs provided by customers. When - // specified Nat IPs are not enough then the Nat service fails for new - // VMs. - NatIpAllocateOption string `json:"natIpAllocateOption,omitempty"` - - // NatIps: A list of URLs of the IP resources used for this Nat service. - // These IP addresses must be valid static external IP addresses - // assigned to the project. - NatIps []string `json:"natIps,omitempty"` - - // Rules: A list of rules associated with this NAT. - Rules []*RouterNatRule `json:"rules,omitempty"` - - // SourceSubnetworkIpRangesToNat: Specify the Nat option, which can take - // one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of - // the IP ranges in every Subnetwork are allowed to Nat. - - // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges - // in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list - // of Subnetworks are allowed to Nat (specified in the field subnetwork - // below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. - // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or - // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any - // other Router.Nat section in any Router for this network in this - // region. - // - // Possible values: - // "ALL_SUBNETWORKS_ALL_IP_RANGES" - All the IP ranges in every - // Subnetwork are allowed to Nat. - // "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES" - All the primary IP ranges - // in every Subnetwork are allowed to Nat. - // "LIST_OF_SUBNETWORKS" - A list of Subnetworks are allowed to Nat - // (specified in the field subnetwork below) - SourceSubnetworkIpRangesToNat string `json:"sourceSubnetworkIpRangesToNat,omitempty"` - - // Subnetworks: A list of Subnetwork resources whose traffic should be - // translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS - // is selected for the SubnetworkIpRangeToNatOption above. - Subnetworks []*RouterNatSubnetworkToNat `json:"subnetworks,omitempty"` + // ForceSendFields is a list of field names (e.g. "Group") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // TcpEstablishedIdleTimeoutSec: Timeout (in seconds) for TCP - // established connections. Defaults to 1200s if not set. - TcpEstablishedIdleTimeoutSec int64 `json:"tcpEstablishedIdleTimeoutSec,omitempty"` + // NullFields is a list of field names (e.g. "Group") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // TcpTimeWaitTimeoutSec: Timeout (in seconds) for TCP connections that - // are in TIME_WAIT state. Defaults to 120s if not set. - TcpTimeWaitTimeoutSec int64 `json:"tcpTimeWaitTimeoutSec,omitempty"` +func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { + type NoMethod ResourceGroupReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // TcpTransitoryIdleTimeoutSec: Timeout (in seconds) for TCP transitory - // connections. Defaults to 30s if not set. - TcpTransitoryIdleTimeoutSec int64 `json:"tcpTransitoryIdleTimeoutSec,omitempty"` +type ResourcePoliciesScopedList struct { + // ResourcePolicies: A list of resourcePolicies contained in this scope. + ResourcePolicies []*ResourcePolicy `json:"resourcePolicies,omitempty"` - // UdpIdleTimeoutSec: Timeout (in seconds) for UDP connections. Defaults - // to 30s if not set. - UdpIdleTimeoutSec int64 `json:"udpIdleTimeoutSec,omitempty"` + // Warning: Informational warning which replaces the list of + // resourcePolicies when the list is empty. + Warning *ResourcePoliciesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "DrainNatIps") to + // ForceSendFields is a list of field names (e.g. "ResourcePolicies") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39084,42 +39562,99 @@ type RouterNat struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DrainNatIps") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "ResourcePolicies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RouterNat) MarshalJSON() ([]byte, error) { - type NoMethod RouterNat +func (s *ResourcePoliciesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePoliciesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterNatLogConfig: Configuration of logging on a NAT. -type RouterNatLogConfig struct { - // Enable: Indicates whether or not to export logs. This is false by - // default. - Enable bool `json:"enable,omitempty"` - - // Filter: Specify the desired filtering of logs on this NAT. If - // unspecified, logs are exported for all connections handled by this - // NAT. 
This option can take one of the following values: - ERRORS_ONLY: - // Export logs only for connection failures. - TRANSLATIONS_ONLY: Export - // logs only for successful connections. - ALL: Export logs for all - // connections, successful and unsuccessful. +// ResourcePoliciesScopedListWarning: Informational warning which +// replaces the list of resourcePolicies when the list is empty. +type ResourcePoliciesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. // // Possible values: - // "ALL" - Export logs for all (successful and unsuccessful) - // connections. - // "ERRORS_ONLY" - Export logs for connection failures only. - // "TRANSLATIONS_ONLY" - Export logs for successful connections only. - Filter string `json:"filter,omitempty"` + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. 
+ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "Enable") to + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*ResourcePoliciesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39127,7 +39662,7 @@ type RouterNatLogConfig struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Enable") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -39136,37 +39671,27 @@ type RouterNatLogConfig struct { NullFields []string `json:"-"` } -func (s *RouterNatLogConfig) MarshalJSON() ([]byte, error) { - type NoMethod RouterNatLogConfig +func (s *ResourcePoliciesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePoliciesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterNatRule struct { - // Action: The action to be enforced for traffic that matches this rule. - Action *RouterNatRuleAction `json:"action,omitempty"` - - // Description: An optional description of this rule. - Description string `json:"description,omitempty"` - - // Match: CEL expression that specifies the match condition that egress - // traffic from a VM is evaluated against. If it evaluates to true, the - // corresponding `action` is enforced. The following examples are valid - // match expressions for public NAT: "inIpRange(destination.ip, - // '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')" - // "destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'" The - // following example is a valid match expression for private NAT: - // "nexthop.hub == - // 'https://networkconnectivity.googleapis.com/v1alpha1/projects/my-proje - // ct/global/hub/hub-1'" - Match string `json:"match,omitempty"` +type ResourcePoliciesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // RuleNumber: An integer uniquely identifying a rule in the list. The - // rule number must be a positive value between 0 and 65000, and must be - // unique among rules within a NAT. - RuleNumber int64 `json:"ruleNumber,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Action") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39174,7 +39699,7 @@ type RouterNatRule struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Action") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -39183,25 +39708,78 @@ type RouterNatRule struct { NullFields []string `json:"-"` } -func (s *RouterNatRule) MarshalJSON() ([]byte, error) { - type NoMethod RouterNatRule +func (s *ResourcePoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePoliciesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterNatRuleAction struct { - // SourceNatActiveIps: A list of URLs of the IP resources used for this - // NAT rule. These IP addresses must be valid static external IP - // addresses assigned to the project. This field is used for public NAT. - SourceNatActiveIps []string `json:"sourceNatActiveIps,omitempty"` +// ResourcePolicy: Represents a Resource Policy resource. You can use +// resource policies to schedule actions for some Compute Engine +// resources. For example, you can use them to schedule persistent disk +// snapshots. +type ResourcePolicy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // SourceNatDrainIps: A list of URLs of the IP resources to be drained. - // These IPs must be valid static external IPs that have been assigned - // to the NAT. These IPs should be used for updating/patching a NAT rule - // only. This field is used for public NAT. - SourceNatDrainIps []string `json:"sourceNatDrainIps,omitempty"` + Description string `json:"description,omitempty"` - // ForceSendFields is a list of field names (e.g. "SourceNatActiveIps") + // GroupPlacementPolicy: Resource policy for instances for placement + // configuration. + GroupPlacementPolicy *ResourcePolicyGroupPlacementPolicy `json:"groupPlacementPolicy,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // InstanceSchedulePolicy: Resource policy for scheduling instance + // operations. 
+ InstanceSchedulePolicy *ResourcePolicyInstanceSchedulePolicy `json:"instanceSchedulePolicy,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#resource_policies for resource policies. + Kind string `json:"kind,omitempty"` + + // Name: The name of the resource, provided by the client when initially + // creating the resource. The resource name must be 1-63 characters + // long, and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + Region string `json:"region,omitempty"` + + // ResourceStatus: [Output Only] The system status of the resource + // policy. + ResourceStatus *ResourcePolicyResourceStatus `json:"resourceStatus,omitempty"` + + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. + SelfLink string `json:"selfLink,omitempty"` + + // SnapshotSchedulePolicy: Resource policy for persistent disks for + // creating snapshots. + SnapshotSchedulePolicy *ResourcePolicySnapshotSchedulePolicy `json:"snapshotSchedulePolicy,omitempty"` + + // Status: [Output Only] The status of resource policy creation. + // + // Possible values: + // "CREATING" - Resource policy is being created. + // "DELETING" - Resource policy is being deleted. + // "EXPIRED" - Resource policy is expired and will not run again. + // "INVALID" + // "READY" - Resource policy is ready to be used. + Status string `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39209,7 +39787,7 @@ type RouterNatRuleAction struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SourceNatActiveIps") to + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -39219,39 +39797,48 @@ type RouterNatRuleAction struct { NullFields []string `json:"-"` } -func (s *RouterNatRuleAction) MarshalJSON() ([]byte, error) { - type NoMethod RouterNatRuleAction +func (s *ResourcePolicy) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterNatSubnetworkToNat: Defines the IP ranges that want to use NAT -// for a subnetwork. -type RouterNatSubnetworkToNat struct { - // Name: URL for the subnetwork resource that will use NAT. - Name string `json:"name,omitempty"` +// ResourcePolicyAggregatedList: Contains a list of resourcePolicies. +type ResourcePolicyAggregatedList struct { + Etag string `json:"etag,omitempty"` - // SecondaryIpRangeNames: A list of the secondary ranges of the - // Subnetwork that are allowed to use NAT. 
This can be populated only if - // "LIST_OF_SECONDARY_IP_RANGES" is one of the values in - // source_ip_ranges_to_nat. - SecondaryIpRangeNames []string `json:"secondaryIpRangeNames,omitempty"` + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // SourceIpRangesToNat: Specify the options for NAT ranges in the - // Subnetwork. All options of a single value are valid except - // NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple - // values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] - // Default: [ALL_IP_RANGES] - // - // Possible values: - // "ALL_IP_RANGES" - The primary and all the secondary ranges are - // allowed to Nat. - // "LIST_OF_SECONDARY_IP_RANGES" - A list of secondary ranges are - // allowed to Nat. - // "PRIMARY_IP_RANGE" - The primary range is allowed to Nat. - SourceIpRangesToNat []string `json:"sourceIpRangesToNat,omitempty"` + // Items: A list of ResourcePolicy resources. + Items map[string]ResourcePoliciesScopedList `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "Name") to + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *ResourcePolicyAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39259,7 +39846,7 @@ type RouterNatSubnetworkToNat struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Name") to include in API + // NullFields is a list of field names (e.g. "Etag") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -39268,27 +39855,89 @@ type RouterNatSubnetworkToNat struct { NullFields []string `json:"-"` } -func (s *RouterNatSubnetworkToNat) MarshalJSON() ([]byte, error) { - type NoMethod RouterNatSubnetworkToNat +func (s *ResourcePolicyAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatus struct { - // BestRoutes: Best routes for this router's network. - BestRoutes []*Route `json:"bestRoutes,omitempty"` - - // BestRoutesForRouter: Best routes learned by this router. 
- BestRoutesForRouter []*Route `json:"bestRoutesForRouter,omitempty"` - - BgpPeerStatus []*RouterStatusBgpPeerStatus `json:"bgpPeerStatus,omitempty"` +// ResourcePolicyAggregatedListWarning: [Output Only] Informational +// warning message. +type ResourcePolicyAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. 
+ Code string `json:"code,omitempty"` - NatStatus []*RouterStatusNatStatus `json:"natStatus,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*ResourcePolicyAggregatedListWarningData `json:"data,omitempty"` - // Network: URI of the network to which this router belongs. - Network string `json:"network,omitempty"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "BestRoutes") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39296,8 +39945,8 @@ type RouterStatus struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BestRoutes") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -39305,58 +39954,27 @@ type RouterStatus struct { NullFields []string `json:"-"` } -func (s *RouterStatus) MarshalJSON() ([]byte, error) { - type NoMethod RouterStatus +func (s *ResourcePolicyAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatusBgpPeerStatus struct { - // AdvertisedRoutes: Routes that were advertised to the remote BGP peer - AdvertisedRoutes []*Route `json:"advertisedRoutes,omitempty"` - - BfdStatus *BfdStatus `json:"bfdStatus,omitempty"` - - // IpAddress: IP address of the local BGP interface. - IpAddress string `json:"ipAddress,omitempty"` - - // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. - LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` - - // Name: Name of this BGP peer. Unique within the Routers resource. - Name string `json:"name,omitempty"` - - // NumLearnedRoutes: Number of routes learned from the remote BGP Peer. - NumLearnedRoutes int64 `json:"numLearnedRoutes,omitempty"` - - // PeerIpAddress: IP address of the remote BGP interface. - PeerIpAddress string `json:"peerIpAddress,omitempty"` - - // RouterApplianceInstance: [Output only] URI of the VM instance that is - // used as third-party router appliances such as Next Gen Firewalls, - // Virtual Routers, or Router Appliances. The VM instance is the peer - // side of the BGP session. - RouterApplianceInstance string `json:"routerApplianceInstance,omitempty"` - - // State: BGP state as specified in RFC1771. - State string `json:"state,omitempty"` - - // Status: Status of the BGP peer: {UP, DOWN} - // - // Possible values: - // "DOWN" - // "UNKNOWN" - // "UP" - Status string `json:"status,omitempty"` - - // Uptime: Time this session has been up. 
Format: 14 years, 51 weeks, 6 - // days, 23 hours, 59 minutes, 59 seconds - Uptime string `json:"uptime,omitempty"` +type ResourcePolicyAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // UptimeSeconds: Time this session has been up, in seconds. Format: 145 - UptimeSeconds string `json:"uptimeSeconds,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "AdvertisedRoutes") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39364,108 +39982,123 @@ type RouterStatusBgpPeerStatus struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AdvertisedRoutes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { - type NoMethod RouterStatusBgpPeerStatus +func (s *ResourcePolicyAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterStatusNatStatus: Status of a NAT contained in this router. -type RouterStatusNatStatus struct { - // AutoAllocatedNatIps: A list of IPs auto-allocated for NAT. Example: - // ["1.1.1.1", "129.2.16.89"] - AutoAllocatedNatIps []string `json:"autoAllocatedNatIps,omitempty"` +// ResourcePolicyDailyCycle: Time window specified for daily operations. +type ResourcePolicyDailyCycle struct { + // DaysInCycle: Defines a schedule with units measured in days. The + // value determines how many days pass between the start of each cycle. + DaysInCycle int64 `json:"daysInCycle,omitempty"` - // DrainAutoAllocatedNatIps: A list of IPs auto-allocated for NAT that - // are in drain mode. Example: ["1.1.1.1", "179.12.26.133"]. 
- DrainAutoAllocatedNatIps []string `json:"drainAutoAllocatedNatIps,omitempty"` + // Duration: [Output only] A predetermined duration for the window, + // automatically chosen to be the smallest possible in the given + // scenario. + Duration string `json:"duration,omitempty"` - // DrainUserAllocatedNatIps: A list of IPs user-allocated for NAT that - // are in drain mode. Example: ["1.1.1.1", "179.12.26.133"]. - DrainUserAllocatedNatIps []string `json:"drainUserAllocatedNatIps,omitempty"` + // StartTime: Start time of the window. This must be in UTC format that + // resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For + // example, both 13:00-5 and 08:00 are valid. + StartTime string `json:"startTime,omitempty"` - // MinExtraNatIpsNeeded: The number of extra IPs to allocate. This will - // be greater than 0 only if user-specified IPs are NOT enough to allow - // all configured VMs to use NAT. This value is meaningful only when - // auto-allocation of NAT IPs is *not* used. - MinExtraNatIpsNeeded int64 `json:"minExtraNatIpsNeeded,omitempty"` + // ForceSendFields is a list of field names (e.g. "DaysInCycle") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Name: Unique name of this NAT. - Name string `json:"name,omitempty"` + // NullFields is a list of field names (e.g. "DaysInCycle") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // NumVmEndpointsWithNatMappings: Number of VM endpoints (i.e., Nics) - // that can use NAT. - NumVmEndpointsWithNatMappings int64 `json:"numVmEndpointsWithNatMappings,omitempty"` +func (s *ResourcePolicyDailyCycle) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyDailyCycle + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // RuleStatus: Status of rules in this NAT. - RuleStatus []*RouterStatusNatStatusNatRuleStatus `json:"ruleStatus,omitempty"` +// ResourcePolicyGroupPlacementPolicy: A GroupPlacementPolicy specifies +// resource placement configuration. It specifies the failure bucket +// separation as well as network locality +type ResourcePolicyGroupPlacementPolicy struct { + // AvailabilityDomainCount: The number of availability domains to spread + // instances across. If two instances are in different availability + // domain, they are not in the same low latency network. + AvailabilityDomainCount int64 `json:"availabilityDomainCount,omitempty"` - // UserAllocatedNatIpResources: A list of fully qualified URLs of - // reserved IP address resources. - UserAllocatedNatIpResources []string `json:"userAllocatedNatIpResources,omitempty"` + // Collocation: Specifies network collocation + // + // Possible values: + // "COLLOCATED" + // "UNSPECIFIED_COLLOCATION" + Collocation string `json:"collocation,omitempty"` - // UserAllocatedNatIps: A list of IPs user-allocated for NAT. 
They will - // be raw IP strings like "179.12.26.133". - UserAllocatedNatIps []string `json:"userAllocatedNatIps,omitempty"` + // VmCount: Number of VMs in this placement group. Google does not + // recommend that you use this field unless you use a compact policy and + // you want your policy to work only if it contains this exact number of + // VMs. + VmCount int64 `json:"vmCount,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoAllocatedNatIps") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AvailabilityDomainCount") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoAllocatedNatIps") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the + // NullFields is a list of field names (e.g. "AvailabilityDomainCount") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } -func (s *RouterStatusNatStatus) MarshalJSON() ([]byte, error) { - type NoMethod RouterStatusNatStatus +func (s *ResourcePolicyGroupPlacementPolicy) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyGroupPlacementPolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterStatusNatStatusNatRuleStatus: Status of a NAT Rule contained in -// this NAT. -type RouterStatusNatStatusNatRuleStatus struct { - // ActiveNatIps: A list of active IPs for NAT. Example: ["1.1.1.1", - // "179.12.26.133"]. - ActiveNatIps []string `json:"activeNatIps,omitempty"` - - // DrainNatIps: A list of IPs for NAT that are in drain mode. Example: - // ["1.1.1.1", "179.12.26.133"]. - DrainNatIps []string `json:"drainNatIps,omitempty"` - - // MinExtraIpsNeeded: The number of extra IPs to allocate. This will be - // greater than 0 only if the existing IPs in this NAT Rule are NOT - // enough to allow all configured VMs to use NAT. - MinExtraIpsNeeded int64 `json:"minExtraIpsNeeded,omitempty"` +// ResourcePolicyHourlyCycle: Time window specified for hourly +// operations. +type ResourcePolicyHourlyCycle struct { + // Duration: [Output only] Duration of the time window, automatically + // chosen to be smallest possible in the given scenario. + Duration string `json:"duration,omitempty"` - // NumVmEndpointsWithNatMappings: Number of VM endpoints (i.e., NICs) - // that have NAT Mappings from this NAT Rule. 
- NumVmEndpointsWithNatMappings int64 `json:"numVmEndpointsWithNatMappings,omitempty"` + // HoursInCycle: Defines a schedule with units measured in hours. The + // value determines how many hours pass between the start of each cycle. + HoursInCycle int64 `json:"hoursInCycle,omitempty"` - // RuleNumber: Rule number of the rule. - RuleNumber int64 `json:"ruleNumber,omitempty"` + // StartTime: Time within the window to start the operations. It must be + // in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. + StartTime string `json:"startTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "ActiveNatIps") to + // ForceSendFields is a list of field names (e.g. "Duration") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39473,32 +40106,45 @@ type RouterStatusNatStatusNatRuleStatus struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ActiveNatIps") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Duration") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterStatusNatStatusNatRuleStatus) MarshalJSON() ([]byte, error) { - type NoMethod RouterStatusNatStatusNatRuleStatus +func (s *ResourcePolicyHourlyCycle) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyHourlyCycle raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatusResponse struct { - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` +// ResourcePolicyInstanceSchedulePolicy: An InstanceSchedulePolicy +// specifies when and how frequent certain operations are performed on +// the instance. +type ResourcePolicyInstanceSchedulePolicy struct { + // ExpirationTime: The expiration time of the schedule. The timestamp is + // an RFC3339 string. + ExpirationTime string `json:"expirationTime,omitempty"` - Result *RouterStatus `json:"result,omitempty"` + // StartTime: The start time of the schedule. The timestamp is an + // RFC3339 string. + StartTime string `json:"startTime,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // TimeZone: Specifies the time zone to be used in interpreting + // Schedule.schedule. The value of this field must be a time zone name + // from the tz database: http://en.wikipedia.org/wiki/Tz_database. + TimeZone string `json:"timeZone,omitempty"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // VmStartSchedule: Specifies the schedule for starting instances. + VmStartSchedule *ResourcePolicyInstanceSchedulePolicySchedule `json:"vmStartSchedule,omitempty"` + + // VmStopSchedule: Specifies the schedule for stopping instances. 
+ VmStopSchedule *ResourcePolicyInstanceSchedulePolicySchedule `json:"vmStopSchedule,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExpirationTime") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39506,30 +40152,30 @@ type RouterStatusResponse struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "ExpirationTime") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { - type NoMethod RouterStatusResponse +func (s *ResourcePolicyInstanceSchedulePolicy) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyInstanceSchedulePolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersPreviewResponse struct { - // Resource: Preview of given router. - Resource *Router `json:"resource,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +// ResourcePolicyInstanceSchedulePolicySchedule: Schedule for an +// instance operation. +type ResourcePolicyInstanceSchedulePolicySchedule struct { + // Schedule: Specifies the frequency for the operation, using the + // unix-cron format. + Schedule string `json:"schedule,omitempty"` - // ForceSendFields is a list of field names (e.g. "Resource") to + // ForceSendFields is a list of field names (e.g. "Schedule") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39537,7 +40183,7 @@ type RoutersPreviewResponse struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Resource") to include in + // NullFields is a list of field names (e.g. "Schedule") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -39546,21 +40192,45 @@ type RoutersPreviewResponse struct { NullFields []string `json:"-"` } -func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { - type NoMethod RoutersPreviewResponse +func (s *ResourcePolicyInstanceSchedulePolicySchedule) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyInstanceSchedulePolicySchedule raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersScopedList struct { - // Routers: A list of routers contained in this scope. - Routers []*Router `json:"routers,omitempty"` +type ResourcePolicyList struct { + Etag string `json:"etag,omitempty"` - // Warning: Informational warning which replaces the list of routers - // when the list is empty. - Warning *RoutersScopedListWarning `json:"warning,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` - // ForceSendFields is a list of field names (e.g. "Routers") to + // Items: [Output Only] A list of ResourcePolicy resources. + Items []*ResourcePolicy `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource.Always + // compute#resourcePoliciesList for listsof resourcePolicies + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *ResourcePolicyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39568,8 +40238,8 @@ type RoutersScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Routers") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
@@ -39577,15 +40247,15 @@ type RoutersScopedList struct { NullFields []string `json:"-"` } -func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { - type NoMethod RoutersScopedList +func (s *ResourcePolicyList) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RoutersScopedListWarning: Informational warning which replaces the -// list of routers when the list is empty. -type RoutersScopedListWarning struct { +// ResourcePolicyListWarning: [Output Only] Informational warning +// message. +type ResourcePolicyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -39607,6 +40277,9 @@ type RoutersScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -39614,6 +40287,9 @@ type RoutersScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -39647,7 +40323,7 @@ type RoutersScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*RoutersScopedListWarningData `json:"data,omitempty"` + Data []*ResourcePolicyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -39670,13 +40346,13 @@ type RoutersScopedListWarning struct { NullFields []string `json:"-"` } -func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RoutersScopedListWarning +func (s *ResourcePolicyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersScopedListWarningData struct { +type ResourcePolicyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -39707,45 +40383,58 @@ type RoutersScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RoutersScopedListWarningData +func (s *ResourcePolicyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Rule: This is deprecated and has no effect. Do not use. 
-type Rule struct { - // Action: This is deprecated and has no effect. Do not use. - // - // Possible values: - // "ALLOW" - This is deprecated and has no effect. Do not use. - // "ALLOW_WITH_LOG" - This is deprecated and has no effect. Do not - // use. - // "DENY" - This is deprecated and has no effect. Do not use. - // "DENY_WITH_LOG" - This is deprecated and has no effect. Do not use. - // "LOG" - This is deprecated and has no effect. Do not use. - // "NO_ACTION" - This is deprecated and has no effect. Do not use. - Action string `json:"action,omitempty"` - - // Conditions: This is deprecated and has no effect. Do not use. - Conditions []*Condition `json:"conditions,omitempty"` +// ResourcePolicyResourceStatus: Contains output only fields. Use this +// sub-message for all output fields set on ResourcePolicy. The internal +// structure of this "status" field should mimic the structure of +// ResourcePolicy proto specification. +type ResourcePolicyResourceStatus struct { + // InstanceSchedulePolicy: [Output Only] Specifies a set of output + // values reffering to the instance_schedule_policy system status. This + // field should have the same name as corresponding policy field. + InstanceSchedulePolicy *ResourcePolicyResourceStatusInstanceSchedulePolicyStatus `json:"instanceSchedulePolicy,omitempty"` - // Description: This is deprecated and has no effect. Do not use. - Description string `json:"description,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "InstanceSchedulePolicy") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` - // Ins: This is deprecated and has no effect. Do not use. - Ins []string `json:"ins,omitempty"` + // NullFields is a list of field names (e.g. "InstanceSchedulePolicy") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - // LogConfigs: This is deprecated and has no effect. Do not use. - LogConfigs []*LogConfig `json:"logConfigs,omitempty"` +func (s *ResourcePolicyResourceStatus) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyResourceStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // NotIns: This is deprecated and has no effect. Do not use. - NotIns []string `json:"notIns,omitempty"` +type ResourcePolicyResourceStatusInstanceSchedulePolicyStatus struct { + // LastRunStartTime: [Output Only] The last time the schedule + // successfully ran. The timestamp is an RFC3339 string. + LastRunStartTime string `json:"lastRunStartTime,omitempty"` - // Permissions: This is deprecated and has no effect. Do not use. - Permissions []string `json:"permissions,omitempty"` + // NextRunStartTime: [Output Only] The next time the schedule is planned + // to run. The actual time might be slightly different. The timestamp is + // an RFC3339 string. 
+ NextRunStartTime string `json:"nextRunStartTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "Action") to + // ForceSendFields is a list of field names (e.g. "LastRunStartTime") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39753,71 +40442,41 @@ type Rule struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Action") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "LastRunStartTime") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *Rule) MarshalJSON() ([]byte, error) { - type NoMethod Rule +func (s *ResourcePolicyResourceStatusInstanceSchedulePolicyStatus) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyResourceStatusInstanceSchedulePolicyStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SSLHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. - Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` - - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, SSL health check follows behavior specified in port and - // portName fields. - // - // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. - PortSpecification string `json:"portSpecification,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. 
- // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` +// ResourcePolicySnapshotSchedulePolicy: A snapshot schedule policy +// specifies when and how frequently snapshots are to be created for the +// target disk. Also specifies how many and how long these scheduled +// snapshots should be retained. +type ResourcePolicySnapshotSchedulePolicy struct { + // RetentionPolicy: Retention policy applied to snapshots created by + // this resource policy. + RetentionPolicy *ResourcePolicySnapshotSchedulePolicyRetentionPolicy `json:"retentionPolicy,omitempty"` - // Request: The application data to send once the SSL connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. - Request string `json:"request,omitempty"` + // Schedule: A Vm Maintenance Policy specifies what kind of + // infrastructure maintenance we are allowed to perform on this VM and + // when. Schedule that is applied to disks covered by this policy. + Schedule *ResourcePolicySnapshotSchedulePolicySchedule `json:"schedule,omitempty"` - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. - Response string `json:"response,omitempty"` + // SnapshotProperties: Properties with which snapshots are created such + // as labels, encryption keys. + SnapshotProperties *ResourcePolicySnapshotSchedulePolicySnapshotProperties `json:"snapshotProperties,omitempty"` - // ForceSendFields is a list of field names (e.g. "Port") to + // ForceSendFields is a list of field names (e.g. "RetentionPolicy") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39825,109 +40484,112 @@ type SSLHealthCheck struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Port") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "RetentionPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { - type NoMethod SSLHealthCheck +func (s *ResourcePolicySnapshotSchedulePolicy) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicySnapshotSchedulePolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SavedAttachedDisk: DEPRECATED: Please use compute#savedDisk instead. -// An instance-attached disk resource. 
-type SavedAttachedDisk struct { - // AutoDelete: Specifies whether the disk will be auto-deleted when the - // instance is deleted (but not when the disk is detached from the - // instance). - AutoDelete bool `json:"autoDelete,omitempty"` +// ResourcePolicySnapshotSchedulePolicyRetentionPolicy: Policy for +// retention of scheduled snapshots. +type ResourcePolicySnapshotSchedulePolicyRetentionPolicy struct { + // MaxRetentionDays: Maximum age of the snapshot that is allowed to be + // kept. + MaxRetentionDays int64 `json:"maxRetentionDays,omitempty"` - // Boot: Indicates that this is a boot disk. The virtual machine will - // use the first partition of the disk for its root filesystem. - Boot bool `json:"boot,omitempty"` + // OnSourceDiskDelete: Specifies the behavior to apply to scheduled + // snapshots when the source disk is deleted. + // + // Possible values: + // "APPLY_RETENTION_POLICY" + // "KEEP_AUTO_SNAPSHOTS" + // "UNSPECIFIED_ON_SOURCE_DISK_DELETE" + OnSourceDiskDelete string `json:"onSourceDiskDelete,omitempty"` - // DeviceName: Specifies the name of the disk attached to the source - // instance. - DeviceName string `json:"deviceName,omitempty"` + // ForceSendFields is a list of field names (e.g. "MaxRetentionDays") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // DiskEncryptionKey: The encryption key for the disk. - DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` + // NullFields is a list of field names (e.g. "MaxRetentionDays") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - // DiskSizeGb: The size of the disk in base-2 GB. - DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` +func (s *ResourcePolicySnapshotSchedulePolicyRetentionPolicy) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicySnapshotSchedulePolicyRetentionPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // DiskType: [Output Only] URL of the disk type resource. For example: - // projects/project /zones/zone/diskTypes/pd-standard or pd-ssd - DiskType string `json:"diskType,omitempty"` +// ResourcePolicySnapshotSchedulePolicySchedule: A schedule for disks +// where the schedueled operations are performed. +type ResourcePolicySnapshotSchedulePolicySchedule struct { + DailySchedule *ResourcePolicyDailyCycle `json:"dailySchedule,omitempty"` - // GuestOsFeatures: A list of features to enable on the guest operating - // system. Applicable only for bootable images. Read Enabling guest - // operating system features to see a list of available options. - GuestOsFeatures []*GuestOsFeature `json:"guestOsFeatures,omitempty"` + HourlySchedule *ResourcePolicyHourlyCycle `json:"hourlySchedule,omitempty"` - // Index: Specifies zero-based index of the disk that is attached to the - // source instance. 
- Index int64 `json:"index,omitempty"` + WeeklySchedule *ResourcePolicyWeeklyCycle `json:"weeklySchedule,omitempty"` - // Interface: Specifies the disk interface to use for attaching this - // disk, which is either SCSI or NVME. - // - // Possible values: - // "NVME" - // "SCSI" - Interface string `json:"interface,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#attachedDisk - // for attached disks. - Kind string `json:"kind,omitempty"` + // ForceSendFields is a list of field names (e.g. "DailySchedule") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Licenses: [Output Only] Any valid publicly visible licenses. - Licenses []string `json:"licenses,omitempty"` + // NullFields is a list of field names (e.g. "DailySchedule") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // Mode: The mode in which this disk is attached to the source instance, - // either READ_WRITE or READ_ONLY. - // - // Possible values: - // "READ_ONLY" - Attaches this disk in read-only mode. Multiple - // virtual machines can use a disk in read-only mode at a time. - // "READ_WRITE" - *[Default]* Attaches this disk in read-write mode. - // Only one virtual machine at a time can be attached to a disk in - // read-write mode. - Mode string `json:"mode,omitempty"` +func (s *ResourcePolicySnapshotSchedulePolicySchedule) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicySnapshotSchedulePolicySchedule + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // Source: Specifies a URL of the disk attached to the source instance. - Source string `json:"source,omitempty"` +// ResourcePolicySnapshotSchedulePolicySnapshotProperties: Specified +// snapshot properties for scheduled snapshots created by this policy. +type ResourcePolicySnapshotSchedulePolicySnapshotProperties struct { + // ChainName: Chain name that the snapshot is created in. + ChainName string `json:"chainName,omitempty"` - // StorageBytes: [Output Only] A size of the storage used by the disk's - // snapshot by this machine image. - StorageBytes int64 `json:"storageBytes,omitempty,string"` + // GuestFlush: Indication to perform a 'guest aware' snapshot. + GuestFlush bool `json:"guestFlush,omitempty"` - // StorageBytesStatus: [Output Only] An indicator whether storageBytes - // is in a stable state or it is being adjusted as a result of shared - // storage reallocation. This status can either be UPDATING, meaning the - // size of the snapshot is being updated, or UP_TO_DATE, meaning the - // size of the snapshot is up-to-date. - // - // Possible values: - // "UPDATING" - // "UP_TO_DATE" - StorageBytesStatus string `json:"storageBytesStatus,omitempty"` + // Labels: Labels to apply to scheduled snapshots. These can be later + // modified by the setLabels method. Label values may be empty. 
+ Labels map[string]string `json:"labels,omitempty"` - // Type: Specifies the type of the attached disk, either SCRATCH or - // PERSISTENT. - // - // Possible values: - // "PERSISTENT" - // "SCRATCH" - Type string `json:"type,omitempty"` + // StorageLocations: Cloud Storage bucket storage location of the auto + // snapshot (regional or multi-regional). + StorageLocations []string `json:"storageLocations,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoDelete") to + // ForceSendFields is a list of field names (e.g. "ChainName") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39935,7 +40597,7 @@ type SavedAttachedDisk struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoDelete") to include in + // NullFields is a list of field names (e.g. "ChainName") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -39944,38 +40606,66 @@ type SavedAttachedDisk struct { NullFields []string `json:"-"` } -func (s *SavedAttachedDisk) MarshalJSON() ([]byte, error) { - type NoMethod SavedAttachedDisk +func (s *ResourcePolicySnapshotSchedulePolicySnapshotProperties) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicySnapshotSchedulePolicySnapshotProperties raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SavedDisk: An instance-attached disk resource. -type SavedDisk struct { - // Kind: [Output Only] Type of the resource. Always compute#savedDisk - // for attached disks. - Kind string `json:"kind,omitempty"` +// ResourcePolicyWeeklyCycle: Time window specified for weekly +// operations. +type ResourcePolicyWeeklyCycle struct { + // DayOfWeeks: Up to 7 intervals/windows, one for each day of the week. + DayOfWeeks []*ResourcePolicyWeeklyCycleDayOfWeek `json:"dayOfWeeks,omitempty"` - // SourceDisk: Specifies a URL of the disk attached to the source - // instance. - SourceDisk string `json:"sourceDisk,omitempty"` + // ForceSendFields is a list of field names (e.g. "DayOfWeeks") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // StorageBytes: [Output Only] Size of the individual disk snapshot used - // by this machine image. - StorageBytes int64 `json:"storageBytes,omitempty,string"` + // NullFields is a list of field names (e.g. "DayOfWeeks") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} - // StorageBytesStatus: [Output Only] An indicator whether storageBytes - // is in a stable state or it is being adjusted as a result of shared - // storage reallocation. This status can either be UPDATING, meaning the - // size of the snapshot is being updated, or UP_TO_DATE, meaning the - // size of the snapshot is up-to-date. +func (s *ResourcePolicyWeeklyCycle) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyWeeklyCycle + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ResourcePolicyWeeklyCycleDayOfWeek struct { + // Day: Defines a schedule that runs on specific days of the week. + // Specify one or more days. The following options are available: + // MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY. // // Possible values: - // "UPDATING" - // "UP_TO_DATE" - StorageBytesStatus string `json:"storageBytesStatus,omitempty"` + // "FRIDAY" + // "INVALID" + // "MONDAY" + // "SATURDAY" + // "SUNDAY" + // "THURSDAY" + // "TUESDAY" + // "WEDNESDAY" + Day string `json:"day,omitempty"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // Duration: [Output only] Duration of the time window, automatically + // chosen to be smallest possible in the given scenario. + Duration string `json:"duration,omitempty"` + + // StartTime: Time within the window to start the operations. It must be + // in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. + StartTime string `json:"startTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Day") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -39983,7 +40673,7 @@ type SavedDisk struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API + // NullFields is a list of field names (e.g. "Day") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -39992,37 +40682,21 @@ type SavedDisk struct { NullFields []string `json:"-"` } -func (s *SavedDisk) MarshalJSON() ([]byte, error) { - type NoMethod SavedDisk +func (s *ResourcePolicyWeeklyCycleDayOfWeek) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyWeeklyCycleDayOfWeek raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ScalingScheduleStatus struct { - // LastStartTime: [Output Only] The last time the scaling schedule - // became active. Note: this is a timestamp when a schedule actually - // became active, not when it was planned to do so. The timestamp is in - // RFC3339 text format. - LastStartTime string `json:"lastStartTime,omitempty"` - - // NextStartTime: [Output Only] The next time the scaling schedule is to - // become active. Note: this is a timestamp when a schedule is planned - // to run, but the actual time might be slightly different. The - // timestamp is in RFC3339 text format. - NextStartTime string `json:"nextStartTime,omitempty"` - - // State: [Output Only] The current state of a scaling schedule. - // - // Possible values: - // "ACTIVE" - The current autoscaling recommendation is influenced by - // this scaling schedule. 
- // "DISABLED" - This scaling schedule has been disabled by the user. - // "OBSOLETE" - This scaling schedule will never become active again. - // "READY" - The current autoscaling recommendation is not influenced - // by this scaling schedule. - State string `json:"state,omitempty"` +// ResourceStatus: Contains output only fields. Use this sub-message for +// actual values set on Instance attributes as compared to the value +// requested by the user (intent) in their instance CRUD calls. +type ResourceStatus struct { + // PhysicalHost: [Output Only] An opaque ID of the host on which the VM + // is running. + PhysicalHost string `json:"physicalHost,omitempty"` - // ForceSendFields is a list of field names (e.g. "LastStartTime") to + // ForceSendFields is a list of field names (e.g. "PhysicalHost") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -40030,7 +40704,7 @@ type ScalingScheduleStatus struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LastStartTime") to include + // NullFields is a list of field names (e.g. "PhysicalHost") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -40039,77 +40713,139 @@ type ScalingScheduleStatus struct { NullFields []string `json:"-"` } -func (s *ScalingScheduleStatus) MarshalJSON() ([]byte, error) { - type NoMethod ScalingScheduleStatus +func (s *ResourceStatus) MarshalJSON() ([]byte, error) { + type NoMethod ResourceStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Scheduling: Sets the scheduling options for an Instance. NextID: 21 -type Scheduling struct { - // AutomaticRestart: Specifies whether the instance should be - // automatically restarted if it is terminated by Compute Engine (not - // terminated by a user). You can only set the automatic restart option - // for standard instances. Preemptible instances cannot be automatically - // restarted. By default, this is set to true so an instance is - // automatically restarted if it is terminated by Compute Engine. - AutomaticRestart *bool `json:"automaticRestart,omitempty"` +// Route: Represents a Route resource. A route defines a path from VM +// instances in the VPC network to a specific destination. This +// destination can be inside or outside the VPC network. For more +// information, read the Routes overview. +type Route struct { + // AsPaths: [Output Only] AS path. + AsPaths []*RouteAsPath `json:"asPaths,omitempty"` - // InstanceTerminationAction: Specifies the termination action for the - // instance. - // - // Possible values: - // "DELETE" - Delete the VM. - // "INSTANCE_TERMINATION_ACTION_UNSPECIFIED" - Default value. This - // value is unused. - // "STOP" - Stop the VM without storing in-memory content. default - // action. - InstanceTerminationAction string `json:"instanceTerminationAction,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // LocationHint: An opaque location hint used to place the instance - // close to other resources. 
This field is for use by internal tools - // that use the public API. - LocationHint string `json:"locationHint,omitempty"` + // Description: An optional description of this resource. Provide this + // field when you create the resource. + Description string `json:"description,omitempty"` - // MinNodeCpus: The minimum number of virtual CPUs this instance will - // consume when running on a sole-tenant node. - MinNodeCpus int64 `json:"minNodeCpus,omitempty"` + // DestRange: The destination range of outgoing packets that this route + // applies to. Both IPv4 and IPv6 are supported. + DestRange string `json:"destRange,omitempty"` - // NodeAffinities: A set of node affinity and anti-affinity - // configurations. Refer to Configuring node affinity for more - // information. Overrides reservationAffinity. - NodeAffinities []*SchedulingNodeAffinity `json:"nodeAffinities,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` - // OnHostMaintenance: Defines the maintenance behavior for this - // instance. For standard instances, the default behavior is MIGRATE. - // For preemptible instances, the default and only possible behavior is - // TERMINATE. For more information, see Set VM availability policies. + // Kind: [Output Only] Type of this resource. Always compute#routes for + // Route resources. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first + // character must be a lowercase letter, and all following characters + // (except for the last character) must be a dash, lowercase letter, or + // digit. The last character must be a lowercase letter or digit. + Name string `json:"name,omitempty"` + + // Network: Fully-qualified URL of the network that this route applies + // to. + Network string `json:"network,omitempty"` + + // NextHopGateway: The URL to a gateway that should handle matching + // packets. You can only specify the internet gateway using a full or + // partial valid URL: projects/ + // project/global/gateways/default-internet-gateway + NextHopGateway string `json:"nextHopGateway,omitempty"` + + // NextHopIlb: The URL to a forwarding rule of type + // loadBalancingScheme=INTERNAL that should handle matching packets or + // the IP address of the forwarding Rule. For example, the following are + // all valid URLs: - 10.128.0.56 - + // https://www.googleapis.com/compute/v1/projects/project/regions/region + // /forwardingRules/forwardingRule - + // regions/region/forwardingRules/forwardingRule + NextHopIlb string `json:"nextHopIlb,omitempty"` + + // NextHopInstance: The URL to an instance that should handle matching + // packets. You can specify this as a full or partial URL. For example: + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/ + NextHopInstance string `json:"nextHopInstance,omitempty"` + + // NextHopIp: The network IP address of an instance that should handle + // matching packets. Only IPv4 is supported. + NextHopIp string `json:"nextHopIp,omitempty"` + + // NextHopNetwork: The URL of the local network if it should handle + // matching packets. 
+ NextHopNetwork string `json:"nextHopNetwork,omitempty"` + + // NextHopPeering: [Output Only] The network peering name that should + // handle matching packets, which should conform to RFC1035. + NextHopPeering string `json:"nextHopPeering,omitempty"` + + // NextHopVpnTunnel: The URL to a VpnTunnel that should handle matching + // packets. + NextHopVpnTunnel string `json:"nextHopVpnTunnel,omitempty"` + + // Priority: The priority of this route. Priority is used to break ties + // in cases where there is more than one matching route of equal prefix + // length. In cases where multiple routes have equal prefix length, the + // one with the lowest-numbered priority value wins. The default value + // is `1000`. The priority value must be from `0` to `65535`, inclusive. + Priority int64 `json:"priority,omitempty"` + + // RouteStatus: [Output only] The status of the route. // // Possible values: - // "MIGRATE" - *[Default]* Allows Compute Engine to automatically - // migrate instances out of the way of maintenance events. - // "TERMINATE" - Tells Compute Engine to terminate and (optionally) - // restart the instance away from the maintenance activity. If you would - // like your instance to be restarted, set the automaticRestart flag to - // true. Your instance may be restarted more than once, and it may be - // restarted outside the window of maintenance events. - OnHostMaintenance string `json:"onHostMaintenance,omitempty"` - - // Preemptible: Defines whether the instance is preemptible. This can - // only be set during instance creation or while the instance is stopped - // and therefore, in a `TERMINATED` state. See Instance Life Cycle for - // more information on the possible instance states. - Preemptible bool `json:"preemptible,omitempty"` + // "ACTIVE" - This route is processed and active. + // "DROPPED" - The route is dropped due to the VPC exceeding the + // dynamic route limit. For dynamic route limit, please refer to the + // Learned route example + // "INACTIVE" - This route is processed but inactive due to failure + // from the backend. The backend may have rejected the route + // "PENDING" - This route is being processed internally. The status + // will change once processed. + RouteStatus string `json:"routeStatus,omitempty"` - // ProvisioningModel: Specifies the provisioning model of the instance. + // RouteType: [Output Only] The type of this route, which can be one of + // the following values: - 'TRANSIT' for a transit route that this + // router learned from another Cloud Router and will readvertise to one + // of its BGP peers - 'SUBNET' for a route from a subnet of the VPC - + // 'BGP' for a route learned from a BGP peer of this router - 'STATIC' + // for a static route // // Possible values: - // "SPOT" - Heavily discounted, no guaranteed runtime. - // "STANDARD" - Standard provisioning with user controlled runtime, no - // discounts. - ProvisioningModel string `json:"provisioningModel,omitempty"` + // "BGP" + // "STATIC" + // "SUBNET" + // "TRANSIT" + RouteType string `json:"routeType,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. + SelfLink string `json:"selfLink,omitempty"` + + // Tags: A list of instance tags to which this route applies. + Tags []string `json:"tags,omitempty"` + + // Warnings: [Output Only] If potential misconfigurations are detected + // for this route, this field will be populated with warning messages. 
+ Warnings []*RouteWarnings `json:"warnings,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AsPaths") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -40117,39 +40853,131 @@ type Scheduling struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutomaticRestart") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "AsPaths") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Scheduling) MarshalJSON() ([]byte, error) { - type NoMethod Scheduling +func (s *Route) MarshalJSON() ([]byte, error) { + type NoMethod Route raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SchedulingNodeAffinity: Node Affinity: the configuration of desired -// nodes onto which this Instance could be scheduled. -type SchedulingNodeAffinity struct { - // Key: Corresponds to the label key of Node resource. - Key string `json:"key,omitempty"` - - // Operator: Defines the operation of node selection. Valid operators - // are IN for affinity and NOT_IN for anti-affinity. +type RouteWarnings struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. // // Possible values: - // "IN" - Requires Compute Engine to seek for matched nodes. - // "NOT_IN" - Requires Compute Engine to avoid certain nodes. - // "OPERATOR_UNSPECIFIED" - Operator string `json:"operator,omitempty"` + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` - // Values: Corresponds to the label values of Node resource. - Values []string `json:"values,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*RouteWarningsData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouteWarnings) MarshalJSON() ([]byte, error) { + type NoMethod RouteWarnings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RouteWarningsData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with @@ -40168,26 +40996,86 @@ type SchedulingNodeAffinity struct { NullFields []string `json:"-"` } -func (s *SchedulingNodeAffinity) MarshalJSON() ([]byte, error) { - type NoMethod SchedulingNodeAffinity +func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { + type NoMethod RouteWarningsData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Screenshot: An instance's screenshot. -type Screenshot struct { - // Contents: [Output Only] The Base64-encoded screenshot data. - Contents string `json:"contents,omitempty"` +type RouteAsPath struct { + // AsLists: [Output Only] The AS numbers of the AS Path. + AsLists []int64 `json:"asLists,omitempty"` - // Kind: [Output Only] Type of the resource. Always compute#screenshot - // for the screenshots. + // PathSegmentType: [Output Only] The type of the AS Path, which can be + // one of the following values: - 'AS_SET': unordered set of autonomous + // systems that the route in has traversed - 'AS_SEQUENCE': ordered set + // of autonomous systems that the route has traversed - + // 'AS_CONFED_SEQUENCE': ordered set of Member Autonomous Systems in the + // local confederation that the route has traversed - 'AS_CONFED_SET': + // unordered set of Member Autonomous Systems in the local confederation + // that the route has traversed + // + // Possible values: + // "AS_CONFED_SEQUENCE" + // "AS_CONFED_SET" + // "AS_SEQUENCE" + // "AS_SET" + PathSegmentType string `json:"pathSegmentType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AsLists") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AsLists") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RouteAsPath) MarshalJSON() ([]byte, error) { + type NoMethod RouteAsPath + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RouteList: Contains a list of Route resources. +type RouteList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Route resources. + Items []*Route `json:"items,omitempty"` + + // Kind: Type of resource. Kind string `json:"kind,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *RouteListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Contents") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -40195,8 +41083,8 @@ type Screenshot struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Contents") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -40204,48 +41092,125 @@ type Screenshot struct { NullFields []string `json:"-"` } -func (s *Screenshot) MarshalJSON() ([]byte, error) { - type NoMethod Screenshot +func (s *RouteList) MarshalJSON() ([]byte, error) { + type NoMethod RouteList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPoliciesListPreconfiguredExpressionSetsResponse struct { - PreconfiguredExpressionSets *SecurityPoliciesWafConfig `json:"preconfiguredExpressionSets,omitempty"` +// RouteListWarning: [Output Only] Informational warning message. +type RouteListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. 
+ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*RouteListWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "PreconfiguredExpressionSets") to unconditionally include in API - // requests. By default, fields with empty or default values are omitted - // from API requests. However, any non-pointer, non-interface field - // appearing in ForceSendFields will be sent to the server regardless of - // whether the field is empty or not. This may be used to include empty - // fields in Patch requests. + // Message: [Output Only] A human-readable description of the warning + // code. 
+ Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. - // "PreconfiguredExpressionSets") to include in API requests with the - // JSON null value. By default, fields with empty values are omitted - // from API requests. However, any field with an empty value appearing - // in NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SecurityPoliciesListPreconfiguredExpressionSetsResponse) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPoliciesListPreconfiguredExpressionSetsResponse +func (s *RouteListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RouteListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPoliciesWafConfig struct { - WafRules *PreconfiguredWafSet `json:"wafRules,omitempty"` +type RouteListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ForceSendFields is a list of field names (e.g. "WafRules") to + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -40253,8 +41218,8 @@ type SecurityPoliciesWafConfig struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "WafRules") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. 
It is an error if a field in this list has a non-empty value. @@ -40262,20 +41227,23 @@ type SecurityPoliciesWafConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPoliciesWafConfig) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPoliciesWafConfig +func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RouteListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SecurityPolicy: Represents a Google Cloud Armor security policy -// resource. Only external backend services that use load balancers can -// reference a security policy. For more information, see Google Cloud -// Armor security policy overview. -type SecurityPolicy struct { - AdaptiveProtectionConfig *SecurityPolicyAdaptiveProtectionConfig `json:"adaptiveProtectionConfig,omitempty"` +// Router: Represents a Cloud Router resource. For more information +// about Cloud Router, read the Cloud Router overview. +type Router struct { + // Bgp: BGP information specific to this router. + Bgp *RouterBgp `json:"bgp,omitempty"` - AdvancedOptionsConfig *SecurityPolicyAdvancedOptionsConfig `json:"advancedOptionsConfig,omitempty"` + // BgpPeers: BGP information that must be configured into the routing + // stack to establish BGP peering. This information must specify the + // peer ASN and either the interface name, IP address, or peer IP + // address. Please refer to RFC4273. + BgpPeers []*RouterBgpPeer `json:"bgpPeers,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -40285,24 +41253,26 @@ type SecurityPolicy struct { // property when you create the resource. Description string `json:"description,omitempty"` - // Fingerprint: Specifies a fingerprint for this resource, which is - // essentially a hash of the metadata's contents and used for optimistic - // locking. The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update metadata. You must - // always provide an up-to-date fingerprint hash in order to update or - // change metadata, otherwise the request will fail with error 412 - // conditionNotMet. To see the latest fingerprint, make get() request to - // the security policy. - Fingerprint string `json:"fingerprint,omitempty"` + // EncryptedInterconnectRouter: Indicates if a router is dedicated for + // use with encrypted VLAN attachments (interconnectAttachments). + EncryptedInterconnectRouter bool `json:"encryptedInterconnectRouter,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output only] Type of the resource. Always - // compute#securityPolicyfor security policies + // Interfaces: Router interfaces. Each interface requires either one + // linked resource, (for example, linkedVpnTunnel), or IP address and IP + // address range (for example, ipRange), or both. + Interfaces []*RouterInterface `json:"interfaces,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#router for + // routers. Kind string `json:"kind,omitempty"` + // Md5AuthenticationKeys: Keys used for MD5 authentication. + Md5AuthenticationKeys []*RouterMd5AuthenticationKey `json:"md5AuthenticationKeys,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. 
Specifically, the name must be 1-63 characters long and @@ -40312,107 +41282,25 @@ type SecurityPolicy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - RecaptchaOptionsConfig *SecurityPolicyRecaptchaOptionsConfig `json:"recaptchaOptionsConfig,omitempty"` + // Nats: A list of NAT services created in this router. + Nats []*RouterNat `json:"nats,omitempty"` - // Rules: A list of rules that belong to this policy. There must always - // be a default rule (rule with priority 2147483647 and match "*"). If - // no rules are provided when creating a security policy, a default rule - // with action "allow" will be added. - Rules []*SecurityPolicyRule `json:"rules,omitempty"` + // Network: URI of the network to which this router belongs. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URI of the region where the router resides. You + // must specify this field as part of the HTTP request URL. It is not + // settable as a field in the request body. + Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Type: The type indicates the intended use of the security policy. - // CLOUD_ARMOR - Cloud Armor backend security policies can be configured - // to filter incoming HTTP requests targeting backend services. They - // filter requests before they hit the origin servers. CLOUD_ARMOR_EDGE - // - Cloud Armor edge security policies can be configured to filter - // incoming HTTP requests targeting backend services (including Cloud - // CDN-enabled) as well as backend buckets (Cloud Storage). They filter - // requests before the request is served from Google's cache. - // - // Possible values: - // "CLOUD_ARMOR" - // "CLOUD_ARMOR_EDGE" - Type string `json:"type,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. - // "AdaptiveProtectionConfig") to unconditionally include in API - // requests. By default, fields with empty or default values are omitted - // from API requests. However, any non-pointer, non-interface field - // appearing in ForceSendFields will be sent to the server regardless of - // whether the field is empty or not. This may be used to include empty - // fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AdaptiveProtectionConfig") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *SecurityPolicy) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SecurityPolicyAdaptiveProtectionConfig: Configuration options for -// Cloud Armor Adaptive Protection (CAAP). -type SecurityPolicyAdaptiveProtectionConfig struct { - // Layer7DdosDefenseConfig: If set to true, enables Cloud Armor Machine - // Learning. 
- Layer7DdosDefenseConfig *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig `json:"layer7DdosDefenseConfig,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "Layer7DdosDefenseConfig") to unconditionally include in API - // requests. By default, fields with empty or default values are omitted - // from API requests. However, any non-pointer, non-interface field - // appearing in ForceSendFields will be sent to the server regardless of - // whether the field is empty or not. This may be used to include empty - // fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Layer7DdosDefenseConfig") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *SecurityPolicyAdaptiveProtectionConfig) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyAdaptiveProtectionConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig: -// Configuration options for L7 DDoS detection. -type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { - // Enable: If set to true, enables CAAP for L7 DDoS detection. - Enable bool `json:"enable,omitempty"` - - // RuleVisibility: Rule visibility can be one of the following: STANDARD - // - opaque rules. (default) PREMIUM - transparent rules. - // - // Possible values: - // "PREMIUM" - // "STANDARD" - RuleVisibility string `json:"ruleVisibility,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Enable") to + // ForceSendFields is a list of field names (e.g. "Bgp") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -40420,7 +41308,7 @@ type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Enable") to include in API + // NullFields is a list of field names (e.g. "Bgp") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -40429,24 +41317,23 @@ type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig +func (s *Router) MarshalJSON() ([]byte, error) { + type NoMethod Router raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPolicyAdvancedOptionsConfig struct { - // Possible values: - // "DISABLED" - // "STANDARD" - JsonParsing string `json:"jsonParsing,omitempty"` +// RouterAdvertisedIpRange: Description-tagged IP ranges for the router +// to advertise. 
+type RouterAdvertisedIpRange struct { + // Description: User-specified description for the IP range. + Description string `json:"description,omitempty"` - // Possible values: - // "NORMAL" - // "VERBOSE" - LogLevel string `json:"logLevel,omitempty"` + // Range: The IP range to advertise. The value must be a CIDR-formatted + // string. + Range string `json:"range,omitempty"` - // ForceSendFields is a list of field names (e.g. "JsonParsing") to + // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -40454,7 +41341,7 @@ type SecurityPolicyAdvancedOptionsConfig struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "JsonParsing") to include + // NullFields is a list of field names (e.g. "Description") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -40463,22 +41350,22 @@ type SecurityPolicyAdvancedOptionsConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyAdvancedOptionsConfig) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyAdvancedOptionsConfig +func (s *RouterAdvertisedIpRange) MarshalJSON() ([]byte, error) { + type NoMethod RouterAdvertisedIpRange raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPolicyList struct { +// RouterAggregatedList: Contains a list of routers. +type RouterAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of SecurityPolicy resources. - Items []*SecurityPolicy `json:"items,omitempty"` + // Items: A list of Router resources. + Items map[string]RoutersScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#securityPolicyList for listsof securityPolicies + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -40489,8 +41376,14 @@ type SecurityPolicyList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *SecurityPolicyListWarning `json:"warning,omitempty"` + Warning *RouterAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -40513,15 +41406,15 @@ type SecurityPolicyList struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyList) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyList +func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod RouterAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SecurityPolicyListWarning: [Output Only] Informational warning +// RouterAggregatedListWarning: [Output Only] Informational warning // message. 
-type SecurityPolicyListWarning struct { +type RouterAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -40543,6 +41436,9 @@ type SecurityPolicyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -40550,6 +41446,9 @@ type SecurityPolicyListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -40583,7 +41482,7 @@ type SecurityPolicyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*SecurityPolicyListWarningData `json:"data,omitempty"` + Data []*RouterAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -40606,13 +41505,13 @@ type SecurityPolicyListWarning struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyListWarning +func (s *RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RouterAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPolicyListWarningData struct { +type RouterAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -40643,49 +41542,57 @@ type SecurityPolicyListWarningData struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyListWarningData +func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RouterAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPolicyRecaptchaOptionsConfig struct { - // RedirectSiteKey: An optional field to supply a reCAPTCHA site key to - // be used for all the rules using the redirect action with the type of - // GOOGLE_RECAPTCHA under the security policy. The specified site key - // needs to be created from the reCAPTCHA API. The user is responsible - // for the validity of the specified site key. If not specified, a - // Google-managed site key is used. 
- RedirectSiteKey string `json:"redirectSiteKey,omitempty"` +type RouterBgp struct { + // AdvertiseMode: User-specified flag to indicate which mode to use for + // advertisement. The options are DEFAULT or CUSTOM. + // + // Possible values: + // "CUSTOM" + // "DEFAULT" + AdvertiseMode string `json:"advertiseMode,omitempty"` - // ForceSendFields is a list of field names (e.g. "RedirectSiteKey") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // AdvertisedGroups: User-specified list of prefix groups to advertise + // in custom mode. This field can only be populated if advertise_mode is + // CUSTOM and is advertised to all peers of the router. These groups + // will be advertised in addition to any specified prefixes. Leave this + // field blank to advertise no custom groups. + // + // Possible values: + // "ALL_SUBNETS" - Advertise all available subnets (including peer VPC + // subnets). + AdvertisedGroups []string `json:"advertisedGroups,omitempty"` - // NullFields is a list of field names (e.g. "RedirectSiteKey") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} + // AdvertisedIpRanges: User-specified list of individual IP ranges to + // advertise in custom mode. This field can only be populated if + // advertise_mode is CUSTOM and is advertised to all peers of the + // router. These IP ranges will be advertised in addition to any + // specified groups. Leave this field blank to advertise no custom IP + // ranges. + AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` -func (s *SecurityPolicyRecaptchaOptionsConfig) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRecaptchaOptionsConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 + // private ASN, either 16-bit or 32-bit. The value will be fixed for + // this router resource. All VPN tunnels that link to this router will + // have the same local ASN. + Asn int64 `json:"asn,omitempty"` -type SecurityPolicyReference struct { - SecurityPolicy string `json:"securityPolicy,omitempty"` + // KeepaliveInterval: The interval in seconds between BGP keepalive + // messages that are sent to the peer. Hold time is three times the + // interval at which keepalive messages are sent, and the hold time is + // the maximum number of seconds allowed to elapse between successive + // keepalive messages that BGP receives from a peer. BGP will use the + // smaller of either the local hold time value or the peer's hold time + // value as the hold time for the BGP connection between the two peers. + // If set, this value must be between 20 and 60. The default is 20. + KeepaliveInterval int64 `json:"keepaliveInterval,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"SecurityPolicy") to + // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -40693,213 +41600,140 @@ type SecurityPolicyReference struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SecurityPolicy") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "AdvertiseMode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyReference +func (s *RouterBgp) MarshalJSON() ([]byte, error) { + type NoMethod RouterBgp raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SecurityPolicyRule: Represents a rule that describes one or more -// match conditions along with the action to be taken when traffic -// matches this condition (allow or deny). -type SecurityPolicyRule struct { - // Action: The Action to perform when the rule is matched. The following - // are the valid actions: - allow: allow access to target. - deny(): - // deny access to target, returns the HTTP response code specified - // (valid values are 403, 404, and 502). - rate_based_ban: limit client - // traffic to the configured threshold and ban the client if the traffic - // exceeds the threshold. Configure parameters for this action in - // RateLimitOptions. Requires rate_limit_options to be set. - redirect: - // redirect to a different target. This can either be an internal - // reCAPTCHA redirect, or an external URL-based redirect via a 302 - // response. Parameters for this action can be configured via - // redirectOptions. - throttle: limit client traffic to the configured - // threshold. Configure parameters for this action in rateLimitOptions. - // Requires rate_limit_options to be set for this. - Action string `json:"action,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // HeaderAction: Optional, additional actions that are performed on - // headers. - HeaderAction *SecurityPolicyRuleHttpHeaderAction `json:"headerAction,omitempty"` +type RouterBgpPeer struct { + // AdvertiseMode: User-specified flag to indicate which mode to use for + // advertisement. + // + // Possible values: + // "CUSTOM" + // "DEFAULT" + AdvertiseMode string `json:"advertiseMode,omitempty"` - // Kind: [Output only] Type of the resource. 
Always - // compute#securityPolicyRule for security policy rules - Kind string `json:"kind,omitempty"` + // AdvertisedGroups: User-specified list of prefix groups to advertise + // in custom mode, which can take one of the following options: - + // ALL_SUBNETS: Advertises all available subnets, including peer VPC + // subnets. - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. + // Note that this field can only be populated if advertise_mode is + // CUSTOM and overrides the list defined for the router (in the "bgp" + // message). These groups are advertised in addition to any specified + // prefixes. Leave this field blank to advertise no custom groups. + // + // Possible values: + // "ALL_SUBNETS" - Advertise all available subnets (including peer VPC + // subnets). + AdvertisedGroups []string `json:"advertisedGroups,omitempty"` - // Match: A match condition that incoming traffic is evaluated against. - // If it evaluates to true, the corresponding 'action' is enforced. - Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` - - // Preview: If set to true, the specified action is not enforced. - Preview bool `json:"preview,omitempty"` - - // Priority: An integer indicating the priority of a rule in the list. - // The priority must be a positive value between 0 and 2147483647. Rules - // are evaluated from highest to lowest priority where 0 is the highest - // priority and 2147483647 is the lowest priority. - Priority int64 `json:"priority,omitempty"` - - // RateLimitOptions: Must be specified if the action is "rate_based_ban" - // or "throttle". Cannot be specified for any other actions. - RateLimitOptions *SecurityPolicyRuleRateLimitOptions `json:"rateLimitOptions,omitempty"` - - // RedirectOptions: Parameters defining the redirect action. Cannot be - // specified for any other actions. - RedirectOptions *SecurityPolicyRuleRedirectOptions `json:"redirectOptions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Action") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Action") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SecurityPolicyRule) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRule - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SecurityPolicyRuleHttpHeaderAction struct { - // RequestHeadersToAdds: The list of request headers to add or overwrite - // if they're already present. - RequestHeadersToAdds []*SecurityPolicyRuleHttpHeaderActionHttpHeaderOption `json:"requestHeadersToAdds,omitempty"` - - // ForceSendFields is a list of field names (e.g. 
- // "RequestHeadersToAdds") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "RequestHeadersToAdds") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *SecurityPolicyRuleHttpHeaderAction) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRuleHttpHeaderAction - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // AdvertisedIpRanges: User-specified list of individual IP ranges to + // advertise in custom mode. This field can only be populated if + // advertise_mode is CUSTOM and overrides the list defined for the + // router (in the "bgp" message). These IP ranges are advertised in + // addition to any specified groups. Leave this field blank to advertise + // no custom IP ranges. + AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` -type SecurityPolicyRuleHttpHeaderActionHttpHeaderOption struct { - // HeaderName: The name of the header to set. - HeaderName string `json:"headerName,omitempty"` + // AdvertisedRoutePriority: The priority of routes advertised to this + // BGP peer. Where there is more than one matching route of maximum + // length, the routes with the lowest priority value win. + AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` - // HeaderValue: The value to set the named header to. - HeaderValue string `json:"headerValue,omitempty"` + // Bfd: BFD configuration for the BGP peering. + Bfd *RouterBgpPeerBfd `json:"bfd,omitempty"` - // ForceSendFields is a list of field names (e.g. "HeaderName") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Enable: The status of the BGP peer connection. If set to FALSE, any + // active session with the peer is terminated and all associated routing + // information is removed. If set to TRUE, the peer connection can be + // established with routing information. The default is TRUE. + // + // Possible values: + // "FALSE" + // "TRUE" + Enable string `json:"enable,omitempty"` - // NullFields is a list of field names (e.g. "HeaderName") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} + // EnableIpv6: Enable IPv6 traffic over BGP Peer. If not specified, it + // is disabled by default. + EnableIpv6 bool `json:"enableIpv6,omitempty"` -func (s *SecurityPolicyRuleHttpHeaderActionHttpHeaderOption) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRuleHttpHeaderActionHttpHeaderOption - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // InterfaceName: Name of the interface the BGP peer is associated with. + InterfaceName string `json:"interfaceName,omitempty"` -// SecurityPolicyRuleMatcher: Represents a match condition that incoming -// traffic is evaluated against. Exactly one field must be specified. -type SecurityPolicyRuleMatcher struct { - // Config: The configuration options available when specifying - // versioned_expr. This field must be specified if versioned_expr is - // specified and cannot be specified if versioned_expr is not specified. - Config *SecurityPolicyRuleMatcherConfig `json:"config,omitempty"` + // IpAddress: IP address of the interface inside Google Cloud Platform. + // Only IPv4 is supported. + IpAddress string `json:"ipAddress,omitempty"` - // Expr: User defined CEVAL expression. A CEVAL expression is used to - // specify match criteria such as origin.ip, source.region_code and - // contents in the request header. - Expr *Expr `json:"expr,omitempty"` + // Ipv6NexthopAddress: IPv6 address of the interface inside Google Cloud + // Platform. + Ipv6NexthopAddress string `json:"ipv6NexthopAddress,omitempty"` - // VersionedExpr: Preconfigured versioned expression. If this field is - // specified, config must also be specified. Available preconfigured - // expressions along with their requirements are: SRC_IPS_V1 - must - // specify the corresponding src_ip_range field in config. + // ManagementType: [Output Only] The resource that configures and + // manages this BGP peer. - MANAGED_BY_USER is the default value and can + // be managed by you or other users - MANAGED_BY_ATTACHMENT is a BGP + // peer that is configured and managed by Cloud Interconnect, + // specifically by an InterconnectAttachment of type PARTNER. Google + // automatically creates, updates, and deletes this type of BGP peer + // when the PARTNER InterconnectAttachment is created, updated, or + // deleted. // // Possible values: - // "SRC_IPS_V1" - Matches the source IP address of a request to the IP - // ranges supplied in config. - VersionedExpr string `json:"versionedExpr,omitempty"` + // "MANAGED_BY_ATTACHMENT" - The BGP peer is automatically created for + // PARTNER type InterconnectAttachment; Google will automatically + // create/delete this BGP peer when the PARTNER InterconnectAttachment + // is created/deleted, and Google will update the ipAddress and + // peerIpAddress when the PARTNER InterconnectAttachment is provisioned. + // This type of BGP peer cannot be created or deleted, but can be + // modified for all fields except for name, ipAddress and peerIpAddress. + // "MANAGED_BY_USER" - Default value, the BGP peer is manually created + // and managed by user. + ManagementType string `json:"managementType,omitempty"` - // ForceSendFields is a list of field names (e.g. "Config") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. 
- // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Md5AuthenticationKeyName: Present if MD5 authentication is enabled + // for the peering. Must be the name of one of the entries in the + // Router.md5_authentication_keys. The field must comply with RFC1035. + Md5AuthenticationKeyName string `json:"md5AuthenticationKeyName,omitempty"` - // NullFields is a list of field names (e.g. "Config") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // Name: Name of this BGP peer. The name must be 1-63 characters long, + // and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` -func (s *SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRuleMatcher - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // PeerAsn: Peer BGP Autonomous System Number (ASN). Each BGP interface + // may use a different value. + PeerAsn int64 `json:"peerAsn,omitempty"` -type SecurityPolicyRuleMatcherConfig struct { - // SrcIpRanges: CIDR IP address range. Maximum number of src_ip_ranges - // allowed is 10. - SrcIpRanges []string `json:"srcIpRanges,omitempty"` + // PeerIpAddress: IP address of the BGP interface outside Google Cloud + // Platform. Only IPv4 is supported. + PeerIpAddress string `json:"peerIpAddress,omitempty"` - // ForceSendFields is a list of field names (e.g. "SrcIpRanges") to + // PeerIpv6NexthopAddress: IPv6 address of the BGP interface outside + // Google Cloud Platform. + PeerIpv6NexthopAddress string `json:"peerIpv6NexthopAddress,omitempty"` + + // RouterApplianceInstance: URI of the VM instance that is used as + // third-party router appliances such as Next Gen Firewalls, Virtual + // Routers, or Router Appliances. The VM instance must be located in + // zones contained in the same region as this Cloud Router. The VM + // instance is the peer side of the BGP session. + RouterApplianceInstance string `json:"routerApplianceInstance,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -40907,7 +41741,7 @@ type SecurityPolicyRuleMatcherConfig struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SrcIpRanges") to include + // NullFields is a list of field names (e.g. "AdvertiseMode") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. 
However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -40916,85 +41750,55 @@ type SecurityPolicyRuleMatcherConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRuleMatcherConfig +func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { + type NoMethod RouterBgpPeer raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPolicyRuleRateLimitOptions struct { - // BanDurationSec: Can only be specified if the action for the rule is - // "rate_based_ban". If specified, determines the time (in seconds) the - // traffic will continue to be banned by the rate limit after the rate - // falls below the threshold. - BanDurationSec int64 `json:"banDurationSec,omitempty"` +type RouterBgpPeerBfd struct { + // MinReceiveInterval: The minimum interval, in milliseconds, between + // BFD control packets received from the peer router. The actual value + // is negotiated between the two routers and is equal to the greater of + // this value and the transmit interval of the other router. If set, + // this value must be between 1000 and 30000. The default is 1000. + MinReceiveInterval int64 `json:"minReceiveInterval,omitempty"` - // BanThreshold: Can only be specified if the action for the rule is - // "rate_based_ban". If specified, the key will be banned for the - // configured 'ban_duration_sec' when the number of requests that exceed - // the 'rate_limit_threshold' also exceed this 'ban_threshold'. - BanThreshold *SecurityPolicyRuleRateLimitOptionsThreshold `json:"banThreshold,omitempty"` + // MinTransmitInterval: The minimum interval, in milliseconds, between + // BFD control packets transmitted to the peer router. The actual value + // is negotiated between the two routers and is equal to the greater of + // this value and the corresponding receive interval of the other + // router. If set, this value must be between 1000 and 30000. The + // default is 1000. + MinTransmitInterval int64 `json:"minTransmitInterval,omitempty"` - // ConformAction: Action to take for requests that are under the - // configured rate limit threshold. Valid option is "allow" only. - ConformAction string `json:"conformAction,omitempty"` + // Multiplier: The number of consecutive BFD packets that must be missed + // before BFD declares that a peer is unavailable. If set, the value + // must be a value between 5 and 16. The default is 5. + Multiplier int64 `json:"multiplier,omitempty"` - // EnforceOnKey: Determines the key to enforce the rate_limit_threshold - // on. Possible values are: - ALL: A single rate limit threshold is - // applied to all the requests matching this rule. This is the default - // value if this field 'enforce_on_key' is not configured. - IP: The - // source IP address of the request is the key. Each IP has this limit - // enforced separately. - HTTP_HEADER: The value of the HTTP header - // whose name is configured under "enforce_on_key_name". The key value - // is truncated to the first 128 bytes of the header value. If no such - // header is present in the request, the key type defaults to ALL. - - // XFF_IP: The first IP address (i.e. the originating client IP address) - // specified in the list of IPs under X-Forwarded-For HTTP header. If no - // such header is present or the value is not a valid IP, the key type - // defaults to ALL. 
- HTTP_COOKIE: The value of the HTTP cookie whose - // name is configured under "enforce_on_key_name". The key value is - // truncated to the first 128 bytes of the cookie value. If no such - // cookie is present in the request, the key type defaults to ALL. + // SessionInitializationMode: The BFD session initialization mode for + // this BGP peer. If set to ACTIVE, the Cloud Router will initiate the + // BFD session for this BGP peer. If set to PASSIVE, the Cloud Router + // will wait for the peer router to initiate the BFD session for this + // BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The + // default is DISABLED. // // Possible values: - // "ALL" - // "HTTP_COOKIE" - // "HTTP_HEADER" - // "IP" - // "XFF_IP" - EnforceOnKey string `json:"enforceOnKey,omitempty"` - - // EnforceOnKeyName: Rate limit key name applicable only for the - // following key types: HTTP_HEADER -- Name of the HTTP header whose - // value is taken as the key value. HTTP_COOKIE -- Name of the HTTP - // cookie whose value is taken as the key value. - EnforceOnKeyName string `json:"enforceOnKeyName,omitempty"` - - // ExceedAction: Action to take for requests that are above the - // configured rate limit threshold, to either deny with a specified HTTP - // response code, or redirect to a different endpoint. Valid options are - // "deny()" where valid values for status are 403, 404, 429, and 502, - // and "redirect" where the redirect parameters come from - // exceed_redirect_options below. - ExceedAction string `json:"exceedAction,omitempty"` - - // ExceedRedirectOptions: Parameters defining the redirect action that - // is used as the exceed action. Cannot be specified if the exceed - // action is not redirect. - ExceedRedirectOptions *SecurityPolicyRuleRedirectOptions `json:"exceedRedirectOptions,omitempty"` - - // RateLimitThreshold: Threshold at which to begin ratelimiting. - RateLimitThreshold *SecurityPolicyRuleRateLimitOptionsThreshold `json:"rateLimitThreshold,omitempty"` + // "ACTIVE" + // "DISABLED" + // "PASSIVE" + SessionInitializationMode string `json:"sessionInitializationMode,omitempty"` - // ForceSendFields is a list of field names (e.g. "BanDurationSec") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "MinReceiveInterval") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BanDurationSec") to + // NullFields is a list of field names (e.g. "MinReceiveInterval") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -41004,160 +41808,88 @@ type SecurityPolicyRuleRateLimitOptions struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleRateLimitOptions) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRuleRateLimitOptions +func (s *RouterBgpPeerBfd) MarshalJSON() ([]byte, error) { + type NoMethod RouterBgpPeerBfd raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPolicyRuleRateLimitOptionsThreshold struct { - // Count: Number of HTTP(S) requests for calculating the threshold. - Count int64 `json:"count,omitempty"` - - // IntervalSec: Interval over which the threshold is computed. - IntervalSec int64 `json:"intervalSec,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Count") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Count") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} +type RouterInterface struct { + // IpRange: IP address and range of the interface. The IP range must be + // in the RFC3927 link-local IP address space. The value must be a + // CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not + // truncate the address as it represents the IP address of the + // interface. + IpRange string `json:"ipRange,omitempty"` -func (s *SecurityPolicyRuleRateLimitOptionsThreshold) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRuleRateLimitOptionsThreshold - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // LinkedInterconnectAttachment: URI of the linked Interconnect + // attachment. It must be in the same region as the router. Each + // interface can have one linked resource, which can be a VPN tunnel, an + // Interconnect attachment, or a virtual machine instance. + LinkedInterconnectAttachment string `json:"linkedInterconnectAttachment,omitempty"` -type SecurityPolicyRuleRedirectOptions struct { - // Target: Target for the redirect action. This is required if the type - // is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. - Target string `json:"target,omitempty"` + // LinkedVpnTunnel: URI of the linked VPN tunnel, which must be in the + // same region as the router. Each interface can have one linked + // resource, which can be a VPN tunnel, an Interconnect attachment, or a + // virtual machine instance. + LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` - // Type: Type of the redirect action. + // ManagementType: [Output Only] The resource that configures and + // manages this interface. - MANAGED_BY_USER is the default value and + // can be managed directly by users. 
- MANAGED_BY_ATTACHMENT is an + // interface that is configured and managed by Cloud Interconnect, + // specifically, by an InterconnectAttachment of type PARTNER. Google + // automatically creates, updates, and deletes this type of interface + // when the PARTNER InterconnectAttachment is created, updated, or + // deleted. // // Possible values: - // "EXTERNAL_302" - // "GOOGLE_RECAPTCHA" - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Target") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Target") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SecurityPolicyRuleRedirectOptions) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRuleRedirectOptions - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SecuritySettings: The authentication and authorization settings for a -// BackendService. -type SecuritySettings struct { - // ClientTlsPolicy: Optional. A URL referring to a - // networksecurity.ClientTlsPolicy resource that describes how clients - // should authenticate with this service's backends. clientTlsPolicy - // only applies to a global BackendService with the loadBalancingScheme - // set to INTERNAL_SELF_MANAGED. If left blank, communications are not - // encrypted. Note: This field currently has no impact. - ClientTlsPolicy string `json:"clientTlsPolicy,omitempty"` - - // SubjectAltNames: Optional. A list of Subject Alternative Names (SANs) - // that the client verifies during a mutual TLS handshake with an - // server/endpoint for this BackendService. When the server presents its - // X.509 certificate to the client, the client inspects the - // certificate's subjectAltName field. If the field contains one of the - // specified values, the communication continues. Otherwise, it fails. - // This additional check enables the client to verify that the server is - // authorized to run the requested service. Note that the contents of - // the server certificate's subjectAltName field are configured by the - // Public Key Infrastructure which provisions server identities. Only - // applies to a global BackendService with loadBalancingScheme set to - // INTERNAL_SELF_MANAGED. Only applies when BackendService has an - // attached clientTlsPolicy with clientCertificate (mTLS mode). Note: - // This field currently has no impact. - SubjectAltNames []string `json:"subjectAltNames,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ClientTlsPolicy") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. 
However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ClientTlsPolicy") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *SecuritySettings) MarshalJSON() ([]byte, error) { - type NoMethod SecuritySettings - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SerialPortOutput: An instance serial console output. -type SerialPortOutput struct { - // Contents: [Output Only] The contents of the console output. - Contents string `json:"contents,omitempty"` - - // Kind: [Output Only] Type of the resource. Always - // compute#serialPortOutput for serial port output. - Kind string `json:"kind,omitempty"` + // "MANAGED_BY_ATTACHMENT" - The interface is automatically created + // for PARTNER type InterconnectAttachment, Google will automatically + // create/update/delete this interface when the PARTNER + // InterconnectAttachment is created/provisioned/deleted. This type of + // interface cannot be manually managed by user. + // "MANAGED_BY_USER" - Default value, the interface is manually + // created and managed by user. + ManagementType string `json:"managementType,omitempty"` - // Next: [Output Only] The position of the next byte of content, - // regardless of whether the content exists, following the output - // returned in the `contents` property. Use this value in the next - // request as the start parameter. - Next int64 `json:"next,omitempty,string"` + // Name: Name of this interface entry. The name must be 1-63 characters + // long, and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // PrivateIpAddress: The regional private internal IP address that is + // used to establish BGP sessions to a VM instance acting as a + // third-party Router Appliance, such as a Next Gen Firewall, a Virtual + // Router, or an SD-WAN VM. + PrivateIpAddress string `json:"privateIpAddress,omitempty"` - // Start: The starting byte position of the output that was returned. - // This should match the start parameter sent with the request. If the - // serial console output exceeds the size of the buffer (1 MB), older - // output is overwritten by newer content. The output start value will - // indicate the byte position of the output that was returned, which - // might be different than the `start` value that was specified in the - // request. - Start int64 `json:"start,omitempty,string"` + // RedundantInterface: Name of the interface that will be redundant with + // the current interface you are creating. 
The redundantInterface must + // belong to the same Cloud Router as the interface here. To establish + // the BGP session to a Router Appliance VM, you must create two BGP + // peers. The two BGP peers must be attached to two separate interfaces + // that are redundant with each other. The redundant_interface must be + // 1-63 characters long, and comply with RFC1035. Specifically, the + // redundant_interface must be 1-63 characters long and match the + // regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first + // character must be a lowercase letter, and all following characters + // must be a dash, lowercase letter, or digit, except the last + // character, which cannot be a dash. + RedundantInterface string `json:"redundantInterface,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Subnetwork: The URI of the subnetwork resource that this interface + // belongs to, which must be in the same region as the Cloud Router. + // When you establish a BGP session to a VM instance using this + // interface, the VM instance must belong to the same subnetwork as the + // subnetwork specified here. + Subnetwork string `json:"subnetwork,omitempty"` - // ForceSendFields is a list of field names (e.g. "Contents") to + // ForceSendFields is a list of field names (e.g. "IpRange") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -41165,7 +41897,7 @@ type SerialPortOutput struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Contents") to include in + // NullFields is a list of field names (e.g. "IpRange") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -41174,217 +41906,23 @@ type SerialPortOutput struct { NullFields []string `json:"-"` } -func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { - type NoMethod SerialPortOutput - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ServerBinding struct { - // Possible values: - // "RESTART_NODE_ON_ANY_SERVER" - Node may associate with any physical - // server over its lifetime. - // "RESTART_NODE_ON_MINIMAL_SERVERS" - Node may associate with minimal - // physical servers over its lifetime. - // "SERVER_BINDING_TYPE_UNSPECIFIED" - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Type") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Type") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ServerBinding) MarshalJSON() ([]byte, error) { - type NoMethod ServerBinding - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ServiceAccount: A service account. -type ServiceAccount struct { - // Email: Email address of the service account. - Email string `json:"email,omitempty"` - - // Scopes: The list of scopes to be made available for this service - // account. - Scopes []string `json:"scopes,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Email") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Email") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAccount - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ServiceAttachment: Represents a ServiceAttachment resource. A service -// attachment represents a service that a producer has exposed. It -// encapsulates the load balancer which fronts the service runs and a -// list of NAT IP ranges that the producers uses to represent the -// consumers connecting to the service. next tag = 20 -type ServiceAttachment struct { - // ConnectedEndpoints: [Output Only] An array of connections for all the - // consumers connected to this service attachment. - ConnectedEndpoints []*ServiceAttachmentConnectedEndpoint `json:"connectedEndpoints,omitempty"` - - // ConnectionPreference: The connection preference of service - // attachment. The value can be set to ACCEPT_AUTOMATIC. An - // ACCEPT_AUTOMATIC service attachment is one that always accepts the - // connection from consumer forwarding rules. - // - // Possible values: - // "ACCEPT_AUTOMATIC" - // "ACCEPT_MANUAL" - // "CONNECTION_PREFERENCE_UNSPECIFIED" - ConnectionPreference string `json:"connectionPreference,omitempty"` - - // ConsumerAcceptLists: Projects that are allowed to connect to this - // service attachment. - ConsumerAcceptLists []*ServiceAttachmentConsumerProjectLimit `json:"consumerAcceptLists,omitempty"` - - // ConsumerRejectLists: Projects that are not allowed to connect to this - // service attachment. The project can be specified using its id or - // number. - ConsumerRejectLists []string `json:"consumerRejectLists,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. 
- Description string `json:"description,omitempty"` - - // DomainNames: If specified, the domain name will be used during the - // integration between the PSC connected endpoints and the Cloud DNS. - // For example, this is a valid domain name: "p.mycompany.com.". Current - // max number of domain names supported is 1. - DomainNames []string `json:"domainNames,omitempty"` - - // EnableProxyProtocol: If true, enable the proxy protocol which is for - // supplying client TCP/IP address data in TCP connections that traverse - // proxies on their way to destination servers. - EnableProxyProtocol bool `json:"enableProxyProtocol,omitempty"` - - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a ServiceAttachment. An - // up-to-date fingerprint must be provided in order to patch/update the - // ServiceAttachment; otherwise, the request will fail with error 412 - // conditionNotMet. To see the latest fingerprint, make a get() request - // to retrieve the ServiceAttachment. - Fingerprint string `json:"fingerprint,omitempty"` - - // Id: [Output Only] The unique identifier for the resource type. The - // server generates this identifier. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always - // compute#serviceAttachment for service attachments. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // NatSubnets: An array of URLs where each entry is the URL of a subnet - // provided by the service producer to use for NAT in this service - // attachment. - NatSubnets []string `json:"natSubnets,omitempty"` - - // ProducerForwardingRule: The URL of a forwarding rule with - // loadBalancingScheme INTERNAL* that is serving the endpoint identified - // by this service attachment. - ProducerForwardingRule string `json:"producerForwardingRule,omitempty"` - - // PscServiceAttachmentId: [Output Only] An 128-bit global unique ID of - // the PSC service attachment. - PscServiceAttachmentId *Uint128 `json:"pscServiceAttachmentId,omitempty"` - - // Region: [Output Only] URL of the region where the service attachment - // resides. This field applies only to the region resource. You must - // specify this field as part of the HTTP request URL. It is not - // settable as a field in the request body. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // TargetService: The URL of a service serving the endpoint identified - // by this service attachment. - TargetService string `json:"targetService,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ConnectedEndpoints") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. 
However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ConnectedEndpoints") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ServiceAttachment) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAttachment +func (s *RouterInterface) MarshalJSON() ([]byte, error) { + type NoMethod RouterInterface raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ServiceAttachmentAggregatedList: Contains a list of -// ServiceAttachmentsScopedList. -type ServiceAttachmentAggregatedList struct { +// RouterList: Contains a list of Router resources. +type RouterList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of ServiceAttachmentsScopedList resources. - Items map[string]ServiceAttachmentsScopedList `json:"items,omitempty"` + // Items: A list of Router resources. + Items []*Router `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource. Always compute#router for + // routers. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -41398,11 +41936,8 @@ type ServiceAttachmentAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *ServiceAttachmentAggregatedListWarning `json:"warning,omitempty"` + Warning *RouterListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -41425,15 +41960,14 @@ type ServiceAttachmentAggregatedList struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAttachmentAggregatedList +func (s *RouterList) MarshalJSON() ([]byte, error) { + type NoMethod RouterList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ServiceAttachmentAggregatedListWarning: [Output Only] Informational -// warning message. -type ServiceAttachmentAggregatedListWarning struct { +// RouterListWarning: [Output Only] Informational warning message. +type RouterListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -41455,6 +41989,9 @@ type ServiceAttachmentAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -41462,6 +41999,9 @@ type ServiceAttachmentAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -41495,7 +42035,7 @@ type ServiceAttachmentAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*ServiceAttachmentAggregatedListWarningData `json:"data,omitempty"` + Data []*RouterListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -41518,13 +42058,13 @@ type ServiceAttachmentAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAttachmentAggregatedListWarning +func (s *RouterListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RouterListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ServiceAttachmentAggregatedListWarningData struct { +type RouterListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -41555,34 +42095,25 @@ type ServiceAttachmentAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAttachmentAggregatedListWarningData +func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RouterListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ServiceAttachmentConnectedEndpoint: [Output Only] A connection -// connected to this service attachment. -type ServiceAttachmentConnectedEndpoint struct { - // Endpoint: The url of a connected endpoint. - Endpoint string `json:"endpoint,omitempty"` - - // PscConnectionId: The PSC connection id of the connected endpoint. - PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` +type RouterMd5AuthenticationKey struct { + // Key: [Input only] Value of the key. For patch and update calls, it + // can be skipped to copy the value from the previous configuration. + // This is allowed if the key with the same name existed before the + // operation. Maximum length is 80 characters. Can only contain + // printable ASCII characters. + Key string `json:"key,omitempty"` - // Status: The status of a connected endpoint to this service - // attachment. - // - // Possible values: - // "ACCEPTED" - The connection has been accepted by the producer. - // "CLOSED" - The connection has been closed by the producer. - // "PENDING" - The connection is pending acceptance by the producer. 
- // "REJECTED" - The consumer is still connected but not using the - // connection. - // "STATUS_UNSPECIFIED" - Status string `json:"status,omitempty"` + // Name: Name used to identify the key. Must be unique within a router. + // Must be referenced by at least one bgpPeer. Must comply with RFC1035. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "Endpoint") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -41590,8 +42121,8 @@ type ServiceAttachmentConnectedEndpoint struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Endpoint") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -41599,21 +42130,138 @@ type ServiceAttachmentConnectedEndpoint struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentConnectedEndpoint) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAttachmentConnectedEndpoint +func (s *RouterMd5AuthenticationKey) MarshalJSON() ([]byte, error) { + type NoMethod RouterMd5AuthenticationKey raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ServiceAttachmentConsumerProjectLimit struct { - // ConnectionLimit: The value of the limit to set. - ConnectionLimit int64 `json:"connectionLimit,omitempty"` +// RouterNat: Represents a Nat resource. It enables the VMs within the +// specified subnetworks to access Internet without external IP +// addresses. It specifies a list of subnetworks (and the ranges within) +// that want to use NAT. Customers can also provide the external IPs +// that would be used for NAT. GCP would auto-allocate ephemeral IPs if +// no external IPs are provided. +type RouterNat struct { + // DrainNatIps: A list of URLs of the IP resources to be drained. These + // IPs must be valid static external IPs that have been assigned to the + // NAT. These IPs should be used for updating/patching a NAT only. + DrainNatIps []string `json:"drainNatIps,omitempty"` - // ProjectIdOrNum: The project id or number for the project to set the - // limit for. - ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` + // EnableDynamicPortAllocation: Enable Dynamic Port Allocation. If not + // specified, it is disabled by default. If set to true, - Dynamic Port + // Allocation will be enabled on this NAT config. - + // enableEndpointIndependentMapping cannot be set to true. - If minPorts + // is set, minPortsPerVm must be set to a power of two greater than or + // equal to 32. If minPortsPerVm is not set, a minimum of 32 ports will + // be allocated to a VM from this NAT config. + EnableDynamicPortAllocation bool `json:"enableDynamicPortAllocation,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"ConnectionLimit") to + EnableEndpointIndependentMapping bool `json:"enableEndpointIndependentMapping,omitempty"` + + // EndpointTypes: List of NAT-ted endpoint types supported by the Nat + // Gateway. If the list is empty, then it will be equivalent to include + // ENDPOINT_TYPE_VM + // + // Possible values: + // "ENDPOINT_TYPE_SWG" - This is used for Secure Web Gateway + // endpoints. + // "ENDPOINT_TYPE_VM" - This is the default. + EndpointTypes []string `json:"endpointTypes,omitempty"` + + // IcmpIdleTimeoutSec: Timeout (in seconds) for ICMP connections. + // Defaults to 30s if not set. + IcmpIdleTimeoutSec int64 `json:"icmpIdleTimeoutSec,omitempty"` + + // LogConfig: Configure logging on this NAT. + LogConfig *RouterNatLogConfig `json:"logConfig,omitempty"` + + // MaxPortsPerVm: Maximum number of ports allocated to a VM from this + // NAT config when Dynamic Port Allocation is enabled. If Dynamic Port + // Allocation is not enabled, this field has no effect. If Dynamic Port + // Allocation is enabled, and this field is set, it must be set to a + // power of two greater than minPortsPerVm, or 64 if minPortsPerVm is + // not set. If Dynamic Port Allocation is enabled and this field is not + // set, a maximum of 65536 ports will be allocated to a VM from this NAT + // config. + MaxPortsPerVm int64 `json:"maxPortsPerVm,omitempty"` + + // MinPortsPerVm: Minimum number of ports allocated to a VM from this + // NAT config. If not set, a default number of ports is allocated to a + // VM. This is rounded up to the nearest power of 2. For example, if the + // value of this field is 50, at least 64 ports are allocated to a VM. + MinPortsPerVm int64 `json:"minPortsPerVm,omitempty"` + + // Name: Unique name of this Nat service. The name must be 1-63 + // characters long and comply with RFC1035. + Name string `json:"name,omitempty"` + + // NatIpAllocateOption: Specify the NatIpAllocateOption, which can take + // one of the following values: - MANUAL_ONLY: Uses only Nat IP + // addresses provided by customers. When there are not enough specified + // Nat IPs, the Nat service fails for new VMs. - AUTO_ONLY: Nat IPs are + // allocated by Google Cloud Platform; customers can't specify any Nat + // IPs. When choosing AUTO_ONLY, then nat_ip should be empty. + // + // Possible values: + // "AUTO_ONLY" - Nat IPs are allocated by GCP; customers can not + // specify any Nat IPs. + // "MANUAL_ONLY" - Only use Nat IPs provided by customers. When + // specified Nat IPs are not enough then the Nat service fails for new + // VMs. + NatIpAllocateOption string `json:"natIpAllocateOption,omitempty"` + + // NatIps: A list of URLs of the IP resources used for this Nat service. + // These IP addresses must be valid static external IP addresses + // assigned to the project. + NatIps []string `json:"natIps,omitempty"` + + // Rules: A list of rules associated with this NAT. + Rules []*RouterNatRule `json:"rules,omitempty"` + + // SourceSubnetworkIpRangesToNat: Specify the Nat option, which can take + // one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of + // the IP ranges in every Subnetwork are allowed to Nat. - + // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges + // in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list + // of Subnetworks are allowed to Nat (specified in the field subnetwork + // below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. 
+ // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or + // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any + // other Router.Nat section in any Router for this network in this + // region. + // + // Possible values: + // "ALL_SUBNETWORKS_ALL_IP_RANGES" - All the IP ranges in every + // Subnetwork are allowed to Nat. + // "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES" - All the primary IP ranges + // in every Subnetwork are allowed to Nat. + // "LIST_OF_SUBNETWORKS" - A list of Subnetworks are allowed to Nat + // (specified in the field subnetwork below) + SourceSubnetworkIpRangesToNat string `json:"sourceSubnetworkIpRangesToNat,omitempty"` + + // Subnetworks: A list of Subnetwork resources whose traffic should be + // translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS + // is selected for the SubnetworkIpRangeToNatOption above. + Subnetworks []*RouterNatSubnetworkToNat `json:"subnetworks,omitempty"` + + // TcpEstablishedIdleTimeoutSec: Timeout (in seconds) for TCP + // established connections. Defaults to 1200s if not set. + TcpEstablishedIdleTimeoutSec int64 `json:"tcpEstablishedIdleTimeoutSec,omitempty"` + + // TcpTimeWaitTimeoutSec: Timeout (in seconds) for TCP connections that + // are in TIME_WAIT state. Defaults to 120s if not set. + TcpTimeWaitTimeoutSec int64 `json:"tcpTimeWaitTimeoutSec,omitempty"` + + // TcpTransitoryIdleTimeoutSec: Timeout (in seconds) for TCP transitory + // connections. Defaults to 30s if not set. + TcpTransitoryIdleTimeoutSec int64 `json:"tcpTransitoryIdleTimeoutSec,omitempty"` + + // UdpIdleTimeoutSec: Timeout (in seconds) for UDP connections. Defaults + // to 30s if not set. + UdpIdleTimeoutSec int64 `json:"udpIdleTimeoutSec,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DrainNatIps") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -41621,53 +42269,89 @@ type ServiceAttachmentConsumerProjectLimit struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ConnectionLimit") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "DrainNatIps") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ServiceAttachmentConsumerProjectLimit) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAttachmentConsumerProjectLimit +func (s *RouterNat) MarshalJSON() ([]byte, error) { + type NoMethod RouterNat raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ServiceAttachmentList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. 
- Id string `json:"id,omitempty"` +// RouterNatLogConfig: Configuration of logging on a NAT. +type RouterNatLogConfig struct { + // Enable: Indicates whether or not to export logs. This is false by + // default. + Enable bool `json:"enable,omitempty"` - // Items: A list of ServiceAttachment resources. - Items []*ServiceAttachment `json:"items,omitempty"` + // Filter: Specify the desired filtering of logs on this NAT. If + // unspecified, logs are exported for all connections handled by this + // NAT. This option can take one of the following values: - ERRORS_ONLY: + // Export logs only for connection failures. - TRANSLATIONS_ONLY: Export + // logs only for successful connections. - ALL: Export logs for all + // connections, successful and unsuccessful. + // + // Possible values: + // "ALL" - Export logs for all (successful and unsuccessful) + // connections. + // "ERRORS_ONLY" - Export logs for connection failures only. + // "TRANSLATIONS_ONLY" - Export logs for successful connections only. + Filter string `json:"filter,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#serviceAttachment for service attachments. - Kind string `json:"kind,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enable") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // NullFields is a list of field names (e.g. "Enable") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +func (s *RouterNatLogConfig) MarshalJSON() ([]byte, error) { + type NoMethod RouterNatLogConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // Warning: [Output Only] Informational warning message. - Warning *ServiceAttachmentListWarning `json:"warning,omitempty"` +type RouterNatRule struct { + // Action: The action to be enforced for traffic that matches this rule. + Action *RouterNatRuleAction `json:"action,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Description: An optional description of this rule. + Description string `json:"description,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // Match: CEL expression that specifies the match condition that egress + // traffic from a VM is evaluated against. 
If it evaluates to true, the + // corresponding `action` is enforced. The following examples are valid + // match expressions for public NAT: "inIpRange(destination.ip, + // '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')" + // "destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'" The + // following example is a valid match expression for private NAT: + // "nexthop.hub == + // 'https://networkconnectivity.googleapis.com/v1alpha1/projects/my-proje + // ct/global/hub/hub-1'" + Match string `json:"match,omitempty"` + + // RuleNumber: An integer uniquely identifying a rule in the list. The + // rule number must be a positive value between 0 and 65000, and must be + // unique among rules within a NAT. + RuleNumber int64 `json:"ruleNumber,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -41675,7 +42359,7 @@ type ServiceAttachmentList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Action") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -41684,152 +42368,25 @@ type ServiceAttachmentList struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentList) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAttachmentList +func (s *RouterNatRule) MarshalJSON() ([]byte, error) { + type NoMethod RouterNatRule raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ServiceAttachmentListWarning: [Output Only] Informational warning -// message. -type ServiceAttachmentListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. 
- // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*ServiceAttachmentListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ServiceAttachmentListWarning) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAttachmentListWarning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ServiceAttachmentListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. 
Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ServiceAttachmentListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAttachmentListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ServiceAttachmentsScopedList struct { - // ServiceAttachments: A list of ServiceAttachments contained in this - // scope. - ServiceAttachments []*ServiceAttachment `json:"serviceAttachments,omitempty"` +type RouterNatRuleAction struct { + // SourceNatActiveIps: A list of URLs of the IP resources used for this + // NAT rule. These IP addresses must be valid static external IP + // addresses assigned to the project. This field is used for public NAT. + SourceNatActiveIps []string `json:"sourceNatActiveIps,omitempty"` - // Warning: Informational warning which replaces the list of service - // attachments when the list is empty. - Warning *ServiceAttachmentsScopedListWarning `json:"warning,omitempty"` + // SourceNatDrainIps: A list of URLs of the IP resources to be drained. + // These IPs must be valid static external IPs that have been assigned + // to the NAT. These IPs should be used for updating/patching a NAT rule + // only. This field is used for public NAT. + SourceNatDrainIps []string `json:"sourceNatDrainIps,omitempty"` - // ForceSendFields is a list of field names (e.g. "ServiceAttachments") + // ForceSendFields is a list of field names (e.g. "SourceNatActiveIps") // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -41837,7 +42394,7 @@ type ServiceAttachmentsScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ServiceAttachments") to + // NullFields is a list of field names (e.g. "SourceNatActiveIps") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -41847,83 +42404,39 @@ type ServiceAttachmentsScopedList struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAttachmentsScopedList +func (s *RouterNatRuleAction) MarshalJSON() ([]byte, error) { + type NoMethod RouterNatRuleAction raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ServiceAttachmentsScopedListWarning: Informational warning which -// replaces the list of service attachments when the list is empty. -type ServiceAttachmentsScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. 
- // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` +// RouterNatSubnetworkToNat: Defines the IP ranges that want to use NAT +// for a subnetwork. +type RouterNatSubnetworkToNat struct { + // Name: URL for the subnetwork resource that will use NAT. + Name string `json:"name,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*ServiceAttachmentsScopedListWarningData `json:"data,omitempty"` + // SecondaryIpRangeNames: A list of the secondary ranges of the + // Subnetwork that are allowed to use NAT. This can be populated only if + // "LIST_OF_SECONDARY_IP_RANGES" is one of the values in + // source_ip_ranges_to_nat. + SecondaryIpRangeNames []string `json:"secondaryIpRangeNames,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // SourceIpRangesToNat: Specify the options for NAT ranges in the + // Subnetwork. All options of a single value are valid except + // NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple + // values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] + // Default: [ALL_IP_RANGES] + // + // Possible values: + // "ALL_IP_RANGES" - The primary and all the secondary ranges are + // allowed to Nat. + // "LIST_OF_SECONDARY_IP_RANGES" - A list of secondary ranges are + // allowed to Nat. + // "PRIMARY_IP_RANGE" - The primary range is allowed to Nat. + SourceIpRangesToNat []string `json:"sourceIpRangesToNat,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -41931,7 +42444,7 @@ type ServiceAttachmentsScopedListWarning struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API + // NullFields is a list of field names (e.g. "Name") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -41940,27 +42453,27 @@ type ServiceAttachmentsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAttachmentsScopedListWarning +func (s *RouterNatSubnetworkToNat) MarshalJSON() ([]byte, error) { + type NoMethod RouterNatSubnetworkToNat raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ServiceAttachmentsScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. 
Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +type RouterStatus struct { + // BestRoutes: Best routes for this router's network. + BestRoutes []*Route `json:"bestRoutes,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // BestRoutesForRouter: Best routes learned by this router. + BestRoutesForRouter []*Route `json:"bestRoutesForRouter,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + BgpPeerStatus []*RouterStatusBgpPeerStatus `json:"bgpPeerStatus,omitempty"` + + NatStatus []*RouterStatusNatStatus `json:"natStatus,omitempty"` + + // Network: URI of the network to which this router belongs. + Network string `json:"network,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BestRoutes") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -41968,8 +42481,8 @@ type ServiceAttachmentsScopedListWarningData struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "BestRoutes") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -41977,59 +42490,82 @@ type ServiceAttachmentsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *ServiceAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAttachmentsScopedListWarningData +func (s *RouterStatus) MarshalJSON() ([]byte, error) { + type NoMethod RouterStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ShareSettings: The share setting for reservations and sole tenancy -// node groups. -type ShareSettings struct { - // ProjectMap: A map of project id and project config. This is only - // valid when share_type's value is SPECIFIC_PROJECTS. - ProjectMap map[string]ShareSettingsProjectConfig `json:"projectMap,omitempty"` +type RouterStatusBgpPeerStatus struct { + // AdvertisedRoutes: Routes that were advertised to the remote BGP peer + AdvertisedRoutes []*Route `json:"advertisedRoutes,omitempty"` - // ShareType: Type of sharing for this shared-reservation + BfdStatus *BfdStatus `json:"bfdStatus,omitempty"` + + // EnableIpv6: Enable IPv6 traffic over BGP Peer. If not specified, it + // is disabled by default. + EnableIpv6 bool `json:"enableIpv6,omitempty"` + + // IpAddress: IP address of the local BGP interface. + IpAddress string `json:"ipAddress,omitempty"` + + // Ipv6NexthopAddress: IPv6 address of the local BGP interface. + Ipv6NexthopAddress string `json:"ipv6NexthopAddress,omitempty"` + + // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. 
+ LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` + + // Md5AuthEnabled: Informs whether MD5 authentication is enabled on this + // BGP peer. + Md5AuthEnabled bool `json:"md5AuthEnabled,omitempty"` + + // Name: Name of this BGP peer. Unique within the Routers resource. + Name string `json:"name,omitempty"` + + // NumLearnedRoutes: Number of routes learned from the remote BGP Peer. + NumLearnedRoutes int64 `json:"numLearnedRoutes,omitempty"` + + // PeerIpAddress: IP address of the remote BGP interface. + PeerIpAddress string `json:"peerIpAddress,omitempty"` + + // PeerIpv6NexthopAddress: IPv6 address of the remote BGP interface. + PeerIpv6NexthopAddress string `json:"peerIpv6NexthopAddress,omitempty"` + + // RouterApplianceInstance: [Output only] URI of the VM instance that is + // used as third-party router appliances such as Next Gen Firewalls, + // Virtual Routers, or Router Appliances. The VM instance is the peer + // side of the BGP session. + RouterApplianceInstance string `json:"routerApplianceInstance,omitempty"` + + // State: The state of the BGP session. For a list of possible values + // for this field, see BGP session states. + State string `json:"state,omitempty"` + + // Status: Status of the BGP peer: {UP, DOWN} // // Possible values: - // "LOCAL" - Default value. - // "SHARE_TYPE_UNSPECIFIED" - Default value. This value is unused. - // "SPECIFIC_PROJECTS" - Shared-reservation is open to specific - // projects - ShareType string `json:"shareType,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ProjectMap") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // "DOWN" + // "UNKNOWN" + // "UP" + Status string `json:"status,omitempty"` - // NullFields is a list of field names (e.g. "ProjectMap") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // StatusReason: Indicates why particular status was returned. + // + // Possible values: + // "MD5_AUTH_INTERNAL_PROBLEM" - Indicates internal problems with + // configuration of MD5 authentication. This particular reason can only + // be returned when md5AuthEnabled is true and status is DOWN. + // "STATUS_REASON_UNSPECIFIED" + StatusReason string `json:"statusReason,omitempty"` -func (s *ShareSettings) MarshalJSON() ([]byte, error) { - type NoMethod ShareSettings - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Uptime: Time this session has been up. Format: 14 years, 51 weeks, 6 + // days, 23 hours, 59 minutes, 59 seconds + Uptime string `json:"uptime,omitempty"` -// ShareSettingsProjectConfig: Config for each project in the share -// settings. -type ShareSettingsProjectConfig struct { - // ProjectId: The project ID, should be same as the key of this project - // config in the parent map. 
- ProjectId string `json:"projectId,omitempty"` + // UptimeSeconds: Time this session has been up, in seconds. Format: 145 + UptimeSeconds string `json:"uptimeSeconds,omitempty"` - // ForceSendFields is a list of field names (e.g. "ProjectId") to + // ForceSendFields is a list of field names (e.g. "AdvertisedRoutes") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -42037,111 +42573,108 @@ type ShareSettingsProjectConfig struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ProjectId") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AdvertisedRoutes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *ShareSettingsProjectConfig) MarshalJSON() ([]byte, error) { - type NoMethod ShareSettingsProjectConfig +func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { + type NoMethod RouterStatusBgpPeerStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ShieldedInstanceConfig: A set of Shielded Instance options. -type ShieldedInstanceConfig struct { - // EnableIntegrityMonitoring: Defines whether the instance has integrity - // monitoring enabled. Enabled by default. - EnableIntegrityMonitoring bool `json:"enableIntegrityMonitoring,omitempty"` - - // EnableSecureBoot: Defines whether the instance has Secure Boot - // enabled. Disabled by default. - EnableSecureBoot bool `json:"enableSecureBoot,omitempty"` +// RouterStatusNatStatus: Status of a NAT contained in this router. +type RouterStatusNatStatus struct { + // AutoAllocatedNatIps: A list of IPs auto-allocated for NAT. Example: + // ["1.1.1.1", "129.2.16.89"] + AutoAllocatedNatIps []string `json:"autoAllocatedNatIps,omitempty"` - // EnableVtpm: Defines whether the instance has the vTPM enabled. - // Enabled by default. - EnableVtpm bool `json:"enableVtpm,omitempty"` + // DrainAutoAllocatedNatIps: A list of IPs auto-allocated for NAT that + // are in drain mode. Example: ["1.1.1.1", "179.12.26.133"]. + DrainAutoAllocatedNatIps []string `json:"drainAutoAllocatedNatIps,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "EnableIntegrityMonitoring") to unconditionally include in API - // requests. By default, fields with empty or default values are omitted - // from API requests. However, any non-pointer, non-interface field - // appearing in ForceSendFields will be sent to the server regardless of - // whether the field is empty or not. This may be used to include empty - // fields in Patch requests. 
- ForceSendFields []string `json:"-"` + // DrainUserAllocatedNatIps: A list of IPs user-allocated for NAT that + // are in drain mode. Example: ["1.1.1.1", "179.12.26.133"]. + DrainUserAllocatedNatIps []string `json:"drainUserAllocatedNatIps,omitempty"` - // NullFields is a list of field names (e.g. - // "EnableIntegrityMonitoring") to include in API requests with the JSON - // null value. By default, fields with empty values are omitted from API - // requests. However, any field with an empty value appearing in - // NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. - NullFields []string `json:"-"` -} + // MinExtraNatIpsNeeded: The number of extra IPs to allocate. This will + // be greater than 0 only if user-specified IPs are NOT enough to allow + // all configured VMs to use NAT. This value is meaningful only when + // auto-allocation of NAT IPs is *not* used. + MinExtraNatIpsNeeded int64 `json:"minExtraNatIpsNeeded,omitempty"` -func (s *ShieldedInstanceConfig) MarshalJSON() ([]byte, error) { - type NoMethod ShieldedInstanceConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Name: Unique name of this NAT. + Name string `json:"name,omitempty"` -// ShieldedInstanceIdentity: A Shielded Instance Identity. -type ShieldedInstanceIdentity struct { - // EncryptionKey: An Endorsement Key (EK) made by the RSA 2048 algorithm - // issued to the Shielded Instance's vTPM. - EncryptionKey *ShieldedInstanceIdentityEntry `json:"encryptionKey,omitempty"` + // NumVmEndpointsWithNatMappings: Number of VM endpoints (i.e., Nics) + // that can use NAT. + NumVmEndpointsWithNatMappings int64 `json:"numVmEndpointsWithNatMappings,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#shieldedInstanceIdentity for shielded Instance identity - // entry. - Kind string `json:"kind,omitempty"` + // RuleStatus: Status of rules in this NAT. + RuleStatus []*RouterStatusNatStatusNatRuleStatus `json:"ruleStatus,omitempty"` - // SigningKey: An Attestation Key (AK) made by the RSA 2048 algorithm - // issued to the Shielded Instance's vTPM. - SigningKey *ShieldedInstanceIdentityEntry `json:"signingKey,omitempty"` + // UserAllocatedNatIpResources: A list of fully qualified URLs of + // reserved IP address resources. + UserAllocatedNatIpResources []string `json:"userAllocatedNatIpResources,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // UserAllocatedNatIps: A list of IPs user-allocated for NAT. They will + // be raw IP strings like "179.12.26.133". + UserAllocatedNatIps []string `json:"userAllocatedNatIps,omitempty"` - // ForceSendFields is a list of field names (e.g. "EncryptionKey") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AutoAllocatedNatIps") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "EncryptionKey") to include - // in API requests with the JSON null value. 
By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AutoAllocatedNatIps") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *ShieldedInstanceIdentity) MarshalJSON() ([]byte, error) { - type NoMethod ShieldedInstanceIdentity +func (s *RouterStatusNatStatus) MarshalJSON() ([]byte, error) { + type NoMethod RouterStatusNatStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ShieldedInstanceIdentityEntry: A Shielded Instance Identity Entry. -type ShieldedInstanceIdentityEntry struct { - // EkCert: A PEM-encoded X.509 certificate. This field can be empty. - EkCert string `json:"ekCert,omitempty"` +// RouterStatusNatStatusNatRuleStatus: Status of a NAT Rule contained in +// this NAT. +type RouterStatusNatStatusNatRuleStatus struct { + // ActiveNatIps: A list of active IPs for NAT. Example: ["1.1.1.1", + // "179.12.26.133"]. + ActiveNatIps []string `json:"activeNatIps,omitempty"` - // EkPub: A PEM-encoded public key. - EkPub string `json:"ekPub,omitempty"` + // DrainNatIps: A list of IPs for NAT that are in drain mode. Example: + // ["1.1.1.1", "179.12.26.133"]. + DrainNatIps []string `json:"drainNatIps,omitempty"` - // ForceSendFields is a list of field names (e.g. "EkCert") to + // MinExtraIpsNeeded: The number of extra IPs to allocate. This will be + // greater than 0 only if the existing IPs in this NAT Rule are NOT + // enough to allow all configured VMs to use NAT. + MinExtraIpsNeeded int64 `json:"minExtraIpsNeeded,omitempty"` + + // NumVmEndpointsWithNatMappings: Number of VM endpoints (i.e., NICs) + // that have NAT Mappings from this NAT Rule. + NumVmEndpointsWithNatMappings int64 `json:"numVmEndpointsWithNatMappings,omitempty"` + + // RuleNumber: Rule number of the rule. + RuleNumber int64 `json:"ruleNumber,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ActiveNatIps") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -42149,69 +42682,32 @@ type ShieldedInstanceIdentityEntry struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "EkCert") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "ActiveNatIps") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. 
It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ShieldedInstanceIdentityEntry) MarshalJSON() ([]byte, error) { - type NoMethod ShieldedInstanceIdentityEntry +func (s *RouterStatusNatStatusNatRuleStatus) MarshalJSON() ([]byte, error) { + type NoMethod RouterStatusNatStatusNatRuleStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ShieldedInstanceIntegrityPolicy: The policy describes the baseline -// against which Instance boot integrity is measured. -type ShieldedInstanceIntegrityPolicy struct { - // UpdateAutoLearnPolicy: Updates the integrity policy baseline using - // the measurements from the VM instance's most recent boot. - UpdateAutoLearnPolicy bool `json:"updateAutoLearnPolicy,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "UpdateAutoLearnPolicy") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "UpdateAutoLearnPolicy") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ShieldedInstanceIntegrityPolicy) MarshalJSON() ([]byte, error) { - type NoMethod ShieldedInstanceIntegrityPolicy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +type RouterStatusResponse struct { + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` -// SignedUrlKey: Represents a customer-supplied Signing Key used by -// Cloud CDN Signed URLs -type SignedUrlKey struct { - // KeyName: Name of the key. The name must be 1-63 characters long, and - // comply with RFC1035. Specifically, the name must be 1-63 characters - // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` - // which means the first character must be a lowercase letter, and all - // following characters must be a dash, lowercase letter, or digit, - // except the last character, which cannot be a dash. - KeyName string `json:"keyName,omitempty"` + Result *RouterStatus `json:"result,omitempty"` - // KeyValue: 128-bit key value used for signing the URL. The key value - // must be a valid RFC 4648 Section 5 base64url encoded string. - KeyValue string `json:"keyValue,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "KeyName") to + // ForceSendFields is a list of field names (e.g. "Kind") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -42219,8 +42715,8 @@ type SignedUrlKey struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "KeyName") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -42228,156 +42724,21 @@ type SignedUrlKey struct { NullFields []string `json:"-"` } -func (s *SignedUrlKey) MarshalJSON() ([]byte, error) { - type NoMethod SignedUrlKey +func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { + type NoMethod RouterStatusResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Snapshot: Represents a Persistent Disk Snapshot resource. You can use -// snapshots to back up data on a regular interval. For more -// information, read Creating persistent disk snapshots. -type Snapshot struct { - // AutoCreated: [Output Only] Set to true if snapshots are automatically - // created by applying resource policy on the target disk. - AutoCreated bool `json:"autoCreated,omitempty"` - - // ChainName: Creates the new snapshot in the snapshot chain labeled - // with the specified name. The chain name must be 1-63 characters long - // and comply with RFC1035. This is an uncommon option only for advanced - // service owners who needs to create separate snapshot chains, for - // example, for chargeback tracking. When you describe your snapshot - // resource, this field is visible only if it has a non-empty value. - ChainName string `json:"chainName,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // DiskSizeGb: [Output Only] Size of the source disk, specified in GB. - DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - - // DownloadBytes: [Output Only] Number of bytes downloaded to restore a - // snapshot to a disk. - DownloadBytes int64 `json:"downloadBytes,omitempty,string"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#snapshot for - // Snapshot resources. - Kind string `json:"kind,omitempty"` - - // LabelFingerprint: A fingerprint for the labels being applied to this - // snapshot, which is essentially a hash of the labels set used for - // optimistic locking. The fingerprint is initially generated by Compute - // Engine and changes after every request to modify or update labels. - // You must always provide an up-to-date fingerprint hash in order to - // update or change labels, otherwise the request will fail with error - // 412 conditionNotMet. To see the latest fingerprint, make a get() - // request to retrieve a snapshot. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels to apply to this snapshot. 
These can be later modified - // by the setLabels method. Label values may be empty. - Labels map[string]string `json:"labels,omitempty"` - - // LicenseCodes: [Output Only] Integer license codes indicating which - // licenses are attached to this snapshot. - LicenseCodes googleapi.Int64s `json:"licenseCodes,omitempty"` - - // Licenses: [Output Only] A list of public visible licenses that apply - // to this snapshot. This can be because the original image had licenses - // attached (such as a Windows image). - Licenses []string `json:"licenses,omitempty"` - - // LocationHint: An opaque location hint used to place the snapshot - // close to other resources. This field is for use by internal tools - // that use the public API. - LocationHint string `json:"locationHint,omitempty"` - - // Name: Name of the resource; provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // SatisfiesPzs: [Output Only] Reserved for future use. - SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SnapshotEncryptionKey: Encrypts the snapshot using a - // customer-supplied encryption key. After you encrypt a snapshot using - // a customer-supplied key, you must provide the same key if you use the - // snapshot later. For example, you must provide the encryption key when - // you create a disk from the encrypted snapshot in a future request. - // Customer-supplied encryption keys do not protect access to metadata - // of the snapshot. If you do not provide an encryption key when - // creating the snapshot, then the snapshot will be encrypted using an - // automatically generated key and you do not need to provide a key to - // use the snapshot later. - SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` - - // SourceDisk: The source disk used to create this snapshot. - SourceDisk string `json:"sourceDisk,omitempty"` - - // SourceDiskEncryptionKey: The customer-supplied encryption key of the - // source disk. Required if the source disk is protected by a - // customer-supplied encryption key. - SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` - - // SourceDiskId: [Output Only] The ID value of the disk used to create - // this snapshot. This value may be used to determine whether the - // snapshot was taken from the current or a previous instance of a given - // disk name. - SourceDiskId string `json:"sourceDiskId,omitempty"` - - // Status: [Output Only] The status of the snapshot. This can be - // CREATING, DELETING, FAILED, READY, or UPLOADING. - // - // Possible values: - // "CREATING" - Snapshot creation is in progress. - // "DELETING" - Snapshot is currently being deleted. - // "FAILED" - Snapshot creation failed. - // "READY" - Snapshot has been created successfully. - // "UPLOADING" - Snapshot is being uploaded. - Status string `json:"status,omitempty"` - - // StorageBytes: [Output Only] A size of the storage used by the - // snapshot. 
As snapshots share storage, this number is expected to - // change with snapshot creation/deletion. - StorageBytes int64 `json:"storageBytes,omitempty,string"` - - // StorageBytesStatus: [Output Only] An indicator whether storageBytes - // is in a stable state or it is being adjusted as a result of shared - // storage reallocation. This status can either be UPDATING, meaning the - // size of the snapshot is being updated, or UP_TO_DATE, meaning the - // size of the snapshot is up-to-date. - // - // Possible values: - // "UPDATING" - // "UP_TO_DATE" - StorageBytesStatus string `json:"storageBytesStatus,omitempty"` - - // StorageLocations: Cloud Storage bucket storage location of the - // snapshot (regional or multi-regional). - StorageLocations []string `json:"storageLocations,omitempty"` +type RoutersPreviewResponse struct { + // Resource: Preview of given router. + Resource *Router `json:"resource,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "AutoCreated") to + // ForceSendFields is a list of field names (e.g. "Resource") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -42385,52 +42746,30 @@ type Snapshot struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoCreated") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Resource") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Snapshot) MarshalJSON() ([]byte, error) { - type NoMethod Snapshot +func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { + type NoMethod RoutersPreviewResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SnapshotList: Contains a list of Snapshot resources. -type SnapshotList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Snapshot resources. - Items []*Snapshot `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. 
- Warning *SnapshotListWarning `json:"warning,omitempty"` +type RoutersScopedList struct { + // Routers: A list of routers contained in this scope. + Routers []*Router `json:"routers,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: Informational warning which replaces the list of routers + // when the list is empty. + Warning *RoutersScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Routers") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -42438,8 +42777,8 @@ type SnapshotList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Routers") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -42447,14 +42786,15 @@ type SnapshotList struct { NullFields []string `json:"-"` } -func (s *SnapshotList) MarshalJSON() ([]byte, error) { - type NoMethod SnapshotList +func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { + type NoMethod RoutersScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SnapshotListWarning: [Output Only] Informational warning message. -type SnapshotListWarning struct { +// RoutersScopedListWarning: Informational warning which replaces the +// list of routers when the list is empty. +type RoutersScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -42476,6 +42816,9 @@ type SnapshotListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -42483,6 +42826,9 @@ type SnapshotListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -42516,7 +42862,7 @@ type SnapshotListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. 
// For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*SnapshotListWarningData `json:"data,omitempty"` + Data []*RoutersScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -42539,13 +42885,13 @@ type SnapshotListWarning struct { NullFields []string `json:"-"` } -func (s *SnapshotListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SnapshotListWarning +func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RoutersScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SnapshotListWarningData struct { +type RoutersScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -42576,61 +42922,45 @@ type SnapshotListWarningData struct { NullFields []string `json:"-"` } -func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SnapshotListWarningData +func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RoutersScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SourceDiskEncryptionKey struct { - // DiskEncryptionKey: The customer-supplied encryption key of the source - // disk. Required if the source disk is protected by a customer-supplied - // encryption key. - DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` +// Rule: This is deprecated and has no effect. Do not use. +type Rule struct { + // Action: This is deprecated and has no effect. Do not use. + // + // Possible values: + // "ALLOW" - This is deprecated and has no effect. Do not use. + // "ALLOW_WITH_LOG" - This is deprecated and has no effect. Do not + // use. + // "DENY" - This is deprecated and has no effect. Do not use. + // "DENY_WITH_LOG" - This is deprecated and has no effect. Do not use. + // "LOG" - This is deprecated and has no effect. Do not use. + // "NO_ACTION" - This is deprecated and has no effect. Do not use. + Action string `json:"action,omitempty"` - // SourceDisk: URL of the disk attached to the source instance. This can - // be a full or valid partial URL. For example, the following are valid - // values: - - // https://www.googleapis.com/compute/v1/projects/project/zones/zone - // /disks/disk - projects/project/zones/zone/disks/disk - - // zones/zone/disks/disk - SourceDisk string `json:"sourceDisk,omitempty"` + // Conditions: This is deprecated and has no effect. Do not use. + Conditions []*Condition `json:"conditions,omitempty"` - // ForceSendFields is a list of field names (e.g. "DiskEncryptionKey") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Description: This is deprecated and has no effect. Do not use. + Description string `json:"description,omitempty"` - // NullFields is a list of field names (e.g. "DiskEncryptionKey") to - // include in API requests with the JSON null value. 
By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} + // Ins: This is deprecated and has no effect. Do not use. + Ins []string `json:"ins,omitempty"` -func (s *SourceDiskEncryptionKey) MarshalJSON() ([]byte, error) { - type NoMethod SourceDiskEncryptionKey - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // LogConfigs: This is deprecated and has no effect. Do not use. + LogConfigs []*LogConfig `json:"logConfigs,omitempty"` -// SourceInstanceParams: A specification of the parameters to use when -// creating the instance template from a source instance. -type SourceInstanceParams struct { - // DiskConfigs: Attached disks configuration. If not provided, defaults - // are applied: For boot disk and any other R/W disks, new custom images - // will be created from each disk. For read-only disks, they will be - // attached in read-only mode. Local SSD disks will be created as blank - // volumes. - DiskConfigs []*DiskInstantiationConfig `json:"diskConfigs,omitempty"` + // NotIns: This is deprecated and has no effect. Do not use. + NotIns []string `json:"notIns,omitempty"` - // ForceSendFields is a list of field names (e.g. "DiskConfigs") to + // Permissions: This is deprecated and has no effect. Do not use. + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -42638,93 +42968,86 @@ type SourceInstanceParams struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DiskConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SourceInstanceParams) MarshalJSON() ([]byte, error) { - type NoMethod SourceInstanceParams +func (s *Rule) MarshalJSON() ([]byte, error) { + type NoMethod Rule raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SourceInstanceProperties: DEPRECATED: Please use -// compute#instanceProperties instead. New properties will not be added -// to this field. -type SourceInstanceProperties struct { - // CanIpForward: Enables instances created based on this machine image - // to send packets with source IP addresses other than their own and - // receive packets with destination IP addresses other than their own. 
- // If these instances will be used as an IP gateway or it will be set as - // the next-hop in a Route resource, specify true. If unsure, leave this - // set to false. See the Enable IP forwarding documentation for more - // information. - CanIpForward bool `json:"canIpForward,omitempty"` - - // DeletionProtection: Whether the instance created from this machine - // image should be protected against deletion. - DeletionProtection bool `json:"deletionProtection,omitempty"` - - // Description: An optional text description for the instances that are - // created from this machine image. - Description string `json:"description,omitempty"` - - // Disks: An array of disks that are associated with the instances that - // are created from this machine image. - Disks []*SavedAttachedDisk `json:"disks,omitempty"` - - // GuestAccelerators: A list of guest accelerator cards' type and count - // to use for instances created from this machine image. - GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` - - // Labels: Labels to apply to instances that are created from this - // machine image. - Labels map[string]string `json:"labels,omitempty"` - - // MachineType: The machine type to use for instances that are created - // from this machine image. - MachineType string `json:"machineType,omitempty"` - - // Metadata: The metadata key/value pairs to assign to instances that - // are created from this machine image. These pairs can consist of - // custom metadata or predefined keys. See Project and instance metadata - // for more information. - Metadata *Metadata `json:"metadata,omitempty"` +type SSLHealthCheck struct { + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 443. Valid values are 1 through 65535. + Port int64 `json:"port,omitempty"` - // MinCpuPlatform: Minimum cpu/platform to be used by instances created - // from this machine image. The instance may be scheduled on the - // specified or newer cpu/platform. Applicable values are the friendly - // names of CPU platforms, such as minCpuPlatform: "Intel Haswell" or - // minCpuPlatform: "Intel Sandy Bridge". For more information, read - // Specifying a Minimum CPU Platform. - MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + // PortName: Not supported. + PortName string `json:"portName,omitempty"` - // NetworkInterfaces: An array of network access configurations for this - // interface. - NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. 
Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. + // + // Possible values: + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. + PortSpecification string `json:"portSpecification,omitempty"` - // Scheduling: Specifies the scheduling options for the instances that - // are created from this machine image. - Scheduling *Scheduling `json:"scheduling,omitempty"` + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` - // ServiceAccounts: A list of service accounts with specified scopes. - // Access tokens for these service accounts are available to the - // instances that are created from this machine image. Use metadata - // queries to obtain the access tokens for these instances. - ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` + // Request: Instructs the health check prober to send this exact ASCII + // string, up to 1024 bytes in length, after establishing the TCP + // connection and SSL handshake. + Request string `json:"request,omitempty"` - // Tags: A list of tags to apply to the instances that are created from - // this machine image. The tags identify valid sources or targets for - // network firewalls. The setTags method can modify this list of tags. - // Each tag within the list must comply with RFC1035. - Tags *Tags `json:"tags,omitempty"` + // Response: Creates a content-based SSL health check. In addition to + // establishing a TCP connection and the TLS handshake, you can + // configure the health check to pass only when the backend sends this + // exact response ASCII string, up to 1024 bytes in length. For details, + // see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-ssl-tcp + Response string `json:"response,omitempty"` - // ForceSendFields is a list of field names (e.g. "CanIpForward") to + // ForceSendFields is a list of field names (e.g. "Port") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -42732,108 +43055,166 @@ type SourceInstanceProperties struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CanIpForward") to include - // in API requests with the JSON null value. 
By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Port") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SourceInstanceProperties) MarshalJSON() ([]byte, error) { - type NoMethod SourceInstanceProperties +func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { + type NoMethod SSLHealthCheck raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificate: Represents an SSL Certificate resource. Google -// Compute Engine has two SSL Certificate resources: * Global -// (/compute/docs/reference/rest/v1/sslCertificates) * Regional -// (/compute/docs/reference/rest/v1/regionSslCertificates) The -// sslCertificates are used by: - external HTTPS load balancers - SSL -// proxy load balancers The regionSslCertificates are used by internal -// HTTPS load balancers. Optionally, certificate file contents that you -// upload can contain a set of up to five PEM-encoded certificates. The -// API call creates an object (sslCertificate) that holds this data. You -// can use SSL keys and certificates to secure connections to a load -// balancer. For more information, read Creating and using SSL -// certificates, SSL certificates quotas and limits, and Troubleshooting -// SSL certificates. -type SslCertificate struct { - // Certificate: A value read into memory from a certificate file. The - // certificate file must be in PEM format. The certificate chain must be - // no greater than 5 certs long. The chain must include at least one - // intermediate cert. - Certificate string `json:"certificate,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` +// SavedAttachedDisk: DEPRECATED: Please use compute#savedDisk instead. +// An instance-attached disk resource. +type SavedAttachedDisk struct { + // AutoDelete: Specifies whether the disk will be auto-deleted when the + // instance is deleted (but not when the disk is detached from the + // instance). + AutoDelete bool `json:"autoDelete,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` + // Boot: Indicates that this is a boot disk. The virtual machine will + // use the first partition of the disk for its root filesystem. + Boot bool `json:"boot,omitempty"` - // ExpireTime: [Output Only] Expire time of the certificate. RFC3339 - ExpireTime string `json:"expireTime,omitempty"` + // DeviceName: Specifies the name of the disk attached to the source + // instance. + DeviceName string `json:"deviceName,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // DiskEncryptionKey: The encryption key for the disk. + DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` - // Kind: [Output Only] Type of the resource. 
Always - // compute#sslCertificate for SSL certificates. - Kind string `json:"kind,omitempty"` + // DiskSizeGb: The size of the disk in base-2 GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - // Managed: Configuration and status of a managed SSL certificate. - Managed *SslCertificateManagedSslCertificate `json:"managed,omitempty"` + // DiskType: [Output Only] URL of the disk type resource. For example: + // projects/project /zones/zone/diskTypes/pd-standard or pd-ssd + DiskType string `json:"diskType,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` + // GuestOsFeatures: A list of features to enable on the guest operating + // system. Applicable only for bootable images. Read Enabling guest + // operating system features to see a list of available options. + GuestOsFeatures []*GuestOsFeature `json:"guestOsFeatures,omitempty"` - // PrivateKey: A value read into memory from a write-only private key - // file. The private key file must be in PEM format. For security, only - // insert requests include this field. - PrivateKey string `json:"privateKey,omitempty"` + // Index: Specifies zero-based index of the disk that is attached to the + // source instance. + Index int64 `json:"index,omitempty"` - // Region: [Output Only] URL of the region where the regional SSL - // Certificate resides. This field is not applicable to global SSL - // Certificate. - Region string `json:"region,omitempty"` + // Interface: Specifies the disk interface to use for attaching this + // disk, which is either SCSI or NVME. + // + // Possible values: + // "NVME" + // "SCSI" + Interface string `json:"interface,omitempty"` - // SelfLink: [Output only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // Kind: [Output Only] Type of the resource. Always compute#attachedDisk + // for attached disks. + Kind string `json:"kind,omitempty"` - // SelfManaged: Configuration and status of a self-managed SSL - // certificate. - SelfManaged *SslCertificateSelfManagedSslCertificate `json:"selfManaged,omitempty"` + // Licenses: [Output Only] Any valid publicly visible licenses. + Licenses []string `json:"licenses,omitempty"` - // SubjectAlternativeNames: [Output Only] Domains associated with the - // certificate via Subject Alternative Name. - SubjectAlternativeNames []string `json:"subjectAlternativeNames,omitempty"` + // Mode: The mode in which this disk is attached to the source instance, + // either READ_WRITE or READ_ONLY. + // + // Possible values: + // "READ_ONLY" - Attaches this disk in read-only mode. Multiple + // virtual machines can use a disk in read-only mode at a time. + // "READ_WRITE" - *[Default]* Attaches this disk in read-write mode. + // Only one virtual machine at a time can be attached to a disk in + // read-write mode. + Mode string `json:"mode,omitempty"` - // Type: (Optional) Specifies the type of SSL certificate, either - // "SELF_MANAGED" or "MANAGED". If not specified, the certificate is - // self-managed and the fields certificate and private_key are used. 
+ // Source: Specifies a URL of the disk attached to the source instance. + Source string `json:"source,omitempty"` + + // StorageBytes: [Output Only] A size of the storage used by the disk's + // snapshot by this machine image. + StorageBytes int64 `json:"storageBytes,omitempty,string"` + + // StorageBytesStatus: [Output Only] An indicator whether storageBytes + // is in a stable state or it is being adjusted as a result of shared + // storage reallocation. This status can either be UPDATING, meaning the + // size of the snapshot is being updated, or UP_TO_DATE, meaning the + // size of the snapshot is up-to-date. // // Possible values: - // "MANAGED" - Google-managed SSLCertificate. - // "SELF_MANAGED" - Certificate uploaded by user. - // "TYPE_UNSPECIFIED" + // "UPDATING" + // "UP_TO_DATE" + StorageBytesStatus string `json:"storageBytesStatus,omitempty"` + + // Type: Specifies the type of the attached disk, either SCRATCH or + // PERSISTENT. + // + // Possible values: + // "PERSISTENT" + // "SCRATCH" Type string `json:"type,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "AutoDelete") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // ForceSendFields is a list of field names (e.g. "Certificate") to + // NullFields is a list of field names (e.g. "AutoDelete") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SavedAttachedDisk) MarshalJSON() ([]byte, error) { + type NoMethod SavedAttachedDisk + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SavedDisk: An instance-attached disk resource. +type SavedDisk struct { + // Architecture: [Output Only] The architecture of the attached disk. + // + // Possible values: + // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture + // is not set. + // "ARM64" - Machines with architecture ARM64 + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#savedDisk + // for attached disks. + Kind string `json:"kind,omitempty"` + + // SourceDisk: Specifies a URL of the disk attached to the source + // instance. + SourceDisk string `json:"sourceDisk,omitempty"` + + // StorageBytes: [Output Only] Size of the individual disk snapshot used + // by this machine image. + StorageBytes int64 `json:"storageBytes,omitempty,string"` + + // StorageBytesStatus: [Output Only] An indicator whether storageBytes + // is in a stable state or it is being adjusted as a result of shared + // storage reallocation. 
This status can either be UPDATING, meaning the + // size of the snapshot is being updated, or UP_TO_DATE, meaning the + // size of the snapshot is up-to-date. + // + // Possible values: + // "UPDATING" + // "UP_TO_DATE" + StorageBytesStatus string `json:"storageBytesStatus,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Architecture") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -42841,7 +43222,7 @@ type SslCertificate struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Certificate") to include + // NullFields is a list of field names (e.g. "Architecture") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -42850,22 +43231,236 @@ type SslCertificate struct { NullFields []string `json:"-"` } -func (s *SslCertificate) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificate +func (s *SavedDisk) MarshalJSON() ([]byte, error) { + type NoMethod SavedDisk raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslCertificateAggregatedList struct { +type ScalingScheduleStatus struct { + // LastStartTime: [Output Only] The last time the scaling schedule + // became active. Note: this is a timestamp when a schedule actually + // became active, not when it was planned to do so. The timestamp is in + // RFC3339 text format. + LastStartTime string `json:"lastStartTime,omitempty"` + + // NextStartTime: [Output Only] The next time the scaling schedule is to + // become active. Note: this is a timestamp when a schedule is planned + // to run, but the actual time might be slightly different. The + // timestamp is in RFC3339 text format. + NextStartTime string `json:"nextStartTime,omitempty"` + + // State: [Output Only] The current state of a scaling schedule. + // + // Possible values: + // "ACTIVE" - The current autoscaling recommendation is influenced by + // this scaling schedule. + // "DISABLED" - This scaling schedule has been disabled by the user. + // "OBSOLETE" - This scaling schedule will never become active again. + // "READY" - The current autoscaling recommendation is not influenced + // by this scaling schedule. + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LastStartTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LastStartTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
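// Illustrative sketch (not part of the vendored, generated file): the
// ForceSendFields / NullFields convention documented on the structs above
// (SavedAttachedDisk, SavedDisk, ScalingScheduleStatus, ...) is applied by the
// generated MarshalJSON methods via gensupport.MarshalJSON. A minimal example
// of how a caller might use it, assuming the package's usual import path; the
// exact JSON key order in the output may differ.
package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	disk := &compute.SavedAttachedDisk{
		AutoDelete: false, // zero value: normally omitted from the request body
		// Force the false value onto the wire, e.g. in a Patch-style request.
		ForceSendFields: []string{"AutoDelete"},
		// Send an explicit JSON null for DeviceName (its Go value must stay empty).
		NullFields: []string{"DeviceName"},
	}
	b, err := json.Marshal(disk) // dispatches to the generated MarshalJSON
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // e.g. {"autoDelete":false,"deviceName":null}
}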
+ NullFields []string `json:"-"` +} + +func (s *ScalingScheduleStatus) MarshalJSON() ([]byte, error) { + type NoMethod ScalingScheduleStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Scheduling: Sets the scheduling options for an Instance. +type Scheduling struct { + // AutomaticRestart: Specifies whether the instance should be + // automatically restarted if it is terminated by Compute Engine (not + // terminated by a user). You can only set the automatic restart option + // for standard instances. Preemptible instances cannot be automatically + // restarted. By default, this is set to true so an instance is + // automatically restarted if it is terminated by Compute Engine. + AutomaticRestart *bool `json:"automaticRestart,omitempty"` + + // InstanceTerminationAction: Specifies the termination action for the + // instance. + // + // Possible values: + // "DELETE" - Delete the VM. + // "INSTANCE_TERMINATION_ACTION_UNSPECIFIED" - Default value. This + // value is unused. + // "STOP" - Stop the VM without storing in-memory content. default + // action. + InstanceTerminationAction string `json:"instanceTerminationAction,omitempty"` + + // LocationHint: An opaque location hint used to place the instance + // close to other resources. This field is for use by internal tools + // that use the public API. + LocationHint string `json:"locationHint,omitempty"` + + // MinNodeCpus: The minimum number of virtual CPUs this instance will + // consume when running on a sole-tenant node. + MinNodeCpus int64 `json:"minNodeCpus,omitempty"` + + // NodeAffinities: A set of node affinity and anti-affinity + // configurations. Refer to Configuring node affinity for more + // information. Overrides reservationAffinity. + NodeAffinities []*SchedulingNodeAffinity `json:"nodeAffinities,omitempty"` + + // OnHostMaintenance: Defines the maintenance behavior for this + // instance. For standard instances, the default behavior is MIGRATE. + // For preemptible instances, the default and only possible behavior is + // TERMINATE. For more information, see Set VM host maintenance policy. + // + // Possible values: + // "MIGRATE" - *[Default]* Allows Compute Engine to automatically + // migrate instances out of the way of maintenance events. + // "TERMINATE" - Tells Compute Engine to terminate and (optionally) + // restart the instance away from the maintenance activity. If you would + // like your instance to be restarted, set the automaticRestart flag to + // true. Your instance may be restarted more than once, and it may be + // restarted outside the window of maintenance events. + OnHostMaintenance string `json:"onHostMaintenance,omitempty"` + + // Preemptible: Defines whether the instance is preemptible. This can + // only be set during instance creation or while the instance is stopped + // and therefore, in a `TERMINATED` state. See Instance Life Cycle for + // more information on the possible instance states. + Preemptible bool `json:"preemptible,omitempty"` + + // ProvisioningModel: Specifies the provisioning model of the instance. + // + // Possible values: + // "SPOT" - Heavily discounted, no guaranteed runtime. + // "STANDARD" - Standard provisioning with user controlled runtime, no + // discounts. + ProvisioningModel string `json:"provisioningModel,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to + // unconditionally include in API requests. 
By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AutomaticRestart") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Scheduling) MarshalJSON() ([]byte, error) { + type NoMethod Scheduling + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SchedulingNodeAffinity: Node Affinity: the configuration of desired +// nodes onto which this Instance could be scheduled. +type SchedulingNodeAffinity struct { + // Key: Corresponds to the label key of Node resource. + Key string `json:"key,omitempty"` + + // Operator: Defines the operation of node selection. Valid operators + // are IN for affinity and NOT_IN for anti-affinity. + // + // Possible values: + // "IN" - Requires Compute Engine to seek for matched nodes. + // "NOT_IN" - Requires Compute Engine to avoid certain nodes. + // "OPERATOR_UNSPECIFIED" + Operator string `json:"operator,omitempty"` + + // Values: Corresponds to the label values of Node resource. + Values []string `json:"values,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SchedulingNodeAffinity) MarshalJSON() ([]byte, error) { + type NoMethod SchedulingNodeAffinity + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Screenshot: An instance's screenshot. +type Screenshot struct { + // Contents: [Output Only] The Base64-encoded screenshot data. + Contents string `json:"contents,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#screenshot + // for the screenshots. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Contents") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Contents") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Screenshot) MarshalJSON() ([]byte, error) { + type NoMethod Screenshot + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SecurityPoliciesAggregatedList struct { + Etag string `json:"etag,omitempty"` + // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of SslCertificatesScopedList resources. - Items map[string]SslCertificatesScopedList `json:"items,omitempty"` + // Items: A list of SecurityPoliciesScopedList resources. + Items map[string]SecurityPoliciesScopedList `json:"items,omitempty"` // Kind: [Output Only] Type of resource. Always - // compute#sslCertificateAggregatedList for lists of SSL Certificates. + // compute#securityPolicyAggregatedList for lists of Security Policies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -42883,13 +43478,13 @@ type SslCertificateAggregatedList struct { Unreachables []string `json:"unreachables,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *SslCertificateAggregatedListWarning `json:"warning,omitempty"` + Warning *SecurityPoliciesAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -42897,7 +43492,7 @@ type SslCertificateAggregatedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Etag") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -42906,15 +43501,15 @@ type SslCertificateAggregatedList struct { NullFields []string `json:"-"` } -func (s *SslCertificateAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateAggregatedList +func (s *SecurityPoliciesAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPoliciesAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificateAggregatedListWarning: [Output Only] Informational +// SecurityPoliciesAggregatedListWarning: [Output Only] Informational // warning message. -type SslCertificateAggregatedListWarning struct { +type SecurityPoliciesAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -42936,6 +43531,9 @@ type SslCertificateAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -42943,6 +43541,9 @@ type SslCertificateAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -42976,7 +43577,7 @@ type SslCertificateAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*SslCertificateAggregatedListWarningData `json:"data,omitempty"` + Data []*SecurityPoliciesAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -42999,13 +43600,13 @@ type SslCertificateAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *SslCertificateAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateAggregatedListWarning +func (s *SecurityPoliciesAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPoliciesAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslCertificateAggregatedListWarningData struct { +type SecurityPoliciesAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -43036,43 +43637,53 @@ type SslCertificateAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *SslCertificateAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateAggregatedListWarningData +func (s *SecurityPoliciesAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPoliciesAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificateList: Contains a list of SslCertificate resources. -type SslCertificateList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +type SecurityPoliciesListPreconfiguredExpressionSetsResponse struct { + PreconfiguredExpressionSets *SecurityPoliciesWafConfig `json:"preconfiguredExpressionSets,omitempty"` - // Items: A list of SslCertificate resources. - Items []*SslCertificate `json:"items,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "PreconfiguredExpressionSets") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // NullFields is a list of field names (e.g. + // "PreconfiguredExpressionSets") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +func (s *SecurityPoliciesListPreconfiguredExpressionSetsResponse) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPoliciesListPreconfiguredExpressionSetsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // Warning: [Output Only] Informational warning message. - Warning *SslCertificateListWarning `json:"warning,omitempty"` +type SecurityPoliciesScopedList struct { + // SecurityPolicies: A list of SecurityPolicies contained in this scope. + SecurityPolicies []*SecurityPolicy `json:"securityPolicies,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` + // Warning: Informational warning which replaces the list of security + // policies when the list is empty. + Warning *SecurityPoliciesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "SecurityPolicies") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -43080,24 +43691,25 @@ type SslCertificateList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "SecurityPolicies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *SslCertificateList) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateList +func (s *SecurityPoliciesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPoliciesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificateListWarning: [Output Only] Informational warning -// message. -type SslCertificateListWarning struct { +// SecurityPoliciesScopedListWarning: Informational warning which +// replaces the list of security policies when the list is empty. +type SecurityPoliciesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -43119,6 +43731,9 @@ type SslCertificateListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -43126,6 +43741,9 @@ type SslCertificateListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. 
// "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -43159,7 +43777,7 @@ type SslCertificateListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*SslCertificateListWarningData `json:"data,omitempty"` + Data []*SecurityPoliciesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -43182,13 +43800,13 @@ type SslCertificateListWarning struct { NullFields []string `json:"-"` } -func (s *SslCertificateListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateListWarning +func (s *SecurityPoliciesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPoliciesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslCertificateListWarningData struct { +type SecurityPoliciesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -43219,50 +43837,16 @@ type SslCertificateListWarningData struct { NullFields []string `json:"-"` } -func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateListWarningData +func (s *SecurityPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPoliciesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificateManagedSslCertificate: Configuration and status of a -// managed SSL certificate. -type SslCertificateManagedSslCertificate struct { - // DomainStatus: [Output only] Detailed statuses of the domains - // specified for managed certificate resource. - DomainStatus map[string]string `json:"domainStatus,omitempty"` - - // Domains: The domains for which a managed SSL certificate will be - // generated. Each Google-managed SSL certificate supports up to the - // maximum number of domains per Google-managed SSL certificate - // (/load-balancing/docs/quotas#ssl_certificates). - Domains []string `json:"domains,omitempty"` - - // Status: [Output only] Status of the managed certificate resource. - // - // Possible values: - // "ACTIVE" - The certificate management is working, and a certificate - // has been provisioned. - // "MANAGED_CERTIFICATE_STATUS_UNSPECIFIED" - // "PROVISIONING" - The certificate management is working. GCP will - // attempt to provision the first certificate. - // "PROVISIONING_FAILED" - Certificate provisioning failed due to an - // issue with the DNS or load balancing configuration. For details of - // which domain failed, consult domain_status field. - // "PROVISIONING_FAILED_PERMANENTLY" - Certificate provisioning failed - // due to an issue with the DNS or load balancing configuration. It - // won't be retried. To try again delete and create a new managed - // SslCertificate resource. For details of which domain failed, consult - // domain_status field. - // "RENEWAL_FAILED" - Renewal of the certificate has failed due to an - // issue with the DNS or load balancing configuration. The existing cert - // is still serving; however, it will expire shortly. To provision a - // renewed certificate, delete and create a new managed SslCertificate - // resource. 
For details on which domain failed, consult domain_status - // field. - Status string `json:"status,omitempty"` +type SecurityPoliciesWafConfig struct { + WafRules *PreconfiguredWafSet `json:"wafRules,omitempty"` - // ForceSendFields is a list of field names (e.g. "DomainStatus") to + // ForceSendFields is a list of field names (e.g. "WafRules") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -43270,34 +43854,181 @@ type SslCertificateManagedSslCertificate struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DomainStatus") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "WafRules") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SslCertificateManagedSslCertificate) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateManagedSslCertificate +func (s *SecurityPoliciesWafConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPoliciesWafConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificateSelfManagedSslCertificate: Configuration and status of -// a self-managed SSL certificate. -type SslCertificateSelfManagedSslCertificate struct { - // Certificate: A local certificate file. The certificate must be in PEM - // format. The certificate chain must be no greater than 5 certs long. - // The chain must include at least one intermediate cert. - Certificate string `json:"certificate,omitempty"` +// SecurityPolicy: Represents a Google Cloud Armor security policy +// resource. Only external backend services that use load balancers can +// reference a security policy. For more information, see Google Cloud +// Armor security policy overview. +type SecurityPolicy struct { + AdaptiveProtectionConfig *SecurityPolicyAdaptiveProtectionConfig `json:"adaptiveProtectionConfig,omitempty"` - // PrivateKey: A write-only private key in PEM format. Only insert - // requests will include this field. - PrivateKey string `json:"privateKey,omitempty"` + AdvancedOptionsConfig *SecurityPolicyAdvancedOptionsConfig `json:"advancedOptionsConfig,omitempty"` - // ForceSendFields is a list of field names (e.g. "Certificate") to + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + DdosProtectionConfig *SecurityPolicyDdosProtectionConfig `json:"ddosProtectionConfig,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Specifies a fingerprint for this resource, which is + // essentially a hash of the metadata's contents and used for optimistic + // locking. 
The fingerprint is initially generated by Compute Engine and + // changes after every request to modify or update metadata. You must + // always provide an up-to-date fingerprint hash in order to update or + // change metadata, otherwise the request will fail with error 412 + // conditionNotMet. To see the latest fingerprint, make get() request to + // the security policy. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output only] Type of the resource. Always + // compute#securityPolicyfor security policies + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + RecaptchaOptionsConfig *SecurityPolicyRecaptchaOptionsConfig `json:"recaptchaOptionsConfig,omitempty"` + + // Region: [Output Only] URL of the region where the regional security + // policy resides. This field is not applicable to global security + // policies. + Region string `json:"region,omitempty"` + + // Rules: A list of rules that belong to this policy. There must always + // be a default rule which is a rule with priority 2147483647 and match + // all condition (for the match condition this means match "*" for + // srcIpRanges and for the networkMatch condition every field must be + // either match "*" or not set). If no rules are provided when creating + // a security policy, a default rule with action "allow" will be added. + Rules []*SecurityPolicyRule `json:"rules,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Type: The type indicates the intended use of the security policy. - + // CLOUD_ARMOR: Cloud Armor backend security policies can be configured + // to filter incoming HTTP requests targeting backend services. They + // filter requests before they hit the origin servers. - + // CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be + // configured to filter incoming HTTP requests targeting backend + // services (including Cloud CDN-enabled) as well as backend buckets + // (Cloud Storage). They filter requests before the request is served + // from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor + // internal service policies can be configured to filter HTTP requests + // targeting services managed by Traffic Director in a service mesh. + // They filter requests before the request is served from the + // application. This field can be set only at resource creation time. + // + // Possible values: + // "CLOUD_ARMOR" + // "CLOUD_ARMOR_EDGE" + // "CLOUD_ARMOR_NETWORK" + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "AdaptiveProtectionConfig") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. 
However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdaptiveProtectionConfig") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicy) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SecurityPolicyAdaptiveProtectionConfig: Configuration options for +// Cloud Armor Adaptive Protection (CAAP). +type SecurityPolicyAdaptiveProtectionConfig struct { + // Layer7DdosDefenseConfig: If set to true, enables Cloud Armor Machine + // Learning. + Layer7DdosDefenseConfig *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig `json:"layer7DdosDefenseConfig,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "Layer7DdosDefenseConfig") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Layer7DdosDefenseConfig") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyAdaptiveProtectionConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdaptiveProtectionConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig: +// Configuration options for L7 DDoS detection. +type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { + // Enable: If set to true, enables CAAP for L7 DDoS detection. + Enable bool `json:"enable,omitempty"` + + // RuleVisibility: Rule visibility can be one of the following: STANDARD + // - opaque rules. (default) PREMIUM - transparent rules. + // + // Possible values: + // "PREMIUM" + // "STANDARD" + RuleVisibility string `json:"ruleVisibility,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enable") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -43305,30 +44036,37 @@ type SslCertificateSelfManagedSslCertificate struct { // This may be used to include empty fields in Patch requests. 
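// Illustrative sketch (not part of the vendored, generated file): how the
// SecurityPolicy and SecurityPolicyAdaptiveProtectionConfig structs above
// compose. The SecurityPolicies.Insert call is assumed from the generated
// service surface of this package (it is not visible in this hunk); the
// project and policy names are placeholders, and credentials come from
// Application Default Credentials.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	policy := &compute.SecurityPolicy{
		Name:        "example-policy", // hypothetical name
		Description: "Cloud Armor policy with Adaptive Protection enabled",
		AdaptiveProtectionConfig: &compute.SecurityPolicyAdaptiveProtectionConfig{
			Layer7DdosDefenseConfig: &compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig{
				Enable:         true,       // turn on CAAP L7 DDoS detection
				RuleVisibility: "STANDARD", // opaque rules (default per the comment above)
			},
		},
	}

	// Insert returns an Operation; polling it to completion is omitted here.
	op, err := svc.SecurityPolicies.Insert("my-project", policy).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("inserted security policy, operation: %s", op.Name)
}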
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Certificate") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Enable") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SslCertificateSelfManagedSslCertificate) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateSelfManagedSslCertificate +func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslCertificatesScopedList struct { - // SslCertificates: List of SslCertificates contained in this scope. - SslCertificates []*SslCertificate `json:"sslCertificates,omitempty"` +type SecurityPolicyAdvancedOptionsConfig struct { + // JsonCustomConfig: Custom configuration to apply the JSON parsing. + // Only applicable when json_parsing is set to STANDARD. + JsonCustomConfig *SecurityPolicyAdvancedOptionsConfigJsonCustomConfig `json:"jsonCustomConfig,omitempty"` - // Warning: Informational warning which replaces the list of backend - // services when the list is empty. - Warning *SslCertificatesScopedListWarning `json:"warning,omitempty"` + // Possible values: + // "DISABLED" + // "STANDARD" + JsonParsing string `json:"jsonParsing,omitempty"` - // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // Possible values: + // "NORMAL" + // "VERBOSE" + LogLevel string `json:"logLevel,omitempty"` + + // ForceSendFields is a list of field names (e.g. "JsonCustomConfig") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -43336,7 +44074,7 @@ type SslCertificatesScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SslCertificates") to + // NullFields is a list of field names (e.g. "JsonCustomConfig") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -43346,83 +44084,22 @@ type SslCertificatesScopedList struct { NullFields []string `json:"-"` } -func (s *SslCertificatesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificatesScopedList +func (s *SecurityPolicyAdvancedOptionsConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdvancedOptionsConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificatesScopedListWarning: Informational warning which -// replaces the list of backend services when the list is empty. 
-type SslCertificatesScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*SslCertificatesScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` +type SecurityPolicyAdvancedOptionsConfigJsonCustomConfig struct { + // ContentTypes: A list of custom Content-Type header values to apply + // the JSON parsing. 
As per RFC 1341, a Content-Type header value has + // the following format: Content-Type := type "/" subtype *[";" + // parameter] When configuring a custom Content-Type header value, only + // the type/subtype needs to be specified, and the parameters should be + // excluded. + ContentTypes []string `json:"contentTypes,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "ContentTypes") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -43430,36 +44107,28 @@ type SslCertificatesScopedListWarning struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "ContentTypes") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SslCertificatesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificatesScopedListWarning +func (s *SecurityPolicyAdvancedOptionsConfigJsonCustomConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdvancedOptionsConfigJsonCustomConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslCertificatesScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` +type SecurityPolicyDdosProtectionConfig struct { + // Possible values: + // "ADVANCED" + // "STANDARD" + DdosProtection string `json:"ddosProtection,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "DdosProtection") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -43467,31 +44136,32 @@ type SslCertificatesScopedListWarningData struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "DdosProtection") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *SslCertificatesScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificatesScopedListWarningData +func (s *SecurityPolicyDdosProtectionConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyDdosProtectionConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesList struct { +type SecurityPolicyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of SslPolicy resources. - Items []*SslPolicy `json:"items,omitempty"` + // Items: A list of SecurityPolicy resources. + Items []*SecurityPolicy `json:"items,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#sslPoliciesList for lists of sslPolicies. + // Kind: [Output Only] Type of resource. Always + // compute#securityPolicyList for listsof securityPolicies Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -43502,11 +44172,8 @@ type SslPoliciesList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *SslPoliciesListWarning `json:"warning,omitempty"` + Warning *SecurityPolicyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -43529,14 +44196,15 @@ type SslPoliciesList struct { NullFields []string `json:"-"` } -func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesList +func (s *SecurityPolicyList) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslPoliciesListWarning: [Output Only] Informational warning message. -type SslPoliciesListWarning struct { +// SecurityPolicyListWarning: [Output Only] Informational warning +// message. +type SecurityPolicyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -43558,6 +44226,9 @@ type SslPoliciesListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -43565,6 +44236,9 @@ type SslPoliciesListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -43598,7 +44272,7 @@ type SslPoliciesListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*SslPoliciesListWarningData `json:"data,omitempty"` + Data []*SecurityPolicyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -43621,13 +44295,13 @@ type SslPoliciesListWarning struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListWarning +func (s *SecurityPolicyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesListWarningData struct { +type SecurityPolicyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -43658,20 +44332,22 @@ type SslPoliciesListWarningData struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListWarningData +func (s *SecurityPolicyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesListAvailableFeaturesResponse struct { - Features []string `json:"features,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type SecurityPolicyRecaptchaOptionsConfig struct { + // RedirectSiteKey: An optional field to supply a reCAPTCHA site key to + // be used for all the rules using the redirect action with the type of + // GOOGLE_RECAPTCHA under the security policy. The specified site key + // needs to be created from the reCAPTCHA API. The user is responsible + // for the validity of the specified site key. If not specified, a + // Google-managed site key is used. + RedirectSiteKey string `json:"redirectSiteKey,omitempty"` - // ForceSendFields is a list of field names (e.g. "Features") to + // ForceSendFields is a list of field names (e.g. "RedirectSiteKey") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -43679,203 +44355,166 @@ type SslPoliciesListAvailableFeaturesResponse struct { // This may be used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Features") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "RedirectSiteKey") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListAvailableFeaturesResponse +func (s *SecurityPolicyRecaptchaOptionsConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRecaptchaOptionsConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslPolicy: Represents an SSL Policy resource. Use SSL policies to -// control the SSL features, such as versions and cipher suites, offered -// by an HTTPS or SSL Proxy load balancer. For more information, read -// SSL Policy Concepts. -type SslPolicy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` +type SecurityPolicyReference struct { + SecurityPolicy string `json:"securityPolicy,omitempty"` - // CustomFeatures: A list of features enabled when the selected profile - // is CUSTOM. The method returns the set of features that can be - // specified in this list. This field must be empty if the profile is - // not CUSTOM. - CustomFeatures []string `json:"customFeatures,omitempty"` + // ForceSendFields is a list of field names (e.g. "SecurityPolicy") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SecurityPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SecurityPolicyRule: Represents a rule that describes one or more +// match conditions along with the action to be taken when traffic +// matches this condition (allow or deny). +type SecurityPolicyRule struct { + // Action: The Action to perform when the rule is matched. 
The following + // are the valid actions: - allow: allow access to target. - deny(): + // deny access to target, returns the HTTP response code specified + // (valid values are 403, 404, and 502). - rate_based_ban: limit client + // traffic to the configured threshold and ban the client if the traffic + // exceeds the threshold. Configure parameters for this action in + // RateLimitOptions. Requires rate_limit_options to be set. - redirect: + // redirect to a different target. This can either be an internal + // reCAPTCHA redirect, or an external URL-based redirect via a 302 + // response. Parameters for this action can be configured via + // redirectOptions. - throttle: limit client traffic to the configured + // threshold. Configure parameters for this action in rateLimitOptions. + // Requires rate_limit_options to be set for this. + Action string `json:"action,omitempty"` // Description: An optional description of this resource. Provide this // property when you create the resource. Description string `json:"description,omitempty"` - // EnabledFeatures: [Output Only] The list of features enabled in the - // SSL policy. - EnabledFeatures []string `json:"enabledFeatures,omitempty"` - - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a SslPolicy. An up-to-date - // fingerprint must be provided in order to update the SslPolicy, - // otherwise the request will fail with error 412 conditionNotMet. To - // see the latest fingerprint, make a get() request to retrieve an - // SslPolicy. - Fingerprint string `json:"fingerprint,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // HeaderAction: Optional, additional actions that are performed on + // headers. + HeaderAction *SecurityPolicyRuleHttpHeaderAction `json:"headerAction,omitempty"` - // Kind: [Output only] Type of the resource. Always compute#sslPolicyfor - // SSL policies. + // Kind: [Output only] Type of the resource. Always + // compute#securityPolicyRule for security policy rules Kind string `json:"kind,omitempty"` - // MinTlsVersion: The minimum version of SSL protocol that can be used - // by the clients to establish a connection with the load balancer. This - // can be one of TLS_1_0, TLS_1_1, TLS_1_2. - // - // Possible values: - // "TLS_1_0" - TLS 1.0 - // "TLS_1_1" - TLS 1.1 - // "TLS_1_2" - TLS 1.2 - MinTlsVersion string `json:"minTlsVersion,omitempty"` + // Match: A match condition that incoming traffic is evaluated against. + // If it evaluates to true, the corresponding 'action' is enforced. + Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` - // Name: Name of the resource. The name must be 1-63 characters long, - // and comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` + // Preview: If set to true, the specified action is not enforced. + Preview bool `json:"preview,omitempty"` - // Profile: Profile specifies the set of SSL features that can be used - // by the load balancer when negotiating SSL with clients. 
This can be - // one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, - // the set of SSL features to enable must be specified in the - // customFeatures field. - // - // Possible values: - // "COMPATIBLE" - Compatible profile. Allows the broadset set of - // clients, even those which support only out-of-date SSL features to - // negotiate with the load balancer. - // "CUSTOM" - Custom profile. Allow only the set of allowed SSL - // features specified in the customFeatures field. - // "MODERN" - Modern profile. Supports a wide set of SSL features, - // allowing modern clients to negotiate SSL with the load balancer. - // "RESTRICTED" - Restricted profile. Supports a reduced set of SSL - // features, intended to meet stricter compliance requirements. - Profile string `json:"profile,omitempty"` + // Priority: An integer indicating the priority of a rule in the list. + // The priority must be a positive value between 0 and 2147483647. Rules + // are evaluated from highest to lowest priority where 0 is the highest + // priority and 2147483647 is the lowest priority. + Priority int64 `json:"priority,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // RateLimitOptions: Must be specified if the action is "rate_based_ban" + // or "throttle". Cannot be specified for any other actions. + RateLimitOptions *SecurityPolicyRuleRateLimitOptions `json:"rateLimitOptions,omitempty"` - // Warnings: [Output Only] If potential misconfigurations are detected - // for this SSL policy, this field will be populated with warning - // messages. - Warnings []*SslPolicyWarnings `json:"warnings,omitempty"` + // RedirectOptions: Parameters defining the redirect action. Cannot be + // specified for any other actions. + RedirectOptions *SecurityPolicyRuleRedirectOptions `json:"redirectOptions,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *SslPolicy) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicy +func (s *SecurityPolicyRule) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRule raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPolicyWarnings struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` +type SecurityPolicyRuleHttpHeaderAction struct { + // RequestHeadersToAdds: The list of request headers to add or overwrite + // if they're already present. 
+ RequestHeadersToAdds []*SecurityPolicyRuleHttpHeaderActionHttpHeaderOption `json:"requestHeadersToAdds,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*SslPolicyWarningsData `json:"data,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "RequestHeadersToAdds") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // NullFields is a list of field names (e.g. "RequestHeadersToAdds") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - // ForceSendFields is a list of field names (e.g. "Code") to +func (s *SecurityPolicyRuleHttpHeaderAction) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleHttpHeaderAction + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SecurityPolicyRuleHttpHeaderActionHttpHeaderOption struct { + // HeaderName: The name of the header to set. + HeaderName string `json:"headerName,omitempty"` + + // HeaderValue: The value to set the named header to. + HeaderValue string `json:"headerValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HeaderName") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -43883,8 +44522,8 @@ type SslPolicyWarnings struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "HeaderName") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -43892,27 +44531,36 @@ type SslPolicyWarnings struct { NullFields []string `json:"-"` } -func (s *SslPolicyWarnings) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicyWarnings +func (s *SecurityPolicyRuleHttpHeaderActionHttpHeaderOption) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleHttpHeaderActionHttpHeaderOption raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPolicyWarningsData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. 
For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +// SecurityPolicyRuleMatcher: Represents a match condition that incoming +// traffic is evaluated against. Exactly one field must be specified. +type SecurityPolicyRuleMatcher struct { + // Config: The configuration options available when specifying + // versioned_expr. This field must be specified if versioned_expr is + // specified and cannot be specified if versioned_expr is not specified. + Config *SecurityPolicyRuleMatcherConfig `json:"config,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // Expr: User defined CEVAL expression. A CEVAL expression is used to + // specify match criteria such as origin.ip, source.region_code and + // contents in the request header. + Expr *Expr `json:"expr,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // VersionedExpr: Preconfigured versioned expression. If this field is + // specified, config must also be specified. Available preconfigured + // expressions along with their requirements are: SRC_IPS_V1 - must + // specify the corresponding src_ip_range field in config. + // + // Possible values: + // "SRC_IPS_V1" - Matches the source IP address of a request to the IP + // ranges supplied in config. + VersionedExpr string `json:"versionedExpr,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Config") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -43920,7 +44568,7 @@ type SslPolicyWarningsData struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API + // NullFields is a list of field names (e.g. "Config") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -43929,19 +44577,18 @@ type SslPolicyWarningsData struct { NullFields []string `json:"-"` } -func (s *SslPolicyWarningsData) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicyWarningsData +func (s *SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleMatcher raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPolicyReference struct { - // SslPolicy: URL of the SSL policy resource. Set this to empty string - // to clear any existing SSL policy associated with the target proxy - // resource. - SslPolicy string `json:"sslPolicy,omitempty"` +type SecurityPolicyRuleMatcherConfig struct { + // SrcIpRanges: CIDR IP address range. Maximum number of src_ip_ranges + // allowed is 10. + SrcIpRanges []string `json:"srcIpRanges,omitempty"` - // ForceSendFields is a list of field names (e.g. "SslPolicy") to + // ForceSendFields is a list of field names (e.g. 
"SrcIpRanges") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -43949,25 +44596,95 @@ type SslPolicyReference struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SslPolicy") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "SrcIpRanges") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SslPolicyReference) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicyReference +func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleMatcherConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type StatefulPolicy struct { - PreservedState *StatefulPolicyPreservedState `json:"preservedState,omitempty"` +type SecurityPolicyRuleRateLimitOptions struct { + // BanDurationSec: Can only be specified if the action for the rule is + // "rate_based_ban". If specified, determines the time (in seconds) the + // traffic will continue to be banned by the rate limit after the rate + // falls below the threshold. + BanDurationSec int64 `json:"banDurationSec,omitempty"` - // ForceSendFields is a list of field names (e.g. "PreservedState") to + // BanThreshold: Can only be specified if the action for the rule is + // "rate_based_ban". If specified, the key will be banned for the + // configured 'ban_duration_sec' when the number of requests that exceed + // the 'rate_limit_threshold' also exceed this 'ban_threshold'. + BanThreshold *SecurityPolicyRuleRateLimitOptionsThreshold `json:"banThreshold,omitempty"` + + // ConformAction: Action to take for requests that are under the + // configured rate limit threshold. Valid option is "allow" only. + ConformAction string `json:"conformAction,omitempty"` + + // EnforceOnKey: Determines the key to enforce the rate_limit_threshold + // on. Possible values are: - ALL: A single rate limit threshold is + // applied to all the requests matching this rule. This is the default + // value if "enforceOnKey" is not configured. - IP: The source IP + // address of the request is the key. Each IP has this limit enforced + // separately. - HTTP_HEADER: The value of the HTTP header whose name is + // configured under "enforceOnKeyName". The key value is truncated to + // the first 128 bytes of the header value. If no such header is present + // in the request, the key type defaults to ALL. - XFF_IP: The first IP + // address (i.e. the originating client IP address) specified in the + // list of IPs under X-Forwarded-For HTTP header. If no such header is + // present or the value is not a valid IP, the key defaults to the + // source IP address of the request i.e. key type IP. 
- HTTP_COOKIE: The + // value of the HTTP cookie whose name is configured under + // "enforceOnKeyName". The key value is truncated to the first 128 bytes + // of the cookie value. If no such cookie is present in the request, the + // key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP + // request. The key value is truncated to the first 128 bytes. - SNI: + // Server name indication in the TLS session of the HTTPS request. The + // key value is truncated to the first 128 bytes. The key type defaults + // to ALL on a HTTP session. - REGION_CODE: The country/region from + // which the request originates. + // + // Possible values: + // "ALL" + // "HTTP_COOKIE" + // "HTTP_HEADER" + // "HTTP_PATH" + // "IP" + // "REGION_CODE" + // "SNI" + // "XFF_IP" + EnforceOnKey string `json:"enforceOnKey,omitempty"` + + // EnforceOnKeyName: Rate limit key name applicable only for the + // following key types: HTTP_HEADER -- Name of the HTTP header whose + // value is taken as the key value. HTTP_COOKIE -- Name of the HTTP + // cookie whose value is taken as the key value. + EnforceOnKeyName string `json:"enforceOnKeyName,omitempty"` + + // ExceedAction: Action to take for requests that are above the + // configured rate limit threshold, to either deny with a specified HTTP + // response code, or redirect to a different endpoint. Valid options are + // "deny(status)", where valid values for status are 403, 404, 429, and + // 502, and "redirect" where the redirect parameters come from + // exceedRedirectOptions below. + ExceedAction string `json:"exceedAction,omitempty"` + + // ExceedRedirectOptions: Parameters defining the redirect action that + // is used as the exceed action. Cannot be specified if the exceed + // action is not redirect. + ExceedRedirectOptions *SecurityPolicyRuleRedirectOptions `json:"exceedRedirectOptions,omitempty"` + + // RateLimitThreshold: Threshold at which to begin ratelimiting. + RateLimitThreshold *SecurityPolicyRuleRateLimitOptionsThreshold `json:"rateLimitThreshold,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BanDurationSec") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -43975,7 +44692,7 @@ type StatefulPolicy struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "PreservedState") to + // NullFields is a list of field names (e.g. "BanDurationSec") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -43985,20 +44702,20 @@ type StatefulPolicy struct { NullFields []string `json:"-"` } -func (s *StatefulPolicy) MarshalJSON() ([]byte, error) { - type NoMethod StatefulPolicy +func (s *SecurityPolicyRuleRateLimitOptions) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleRateLimitOptions raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// StatefulPolicyPreservedState: Configuration of preserved resources. -type StatefulPolicyPreservedState struct { - // Disks: Disks created on the instances that will be preserved on - // instance delete, update, etc. This map is keyed with the device names - // of the disks. 
- Disks map[string]StatefulPolicyPreservedStateDiskDevice `json:"disks,omitempty"` +type SecurityPolicyRuleRateLimitOptionsThreshold struct { + // Count: Number of HTTP(S) requests for calculating the threshold. + Count int64 `json:"count,omitempty"` - // ForceSendFields is a list of field names (e.g. "Disks") to + // IntervalSec: Interval over which the threshold is computed. + IntervalSec int64 `json:"intervalSec,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Count") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -44006,7 +44723,7 @@ type StatefulPolicyPreservedState struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Disks") to include in API + // NullFields is a list of field names (e.g. "Count") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -44015,26 +44732,25 @@ type StatefulPolicyPreservedState struct { NullFields []string `json:"-"` } -func (s *StatefulPolicyPreservedState) MarshalJSON() ([]byte, error) { - type NoMethod StatefulPolicyPreservedState +func (s *SecurityPolicyRuleRateLimitOptionsThreshold) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleRateLimitOptionsThreshold raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type StatefulPolicyPreservedStateDiskDevice struct { - // AutoDelete: These stateful disks will never be deleted during - // autohealing, update or VM instance recreate operations. This flag is - // used to configure if the disk should be deleted after it is no longer - // used by the group, e.g. when the given instance or the whole group is - // deleted. Note: disks attached in READ_ONLY mode cannot be - // auto-deleted. +type SecurityPolicyRuleRedirectOptions struct { + // Target: Target for the redirect action. This is required if the type + // is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. + Target string `json:"target,omitempty"` + + // Type: Type of the redirect action. // // Possible values: - // "NEVER" - // "ON_PERMANENT_INSTANCE_DELETION" - AutoDelete string `json:"autoDelete,omitempty"` + // "EXTERNAL_302" + // "GOOGLE_RECAPTCHA" + Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoDelete") to + // ForceSendFields is a list of field names (e.g. "Target") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -44042,7 +44758,112 @@ type StatefulPolicyPreservedStateDiskDevice struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoDelete") to include in + // NullFields is a list of field names (e.g. "Target") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRuleRedirectOptions) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleRedirectOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SecuritySettings: The authentication and authorization settings for a +// BackendService. +type SecuritySettings struct { + // ClientTlsPolicy: Optional. A URL referring to a + // networksecurity.ClientTlsPolicy resource that describes how clients + // should authenticate with this service's backends. clientTlsPolicy + // only applies to a global BackendService with the loadBalancingScheme + // set to INTERNAL_SELF_MANAGED. If left blank, communications are not + // encrypted. Note: This field currently has no impact. + ClientTlsPolicy string `json:"clientTlsPolicy,omitempty"` + + // SubjectAltNames: Optional. A list of Subject Alternative Names (SANs) + // that the client verifies during a mutual TLS handshake with an + // server/endpoint for this BackendService. When the server presents its + // X.509 certificate to the client, the client inspects the + // certificate's subjectAltName field. If the field contains one of the + // specified values, the communication continues. Otherwise, it fails. + // This additional check enables the client to verify that the server is + // authorized to run the requested service. Note that the contents of + // the server certificate's subjectAltName field are configured by the + // Public Key Infrastructure which provisions server identities. Only + // applies to a global BackendService with loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. Only applies when BackendService has an + // attached clientTlsPolicy with clientCertificate (mTLS mode). Note: + // This field currently has no impact. + SubjectAltNames []string `json:"subjectAltNames,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClientTlsPolicy") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClientTlsPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecuritySettings) MarshalJSON() ([]byte, error) { + type NoMethod SecuritySettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SerialPortOutput: An instance serial console output. +type SerialPortOutput struct { + // Contents: [Output Only] The contents of the console output. + Contents string `json:"contents,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#serialPortOutput for serial port output. 
+ Kind string `json:"kind,omitempty"` + + // Next: [Output Only] The position of the next byte of content, + // regardless of whether the content exists, following the output + // returned in the `contents` property. Use this value in the next + // request as the start parameter. + Next int64 `json:"next,omitempty,string"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Start: The starting byte position of the output that was returned. + // This should match the start parameter sent with the request. If the + // serial console output exceeds the size of the buffer (1 MB), older + // output is overwritten by newer content. The output start value will + // indicate the byte position of the output that was returned, which + // might be different than the `start` value that was specified in the + // request. + Start int64 `json:"start,omitempty,string"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Contents") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Contents") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -44051,202 +44872,183 @@ type StatefulPolicyPreservedStateDiskDevice struct { NullFields []string `json:"-"` } -func (s *StatefulPolicyPreservedStateDiskDevice) MarshalJSON() ([]byte, error) { - type NoMethod StatefulPolicyPreservedStateDiskDevice +func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { + type NoMethod SerialPortOutput raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Subnetwork: Represents a Subnetwork resource. A subnetwork (also -// known as a subnet) is a logical partition of a Virtual Private Cloud -// network with one primary IP range and zero or more secondary IP -// ranges. For more information, read Virtual Private Cloud (VPC) -// Network. -type Subnetwork struct { +type ServerBinding struct { + // Possible values: + // "RESTART_NODE_ON_ANY_SERVER" - Node may associate with any physical + // server over its lifetime. + // "RESTART_NODE_ON_MINIMAL_SERVERS" - Node may associate with minimal + // physical servers over its lifetime. + // "SERVER_BINDING_TYPE_UNSPECIFIED" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Type") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Type") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ServerBinding) MarshalJSON() ([]byte, error) { + type NoMethod ServerBinding + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ServiceAccount: A service account. +type ServiceAccount struct { + // Email: Email address of the service account. + Email string `json:"email,omitempty"` + + // Scopes: The list of scopes to be made available for this service + // account. + Scopes []string `json:"scopes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Email") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Email") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ServiceAccount) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAccount + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ServiceAttachment: Represents a ServiceAttachment resource. A service +// attachment represents a service that a producer has exposed. It +// encapsulates the load balancer which fronts the service runs and a +// list of NAT IP ranges that the producers uses to represent the +// consumers connecting to the service. next tag = 20 +type ServiceAttachment struct { + // ConnectedEndpoints: [Output Only] An array of connections for all the + // consumers connected to this service attachment. + ConnectedEndpoints []*ServiceAttachmentConnectedEndpoint `json:"connectedEndpoints,omitempty"` + + // ConnectionPreference: The connection preference of service + // attachment. The value can be set to ACCEPT_AUTOMATIC. An + // ACCEPT_AUTOMATIC service attachment is one that always accepts the + // connection from consumer forwarding rules. + // + // Possible values: + // "ACCEPT_AUTOMATIC" + // "ACCEPT_MANUAL" + // "CONNECTION_PREFERENCE_UNSPECIFIED" + ConnectionPreference string `json:"connectionPreference,omitempty"` + + // ConsumerAcceptLists: Projects that are allowed to connect to this + // service attachment. + ConsumerAcceptLists []*ServiceAttachmentConsumerProjectLimit `json:"consumerAcceptLists,omitempty"` + + // ConsumerRejectLists: Projects that are not allowed to connect to this + // service attachment. The project can be specified using its id or + // number. + ConsumerRejectLists []string `json:"consumerRejectLists,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` // Description: An optional description of this resource. 
Provide this - // property when you create the resource. This field can be set only at - // resource creation time. + // property when you create the resource. Description string `json:"description,omitempty"` - // EnableFlowLogs: Whether to enable flow logging for this subnetwork. - // If this field is not explicitly set, it will not appear in get - // listings. If not set the default behavior is determined by the org - // policy, if there is no org policy specified, then it will default to - // disabled. This field isn't supported with the purpose field set to - // INTERNAL_HTTPS_LOAD_BALANCER. - EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` + // DomainNames: If specified, the domain name will be used during the + // integration between the PSC connected endpoints and the Cloud DNS. + // For example, this is a valid domain name: "p.mycompany.com.". Current + // max number of domain names supported is 1. + DomainNames []string `json:"domainNames,omitempty"` - // ExternalIpv6Prefix: [Output Only] The range of external IPv6 - // addresses that are owned by this subnetwork. - ExternalIpv6Prefix string `json:"externalIpv6Prefix,omitempty"` + // EnableProxyProtocol: If true, enable the proxy protocol which is for + // supplying client TCP/IP address data in TCP connections that traverse + // proxies on their way to destination servers. + EnableProxyProtocol bool `json:"enableProxyProtocol,omitempty"` // Fingerprint: Fingerprint of this resource. A hash of the contents // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a Subnetwork. An up-to-date - // fingerprint must be provided in order to update the Subnetwork, - // otherwise the request will fail with error 412 conditionNotMet. To - // see the latest fingerprint, make a get() request to retrieve a - // Subnetwork. + // field will be ignored when inserting a ServiceAttachment. An + // up-to-date fingerprint must be provided in order to patch/update the + // ServiceAttachment; otherwise, the request will fail with error 412 + // conditionNotMet. To see the latest fingerprint, make a get() request + // to retrieve the ServiceAttachment. Fingerprint string `json:"fingerprint,omitempty"` - // GatewayAddress: [Output Only] The gateway address for default routes - // to reach destination addresses outside this subnetwork. - GatewayAddress string `json:"gatewayAddress,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. + // Id: [Output Only] The unique identifier for the resource type. The + // server generates this identifier. Id uint64 `json:"id,omitempty,string"` - // IpCidrRange: The range of internal addresses that are owned by this - // subnetwork. Provide this property when you create the subnetwork. For - // example, 10.0.0.0/8 or 100.64.0.0/10. Ranges must be unique and - // non-overlapping within a network. Only IPv4 is supported. This field - // is set at resource creation time. The range can be any range listed - // in the Valid ranges list. The range can be expanded after creation - // using expandIpCidrRange. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // Ipv6AccessType: The access type of IPv6 address this subnet holds. - // It's immutable and can only be specified during creation or the first - // time the subnet is updated into IPV4_IPV6 dual stack. If the - // ipv6_type is EXTERNAL then this subnet cannot enable direct path. 
- // - // Possible values: - // "EXTERNAL" - VMs on this subnet will be assigned IPv6 addresses - // that are accesible via the Internet, as well as the VPC network. - // "UNSPECIFIED_IPV6_ACCESS_TYPE" - IPv6 access type not set. Means - // this subnet hasn't been turned on IPv6 yet. - Ipv6AccessType string `json:"ipv6AccessType,omitempty"` - - // Ipv6CidrRange: [Output Only] The range of internal IPv6 addresses - // that are owned by this subnetwork. Note this will be for private - // google access only eventually. - Ipv6CidrRange string `json:"ipv6CidrRange,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#subnetwork - // for Subnetwork resources. + // Kind: [Output Only] Type of the resource. Always + // compute#serviceAttachment for service attachments. Kind string `json:"kind,omitempty"` - // LogConfig: This field denotes the VPC flow logging options for this - // subnetwork. If logging is enabled, logs are exported to Cloud - // Logging. - LogConfig *SubnetworkLogConfig `json:"logConfig,omitempty"` - - // Name: The name of the resource, provided by the client when initially - // creating the resource. The name must be 1-63 characters long, and - // comply with RFC1035. Specifically, the name must be 1-63 characters - // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` - // which means the first character must be a lowercase letter, and all - // following characters must be a dash, lowercase letter, or digit, - // except the last character, which cannot be a dash. + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Network: The URL of the network to which this subnetwork belongs, - // provided by the client when initially creating the subnetwork. This - // field can be set only at resource creation time. - Network string `json:"network,omitempty"` - - // PrivateIpGoogleAccess: Whether the VMs in this subnet can access - // Google services without assigned external IP addresses. This field - // can be both set at resource creation time and updated using - // setPrivateIpGoogleAccess. - PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` + // NatSubnets: An array of URLs where each entry is the URL of a subnet + // provided by the service producer to use for NAT in this service + // attachment. + NatSubnets []string `json:"natSubnets,omitempty"` - // PrivateIpv6GoogleAccess: The private IPv6 google access type for the - // VMs in this subnet. This is an expanded field of - // enablePrivateV6Access. If both fields are set, - // privateIpv6GoogleAccess will take priority. This field can be both - // set at resource creation time and updated using patch. - // - // Possible values: - // "DISABLE_GOOGLE_ACCESS" - Disable private IPv6 access to/from - // Google services. - // "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE" - Bidirectional private - // IPv6 access to/from Google services. - // "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE" - Outbound private IPv6 - // access from VMs in this subnet to Google services. 
- PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` + // ProducerForwardingRule: The URL of a forwarding rule with + // loadBalancingScheme INTERNAL* that is serving the endpoint identified + // by this service attachment. + ProducerForwardingRule string `json:"producerForwardingRule,omitempty"` - // Purpose: The purpose of the resource. This field can be either - // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with - // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created - // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If - // unspecified, the purpose defaults to PRIVATE_RFC_1918. The - // enableFlowLogs field isn't supported with the purpose field set to - // INTERNAL_HTTPS_LOAD_BALANCER. - // - // Possible values: - // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal - // HTTP(S) Load Balancing. - // "PRIVATE" - Regular user created or automatically created subnet. - // "PRIVATE_RFC_1918" - Regular user created or automatically created - // subnet. - // "PRIVATE_SERVICE_CONNECT" - Subnetworks created for Private Service - // Connect in the producer network. - // "REGIONAL_MANAGED_PROXY" - Subnetwork used for Regional - // Internal/External HTTP(S) Load Balancing. - Purpose string `json:"purpose,omitempty"` + // PscServiceAttachmentId: [Output Only] An 128-bit global unique ID of + // the PSC service attachment. + PscServiceAttachmentId *Uint128 `json:"pscServiceAttachmentId,omitempty"` - // Region: URL of the region where the Subnetwork resides. This field - // can be set only at resource creation time. + // Region: [Output Only] URL of the region where the service attachment + // resides. This field applies only to the region resource. You must + // specify this field as part of the HTTP request URL. It is not + // settable as a field in the request body. Region string `json:"region,omitempty"` - // Role: The role of subnetwork. Currently, this field is only used when - // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to - // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being - // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one - // that is ready to be promoted to ACTIVE or is currently draining. This - // field can be updated with a patch request. - // - // Possible values: - // "ACTIVE" - The ACTIVE subnet that is currently used. - // "BACKUP" - The BACKUP subnet that could be promoted to ACTIVE. - Role string `json:"role,omitempty"` - - // SecondaryIpRanges: An array of configurations for secondary IP ranges - // for VM instances contained in this subnetwork. The primary IP of such - // VM must belong to the primary ipCidrRange of the subnetwork. The - // alias IPs may belong to either primary or secondary ranges. This - // field can be updated with a patch request. - SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // StackType: The stack type for this subnet to identify whether the - // IPv6 feature is enabled or not. If not specified IPV4_ONLY will be - // used. This field can be both set at resource creation time and - // updated using patch. - // - // Possible values: - // "IPV4_IPV6" - New VMs in this subnet can have both IPv4 and IPv6 - // addresses. - // "IPV4_ONLY" - New VMs in this subnet will only be assigned IPv4 - // addresses. 
- // "UNSPECIFIED_STACK_TYPE" - StackType string `json:"stackType,omitempty"` - - // State: [Output Only] The state of the subnetwork, which can be one of - // the following values: READY: Subnetwork is created and ready to use - // DRAINING: only applicable to subnetworks that have the purpose set to - // INTERNAL_HTTPS_LOAD_BALANCER and indicates that connections to the - // load balancer are being drained. A subnetwork that is draining cannot - // be used or modified until it reaches a status of READY - // - // Possible values: - // "DRAINING" - Subnetwork is being drained. - // "READY" - Subnetwork is ready for use. - State string `json:"state,omitempty"` + // TargetService: The URL of a service serving the endpoint identified + // by this service attachment. + TargetService string `json:"targetService,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // ForceSendFields is a list of field names (e.g. "ConnectedEndpoints") // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -44254,7 +45056,7 @@ type Subnetwork struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "ConnectedEndpoints") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -44264,22 +45066,23 @@ type Subnetwork struct { NullFields []string `json:"-"` } -func (s *Subnetwork) MarshalJSON() ([]byte, error) { - type NoMethod Subnetwork +func (s *ServiceAttachment) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAttachment raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworkAggregatedList struct { +// ServiceAttachmentAggregatedList: Contains a list of +// ServiceAttachmentsScopedList. +type ServiceAttachmentAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of SubnetworksScopedList resources. - Items map[string]SubnetworksScopedList `json:"items,omitempty"` + // Items: A list of ServiceAttachmentsScopedList resources. + Items map[string]ServiceAttachmentsScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#subnetworkAggregatedList for aggregated lists of subnetworks. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -44297,7 +45100,7 @@ type SubnetworkAggregatedList struct { Unreachables []string `json:"unreachables,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` + Warning *ServiceAttachmentAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. 
@@ -44320,15 +45123,15 @@ type SubnetworkAggregatedList struct { NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkAggregatedList +func (s *ServiceAttachmentAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAttachmentAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkAggregatedListWarning: [Output Only] Informational warning -// message. -type SubnetworkAggregatedListWarning struct { +// ServiceAttachmentAggregatedListWarning: [Output Only] Informational +// warning message. +type ServiceAttachmentAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -44350,6 +45153,9 @@ type SubnetworkAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -44357,6 +45163,9 @@ type SubnetworkAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -44390,7 +45199,7 @@ type SubnetworkAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*SubnetworkAggregatedListWarningData `json:"data,omitempty"` + Data []*ServiceAttachmentAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -44413,13 +45222,13 @@ type SubnetworkAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkAggregatedListWarning +func (s *ServiceAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAttachmentAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworkAggregatedListWarningData struct { +type ServiceAttachmentAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -44450,23 +45259,104 @@ type SubnetworkAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkAggregatedListWarningData +func (s *ServiceAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAttachmentAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkList: Contains a list of Subnetwork resources. -type SubnetworkList struct { +// ServiceAttachmentConnectedEndpoint: [Output Only] A connection +// connected to this service attachment. +type ServiceAttachmentConnectedEndpoint struct { + // Endpoint: The url of a connected endpoint. + Endpoint string `json:"endpoint,omitempty"` + + // PscConnectionId: The PSC connection id of the connected endpoint. + PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` + + // Status: The status of a connected endpoint to this service + // attachment. + // + // Possible values: + // "ACCEPTED" - The connection has been accepted by the producer. + // "CLOSED" - The connection has been closed by the producer. + // "NEEDS_ATTENTION" - The connection has been accepted by the + // producer, but the producer needs to take further action before the + // forwarding rule can serve traffic. + // "PENDING" - The connection is pending acceptance by the producer. + // "REJECTED" - The consumer is still connected but not using the + // connection. + // "STATUS_UNSPECIFIED" + Status string `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Endpoint") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Endpoint") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ServiceAttachmentConnectedEndpoint) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAttachmentConnectedEndpoint + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ServiceAttachmentConsumerProjectLimit struct { + // ConnectionLimit: The value of the limit to set. + ConnectionLimit int64 `json:"connectionLimit,omitempty"` + + // NetworkUrl: The network URL for the network to set the limit for. + NetworkUrl string `json:"networkUrl,omitempty"` + + // ProjectIdOrNum: The project id or number for the project to set the + // limit for. + ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConnectionLimit") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConnectionLimit") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ServiceAttachmentConsumerProjectLimit) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAttachmentConsumerProjectLimit + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ServiceAttachmentList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Subnetwork resources. - Items []*Subnetwork `json:"items,omitempty"` + // Items: A list of ServiceAttachment resources. + Items []*ServiceAttachment `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#subnetworkList - // for lists of subnetworks. + // Kind: [Output Only] Type of the resource. Always + // compute#serviceAttachment for service attachments. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -44481,7 +45371,7 @@ type SubnetworkList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *SubnetworkListWarning `json:"warning,omitempty"` + Warning *ServiceAttachmentListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -44504,14 +45394,15 @@ type SubnetworkList struct { NullFields []string `json:"-"` } -func (s *SubnetworkList) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkList +func (s *ServiceAttachmentList) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAttachmentList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkListWarning: [Output Only] Informational warning message. -type SubnetworkListWarning struct { +// ServiceAttachmentListWarning: [Output Only] Informational warning +// message. +type ServiceAttachmentListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -44533,6 +45424,9 @@ type SubnetworkListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -44540,6 +45434,9 @@ type SubnetworkListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. 
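// ----------------------------------------------------------------------------
// Editor's sketch (illustrative; not part of the upstream vendored change).
// The ForceSendFields / NullFields comments above describe how zero-value and
// explicit-null fields get into Patch request bodies. A minimal sketch using
// ServiceAttachmentConsumerProjectLimit; the project value is hypothetical:
func exampleConsumerLimitBody() ([]byte, error) {
	limit := &ServiceAttachmentConsumerProjectLimit{
		ProjectIdOrNum:  "consumer-project-123",
		ConnectionLimit: 0, // zero value, dropped from the JSON body by default
		// Force the zero-valued limit to be serialized anyway...
		ForceSendFields: []string{"ConnectionLimit"},
		// ...and emit an explicit null for NetworkUrl.
		NullFields: []string{"NetworkUrl"},
	}
	return limit.MarshalJSON()
}
// ----------------------------------------------------------------------------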
+ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -44573,7 +45470,7 @@ type SubnetworkListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*SubnetworkListWarningData `json:"data,omitempty"` + Data []*ServiceAttachmentListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -44596,13 +45493,13 @@ type SubnetworkListWarning struct { NullFields []string `json:"-"` } -func (s *SubnetworkListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkListWarning +func (s *ServiceAttachmentListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAttachmentListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworkListWarningData struct { +type ServiceAttachmentListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -44633,65 +45530,22 @@ type SubnetworkListWarningData struct { NullFields []string `json:"-"` } -func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkListWarningData +func (s *ServiceAttachmentListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAttachmentListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkLogConfig: The available logging options for this -// subnetwork. -type SubnetworkLogConfig struct { - // AggregationInterval: Can only be specified if VPC flow logging for - // this subnetwork is enabled. Toggles the aggregation interval for - // collecting flow logs. Increasing the interval time will reduce the - // amount of generated flow logs for long lasting connections. Default - // is an interval of 5 seconds per connection. - // - // Possible values: - // "INTERVAL_10_MIN" - // "INTERVAL_15_MIN" - // "INTERVAL_1_MIN" - // "INTERVAL_30_SEC" - // "INTERVAL_5_MIN" - // "INTERVAL_5_SEC" - AggregationInterval string `json:"aggregationInterval,omitempty"` - - // Enable: Whether to enable flow logging for this subnetwork. If this - // field is not explicitly set, it will not appear in get listings. If - // not set the default behavior is determined by the org policy, if - // there is no org policy specified, then it will default to disabled. - Enable bool `json:"enable,omitempty"` - - // FilterExpr: Can only be specified if VPC flow logs for this - // subnetwork is enabled. Export filter used to define which VPC flow - // logs should be logged. - FilterExpr string `json:"filterExpr,omitempty"` - - // FlowSampling: Can only be specified if VPC flow logging for this - // subnetwork is enabled. The value of the field must be in [0, 1]. Set - // the sampling rate of VPC flow logs within the subnetwork where 1.0 - // means all collected logs are reported and 0.0 means no logs are - // reported. 
Default is 0.5 unless otherwise specified by the org - // policy, which means half of all collected logs are reported. - FlowSampling float64 `json:"flowSampling,omitempty"` - - // Metadata: Can only be specified if VPC flow logs for this subnetwork - // is enabled. Configures whether all, none or a subset of metadata - // fields should be added to the reported VPC flow logs. Default is - // EXCLUDE_ALL_METADATA. - // - // Possible values: - // "CUSTOM_METADATA" - // "EXCLUDE_ALL_METADATA" - // "INCLUDE_ALL_METADATA" - Metadata string `json:"metadata,omitempty"` +type ServiceAttachmentsScopedList struct { + // ServiceAttachments: A list of ServiceAttachments contained in this + // scope. + ServiceAttachments []*ServiceAttachment `json:"serviceAttachments,omitempty"` - // MetadataFields: Can only be specified if VPC flow logs for this - // subnetwork is enabled and "metadata" was set to CUSTOM_METADATA. - MetadataFields []string `json:"metadataFields,omitempty"` + // Warning: Informational warning which replaces the list of service + // attachments when the list is empty. + Warning *ServiceAttachmentsScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "AggregationInterval") + // ForceSendFields is a list of field names (e.g. "ServiceAttachments") // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -44699,7 +45553,7 @@ type SubnetworkLogConfig struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AggregationInterval") to + // NullFields is a list of field names (e.g. "ServiceAttachments") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -44709,131 +45563,15 @@ type SubnetworkLogConfig struct { NullFields []string `json:"-"` } -func (s *SubnetworkLogConfig) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkLogConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *SubnetworkLogConfig) UnmarshalJSON(data []byte) error { - type NoMethod SubnetworkLogConfig - var s1 struct { - FlowSampling gensupport.JSONFloat64 `json:"flowSampling"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.FlowSampling = float64(s1.FlowSampling) - return nil -} - -// SubnetworkSecondaryRange: Represents a secondary IP range of a -// subnetwork. -type SubnetworkSecondaryRange struct { - // IpCidrRange: The range of IP addresses belonging to this subnetwork - // secondary range. Provide this property when you create the - // subnetwork. Ranges must be unique and non-overlapping with all - // primary and secondary IP ranges within a network. Only IPv4 is - // supported. The range can be any range listed in the Valid ranges - // list. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // RangeName: The name associated with this subnetwork secondary range, - // used when adding an alias IP range to a VM instance. The name must be - // 1-63 characters long, and comply with RFC1035. The name must be - // unique within the subnetwork. 
- RangeName string `json:"rangeName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkSecondaryRange - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SubnetworksExpandIpCidrRangeRequest struct { - // IpCidrRange: The IP (in CIDR format or netmask) of internal addresses - // that are legal on this Subnetwork. This range should be disjoint from - // other subnetworks within this network. This range can only be larger - // than (i.e. a superset of) the range previously defined before the - // update. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksExpandIpCidrRangeRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SubnetworksScopedList struct { - // Subnetworks: A list of subnetworks contained in this scope. - Subnetworks []*Subnetwork `json:"subnetworks,omitempty"` - - // Warning: An informational warning that appears when the list of - // addresses is empty. - Warning *SubnetworksScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Subnetworks") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Subnetworks") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksScopedList +func (s *ServiceAttachmentsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAttachmentsScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworksScopedListWarning: An informational warning that appears -// when the list of addresses is empty. -type SubnetworksScopedListWarning struct { +// ServiceAttachmentsScopedListWarning: Informational warning which +// replaces the list of service attachments when the list is empty. +type ServiceAttachmentsScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -44855,6 +45593,9 @@ type SubnetworksScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -44862,6 +45603,9 @@ type SubnetworksScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -44895,7 +45639,7 @@ type SubnetworksScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*SubnetworksScopedListWarningData `json:"data,omitempty"` + Data []*ServiceAttachmentsScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -44918,13 +45662,13 @@ type SubnetworksScopedListWarning struct { NullFields []string `json:"-"` } -func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksScopedListWarning +func (s *ServiceAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAttachmentsScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksScopedListWarningData struct { +type ServiceAttachmentsScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -44955,62 +45699,60 @@ type SubnetworksScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksScopedListWarningData +func (s *ServiceAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAttachmentsScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksSetPrivateIpGoogleAccessRequest struct { - PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "PrivateIpGoogleAccess") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` +// ShareSettings: The share setting for reservations and sole tenancy +// node groups. +type ShareSettings struct { + // ProjectMap: A map of project id and project config. This is only + // valid when share_type's value is SPECIFIC_PROJECTS. + ProjectMap map[string]ShareSettingsProjectConfig `json:"projectMap,omitempty"` - // NullFields is a list of field names (e.g. "PrivateIpGoogleAccess") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // ShareType: Type of sharing for this shared-reservation + // + // Possible values: + // "LOCAL" - Default value. + // "ORGANIZATION" - Shared-reservation is open to entire Organization + // "SHARE_TYPE_UNSPECIFIED" - Default value. This value is unused. + // "SPECIFIC_PROJECTS" - Shared-reservation is open to specific + // projects + ShareType string `json:"shareType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProjectMap") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProjectMap") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksSetPrivateIpGoogleAccessRequest +func (s *ShareSettings) MarshalJSON() ([]byte, error) { + type NoMethod ShareSettings raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Subsetting: Subsetting configuration for this BackendService. -// Currently this is applicable only for Internal TCP/UDP load -// balancing, Internal HTTP(S) load balancing and Traffic Director. -type Subsetting struct { - // Possible values: - // "CONSISTENT_HASH_SUBSETTING" - Subsetting based on consistent - // hashing. For Traffic Director, the number of backends per backend - // group (the subset size) is based on the `subset_size` parameter. For - // Internal HTTP(S) load balancing, the number of backends per backend - // group (the subset size) is dynamically adjusted in two cases: - As - // the number of proxy instances participating in Internal HTTP(S) load - // balancing increases, the subset size decreases. - When the total - // number of backends in a network exceeds the capacity of a single - // proxy instance, subset sizes are reduced automatically for each - // service that has backend subsetting enabled. - // "NONE" - No Subsetting. Clients may open connections and send - // traffic to all backends of this backend service. This can lead to - // performance issues if there is substantial imbalance in the count of - // clients and backends. - Policy string `json:"policy,omitempty"` +// ShareSettingsProjectConfig: Config for each project in the share +// settings. +type ShareSettingsProjectConfig struct { + // ProjectId: The project ID, should be same as the key of this project + // config in the parent map. + ProjectId string `json:"projectId,omitempty"` - // ForceSendFields is a list of field names (e.g. "Policy") to + // ForceSendFields is a list of field names (e.g. "ProjectId") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -45018,8 +45760,8 @@ type Subsetting struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Policy") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "ProjectId") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -45027,62 +45769,71 @@ type Subsetting struct { NullFields []string `json:"-"` } -func (s *Subsetting) MarshalJSON() ([]byte, error) { - type NoMethod Subsetting +func (s *ShareSettingsProjectConfig) MarshalJSON() ([]byte, error) { + type NoMethod ShareSettingsProjectConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TCPHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 80. Valid values are 1 through 65535. - Port int64 `json:"port,omitempty"` +// ShieldedInstanceConfig: A set of Shielded Instance options. 
+type ShieldedInstanceConfig struct { + // EnableIntegrityMonitoring: Defines whether the instance has integrity + // monitoring enabled. Enabled by default. + EnableIntegrityMonitoring bool `json:"enableIntegrityMonitoring,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` + // EnableSecureBoot: Defines whether the instance has Secure Boot + // enabled. Disabled by default. + EnableSecureBoot bool `json:"enableSecureBoot,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, TCP health check follows behavior specified in port and - // portName fields. - // - // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. - PortSpecification string `json:"portSpecification,omitempty"` + // EnableVtpm: Defines whether the instance has the vTPM enabled. + // Enabled by default. + EnableVtpm bool `json:"enableVtpm,omitempty"` - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "EnableIntegrityMonitoring") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` - // Request: The application data to send once the TCP connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. - Request string `json:"request,omitempty"` + // NullFields is a list of field names (e.g. + // "EnableIntegrityMonitoring") to include in API requests with the JSON + // null value. By default, fields with empty values are omitted from API + // requests. However, any field with an empty value appearing in + // NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. 
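// ----------------------------------------------------------------------------
// Editor's sketch (illustrative; not part of the upstream vendored change).
// Per the ShieldedInstanceConfig field docs above, integrity monitoring and
// vTPM default to enabled while Secure Boot defaults to disabled. Because
// false is Go's zero value, an explicit "disable" only reaches the API if the
// field is listed in ForceSendFields:
func exampleShieldedConfig() *ShieldedInstanceConfig {
	return &ShieldedInstanceConfig{
		EnableSecureBoot:          true,  // opt in to Secure Boot
		EnableIntegrityMonitoring: false, // zero value, omitted unless forced
		ForceSendFields:           []string{"EnableIntegrityMonitoring"},
	}
}
// ----------------------------------------------------------------------------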
- Response string `json:"response,omitempty"` +func (s *ShieldedInstanceConfig) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedInstanceConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ForceSendFields is a list of field names (e.g. "Port") to +// ShieldedInstanceIdentity: A Shielded Instance Identity. +type ShieldedInstanceIdentity struct { + // EncryptionKey: An Endorsement Key (EK) made by the RSA 2048 algorithm + // issued to the Shielded Instance's vTPM. + EncryptionKey *ShieldedInstanceIdentityEntry `json:"encryptionKey,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#shieldedInstanceIdentity for shielded Instance identity + // entry. + Kind string `json:"kind,omitempty"` + + // SigningKey: An Attestation Key (AK) made by the RSA 2048 algorithm + // issued to the Shielded Instance's vTPM. + SigningKey *ShieldedInstanceIdentityEntry `json:"signingKey,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "EncryptionKey") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -45090,7 +45841,38 @@ type TCPHealthCheck struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Port") to include in API + // NullFields is a list of field names (e.g. "EncryptionKey") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ShieldedInstanceIdentity) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedInstanceIdentity + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ShieldedInstanceIdentityEntry: A Shielded Instance Identity Entry. +type ShieldedInstanceIdentityEntry struct { + // EkCert: A PEM-encoded X.509 certificate. This field can be empty. + EkCert string `json:"ekCert,omitempty"` + + // EkPub: A PEM-encoded public key. + EkPub string `json:"ekPub,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EkCert") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EkCert") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -45099,28 +45881,60 @@ type TCPHealthCheck struct { NullFields []string `json:"-"` } -func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { - type NoMethod TCPHealthCheck +func (s *ShieldedInstanceIdentityEntry) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedInstanceIdentityEntry raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Tags: A set of instance tags. -type Tags struct { - // Fingerprint: Specifies a fingerprint for this request, which is - // essentially a hash of the tags' contents and used for optimistic - // locking. The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update tags. You must always - // provide an up-to-date fingerprint hash in order to update or change - // tags. To see the latest fingerprint, make get() request to the - // instance. - Fingerprint string `json:"fingerprint,omitempty"` +// ShieldedInstanceIntegrityPolicy: The policy describes the baseline +// against which Instance boot integrity is measured. +type ShieldedInstanceIntegrityPolicy struct { + // UpdateAutoLearnPolicy: Updates the integrity policy baseline using + // the measurements from the VM instance's most recent boot. + UpdateAutoLearnPolicy bool `json:"updateAutoLearnPolicy,omitempty"` - // Items: An array of tags. Each tag must be 1-63 characters long, and - // comply with RFC1035. - Items []string `json:"items,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "UpdateAutoLearnPolicy") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` - // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // NullFields is a list of field names (e.g. "UpdateAutoLearnPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ShieldedInstanceIntegrityPolicy) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedInstanceIntegrityPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SignedUrlKey: Represents a customer-supplied Signing Key used by +// Cloud CDN Signed URLs +type SignedUrlKey struct { + // KeyName: Name of the key. The name must be 1-63 characters long, and + // comply with RFC1035. Specifically, the name must be 1-63 characters + // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + // which means the first character must be a lowercase letter, and all + // following characters must be a dash, lowercase letter, or digit, + // except the last character, which cannot be a dash. + KeyName string `json:"keyName,omitempty"` + + // KeyValue: 128-bit key value used for signing the URL. The key value + // must be a valid RFC 4648 Section 5 base64url encoded string. 
+ KeyValue string `json:"keyValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "KeyName") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -45128,28 +45942,51 @@ type Tags struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "KeyName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Tags) MarshalJSON() ([]byte, error) { - type NoMethod Tags +func (s *SignedUrlKey) MarshalJSON() ([]byte, error) { + type NoMethod SignedUrlKey raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetGrpcProxy: Represents a Target gRPC Proxy resource. A target -// gRPC proxy is a component of load balancers intended for load -// balancing gRPC traffic. Only global forwarding rules with load -// balancing scheme INTERNAL_SELF_MANAGED can reference a target gRPC -// proxy. The target gRPC Proxy references a URL map that specifies how -// traffic is routed to gRPC backend services. -type TargetGrpcProxy struct { +// Snapshot: Represents a Persistent Disk Snapshot resource. You can use +// snapshots to back up data on a regular interval. For more +// information, read Creating persistent disk snapshots. +type Snapshot struct { + // Architecture: [Output Only] The architecture of the snapshot. Valid + // values are ARM64 or X86_64. + // + // Possible values: + // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture + // is not set. + // "ARM64" - Machines with architecture ARM64 + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + + // AutoCreated: [Output Only] Set to true if snapshots are automatically + // created by applying resource policy on the target disk. + AutoCreated bool `json:"autoCreated,omitempty"` + + // ChainName: Creates the new snapshot in the snapshot chain labeled + // with the specified name. The chain name must be 1-63 characters long + // and comply with RFC1035. This is an uncommon option only for advanced + // service owners who needs to create separate snapshot chains, for + // example, for chargeback tracking. When you describe your snapshot + // resource, this field is visible only if it has a non-empty value. + ChainName string `json:"chainName,omitempty"` + + // CreationSizeBytes: [Output Only] Size in bytes of the snapshot at + // creation time. + CreationSizeBytes int64 `json:"creationSizeBytes,omitempty,string"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -45158,24 +45995,50 @@ type TargetGrpcProxy struct { // property when you create the resource. 
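// ----------------------------------------------------------------------------
// Editor's sketch (illustrative; not part of the upstream vendored change).
// Producing a KeyValue that satisfies the SignedUrlKey constraint documented
// above: a 128-bit value encoded as an RFC 4648 section 5 (base64url) string.
// Assumes "crypto/rand" and "encoding/base64" imports; the key name is a
// hypothetical placeholder.
func exampleSignedUrlKey() (*SignedUrlKey, error) {
	raw := make([]byte, 16) // 128 bits of key material
	if _, err := rand.Read(raw); err != nil {
		return nil, err
	}
	return &SignedUrlKey{
		KeyName:  "cdn-key-1", // must be RFC1035-compliant per the field doc
		KeyValue: base64.URLEncoding.EncodeToString(raw),
	}, nil
}
// ----------------------------------------------------------------------------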
Description string `json:"description,omitempty"` - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a TargetGrpcProxy. An up-to-date - // fingerprint must be provided in order to patch/update the - // TargetGrpcProxy; otherwise, the request will fail with error 412 - // conditionNotMet. To see the latest fingerprint, make a get() request - // to retrieve the TargetGrpcProxy. - Fingerprint string `json:"fingerprint,omitempty"` + // DiskSizeGb: [Output Only] Size of the source disk, specified in GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - // Id: [Output Only] The unique identifier for the resource type. The - // server generates this identifier. + // DownloadBytes: [Output Only] Number of bytes downloaded to restore a + // snapshot to a disk. + DownloadBytes int64 `json:"downloadBytes,omitempty,string"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always - // compute#targetGrpcProxy for target grpc proxies. + // Kind: [Output Only] Type of the resource. Always compute#snapshot for + // Snapshot resources. Kind string `json:"kind,omitempty"` - // Name: Name of the resource. Provided by the client when the resource + // LabelFingerprint: A fingerprint for the labels being applied to this + // snapshot, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels, otherwise the request will fail with error + // 412 conditionNotMet. To see the latest fingerprint, make a get() + // request to retrieve a snapshot. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this snapshot. These can be later modified + // by the setLabels method. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // LicenseCodes: [Output Only] Integer license codes indicating which + // licenses are attached to this snapshot. + LicenseCodes googleapi.Int64s `json:"licenseCodes,omitempty"` + + // Licenses: [Output Only] A list of public visible licenses that apply + // to this snapshot. This can be because the original image had licenses + // attached (such as a Windows image). + Licenses []string `json:"licenses,omitempty"` + + // LocationHint: An opaque location hint used to place the snapshot + // close to other resources. This field is for use by internal tools + // that use the public API. + LocationHint string `json:"locationHint,omitempty"` + + // Name: Name of the resource; provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means @@ -45184,68 +46047,121 @@ type TargetGrpcProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. + SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. 
SelfLink string `json:"selfLink,omitempty"` - // SelfLinkWithId: [Output Only] Server-defined URL with id for the - // resource. - SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // SnapshotEncryptionKey: Encrypts the snapshot using a + // customer-supplied encryption key. After you encrypt a snapshot using + // a customer-supplied key, you must provide the same key if you use the + // snapshot later. For example, you must provide the encryption key when + // you create a disk from the encrypted snapshot in a future request. + // Customer-supplied encryption keys do not protect access to metadata + // of the snapshot. If you do not provide an encryption key when + // creating the snapshot, then the snapshot will be encrypted using an + // automatically generated key and you do not need to provide a key to + // use the snapshot later. + SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` - // UrlMap: URL to the UrlMap resource that defines the mapping from URL - // to the BackendService. The protocol field in the BackendService must - // be set to GRPC. - UrlMap string `json:"urlMap,omitempty"` + // SnapshotType: Indicates the type of the snapshot. + // + // Possible values: + // "ARCHIVE" + // "STANDARD" + SnapshotType string `json:"snapshotType,omitempty"` - // ValidateForProxyless: If true, indicates that the BackendServices - // referenced by the urlMap may be accessed by gRPC applications without - // using a sidecar proxy. This will enable configuration checks on - // urlMap and its referenced BackendServices to not allow unsupported - // features. A gRPC application must use "xds:///" scheme in the target - // URI of the service it is connecting to. If false, indicates that the - // BackendServices referenced by the urlMap will be accessed by gRPC - // applications via a sidecar proxy. In this case, a gRPC application - // must not use "xds:///" scheme in the target URI of the service it is - // connecting to - ValidateForProxyless bool `json:"validateForProxyless,omitempty"` + // SourceDisk: The source disk used to create this snapshot. + SourceDisk string `json:"sourceDisk,omitempty"` + + // SourceDiskEncryptionKey: The customer-supplied encryption key of the + // source disk. Required if the source disk is protected by a + // customer-supplied encryption key. + SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` + + // SourceDiskId: [Output Only] The ID value of the disk used to create + // this snapshot. This value may be used to determine whether the + // snapshot was taken from the current or a previous instance of a given + // disk name. + SourceDiskId string `json:"sourceDiskId,omitempty"` + + // SourceSnapshotSchedulePolicy: [Output Only] URL of the resource + // policy which created this scheduled snapshot. + SourceSnapshotSchedulePolicy string `json:"sourceSnapshotSchedulePolicy,omitempty"` + + // SourceSnapshotSchedulePolicyId: [Output Only] ID of the resource + // policy which created this scheduled snapshot. + SourceSnapshotSchedulePolicyId string `json:"sourceSnapshotSchedulePolicyId,omitempty"` + + // Status: [Output Only] The status of the snapshot. This can be + // CREATING, DELETING, FAILED, READY, or UPLOADING. + // + // Possible values: + // "CREATING" - Snapshot creation is in progress. + // "DELETING" - Snapshot is currently being deleted. + // "FAILED" - Snapshot creation failed. + // "READY" - Snapshot has been created successfully. + // "UPLOADING" - Snapshot is being uploaded. 
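// ----------------------------------------------------------------------------
// Editor's sketch (illustrative; not part of the upstream vendored change).
// Building a Snapshot request body per the encryption-key docs above: the
// source disk's key is required when that disk is protected by a
// customer-supplied key, and the snapshot's own key must be presented again
// whenever the snapshot is used later. CustomerEncryptionKey is defined
// elsewhere in this file; the name, disk URL, and keys are hypothetical
// placeholders.
func exampleEncryptedSnapshot(diskKeyB64, snapshotKeyB64 string) *Snapshot {
	return &Snapshot{
		Name:                    "disk-1-backup",
		SourceDisk:              "zones/us-central1-a/disks/disk-1",
		SourceDiskEncryptionKey: &CustomerEncryptionKey{RawKey: diskKeyB64},
		SnapshotEncryptionKey:   &CustomerEncryptionKey{RawKey: snapshotKeyB64},
	}
}
// ----------------------------------------------------------------------------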
+ Status string `json:"status,omitempty"` + + // StorageBytes: [Output Only] A size of the storage used by the + // snapshot. As snapshots share storage, this number is expected to + // change with snapshot creation/deletion. + StorageBytes int64 `json:"storageBytes,omitempty,string"` + + // StorageBytesStatus: [Output Only] An indicator whether storageBytes + // is in a stable state or it is being adjusted as a result of shared + // storage reallocation. This status can either be UPDATING, meaning the + // size of the snapshot is being updated, or UP_TO_DATE, meaning the + // size of the snapshot is up-to-date. + // + // Possible values: + // "UPDATING" + // "UP_TO_DATE" + StorageBytesStatus string `json:"storageBytesStatus,omitempty"` + + // StorageLocations: Cloud Storage bucket storage location of the + // snapshot (regional or multi-regional). + StorageLocations []string `json:"storageLocations,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Architecture") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Architecture") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetGrpcProxy) MarshalJSON() ([]byte, error) { - type NoMethod TargetGrpcProxy +func (s *Snapshot) MarshalJSON() ([]byte, error) { + type NoMethod Snapshot raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetGrpcProxyList struct { +// SnapshotList: Contains a list of Snapshot resources. +type SnapshotList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetGrpcProxy resources. - Items []*TargetGrpcProxy `json:"items,omitempty"` + // Items: A list of Snapshot resources. + Items []*Snapshot `json:"items,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#targetGrpcProxy for target grpc proxies. + // Kind: Type of resource. 
Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -45260,7 +46176,7 @@ type TargetGrpcProxyList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetGrpcProxyListWarning `json:"warning,omitempty"` + Warning *SnapshotListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -45283,15 +46199,14 @@ type TargetGrpcProxyList struct { NullFields []string `json:"-"` } -func (s *TargetGrpcProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetGrpcProxyList +func (s *SnapshotList) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetGrpcProxyListWarning: [Output Only] Informational warning -// message. -type TargetGrpcProxyListWarning struct { +// SnapshotListWarning: [Output Only] Informational warning message. +type SnapshotListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -45313,6 +46228,9 @@ type TargetGrpcProxyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -45320,6 +46238,9 @@ type TargetGrpcProxyListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -45353,7 +46274,7 @@ type TargetGrpcProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetGrpcProxyListWarningData `json:"data,omitempty"` + Data []*SnapshotListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -45376,13 +46297,13 @@ type TargetGrpcProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetGrpcProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetGrpcProxyListWarning +func (s *SnapshotListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetGrpcProxyListWarningData struct { +type SnapshotListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -45413,22 +46334,27 @@ type TargetGrpcProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetGrpcProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetGrpcProxyListWarningData +func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpProxiesScopedList struct { - // TargetHttpProxies: A list of TargetHttpProxies contained in this - // scope. - TargetHttpProxies []*TargetHttpProxy `json:"targetHttpProxies,omitempty"` +type SourceDiskEncryptionKey struct { + // DiskEncryptionKey: The customer-supplied encryption key of the source + // disk. Required if the source disk is protected by a customer-supplied + // encryption key. + DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` - // Warning: Informational warning which replaces the list of backend - // services when the list is empty. - Warning *TargetHttpProxiesScopedListWarning `json:"warning,omitempty"` + // SourceDisk: URL of the disk attached to the source instance. This can + // be a full or valid partial URL. For example, the following are valid + // values: - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone + // /disks/disk - projects/project/zones/zone/disks/disk - + // zones/zone/disks/disk + SourceDisk string `json:"sourceDisk,omitempty"` - // ForceSendFields is a list of field names (e.g. "TargetHttpProxies") + // ForceSendFields is a list of field names (e.g. "DiskEncryptionKey") // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -45436,7 +46362,7 @@ type TargetHttpProxiesScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "TargetHttpProxies") to + // NullFields is a list of field names (e.g. "DiskEncryptionKey") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -45446,83 +46372,23 @@ type TargetHttpProxiesScopedList struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxiesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxiesScopedList +func (s *SourceDiskEncryptionKey) MarshalJSON() ([]byte, error) { + type NoMethod SourceDiskEncryptionKey raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxiesScopedListWarning: Informational warning which -// replaces the list of backend services when the list is empty. -type TargetHttpProxiesScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. 
- // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*TargetHttpProxiesScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` +// SourceInstanceParams: A specification of the parameters to use when +// creating the instance template from a source instance. +type SourceInstanceParams struct { + // DiskConfigs: Attached disks configuration. If not provided, defaults + // are applied: For boot disk and any other R/W disks, new custom images + // will be created from each disk. For read-only disks, they will be + // attached in read-only mode. Local SSD disks will be created as blank + // volumes. + DiskConfigs []*DiskInstantiationConfig `json:"diskConfigs,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"Code") to + // ForceSendFields is a list of field names (e.g. "DiskConfigs") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -45530,36 +46396,105 @@ type TargetHttpProxiesScopedListWarning struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "DiskConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetHttpProxiesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxiesScopedListWarning +func (s *SourceInstanceParams) MarshalJSON() ([]byte, error) { + type NoMethod SourceInstanceParams raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpProxiesScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +// SourceInstanceProperties: DEPRECATED: Please use +// compute#instanceProperties instead. New properties will not be added +// to this field. +type SourceInstanceProperties struct { + // CanIpForward: Enables instances created based on this machine image + // to send packets with source IP addresses other than their own and + // receive packets with destination IP addresses other than their own. + // If these instances will be used as an IP gateway or it will be set as + // the next-hop in a Route resource, specify true. If unsure, leave this + // set to false. See the Enable IP forwarding documentation for more + // information. + CanIpForward bool `json:"canIpForward,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // DeletionProtection: Whether the instance created from this machine + // image should be protected against deletion. + DeletionProtection bool `json:"deletionProtection,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // Description: An optional text description for the instances that are + // created from this machine image. + Description string `json:"description,omitempty"` + + // Disks: An array of disks that are associated with the instances that + // are created from this machine image. 
+ Disks []*SavedAttachedDisk `json:"disks,omitempty"` + + // GuestAccelerators: A list of guest accelerator cards' type and count + // to use for instances created from this machine image. + GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` + + // KeyRevocationActionType: KeyRevocationActionType of the instance. + // Supported options are "STOP" and "NONE". The default value is "NONE" + // if it is not specified. + // + // Possible values: + // "KEY_REVOCATION_ACTION_TYPE_UNSPECIFIED" - Default value. This + // value is unused. + // "NONE" - Indicates user chose no operation. + // "STOP" - Indicates user chose to opt for VM shutdown on key + // revocation. + KeyRevocationActionType string `json:"keyRevocationActionType,omitempty"` + + // Labels: Labels to apply to instances that are created from this + // machine image. + Labels map[string]string `json:"labels,omitempty"` + + // MachineType: The machine type to use for instances that are created + // from this machine image. + MachineType string `json:"machineType,omitempty"` + + // Metadata: The metadata key/value pairs to assign to instances that + // are created from this machine image. These pairs can consist of + // custom metadata or predefined keys. See Project and instance metadata + // for more information. + Metadata *Metadata `json:"metadata,omitempty"` + + // MinCpuPlatform: Minimum cpu/platform to be used by instances created + // from this machine image. The instance may be scheduled on the + // specified or newer cpu/platform. Applicable values are the friendly + // names of CPU platforms, such as minCpuPlatform: "Intel Haswell" or + // minCpuPlatform: "Intel Sandy Bridge". For more information, read + // Specifying a Minimum CPU Platform. + MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + + // NetworkInterfaces: An array of network access configurations for this + // interface. + NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + + // Scheduling: Specifies the scheduling options for the instances that + // are created from this machine image. + Scheduling *Scheduling `json:"scheduling,omitempty"` + + // ServiceAccounts: A list of service accounts with specified scopes. + // Access tokens for these service accounts are available to the + // instances that are created from this machine image. Use metadata + // queries to obtain the access tokens for these instances. + ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` + + // Tags: A list of tags to apply to the instances that are created from + // this machine image. The tags identify valid sources or targets for + // network firewalls. The setTags method can modify this list of tags. + // Each tag within the list must comply with RFC1035. + Tags *Tags `json:"tags,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CanIpForward") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -45567,32 +46502,41 @@ type TargetHttpProxiesScopedListWarningData struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "CanIpForward") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetHttpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxiesScopedListWarningData +func (s *SourceInstanceProperties) MarshalJSON() ([]byte, error) { + type NoMethod SourceInstanceProperties raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxy: Represents a Target HTTP Proxy resource. Google -// Compute Engine has two Target HTTP Proxy resources: * Global -// (/compute/docs/reference/rest/v1/targetHttpProxies) * Regional -// (/compute/docs/reference/rest/v1/regionTargetHttpProxies) A target -// HTTP proxy is a component of GCP HTTP load balancers. * -// targetHttpProxies are used by external HTTP load balancers and -// Traffic Director. * regionTargetHttpProxies are used by internal HTTP -// load balancers. Forwarding rules reference a target HTTP proxy, and -// the target proxy then references a URL map. For more information, -// read Using Target Proxies and Forwarding rule concepts. -type TargetHttpProxy struct { +// SslCertificate: Represents an SSL Certificate resource. Google +// Compute Engine has two SSL Certificate resources: * Global +// (/compute/docs/reference/rest/v1/sslCertificates) * Regional +// (/compute/docs/reference/rest/v1/regionSslCertificates) The +// sslCertificates are used by: - external HTTPS load balancers - SSL +// proxy load balancers The regionSslCertificates are used by internal +// HTTPS load balancers. Optionally, certificate file contents that you +// upload can contain a set of up to five PEM-encoded certificates. The +// API call creates an object (sslCertificate) that holds this data. You +// can use SSL keys and certificates to secure connections to a load +// balancer. For more information, read Creating and using SSL +// certificates, SSL certificates quotas and limits, and Troubleshooting +// SSL certificates. +type SslCertificate struct { + // Certificate: A value read into memory from a certificate file. The + // certificate file must be in PEM format. The certificate chain must be + // no greater than 5 certs long. The chain must include at least one + // intermediate cert. + Certificate string `json:"certificate,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -45601,23 +46545,20 @@ type TargetHttpProxy struct { // property when you create the resource. Description string `json:"description,omitempty"` - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a TargetHttpProxy. An up-to-date - // fingerprint must be provided in order to patch/update the - // TargetHttpProxy; otherwise, the request will fail with error 412 - // conditionNotMet. To see the latest fingerprint, make a get() request - // to retrieve the TargetHttpProxy. 
- Fingerprint string `json:"fingerprint,omitempty"` + // ExpireTime: [Output Only] Expire time of the certificate. RFC3339 + ExpireTime string `json:"expireTime,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy - // for target HTTP proxies. + // Kind: [Output Only] Type of the resource. Always + // compute#sslCertificate for SSL certificates. Kind string `json:"kind,omitempty"` + // Managed: Configuration and status of a managed SSL certificate. + Managed *SslCertificateManagedSslCertificate `json:"managed,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -45627,89 +46568,42 @@ type TargetHttpProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // ProxyBind: This field only applies when the forwarding rule that - // references this target proxy has a loadBalancingScheme set to - // INTERNAL_SELF_MANAGED. When this field is set to true, Envoy proxies - // set up inbound traffic interception and bind to the IP address and - // port specified in the forwarding rule. This is generally useful when - // using Traffic Director to configure Envoy as a gateway or middle - // proxy (in other words, not a sidecar proxy). The Envoy proxy listens - // for inbound requests and handles requests when it receives them. The - // default is false. - ProxyBind bool `json:"proxyBind,omitempty"` + // PrivateKey: A value read into memory from a write-only private key + // file. The private key file must be in PEM format. For security, only + // insert requests include this field. + PrivateKey string `json:"privateKey,omitempty"` - // Region: [Output Only] URL of the region where the regional Target - // HTTP Proxy resides. This field is not applicable to global Target - // HTTP Proxies. + // Region: [Output Only] URL of the region where the regional SSL + // Certificate resides. This field is not applicable to global SSL + // Certificate. Region string `json:"region,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // SelfLink: [Output only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // UrlMap: URL to the UrlMap resource that defines the mapping from URL - // to the BackendService. - UrlMap string `json:"urlMap,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. 
It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetHttpProxyAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetHttpProxiesScopedList resources. - Items map[string]TargetHttpProxiesScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#targetHttpProxyAggregatedList for lists of Target HTTP - // Proxies. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // SelfManaged: Configuration and status of a self-managed SSL + // certificate. + SelfManaged *SslCertificateSelfManagedSslCertificate `json:"selfManaged,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // SubjectAlternativeNames: [Output Only] Domains associated with the + // certificate via Subject Alternative Name. + SubjectAlternativeNames []string `json:"subjectAlternativeNames,omitempty"` - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` + // Type: (Optional) Specifies the type of SSL certificate, either + // "SELF_MANAGED" or "MANAGED". If not specified, the certificate is + // self-managed and the fields certificate and private_key are used. + // + // Possible values: + // "MANAGED" - Google-managed SSLCertificate. + // "SELF_MANAGED" - Certificate uploaded by user. + // "TYPE_UNSPECIFIED" + Type string `json:"type,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Certificate") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -45717,32 +46611,31 @@ type TargetHttpProxyAggregatedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Certificate") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
// This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetHttpProxyAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxyAggregatedList +func (s *SslCertificate) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificate raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxyList: A list of TargetHttpProxy resources. -type TargetHttpProxyList struct { +type SslCertificateAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetHttpProxy resources. - Items []*TargetHttpProxy `json:"items,omitempty"` + // Items: A list of SslCertificatesScopedList resources. + Items map[string]SslCertificatesScopedList `json:"items,omitempty"` - // Kind: Type of resource. Always compute#targetHttpProxyList for lists - // of target HTTP proxies. + // Kind: [Output Only] Type of resource. Always + // compute#sslCertificateAggregatedList for lists of SSL Certificates. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -45756,8 +46649,11 @@ type TargetHttpProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *TargetHttpProxyListWarning `json:"warning,omitempty"` + Warning *SslCertificateAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -45780,15 +46676,15 @@ type TargetHttpProxyList struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxyList +func (s *SslCertificateAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxyListWarning: [Output Only] Informational warning -// message. -type TargetHttpProxyListWarning struct { +// SslCertificateAggregatedListWarning: [Output Only] Informational +// warning message. +type SslCertificateAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -45810,6 +46706,9 @@ type TargetHttpProxyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -45817,6 +46716,9 @@ type TargetHttpProxyListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. 
// "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -45850,7 +46752,7 @@ type TargetHttpProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetHttpProxyListWarningData `json:"data,omitempty"` + Data []*SslCertificateAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -45873,13 +46775,13 @@ type TargetHttpProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxyListWarning +func (s *SslCertificateAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpProxyListWarningData struct { +type SslCertificateAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -45910,48 +46812,68 @@ type TargetHttpProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxyListWarningData +func (s *SslCertificateAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpsProxiesScopedList struct { - // TargetHttpsProxies: A list of TargetHttpsProxies contained in this - // scope. - TargetHttpsProxies []*TargetHttpsProxy `json:"targetHttpsProxies,omitempty"` +// SslCertificateList: Contains a list of SslCertificate resources. +type SslCertificateList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Warning: Informational warning which replaces the list of backend - // services when the list is empty. - Warning *TargetHttpsProxiesScopedListWarning `json:"warning,omitempty"` + // Items: A list of SslCertificate resources. + Items []*SslCertificate `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "TargetHttpsProxies") - // to unconditionally include in API requests. By default, fields with + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *SslCertificateListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "TargetHttpsProxies") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxiesScopedList +func (s *SslCertificateList) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxiesScopedListWarning: Informational warning which -// replaces the list of backend services when the list is empty. -type TargetHttpsProxiesScopedListWarning struct { +// SslCertificateListWarning: [Output Only] Informational warning +// message. +type SslCertificateListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -45973,6 +46895,9 @@ type TargetHttpsProxiesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -45980,6 +46905,9 @@ type TargetHttpsProxiesScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -46013,7 +46941,7 @@ type TargetHttpsProxiesScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. 
// For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetHttpsProxiesScopedListWarningData `json:"data,omitempty"` + Data []*SslCertificateListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -46036,13 +46964,13 @@ type TargetHttpsProxiesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxiesScopedListWarning +func (s *SslCertificateListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpsProxiesScopedListWarningData struct { +type SslCertificateListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -46073,25 +47001,50 @@ type TargetHttpsProxiesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxiesScopedListWarningData +func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpsProxiesSetQuicOverrideRequest struct { - // QuicOverride: QUIC policy for the TargetHttpsProxy resource. +// SslCertificateManagedSslCertificate: Configuration and status of a +// managed SSL certificate. +type SslCertificateManagedSslCertificate struct { + // DomainStatus: [Output only] Detailed statuses of the domains + // specified for managed certificate resource. + DomainStatus map[string]string `json:"domainStatus,omitempty"` + + // Domains: The domains for which a managed SSL certificate will be + // generated. Each Google-managed SSL certificate supports up to the + // maximum number of domains per Google-managed SSL certificate + // (/load-balancing/docs/quotas#ssl_certificates). + Domains []string `json:"domains,omitempty"` + + // Status: [Output only] Status of the managed certificate resource. // // Possible values: - // "DISABLE" - The load balancer will not attempt to negotiate QUIC - // with clients. - // "ENABLE" - The load balancer will attempt to negotiate QUIC with - // clients. - // "NONE" - No overrides to the default QUIC policy. This option is - // implicit if no QUIC override has been specified in the request. - QuicOverride string `json:"quicOverride,omitempty"` + // "ACTIVE" - The certificate management is working, and a certificate + // has been provisioned. + // "MANAGED_CERTIFICATE_STATUS_UNSPECIFIED" + // "PROVISIONING" - The certificate management is working. GCP will + // attempt to provision the first certificate. + // "PROVISIONING_FAILED" - Certificate provisioning failed due to an + // issue with the DNS or load balancing configuration. For details of + // which domain failed, consult domain_status field. + // "PROVISIONING_FAILED_PERMANENTLY" - Certificate provisioning failed + // due to an issue with the DNS or load balancing configuration. It + // won't be retried. To try again delete and create a new managed + // SslCertificate resource. For details of which domain failed, consult + // domain_status field. 
+ // "RENEWAL_FAILED" - Renewal of the certificate has failed due to an + // issue with the DNS or load balancing configuration. The existing cert + // is still serving; however, it will expire shortly. To provision a + // renewed certificate, delete and create a new managed SslCertificate + // resource. For details on which domain failed, consult domain_status + // field. + Status string `json:"status,omitempty"` - // ForceSendFields is a list of field names (e.g. "QuicOverride") to + // ForceSendFields is a list of field names (e.g. "DomainStatus") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -46099,7 +47052,7 @@ type TargetHttpsProxiesSetQuicOverrideRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "QuicOverride") to include + // NullFields is a list of field names (e.g. "DomainStatus") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -46108,20 +47061,25 @@ type TargetHttpsProxiesSetQuicOverrideRequest struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesSetQuicOverrideRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxiesSetQuicOverrideRequest +func (s *SslCertificateManagedSslCertificate) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateManagedSslCertificate raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpsProxiesSetSslCertificatesRequest struct { - // SslCertificates: New set of SslCertificate resources to associate - // with this TargetHttpsProxy resource. At least one SSL certificate - // must be specified. Currently, you may specify up to 15 SSL - // certificates. - SslCertificates []string `json:"sslCertificates,omitempty"` +// SslCertificateSelfManagedSslCertificate: Configuration and status of +// a self-managed SSL certificate. +type SslCertificateSelfManagedSslCertificate struct { + // Certificate: A local certificate file. The certificate must be in PEM + // format. The certificate chain must be no greater than 5 certs long. + // The chain must include at least one intermediate cert. + Certificate string `json:"certificate,omitempty"` - // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // PrivateKey: A write-only private key in PEM format. Only insert + // requests will include this field. + PrivateKey string `json:"privateKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Certificate") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -46129,156 +47087,38 @@ type TargetHttpsProxiesSetSslCertificatesRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SslCertificates") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. 
However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxiesSetSslCertificatesRequest + // NullFields is a list of field names (e.g. "Certificate") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificateSelfManagedSslCertificate) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateSelfManagedSslCertificate raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxy: Represents a Target HTTPS Proxy resource. Google -// Compute Engine has two Target HTTPS Proxy resources: * Global -// (/compute/docs/reference/rest/v1/targetHttpsProxies) * Regional -// (/compute/docs/reference/rest/v1/regionTargetHttpsProxies) A target -// HTTPS proxy is a component of GCP HTTPS load balancers. * -// targetHttpsProxies are used by external HTTPS load balancers. * -// regionTargetHttpsProxies are used by internal HTTPS load balancers. -// Forwarding rules reference a target HTTPS proxy, and the target proxy -// then references a URL map. For more information, read Using Target -// Proxies and Forwarding rule concepts. -type TargetHttpsProxy struct { - // AuthorizationPolicy: Optional. A URL referring to a - // networksecurity.AuthorizationPolicy resource that describes how the - // proxy should authorize inbound traffic. If left blank, access will - // not be restricted by an authorization policy. Refer to the - // AuthorizationPolicy resource for additional details. - // authorizationPolicy only applies to a global TargetHttpsProxy - // attached to globalForwardingRules with the loadBalancingScheme set to - // INTERNAL_SELF_MANAGED. Note: This field currently has no impact. - AuthorizationPolicy string `json:"authorizationPolicy,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a TargetHttpsProxy. An - // up-to-date fingerprint must be provided in order to patch the - // TargetHttpsProxy; otherwise, the request will fail with error 412 - // conditionNotMet. To see the latest fingerprint, make a get() request - // to retrieve the TargetHttpsProxy. - Fingerprint string `json:"fingerprint,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. Always compute#targetHttpsProxy - // for target HTTPS proxies. 
- Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // ProxyBind: This field only applies when the forwarding rule that - // references this target proxy has a loadBalancingScheme set to - // INTERNAL_SELF_MANAGED. When this field is set to true, Envoy proxies - // set up inbound traffic interception and bind to the IP address and - // port specified in the forwarding rule. This is generally useful when - // using Traffic Director to configure Envoy as a gateway or middle - // proxy (in other words, not a sidecar proxy). The Envoy proxy listens - // for inbound requests and handles requests when it receives them. The - // default is false. - ProxyBind bool `json:"proxyBind,omitempty"` - - // QuicOverride: Specifies the QUIC override policy for this - // TargetHttpsProxy resource. This setting determines whether the load - // balancer attempts to negotiate QUIC with clients. You can specify - // NONE, ENABLE, or DISABLE. - When quic-override is set to NONE, Google - // manages whether QUIC is used. - When quic-override is set to ENABLE, - // the load balancer uses QUIC when possible. - When quic-override is - // set to DISABLE, the load balancer doesn't use QUIC. - If the - // quic-override flag is not specified, NONE is implied. - // - // Possible values: - // "DISABLE" - The load balancer will not attempt to negotiate QUIC - // with clients. - // "ENABLE" - The load balancer will attempt to negotiate QUIC with - // clients. - // "NONE" - No overrides to the default QUIC policy. This option is - // implicit if no QUIC override has been specified in the request. - QuicOverride string `json:"quicOverride,omitempty"` - - // Region: [Output Only] URL of the region where the regional - // TargetHttpsProxy resides. This field is not applicable to global - // TargetHttpsProxies. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerTlsPolicy: Optional. A URL referring to a - // networksecurity.ServerTlsPolicy resource that describes how the proxy - // should authenticate inbound traffic. serverTlsPolicy only applies to - // a global TargetHttpsProxy attached to globalForwardingRules with the - // loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, - // communications are not encrypted. Note: This field currently has no - // impact. - ServerTlsPolicy string `json:"serverTlsPolicy,omitempty"` - - // SslCertificates: URLs to SslCertificate resources that are used to - // authenticate connections between users and the load balancer. At - // least one SSL certificate must be specified. Currently, you may - // specify up to 15 SSL certificates. sslCertificates do not apply when - // the load balancing scheme is set to INTERNAL_SELF_MANAGED. - SslCertificates []string `json:"sslCertificates,omitempty"` - - // SslPolicy: URL of SslPolicy resource that will be associated with the - // TargetHttpsProxy resource. 
If not set, the TargetHttpsProxy resource - // has no SSL policy configured. - SslPolicy string `json:"sslPolicy,omitempty"` - - // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource - // that defines the mapping from URL to the BackendService. For example, - // the following are all valid URLs for specifying a URL map: - - // https://www.googleapis.compute/v1/projects/project/global/urlMaps/ - // url-map - projects/project/global/urlMaps/url-map - - // global/urlMaps/url-map - UrlMap string `json:"urlMap,omitempty"` +type SslCertificatesScopedList struct { + // SslCertificates: List of SslCertificates contained in this scope. + SslCertificates []*SslCertificate `json:"sslCertificates,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *SslCertificatesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "AuthorizationPolicy") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AuthorizationPolicy") to + // NullFields is a list of field names (e.g. "SslCertificates") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -46288,72 +47128,15 @@ type TargetHttpsProxy struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetHttpsProxyAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetHttpsProxiesScopedList resources. - Items map[string]TargetHttpsProxiesScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#targetHttpsProxyAggregatedList for lists of Target HTTP - // Proxies. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` - - // Warning: [Output Only] Informational warning message. 
- Warning *TargetHttpsProxyAggregatedListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpsProxyAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxyAggregatedList +func (s *SslCertificatesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificatesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxyAggregatedListWarning: [Output Only] Informational -// warning message. -type TargetHttpsProxyAggregatedListWarning struct { +// SslCertificatesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type SslCertificatesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -46375,6 +47158,9 @@ type TargetHttpsProxyAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -46382,6 +47168,9 @@ type TargetHttpsProxyAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -46415,7 +47204,7 @@ type TargetHttpsProxyAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetHttpsProxyAggregatedListWarningData `json:"data,omitempty"` + Data []*SslCertificatesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
@@ -46438,13 +47227,13 @@ type TargetHttpsProxyAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxyAggregatedListWarning +func (s *SslCertificatesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificatesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpsProxyAggregatedListWarningData struct { +type SslCertificatesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -46475,23 +47264,24 @@ type TargetHttpsProxyAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxyAggregatedListWarningData +func (s *SslCertificatesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificatesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. -type TargetHttpsProxyList struct { +type SslPoliciesAggregatedList struct { + Etag string `json:"etag,omitempty"` + // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetHttpsProxy resources. - Items []*TargetHttpsProxy `json:"items,omitempty"` + // Items: A list of SslPoliciesScopedList resources. + Items map[string]SslPoliciesScopedList `json:"items,omitempty"` - // Kind: Type of resource. Always compute#targetHttpsProxyList for lists - // of target HTTPS proxies. + // Kind: [Output Only] Type of resource. Always + // compute#sslPolicyAggregatedList for lists of SSL Policies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -46505,14 +47295,17 @@ type TargetHttpsProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *TargetHttpsProxyListWarning `json:"warning,omitempty"` + Warning *SslPoliciesAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -46520,7 +47313,7 @@ type TargetHttpsProxyList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Etag") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -46529,15 +47322,15 @@ type TargetHttpsProxyList struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxyList +func (s *SslPoliciesAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxyListWarning: [Output Only] Informational warning +// SslPoliciesAggregatedListWarning: [Output Only] Informational warning // message. -type TargetHttpsProxyListWarning struct { +type SslPoliciesAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -46559,6 +47352,9 @@ type TargetHttpsProxyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -46566,6 +47362,9 @@ type TargetHttpsProxyListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -46599,7 +47398,7 @@ type TargetHttpsProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetHttpsProxyListWarningData `json:"data,omitempty"` + Data []*SslPoliciesAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -46622,13 +47421,13 @@ type TargetHttpsProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxyListWarning +func (s *SslPoliciesAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpsProxyListWarningData struct { +type SslPoliciesAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -46659,109 +47458,22 @@ type TargetHttpsProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxyListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetInstance: Represents a Target Instance resource. You can use a -// target instance to handle traffic for one or more forwarding rules, -// which is ideal for forwarding protocol traffic that is managed by a -// single source. For example, ESP, AH, TCP, or UDP. For more -// information, read Target instances. -type TargetInstance struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Instance: A URL to the virtual machine instance that handles traffic - // for this target instance. When creating a target instance, you can - // provide the fully-qualified URL or a valid partial URL to the desired - // virtual machine. For example, the following are all valid URLs: - - // https://www.googleapis.com/compute/v1/projects/project/zones/zone - // /instances/instance - projects/project/zones/zone/instances/instance - // - zones/zone/instances/instance - Instance string `json:"instance,omitempty"` - - // Kind: [Output Only] The type of the resource. Always - // compute#targetInstance for target instances. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // NatPolicy: NAT option controlling how IPs are NAT'ed to the instance. - // Currently only NO_NAT (default value) is supported. - // - // Possible values: - // "NO_NAT" - No NAT performed. - NatPolicy string `json:"natPolicy,omitempty"` - - // Network: The URL of the network this target instance uses to forward - // traffic. If not specified, the traffic will be forwarded to the - // network that the default network interface belongs to. - Network string `json:"network,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Zone: [Output Only] URL of the zone where the target instance - // resides. You must specify this field as part of the HTTP request URL. - // It is not settable as a field in the request body. - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. 
"CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetInstance) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstance +func (s *SslPoliciesAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstanceAggregatedList struct { +type SslPoliciesList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetInstance resources. - Items map[string]TargetInstancesScopedList `json:"items,omitempty"` + // Items: A list of SslPolicy resources. + Items []*SslPolicy `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of the resource. Always + // compute#sslPoliciesList for lists of sslPolicies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -46775,11 +47487,8 @@ type TargetInstanceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *TargetInstanceAggregatedListWarning `json:"warning,omitempty"` + Warning *SslPoliciesListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -46802,15 +47511,14 @@ type TargetInstanceAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceAggregatedList +func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstanceAggregatedListWarning: [Output Only] Informational -// warning message. -type TargetInstanceAggregatedListWarning struct { +// SslPoliciesListWarning: [Output Only] Informational warning message. +type SslPoliciesListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -46832,6 +47540,9 @@ type TargetInstanceAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -46839,6 +47550,9 @@ type TargetInstanceAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -46872,7 +47586,7 @@ type TargetInstanceAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetInstanceAggregatedListWarningData `json:"data,omitempty"` + Data []*SslPoliciesListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -46895,13 +47609,13 @@ type TargetInstanceAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceAggregatedListWarning +func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstanceAggregatedListWarningData struct { +type SslPoliciesListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -46932,43 +47646,20 @@ type TargetInstanceAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceAggregatedListWarningData +func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstanceList: Contains a list of TargetInstance resources. -type TargetInstanceList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetInstance resources. - Items []*TargetInstance `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. 
- SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *TargetInstanceListWarning `json:"warning,omitempty"` +type SslPoliciesListAvailableFeaturesResponse struct { + Features []string `json:"features,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Features") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -46976,8 +47667,8 @@ type TargetInstanceList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Features") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -46985,15 +47676,46 @@ type TargetInstanceList struct { NullFields []string `json:"-"` } -func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceList +func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListAvailableFeaturesResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstanceListWarning: [Output Only] Informational warning -// message. -type TargetInstanceListWarning struct { +type SslPoliciesScopedList struct { + // SslPolicies: A list of SslPolicies contained in this scope. + SslPolicies []*SslPolicy `json:"sslPolicies,omitempty"` + + // Warning: Informational warning which replaces the list of SSL + // policies when the list is empty. + Warning *SslPoliciesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslPolicies") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslPolicies") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SslPoliciesScopedListWarning: Informational warning which replaces +// the list of SSL policies when the list is empty. 
+type SslPoliciesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -47015,6 +47737,9 @@ type TargetInstanceListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -47022,6 +47747,9 @@ type TargetInstanceListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -47055,7 +47783,7 @@ type TargetInstanceListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetInstanceListWarningData `json:"data,omitempty"` + Data []*SslPoliciesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -47078,13 +47806,13 @@ type TargetInstanceListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstanceListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceListWarning +func (s *SslPoliciesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstanceListWarningData struct { +type SslPoliciesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -47115,29 +47843,114 @@ type TargetInstanceListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceListWarningData +func (s *SslPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstancesScopedList struct { - // TargetInstances: A list of target instances contained in this scope. - TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` +// SslPolicy: Represents an SSL Policy resource. Use SSL policies to +// control the SSL features, such as versions and cipher suites, offered +// by an HTTPS or SSL Proxy load balancer. For more information, read +// SSL Policy Concepts. +type SslPolicy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. 
+ CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"` + // CustomFeatures: A list of features enabled when the selected profile + // is CUSTOM. The method returns the set of features that can be + // specified in this list. This field must be empty if the profile is + // not CUSTOM. + CustomFeatures []string `json:"customFeatures,omitempty"` - // ForceSendFields is a list of field names (e.g. "TargetInstances") to - // unconditionally include in API requests. By default, fields with + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // EnabledFeatures: [Output Only] The list of features enabled in the + // SSL policy. + EnabledFeatures []string `json:"enabledFeatures,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a SslPolicy. An up-to-date + // fingerprint must be provided in order to update the SslPolicy, + // otherwise the request will fail with error 412 conditionNotMet. To + // see the latest fingerprint, make a get() request to retrieve an + // SslPolicy. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output only] Type of the resource. Always compute#sslPolicyfor + // SSL policies. + Kind string `json:"kind,omitempty"` + + // MinTlsVersion: The minimum version of SSL protocol that can be used + // by the clients to establish a connection with the load balancer. This + // can be one of TLS_1_0, TLS_1_1, TLS_1_2. + // + // Possible values: + // "TLS_1_0" - TLS 1.0 + // "TLS_1_1" - TLS 1.1 + // "TLS_1_2" - TLS 1.2 + MinTlsVersion string `json:"minTlsVersion,omitempty"` + + // Name: Name of the resource. The name must be 1-63 characters long, + // and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + // Profile: Profile specifies the set of SSL features that can be used + // by the load balancer when negotiating SSL with clients. This can be + // one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, + // the set of SSL features to enable must be specified in the + // customFeatures field. + // + // Possible values: + // "COMPATIBLE" - Compatible profile. Allows the broadset set of + // clients, even those which support only out-of-date SSL features to + // negotiate with the load balancer. + // "CUSTOM" - Custom profile. Allow only the set of allowed SSL + // features specified in the customFeatures field. + // "MODERN" - Modern profile. Supports a wide set of SSL features, + // allowing modern clients to negotiate SSL with the load balancer. + // "RESTRICTED" - Restricted profile. Supports a reduced set of SSL + // features, intended to meet stricter compliance requirements. 
+ Profile string `json:"profile,omitempty"` + + // Region: [Output Only] URL of the region where the regional SSL policy + // resides. This field is not applicable to global SSL policies. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warnings: [Output Only] If potential misconfigurations are detected + // for this SSL policy, this field will be populated with warning + // messages. + Warnings []*SslPolicyWarnings `json:"warnings,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "TargetInstances") to + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -47147,15 +47960,13 @@ type TargetInstancesScopedList struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstancesScopedList +func (s *SslPolicy) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstancesScopedListWarning: Informational warning which -// replaces the list of addresses when the list is empty. -type TargetInstancesScopedListWarning struct { +type SslPolicyWarnings struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -47177,6 +47988,9 @@ type TargetInstancesScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -47184,6 +47998,9 @@ type TargetInstancesScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -47217,7 +48034,7 @@ type TargetInstancesScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. 
// For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"` + Data []*SslPolicyWarningsData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -47240,13 +48057,13 @@ type TargetInstancesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstancesScopedListWarning +func (s *SslPolicyWarnings) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicyWarnings raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstancesScopedListWarningData struct { +type SslPolicyWarningsData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -47277,179 +48094,355 @@ type TargetInstancesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstancesScopedListWarningData +func (s *SslPolicyWarningsData) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicyWarningsData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPool: Represents a Target Pool resource. Target pools are used -// for network TCP/UDP load balancing. A target pool references member -// instances, an associated legacy HttpHealthCheck resource, and, -// optionally, a backup target pool. For more information, read Using -// target pools. -type TargetPool struct { - // BackupPool: The server-defined URL for the resource. This field is - // applicable only when the containing target pool is serving a - // forwarding rule as the primary pool, and its failoverRatio field is - // properly set to a value between [0, 1]. backupPool and failoverRatio - // together define the fallback behavior of the primary target pool: if - // the ratio of the healthy instances in the primary pool is at or below - // failoverRatio, traffic arriving at the load-balanced IP will be - // directed to the backup pool. In case where failoverRatio and - // backupPool are not set, or all the instances in the backup pool are - // unhealthy, the traffic will be directed back to the primary pool in - // the "force" mode, where traffic will be spread to the healthy - // instances with the best effort, or to all instances when no instance - // is healthy. - BackupPool string `json:"backupPool,omitempty"` +type SslPolicyReference struct { + // SslPolicy: URL of the SSL policy resource. Set this to empty string + // to clear any existing SSL policy associated with the target proxy + // resource. + SslPolicy string `json:"sslPolicy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslPolicy") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslPolicy") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPolicyReference) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicyReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type StatefulPolicy struct { + PreservedState *StatefulPolicyPreservedState `json:"preservedState,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PreservedState") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PreservedState") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *StatefulPolicy) MarshalJSON() ([]byte, error) { + type NoMethod StatefulPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// StatefulPolicyPreservedState: Configuration of preserved resources. +type StatefulPolicyPreservedState struct { + // Disks: Disks created on the instances that will be preserved on + // instance delete, update, etc. This map is keyed with the device names + // of the disks. + Disks map[string]StatefulPolicyPreservedStateDiskDevice `json:"disks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Disks") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Disks") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StatefulPolicyPreservedState) MarshalJSON() ([]byte, error) { + type NoMethod StatefulPolicyPreservedState + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type StatefulPolicyPreservedStateDiskDevice struct { + // AutoDelete: These stateful disks will never be deleted during + // autohealing, update or VM instance recreate operations. 
This flag is + // used to configure if the disk should be deleted after it is no longer + // used by the group, e.g. when the given instance or the whole group is + // deleted. Note: disks attached in READ_ONLY mode cannot be + // auto-deleted. + // + // Possible values: + // "NEVER" + // "ON_PERMANENT_INSTANCE_DELETION" + AutoDelete string `json:"autoDelete,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoDelete") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AutoDelete") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} +func (s *StatefulPolicyPreservedStateDiskDevice) MarshalJSON() ([]byte, error) { + type NoMethod StatefulPolicyPreservedStateDiskDevice + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Subnetwork: Represents a Subnetwork resource. A subnetwork (also +// known as a subnet) is a logical partition of a Virtual Private Cloud +// network with one primary IP range and zero or more secondary IP +// ranges. For more information, read Virtual Private Cloud (VPC) +// Network. +type Subnetwork struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` // Description: An optional description of this resource. Provide this - // property when you create the resource. + // property when you create the resource. This field can be set only at + // resource creation time. Description string `json:"description,omitempty"` - // FailoverRatio: This field is applicable only when the containing - // target pool is serving a forwarding rule as the primary pool (i.e., - // not as a backup pool to some other target pool). The value of the - // field must be in [0, 1]. If set, backupPool must also be set. They - // together define the fallback behavior of the primary target pool: if - // the ratio of the healthy instances in the primary pool is at or below - // this number, traffic arriving at the load-balanced IP will be - // directed to the backup pool. In case where failoverRatio is not set - // or all the instances in the backup pool are unhealthy, the traffic - // will be directed back to the primary pool in the "force" mode, where - // traffic will be spread to the healthy instances with the best effort, - // or to all instances when no instance is healthy. - FailoverRatio float64 `json:"failoverRatio,omitempty"` + // EnableFlowLogs: Whether to enable flow logging for this subnetwork. + // If this field is not explicitly set, it will not appear in get + // listings. If not set the default behavior is determined by the org + // policy, if there is no org policy specified, then it will default to + // disabled. 
This field isn't supported with the purpose field set to + // INTERNAL_HTTPS_LOAD_BALANCER. + EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` - // HealthChecks: The URL of the HttpHealthCheck resource. A member - // instance in this pool is considered healthy if and only if the health - // checks pass. Only legacy HttpHealthChecks are supported. Only one - // health check may be specified. - HealthChecks []string `json:"healthChecks,omitempty"` + // ExternalIpv6Prefix: The external IPv6 address range that is owned by + // this subnetwork. + ExternalIpv6Prefix string `json:"externalIpv6Prefix,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a Subnetwork. An up-to-date + // fingerprint must be provided in order to update the Subnetwork, + // otherwise the request will fail with error 412 conditionNotMet. To + // see the latest fingerprint, make a get() request to retrieve a + // Subnetwork. + Fingerprint string `json:"fingerprint,omitempty"` + + // GatewayAddress: [Output Only] The gateway address for default routes + // to reach destination addresses outside this subnetwork. + GatewayAddress string `json:"gatewayAddress,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Instances: A list of resource URLs to the virtual machine instances - // serving this pool. They must live in zones contained in the same - // region as this pool. - Instances []string `json:"instances,omitempty"` + // InternalIpv6Prefix: [Output Only] The internal IPv6 address range + // that is assigned to this subnetwork. + InternalIpv6Prefix string `json:"internalIpv6Prefix,omitempty"` - // Kind: [Output Only] Type of the resource. Always compute#targetPool - // for target pools. + // IpCidrRange: The range of internal addresses that are owned by this + // subnetwork. Provide this property when you create the subnetwork. For + // example, 10.0.0.0/8 or 100.64.0.0/10. Ranges must be unique and + // non-overlapping within a network. Only IPv4 is supported. This field + // is set at resource creation time. The range can be any range listed + // in the Valid ranges list. The range can be expanded after creation + // using expandIpCidrRange. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // Ipv6AccessType: The access type of IPv6 address this subnet holds. + // It's immutable and can only be specified during creation or the first + // time the subnet is updated into IPV4_IPV6 dual stack. + // + // Possible values: + // "EXTERNAL" - VMs on this subnet will be assigned IPv6 addresses + // that are accessible via the Internet, as well as the VPC network. + // "INTERNAL" - VMs on this subnet will be assigned IPv6 addresses + // that are only accessible over the VPC network. + Ipv6AccessType string `json:"ipv6AccessType,omitempty"` + + // Ipv6CidrRange: [Output Only] This field is for internal use. + Ipv6CidrRange string `json:"ipv6CidrRange,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#subnetwork + // for Subnetwork resources. Kind string `json:"kind,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. 
Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // LogConfig: This field denotes the VPC flow logging options for this + // subnetwork. If logging is enabled, logs are exported to Cloud + // Logging. + LogConfig *SubnetworkLogConfig `json:"logConfig,omitempty"` + + // Name: The name of the resource, provided by the client when initially + // creating the resource. The name must be 1-63 characters long, and + // comply with RFC1035. Specifically, the name must be 1-63 characters + // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + // which means the first character must be a lowercase letter, and all + // following characters must be a dash, lowercase letter, or digit, + // except the last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Region: [Output Only] URL of the region where the target pool - // resides. + // Network: The URL of the network to which this subnetwork belongs, + // provided by the client when initially creating the subnetwork. This + // field can be set only at resource creation time. + Network string `json:"network,omitempty"` + + // PrivateIpGoogleAccess: Whether the VMs in this subnet can access + // Google services without assigned external IP addresses. This field + // can be both set at resource creation time and updated using + // setPrivateIpGoogleAccess. + PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` + + // PrivateIpv6GoogleAccess: This field is for internal use. This field + // can be both set at resource creation time and updated using patch. + // + // Possible values: + // "DISABLE_GOOGLE_ACCESS" - Disable private IPv6 access to/from + // Google services. + // "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE" - Bidirectional private + // IPv6 access to/from Google services. + // "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE" - Outbound private IPv6 + // access from VMs in this subnet to Google services. + PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` + + // Purpose: The purpose of the resource. This field can be either + // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with + // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created + // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If + // unspecified, the purpose defaults to PRIVATE_RFC_1918. The + // enableFlowLogs field isn't supported with the purpose field set to + // INTERNAL_HTTPS_LOAD_BALANCER. + // + // Possible values: + // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal + // HTTP(S) Load Balancing. + // "PRIVATE" - Regular user created or automatically created subnet. + // "PRIVATE_RFC_1918" - Regular user created or automatically created + // subnet. + // "PRIVATE_SERVICE_CONNECT" - Subnetworks created for Private Service + // Connect in the producer network. + // "REGIONAL_MANAGED_PROXY" - Subnetwork used for Regional + // Internal/External HTTP(S) Load Balancing. + Purpose string `json:"purpose,omitempty"` + + // Region: URL of the region where the Subnetwork resides. This field + // can be set only at resource creation time. Region string `json:"region,omitempty"` + // Role: The role of subnetwork. Currently, this field is only used when + // purpose = INTERNAL_HTTPS_LOAD_BALANCER. 
The value can be set to + // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being + // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // + // Possible values: + // "ACTIVE" - The ACTIVE subnet that is currently used. + // "BACKUP" - The BACKUP subnet that could be promoted to ACTIVE. + Role string `json:"role,omitempty"` + + // SecondaryIpRanges: An array of configurations for secondary IP ranges + // for VM instances contained in this subnetwork. The primary IP of such + // VM must belong to the primary ipCidrRange of the subnetwork. The + // alias IPs may belong to either primary or secondary ranges. This + // field can be updated with a patch request. + SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SessionAffinity: Session affinity option, must be one of the - // following values: NONE: Connections from the same client IP may go to - // any instance in the pool. CLIENT_IP: Connections from the same client - // IP will go to the same instance in the pool while that instance - // remains healthy. CLIENT_IP_PROTO: Connections from the same client IP - // with the same IP protocol will go to the same instance in the pool - // while that instance remains healthy. + // StackType: The stack type for the subnet. If set to IPV4_ONLY, new + // VMs in the subnet are assigned IPv4 addresses only. If set to + // IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 + // addresses. If not specified, IPV4_ONLY is used. This field can be + // both set at resource creation time and updated using patch. // // Possible values: - // "CLIENT_IP" - 2-tuple hash on packet's source and destination IP - // addresses. Connections from the same source IP address to the same - // destination IP address will be served by the same backend VM while - // that VM remains healthy. - // "CLIENT_IP_NO_DESTINATION" - 1-tuple hash only on packet's source - // IP address. Connections from the same source IP address will be - // served by the same backend VM while that VM remains healthy. This - // option can only be used for Internal TCP/UDP Load Balancing. - // "CLIENT_IP_PORT_PROTO" - 5-tuple hash on packet's source and - // destination IP addresses, IP protocol, and source and destination - // ports. Connections for the same IP protocol from the same source IP - // address and port to the same destination IP address and port will be - // served by the same backend VM while that VM remains healthy. This - // option cannot be used for HTTP(S) load balancing. - // "CLIENT_IP_PROTO" - 3-tuple hash on packet's source and destination - // IP addresses, and IP protocol. Connections for the same IP protocol - // from the same source IP address to the same destination IP address - // will be served by the same backend VM while that VM remains healthy. - // This option cannot be used for HTTP(S) load balancing. - // "GENERATED_COOKIE" - Hash based on a cookie generated by the L7 - // loadbalancer. Only valid for HTTP(S) load balancing. - // "HEADER_FIELD" - The hash is based on a user specified header - // field. - // "HTTP_COOKIE" - The hash is based on a user provided cookie. - // "NONE" - No session affinity. Connections from the same client IP - // may go to any instance in the pool. 
- SessionAffinity string `json:"sessionAffinity,omitempty"` + // "IPV4_IPV6" - New VMs in this subnet can have both IPv4 and IPv6 + // addresses. + // "IPV4_ONLY" - New VMs in this subnet will only be assigned IPv4 + // addresses. + StackType string `json:"stackType,omitempty"` + + // State: [Output Only] The state of the subnetwork, which can be one of + // the following values: READY: Subnetwork is created and ready to use + // DRAINING: only applicable to subnetworks that have the purpose set to + // INTERNAL_HTTPS_LOAD_BALANCER and indicates that connections to the + // load balancer are being drained. A subnetwork that is draining cannot + // be used or modified until it reaches a status of READY + // + // Possible values: + // "DRAINING" - Subnetwork is being drained. + // "READY" - Subnetwork is ready for use. + State string `json:"state,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "BackupPool") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BackupPool") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TargetPool) MarshalJSON() ([]byte, error) { - type NoMethod TargetPool +func (s *Subnetwork) MarshalJSON() ([]byte, error) { + type NoMethod Subnetwork raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -func (s *TargetPool) UnmarshalJSON(data []byte) error { - type NoMethod TargetPool - var s1 struct { - FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.FailoverRatio = float64(s1.FailoverRatio) - return nil -} - -type TargetPoolAggregatedList struct { +type SubnetworkAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetPool resources. - Items map[string]TargetPoolsScopedList `json:"items,omitempty"` + // Items: A list of SubnetworksScopedList resources. + Items map[string]SubnetworksScopedList `json:"items,omitempty"` // Kind: [Output Only] Type of resource. 
Always - // compute#targetPoolAggregatedList for aggregated lists of target - // pools. + // compute#subnetworkAggregatedList for aggregated lists of subnetworks. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -47467,7 +48460,7 @@ type TargetPoolAggregatedList struct { Unreachables []string `json:"unreachables,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetPoolAggregatedListWarning `json:"warning,omitempty"` + Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -47490,15 +48483,15 @@ type TargetPoolAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolAggregatedList +func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolAggregatedListWarning: [Output Only] Informational warning +// SubnetworkAggregatedListWarning: [Output Only] Informational warning // message. -type TargetPoolAggregatedListWarning struct { +type SubnetworkAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -47520,6 +48513,9 @@ type TargetPoolAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -47527,6 +48523,9 @@ type TargetPoolAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -47560,7 +48559,7 @@ type TargetPoolAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetPoolAggregatedListWarningData `json:"data,omitempty"` + Data []*SubnetworkAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
@@ -47583,13 +48582,13 @@ type TargetPoolAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolAggregatedListWarning +func (s *SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolAggregatedListWarningData struct { +type SubnetworkAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -47620,25 +48619,44 @@ type TargetPoolAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolAggregatedListWarningData +func (s *SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolInstanceHealth struct { - HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` +// SubnetworkList: Contains a list of Subnetwork resources. +type SubnetworkList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#targetPoolInstanceHealth when checking the health of an - // instance. + // Items: A list of Subnetwork resources. + Items []*Subnetwork `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#subnetworkList + // for lists of subnetworks. Kind string `json:"kind,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *SubnetworkListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -47646,77 +48664,23 @@ type TargetPoolInstanceHealth struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthStatus") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. 
"Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolInstanceHealth +func (s *SubnetworkList) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolList: Contains a list of TargetPool resources. -type TargetPoolList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetPool resources. - Items []*TargetPool `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#targetPoolList - // for lists of target pools. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *TargetPoolListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetPoolList) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolList - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetPoolListWarning: [Output Only] Informational warning message. -type TargetPoolListWarning struct { +// SubnetworkListWarning: [Output Only] Informational warning message. +type SubnetworkListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -47738,6 +48702,9 @@ type TargetPoolListWarning struct { // overridden. Deprecated unused field. 
// "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -47745,6 +48712,9 @@ type TargetPoolListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -47778,7 +48748,7 @@ type TargetPoolListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetPoolListWarningData `json:"data,omitempty"` + Data []*SubnetworkListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -47801,13 +48771,13 @@ type TargetPoolListWarning struct { NullFields []string `json:"-"` } -func (s *TargetPoolListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolListWarning +func (s *SubnetworkListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolListWarningData struct { +type SubnetworkListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -47838,82 +48808,120 @@ type TargetPoolListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolListWarningData +func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsAddHealthCheckRequest struct { - // HealthChecks: The HttpHealthCheck to add to the target pool. - HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` +// SubnetworkLogConfig: The available logging options for this +// subnetwork. +type SubnetworkLogConfig struct { + // AggregationInterval: Can only be specified if VPC flow logging for + // this subnetwork is enabled. Toggles the aggregation interval for + // collecting flow logs. Increasing the interval time will reduce the + // amount of generated flow logs for long lasting connections. Default + // is an interval of 5 seconds per connection. + // + // Possible values: + // "INTERVAL_10_MIN" + // "INTERVAL_15_MIN" + // "INTERVAL_1_MIN" + // "INTERVAL_30_SEC" + // "INTERVAL_5_MIN" + // "INTERVAL_5_SEC" + AggregationInterval string `json:"aggregationInterval,omitempty"` - // ForceSendFields is a list of field names (e.g. "HealthChecks") to - // unconditionally include in API requests. 
By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Enable: Whether to enable flow logging for this subnetwork. If this + // field is not explicitly set, it will not appear in get listings. If + // not set the default behavior is determined by the org policy, if + // there is no org policy specified, then it will default to disabled. + Enable bool `json:"enable,omitempty"` - // NullFields is a list of field names (e.g. "HealthChecks") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // FilterExpr: Can only be specified if VPC flow logs for this + // subnetwork is enabled. The filter expression is used to define which + // VPC flow logs should be exported to Cloud Logging. + FilterExpr string `json:"filterExpr,omitempty"` -func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsAddHealthCheckRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // FlowSampling: Can only be specified if VPC flow logging for this + // subnetwork is enabled. The value of the field must be in [0, 1]. Set + // the sampling rate of VPC flow logs within the subnetwork where 1.0 + // means all collected logs are reported and 0.0 means no logs are + // reported. Default is 0.5 unless otherwise specified by the org + // policy, which means half of all collected logs are reported. + FlowSampling float64 `json:"flowSampling,omitempty"` -type TargetPoolsAddInstanceRequest struct { - // Instances: A full or partial URL to an instance to add to this target - // pool. This can be a full or partial URL. For example, the following - // are valid URLs: - - // https://www.googleapis.com/compute/v1/projects/project-id/zones/zone - // /instances/instance-name - - // projects/project-id/zones/zone/instances/instance-name - - // zones/zone/instances/instance-name - Instances []*InstanceReference `json:"instances,omitempty"` + // Metadata: Can only be specified if VPC flow logs for this subnetwork + // is enabled. Configures whether all, none or a subset of metadata + // fields should be added to the reported VPC flow logs. Default is + // EXCLUDE_ALL_METADATA. + // + // Possible values: + // "CUSTOM_METADATA" + // "EXCLUDE_ALL_METADATA" + // "INCLUDE_ALL_METADATA" + Metadata string `json:"metadata,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with + // MetadataFields: Can only be specified if VPC flow logs for this + // subnetwork is enabled and "metadata" was set to CUSTOM_METADATA. + MetadataFields []string `json:"metadataFields,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AggregationInterval") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AggregationInterval") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsAddInstanceRequest +func (s *SubnetworkLogConfig) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkLogConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsRemoveHealthCheckRequest struct { - // HealthChecks: Health check URL to be removed. This can be a full or - // valid partial URL. For example, the following are valid URLs: - - // https://www.googleapis.com/compute/beta/projects/project - // /global/httpHealthChecks/health-check - - // projects/project/global/httpHealthChecks/health-check - - // global/httpHealthChecks/health-check - HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` +func (s *SubnetworkLogConfig) UnmarshalJSON(data []byte) error { + type NoMethod SubnetworkLogConfig + var s1 struct { + FlowSampling gensupport.JSONFloat64 `json:"flowSampling"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FlowSampling = float64(s1.FlowSampling) + return nil +} - // ForceSendFields is a list of field names (e.g. "HealthChecks") to +// SubnetworkSecondaryRange: Represents a secondary IP range of a +// subnetwork. +type SubnetworkSecondaryRange struct { + // IpCidrRange: The range of IP addresses belonging to this subnetwork + // secondary range. Provide this property when you create the + // subnetwork. Ranges must be unique and non-overlapping with all + // primary and secondary IP ranges within a network. Only IPv4 is + // supported. The range can be any range listed in the Valid ranges + // list. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // RangeName: The name associated with this subnetwork secondary range, + // used when adding an alias IP range to a VM instance. The name must be + // 1-63 characters long, and comply with RFC1035. The name must be + // unique within the subnetwork. + RangeName string `json:"rangeName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -47921,7 +48929,7 @@ type TargetPoolsRemoveHealthCheckRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthChecks") to include + // NullFields is a list of field names (e.g. "IpCidrRange") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -47930,17 +48938,21 @@ type TargetPoolsRemoveHealthCheckRequest struct { NullFields []string `json:"-"` } -func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsRemoveHealthCheckRequest +func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkSecondaryRange raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsRemoveInstanceRequest struct { - // Instances: URLs of the instances to be removed from target pool. - Instances []*InstanceReference `json:"instances,omitempty"` +type SubnetworksExpandIpCidrRangeRequest struct { + // IpCidrRange: The IP (in CIDR format or netmask) of internal addresses + // that are legal on this Subnetwork. This range should be disjoint from + // other subnetworks within this network. This range can only be larger + // than (i.e. a superset of) the range previously defined before the + // update. + IpCidrRange string `json:"ipCidrRange,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -47948,30 +48960,30 @@ type TargetPoolsRemoveInstanceRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsRemoveInstanceRequest +func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksExpandIpCidrRangeRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsScopedList struct { - // TargetPools: A list of target pools contained in this scope. - TargetPools []*TargetPool `json:"targetPools,omitempty"` +type SubnetworksScopedList struct { + // Subnetworks: A list of subnetworks contained in this scope. 
+ Subnetworks []*Subnetwork `json:"subnetworks,omitempty"` - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"` + // Warning: An informational warning that appears when the list of + // addresses is empty. + Warning *SubnetworksScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "TargetPools") to + // ForceSendFields is a list of field names (e.g. "Subnetworks") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -47979,7 +48991,7 @@ type TargetPoolsScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "TargetPools") to include + // NullFields is a list of field names (e.g. "Subnetworks") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -47988,15 +49000,15 @@ type TargetPoolsScopedList struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsScopedList +func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolsScopedListWarning: Informational warning which replaces -// the list of addresses when the list is empty. -type TargetPoolsScopedListWarning struct { +// SubnetworksScopedListWarning: An informational warning that appears +// when the list of addresses is empty. +type SubnetworksScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -48018,6 +49030,9 @@ type TargetPoolsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -48025,6 +49040,9 @@ type TargetPoolsScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -48058,7 +49076,7 @@ type TargetPoolsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. 
// For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"` + Data []*SubnetworksScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -48081,13 +49099,13 @@ type TargetPoolsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsScopedListWarning +func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsScopedListWarningData struct { +type SubnetworksScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -48118,44 +49136,62 @@ type TargetPoolsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsScopedListWarningData +func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetReference struct { - Target string `json:"target,omitempty"` +type SubnetworksSetPrivateIpGoogleAccessRequest struct { + PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` - // ForceSendFields is a list of field names (e.g. "Target") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "PrivateIpGoogleAccess") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Target") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "PrivateIpGoogleAccess") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
NullFields []string `json:"-"` } -func (s *TargetReference) MarshalJSON() ([]byte, error) { - type NoMethod TargetReference +func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksSetPrivateIpGoogleAccessRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxiesSetBackendServiceRequest struct { - // Service: The URL of the new BackendService resource for the - // targetSslProxy. - Service string `json:"service,omitempty"` +// Subsetting: Subsetting configuration for this BackendService. +// Currently this is applicable only for Internal TCP/UDP load +// balancing, Internal HTTP(S) load balancing and Traffic Director. +type Subsetting struct { + // Possible values: + // "CONSISTENT_HASH_SUBSETTING" - Subsetting based on consistent + // hashing. For Traffic Director, the number of backends per backend + // group (the subset size) is based on the `subset_size` parameter. For + // Internal HTTP(S) load balancing, the number of backends per backend + // group (the subset size) is dynamically adjusted in two cases: - As + // the number of proxy instances participating in Internal HTTP(S) load + // balancing increases, the subset size decreases. - When the total + // number of backends in a network exceeds the capacity of a single + // proxy instance, subset sizes are reduced automatically for each + // service that has backend subsetting enabled. + // "NONE" - No Subsetting. Clients may open connections and send + // traffic to all backends of this backend service. This can lead to + // performance issues if there is substantial imbalance in the count of + // clients and backends. + Policy string `json:"policy,omitempty"` - // ForceSendFields is a list of field names (e.g. "Service") to + // ForceSendFields is a list of field names (e.g. "Policy") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -48163,8 +49199,8 @@ type TargetSslProxiesSetBackendServiceRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Service") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Policy") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -48172,22 +49208,76 @@ type TargetSslProxiesSetBackendServiceRequest struct { NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxiesSetBackendServiceRequest +func (s *Subsetting) MarshalJSON() ([]byte, error) { + type NoMethod Subsetting raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxiesSetProxyHeaderRequest struct { - // ProxyHeader: The new type of proxy header to append before sending - // data to the backend. NONE or PROXY_V1 are allowed. +type TCPHealthCheck struct { + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 80. 
Valid values are 1 through 65535. + Port int64 `json:"port,omitempty"` + + // PortName: Not supported. + PortName string `json:"portName,omitempty"` + + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. + // + // Possible values: + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. + PortSpecification string `json:"portSpecification,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. // // Possible values: // "NONE" // "PROXY_V1" ProxyHeader string `json:"proxyHeader,omitempty"` - // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // Request: Instructs the health check prober to send this exact ASCII + // string, up to 1024 bytes in length, after establishing the TCP + // connection. + Request string `json:"request,omitempty"` + + // Response: Creates a content-based TCP health check. In addition to + // establishing a TCP connection, you can configure the health check to + // pass only when the backend sends this exact response ASCII string, up + // to 1024 bytes in length. For details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-ssl-tcp + Response string `json:"response,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Port") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -48195,28 +49285,37 @@ type TargetSslProxiesSetProxyHeaderRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ProxyHeader") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Port") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxiesSetProxyHeaderRequest +func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { + type NoMethod TCPHealthCheck raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxiesSetSslCertificatesRequest struct { - // SslCertificates: New set of URLs to SslCertificate resources to - // associate with this TargetSslProxy. At least one SSL certificate must - // be specified. Currently, you may specify up to 15 SSL certificates. - SslCertificates []string `json:"sslCertificates,omitempty"` +// Tags: A set of instance tags. +type Tags struct { + // Fingerprint: Specifies a fingerprint for this request, which is + // essentially a hash of the tags' contents and used for optimistic + // locking. The fingerprint is initially generated by Compute Engine and + // changes after every request to modify or update tags. You must always + // provide an up-to-date fingerprint hash in order to update or change + // tags. To see the latest fingerprint, make get() request to the + // instance. + Fingerprint string `json:"fingerprint,omitempty"` - // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // Items: An array of tags. Each tag must be 1-63 characters long, and + // comply with RFC1035. + Items []string `json:"items,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -48224,28 +49323,28 @@ type TargetSslProxiesSetSslCertificatesRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SslCertificates") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxiesSetSslCertificatesRequest +func (s *Tags) MarshalJSON() ([]byte, error) { + type NoMethod Tags raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetSslProxy: Represents a Target SSL Proxy resource. A target SSL -// proxy is a component of a SSL Proxy load balancer. Global forwarding -// rules reference a target SSL proxy, and the target proxy then -// references an external backend service. For more information, read -// Using Target Proxies. -type TargetSslProxy struct { +// TargetGrpcProxy: Represents a Target gRPC Proxy resource. A target +// gRPC proxy is a component of load balancers intended for load +// balancing gRPC traffic. Only global forwarding rules with load +// balancing scheme INTERNAL_SELF_MANAGED can reference a target gRPC +// proxy. The target gRPC Proxy references a URL map that specifies how +// traffic is routed to gRPC backend services. +type TargetGrpcProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -48254,12 +49353,21 @@ type TargetSslProxy struct { // property when you create the resource. Description string `json:"description,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a TargetGrpcProxy. An up-to-date + // fingerprint must be provided in order to patch/update the + // TargetGrpcProxy; otherwise, the request will fail with error 412 + // conditionNotMet. To see the latest fingerprint, make a get() request + // to retrieve the TargetGrpcProxy. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource type. The + // server generates this identifier. Id uint64 `json:"id,omitempty,string"` // Kind: [Output Only] Type of the resource. Always - // compute#targetSslProxy for target SSL proxies. + // compute#targetGrpcProxy for target grpc proxies. Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -48271,32 +49379,29 @@ type TargetSslProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Service: URL to the BackendService resource. - Service string `json:"service,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL with id for the + // resource. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` - // SslCertificates: URLs to SslCertificate resources that are used to - // authenticate connections to Backends. 
At least one SSL certificate - // must be specified. Currently, you may specify up to 15 SSL - // certificates. sslCertificates do not apply when the load balancing - // scheme is set to INTERNAL_SELF_MANAGED. - SslCertificates []string `json:"sslCertificates,omitempty"` + // UrlMap: URL to the UrlMap resource that defines the mapping from URL + // to the BackendService. The protocol field in the BackendService must + // be set to GRPC. + UrlMap string `json:"urlMap,omitempty"` - // SslPolicy: URL of SslPolicy resource that will be associated with the - // TargetSslProxy resource. If not set, the TargetSslProxy resource will - // not have any SSL policy configured. - SslPolicy string `json:"sslPolicy,omitempty"` + // ValidateForProxyless: If true, indicates that the BackendServices + // referenced by the urlMap may be accessed by gRPC applications without + // using a sidecar proxy. This will enable configuration checks on + // urlMap and its referenced BackendServices to not allow unsupported + // features. A gRPC application must use "xds:///" scheme in the target + // URI of the service it is connecting to. If false, indicates that the + // BackendServices referenced by the urlMap will be accessed by gRPC + // applications via a sidecar proxy. In this case, a gRPC application + // must not use "xds:///" scheme in the target URI of the service it is + // connecting to + ValidateForProxyless bool `json:"validateForProxyless,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -48320,22 +49425,22 @@ type TargetSslProxy struct { NullFields []string `json:"-"` } -func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxy +func (s *TargetGrpcProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetGrpcProxy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetSslProxyList: Contains a list of TargetSslProxy resources. -type TargetSslProxyList struct { +type TargetGrpcProxyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetSslProxy resources. - Items []*TargetSslProxy `json:"items,omitempty"` + // Items: A list of TargetGrpcProxy resources. + Items []*TargetGrpcProxy `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of the resource. Always + // compute#targetGrpcProxy for target grpc proxies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -48350,7 +49455,7 @@ type TargetSslProxyList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetSslProxyListWarning `json:"warning,omitempty"` + Warning *TargetGrpcProxyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -48373,15 +49478,15 @@ type TargetSslProxyList struct { NullFields []string `json:"-"` } -func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxyList +func (s *TargetGrpcProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetGrpcProxyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetSslProxyListWarning: [Output Only] Informational warning +// TargetGrpcProxyListWarning: [Output Only] Informational warning // message. 
-type TargetSslProxyListWarning struct { +type TargetGrpcProxyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -48403,6 +49508,9 @@ type TargetSslProxyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -48410,6 +49518,9 @@ type TargetSslProxyListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -48443,7 +49554,7 @@ type TargetSslProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetSslProxyListWarningData `json:"data,omitempty"` + Data []*TargetGrpcProxyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -48466,13 +49577,13 @@ type TargetSslProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxyListWarning +func (s *TargetGrpcProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetGrpcProxyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxyListWarningData struct { +type TargetGrpcProxyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -48503,134 +49614,22 @@ type TargetSslProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxyListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetTcpProxiesSetBackendServiceRequest struct { - // Service: The URL of the new BackendService resource for the - // targetTcpProxy. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Service") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"Service") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxiesSetBackendServiceRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetTcpProxiesSetProxyHeaderRequest struct { - // ProxyHeader: The new type of proxy header to append before sending - // data to the backend. NONE or PROXY_V1 are allowed. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ProxyHeader") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ProxyHeader") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxiesSetProxyHeaderRequest +func (s *TargetGrpcProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetGrpcProxyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxy: Represents a Target TCP Proxy resource. A target TCP -// proxy is a component of a TCP Proxy load balancer. Global forwarding -// rules reference target TCP proxy, and the target proxy then -// references an external backend service. For more information, read -// TCP Proxy Load Balancing overview. -type TargetTcpProxy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always - // compute#targetTcpProxy for target TCP proxies. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. 
Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // ProxyBind: This field only applies when the forwarding rule that - // references this target proxy has a loadBalancingScheme set to - // INTERNAL_SELF_MANAGED. When this field is set to true, Envoy proxies - // set up inbound traffic interception and bind to the IP address and - // port specified in the forwarding rule. This is generally useful when - // using Traffic Director to configure Envoy as a gateway or middle - // proxy (in other words, not a sidecar proxy). The Envoy proxy listens - // for inbound requests and handles requests when it receives them. The - // default is false. - ProxyBind bool `json:"proxyBind,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Service: URL to the BackendService resource. - Service string `json:"service,omitempty"` +type TargetHttpProxiesScopedList struct { + // TargetHttpProxies: A list of TargetHttpProxies contained in this + // scope. + TargetHttpProxies []*TargetHttpProxy `json:"targetHttpProxies,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *TargetHttpProxiesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // ForceSendFields is a list of field names (e.g. "TargetHttpProxies") // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -48638,7 +49637,7 @@ type TargetTcpProxy struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "TargetHttpProxies") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -48648,68 +49647,15 @@ type TargetTcpProxy struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. -type TargetTcpProxyList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetTcpProxy resources. - Items []*TargetTcpProxy `json:"items,omitempty"` - - // Kind: Type of resource. 
- Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyList +func (s *TargetHttpProxiesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxiesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxyListWarning: [Output Only] Informational warning -// message. -type TargetTcpProxyListWarning struct { +// TargetHttpProxiesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type TargetHttpProxiesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -48731,6 +49677,9 @@ type TargetTcpProxyListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -48738,6 +49687,9 @@ type TargetTcpProxyListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. 
// "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -48771,7 +49723,7 @@ type TargetTcpProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` + Data []*TargetHttpProxiesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -48794,13 +49746,13 @@ type TargetTcpProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyListWarning +func (s *TargetHttpProxiesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxiesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetTcpProxyListWarningData struct { +type TargetHttpProxiesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -48831,16 +49783,23 @@ type TargetTcpProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyListWarningData +func (s *TargetHttpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxiesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGateway: Represents a Target VPN Gateway resource. The -// target VPN gateway resource represents a Classic Cloud VPN gateway. -// For more information, read the the Cloud VPN Overview. -type TargetVpnGateway struct { +// TargetHttpProxy: Represents a Target HTTP Proxy resource. Google +// Compute Engine has two Target HTTP Proxy resources: * Global +// (/compute/docs/reference/rest/v1/targetHttpProxies) * Regional +// (/compute/docs/reference/rest/v1/regionTargetHttpProxies) A target +// HTTP proxy is a component of GCP HTTP load balancers. * +// targetHttpProxies are used by external HTTP load balancers and +// Traffic Director. * regionTargetHttpProxies are used by internal HTTP +// load balancers. Forwarding rules reference a target HTTP proxy, and +// the target proxy then references a URL map. For more information, +// read Using Target Proxies and Forwarding rule concepts. +type TargetHttpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -48849,17 +49808,21 @@ type TargetVpnGateway struct { // property when you create the resource. Description string `json:"description,omitempty"` - // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule - // resources. ForwardingRules are created using - // compute.forwardingRules.insert and associated with a VPN gateway. - ForwardingRules []string `json:"forwardingRules,omitempty"` + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a TargetHttpProxy. 
An up-to-date + // fingerprint must be provided in order to patch/update the + // TargetHttpProxy; otherwise, the request will fail with error 412 + // conditionNotMet. To see the latest fingerprint, make a get() request + // to retrieve the TargetHttpProxy. + Fingerprint string `json:"fingerprint,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. + // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy + // for target HTTP proxies. Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -48871,32 +49834,28 @@ type TargetVpnGateway struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Network: URL of the network to which this VPN gateway is attached. - // Provided by the client when the VPN gateway is created. - Network string `json:"network,omitempty"` + // ProxyBind: This field only applies when the forwarding rule that + // references this target proxy has a loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. When this field is set to true, Envoy proxies + // set up inbound traffic interception and bind to the IP address and + // port specified in the forwarding rule. This is generally useful when + // using Traffic Director to configure Envoy as a gateway or middle + // proxy (in other words, not a sidecar proxy). The Envoy proxy listens + // for inbound requests and handles requests when it receives them. The + // default is false. + ProxyBind bool `json:"proxyBind,omitempty"` - // Region: [Output Only] URL of the region where the target VPN gateway - // resides. You must specify this field as part of the HTTP request URL. - // It is not settable as a field in the request body. + // Region: [Output Only] URL of the region where the regional Target + // HTTP Proxy resides. This field is not applicable to global Target + // HTTP Proxies. Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] The status of the VPN gateway, which can be one - // of the following: CREATING, READY, FAILED, or DELETING. - // - // Possible values: - // "CREATING" - // "DELETING" - // "FAILED" - // "READY" - Status string `json:"status,omitempty"` - - // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. - // VpnTunnels are created using the compute.vpntunnels.insert method and - // associated with a VPN gateway. - Tunnels []string `json:"tunnels,omitempty"` + // UrlMap: URL to the UrlMap resource that defines the mapping from URL + // to the BackendService. + UrlMap string `json:"urlMap,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -48920,22 +49879,23 @@ type TargetVpnGateway struct { NullFields []string `json:"-"` } -func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGateway +func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewayAggregatedList struct { +type TargetHttpProxyAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. 
Id string `json:"id,omitempty"` - // Items: A list of TargetVpnGateway resources. - Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` + // Items: A list of TargetHttpProxiesScopedList resources. + Items map[string]TargetHttpProxiesScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. + // Kind: [Output Only] Type of resource. Always + // compute#targetHttpProxyAggregatedList for lists of Target HTTP + // Proxies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -48952,9 +49912,6 @@ type TargetVpnGatewayAggregatedList struct { // Unreachables: [Output Only] Unreachable resources. Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -48976,153 +49933,23 @@ type TargetVpnGatewayAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayAggregatedList - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetVpnGatewayAggregatedListWarning: [Output Only] Informational -// warning message. -type TargetVpnGatewayAggregatedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. 
- // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*TargetVpnGatewayAggregatedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayAggregatedListWarning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetVpnGatewayAggregatedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. 
- // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayAggregatedListWarningData +func (s *TargetHttpProxyAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. -type TargetVpnGatewayList struct { +// TargetHttpProxyList: A list of TargetHttpProxy resources. +type TargetHttpProxyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetVpnGateway resources. - Items []*TargetVpnGateway `json:"items,omitempty"` + // Items: A list of TargetHttpProxy resources. + Items []*TargetHttpProxy `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. + // Kind: Type of resource. Always compute#targetHttpProxyList for lists + // of target HTTP proxies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -49137,7 +49964,7 @@ type TargetVpnGatewayList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetVpnGatewayListWarning `json:"warning,omitempty"` + Warning *TargetHttpProxyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -49160,15 +49987,15 @@ type TargetVpnGatewayList struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayList +func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGatewayListWarning: [Output Only] Informational warning +// TargetHttpProxyListWarning: [Output Only] Informational warning // message. -type TargetVpnGatewayListWarning struct { +type TargetHttpProxyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -49190,6 +50017,9 @@ type TargetVpnGatewayListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -49197,6 +50027,9 @@ type TargetVpnGatewayListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -49230,7 +50063,7 @@ type TargetVpnGatewayListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetVpnGatewayListWarningData `json:"data,omitempty"` + Data []*TargetHttpProxyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -49253,13 +50086,13 @@ type TargetVpnGatewayListWarning struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayListWarning +func (s *TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewayListWarningData struct { +type TargetHttpProxyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -49290,22 +50123,22 @@ type TargetVpnGatewayListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayListWarningData +func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewaysScopedList struct { - // TargetVpnGateways: [Output Only] A list of target VPN gateways - // contained in this scope. - TargetVpnGateways []*TargetVpnGateway `json:"targetVpnGateways,omitempty"` +type TargetHttpsProxiesScopedList struct { + // TargetHttpsProxies: A list of TargetHttpsProxies contained in this + // scope. + TargetHttpsProxies []*TargetHttpsProxy `json:"targetHttpsProxies,omitempty"` - // Warning: [Output Only] Informational warning which replaces the list - // of addresses when the list is empty. - Warning *TargetVpnGatewaysScopedListWarning `json:"warning,omitempty"` + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *TargetHttpsProxiesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "TargetVpnGateways") + // ForceSendFields is a list of field names (e.g. "TargetHttpsProxies") // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -49313,7 +50146,7 @@ type TargetVpnGatewaysScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "TargetVpnGateways") to + // NullFields is a list of field names (e.g. "TargetHttpsProxies") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -49323,15 +50156,15 @@ type TargetVpnGatewaysScopedList struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewaysScopedList +func (s *TargetHttpsProxiesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGatewaysScopedListWarning: [Output Only] Informational -// warning which replaces the list of addresses when the list is empty. -type TargetVpnGatewaysScopedListWarning struct { +// TargetHttpsProxiesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type TargetHttpsProxiesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -49353,6 +50186,9 @@ type TargetVpnGatewaysScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -49360,6 +50196,9 @@ type TargetVpnGatewaysScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -49393,7 +50232,7 @@ type TargetVpnGatewaysScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetVpnGatewaysScopedListWarningData `json:"data,omitempty"` + Data []*TargetHttpsProxiesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
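// Illustrative sketch (editorial) of the ForceSendFields convention documented
// throughout this file: an empty Description is normally omitted from the
// request body, but listing it in ForceSendFields makes the generated
// MarshalJSON emit it explicitly, which is useful in Patch requests. The field
// choices here are arbitrary illustrations.
package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	p := &compute.TargetHttpProxy{Name: "p1"}
	b, _ := json.Marshal(p)
	fmt.Println(string(b)) // {"name":"p1"} - the empty Description is omitted

	p.ForceSendFields = []string{"Description"}
	b, _ = json.Marshal(p)
	fmt.Println(string(b)) // {"description":"","name":"p1"}
}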
@@ -49416,13 +50255,13 @@ type TargetVpnGatewaysScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewaysScopedListWarning +func (s *TargetHttpsProxiesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewaysScopedListWarningData struct { +type TargetHttpsProxiesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -49453,47 +50292,18 @@ type TargetVpnGatewaysScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewaysScopedListWarningData +func (s *TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TestFailure struct { - // ActualOutputUrl: The actual output URL evaluated by a load balancer - // containing the scheme, host, path and query parameters. - ActualOutputUrl string `json:"actualOutputUrl,omitempty"` - - // ActualRedirectResponseCode: Actual HTTP status code for rule with - // `urlRedirect` calculated by load balancer - ActualRedirectResponseCode int64 `json:"actualRedirectResponseCode,omitempty"` - - // ActualService: BackendService or BackendBucket returned by load - // balancer. - ActualService string `json:"actualService,omitempty"` +type TargetHttpsProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this + // TargetHttpsProxy. + CertificateMap string `json:"certificateMap,omitempty"` - // ExpectedOutputUrl: The expected output URL evaluated by a load - // balancer containing the scheme, host, path and query parameters. - ExpectedOutputUrl string `json:"expectedOutputUrl,omitempty"` - - // ExpectedRedirectResponseCode: Expected HTTP status code for rule with - // `urlRedirect` calculated by load balancer - ExpectedRedirectResponseCode int64 `json:"expectedRedirectResponseCode,omitempty"` - - // ExpectedService: Expected BackendService or BackendBucket resource - // the given URL should be mapped to. - ExpectedService string `json:"expectedService,omitempty"` - - // Headers: HTTP headers of the request. - Headers []*UrlMapTestHeader `json:"headers,omitempty"` - - // Host: Host portion of the URL. - Host string `json:"host,omitempty"` - - // Path: Path portion including query parameters in the URL. - Path string `json:"path,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ActualOutputUrl") to + // ForceSendFields is a list of field names (e.g. "CertificateMap") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -49501,7 +50311,7 @@ type TestFailure struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ActualOutputUrl") to + // NullFields is a list of field names (e.g. 
"CertificateMap") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -49511,19 +50321,25 @@ type TestFailure struct { NullFields []string `json:"-"` } -func (s *TestFailure) MarshalJSON() ([]byte, error) { - type NoMethod TestFailure +func (s *TargetHttpsProxiesSetCertificateMapRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesSetCertificateMapRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TestPermissionsRequest struct { - // Permissions: The set of permissions to check for the 'resource'. - // Permissions with wildcards (such as '*' or 'storage.*') are not - // allowed. - Permissions []string `json:"permissions,omitempty"` +type TargetHttpsProxiesSetQuicOverrideRequest struct { + // QuicOverride: QUIC policy for the TargetHttpsProxy resource. + // + // Possible values: + // "DISABLE" - The load balancer will not attempt to negotiate QUIC + // with clients. + // "ENABLE" - The load balancer will attempt to negotiate QUIC with + // clients. + // "NONE" - No overrides to the default QUIC policy. This option is + // implicit if no QUIC override has been specified in the request. + QuicOverride string `json:"quicOverride,omitempty"` - // ForceSendFields is a list of field names (e.g. "Permissions") to + // ForceSendFields is a list of field names (e.g. "QuicOverride") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -49531,7 +50347,7 @@ type TestPermissionsRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Permissions") to include + // NullFields is a list of field names (e.g. "QuicOverride") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -49540,22 +50356,20 @@ type TestPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestPermissionsRequest) MarshalJSON() ([]byte, error) { - type NoMethod TestPermissionsRequest +func (s *TargetHttpsProxiesSetQuicOverrideRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesSetQuicOverrideRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TestPermissionsResponse struct { - // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is allowed. - Permissions []string `json:"permissions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type TargetHttpsProxiesSetSslCertificatesRequest struct { + // SslCertificates: New set of SslCertificate resources to associate + // with this TargetHttpsProxy resource. At least one SSL certificate + // must be specified. Currently, you may specify up to 15 SSL + // certificates. + SslCertificates []string `json:"sslCertificates,omitempty"` - // ForceSendFields is a list of field names (e.g. "Permissions") to + // ForceSendFields is a list of field names (e.g. "SslCertificates") to // unconditionally include in API requests. 
By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -49563,135 +50377,72 @@ type TestPermissionsResponse struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "SslCertificates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { - type NoMethod TestPermissionsResponse +func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesSetSslCertificatesRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type Uint128 struct { - High uint64 `json:"high,omitempty,string"` - - Low uint64 `json:"low,omitempty,string"` - - // ForceSendFields is a list of field names (e.g. "High") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "High") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} +// TargetHttpsProxy: Represents a Target HTTPS Proxy resource. Google +// Compute Engine has two Target HTTPS Proxy resources: * Global +// (/compute/docs/reference/rest/v1/targetHttpsProxies) * Regional +// (/compute/docs/reference/rest/v1/regionTargetHttpsProxies) A target +// HTTPS proxy is a component of GCP HTTPS load balancers. * +// targetHttpsProxies are used by external HTTPS load balancers. * +// regionTargetHttpsProxies are used by internal HTTPS load balancers. +// Forwarding rules reference a target HTTPS proxy, and the target proxy +// then references a URL map. For more information, read Using Target +// Proxies and Forwarding rule concepts. +type TargetHttpsProxy struct { + // AuthorizationPolicy: Optional. A URL referring to a + // networksecurity.AuthorizationPolicy resource that describes how the + // proxy should authorize inbound traffic. 
If left blank, access will + // not be restricted by an authorization policy. Refer to the + // AuthorizationPolicy resource for additional details. + // authorizationPolicy only applies to a global TargetHttpsProxy + // attached to globalForwardingRules with the loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. Note: This field currently has no impact. + AuthorizationPolicy string `json:"authorizationPolicy,omitempty"` -func (s *Uint128) MarshalJSON() ([]byte, error) { - type NoMethod Uint128 - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates + // will be ignored. + CertificateMap string `json:"certificateMap,omitempty"` -// UrlMap: Represents a URL Map resource. Compute Engine has two URL Map -// resources: * Global (/compute/docs/reference/rest/v1/urlMaps) * -// Regional (/compute/docs/reference/rest/v1/regionUrlMaps) A URL map -// resource is a component of certain types of cloud load balancers and -// Traffic Director: * urlMaps are used by external HTTP(S) load -// balancers and Traffic Director. * regionUrlMaps are used by internal -// HTTP(S) load balancers. For a list of supported URL map features by -// the load balancer type, see the Load balancing features: Routing and -// traffic management table. For a list of supported URL map features -// for Traffic Director, see the Traffic Director features: Routing and -// traffic management table. This resource defines mappings from -// hostnames and URL paths to either a backend service or a backend -// bucket. To use the global urlMaps resource, the backend service must -// have a loadBalancingScheme of either EXTERNAL or -// INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend -// service must have a loadBalancingScheme of INTERNAL_MANAGED. For more -// information, read URL Map Concepts. -type UrlMap struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // DefaultRouteAction: defaultRouteAction takes effect when none of the - // hostRules match. The load balancer performs advanced routing actions, - // such as URL rewrites and header transformations, before forwarding - // the request to the selected backend. If defaultRouteAction specifies - // any weightedBackendServices, defaultService must not be set. - // Conversely if defaultService is set, defaultRouteAction cannot - // contain any weightedBackendServices. Only one of defaultRouteAction - // or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load - // balancers support only the urlRewrite action within - // defaultRouteAction. defaultRouteAction has no effect when the URL map - // is bound to a target gRPC proxy that has the validateForProxyless - // field set to true. - DefaultRouteAction *HttpRouteAction `json:"defaultRouteAction,omitempty"` - - // DefaultService: The full or partial URL of the defaultService - // resource to which traffic is directed if none of the hostRules match. - // If defaultRouteAction is also specified, advanced routing actions, - // such as URL rewrites, take effect before sending the request to the - // backend. However, if defaultService is specified, defaultRouteAction - // cannot contain any weightedBackendServices. 
Conversely, if - // routeAction specifies any weightedBackendServices, service must not - // be specified. Only one of defaultService, defaultUrlRedirect , or - // defaultRouteAction.weightedBackendService must be set. defaultService - // has no effect when the URL map is bound to a target gRPC proxy that - // has the validateForProxyless field set to true. - DefaultService string `json:"defaultService,omitempty"` - - // DefaultUrlRedirect: When none of the specified hostRules match, the - // request is redirected to a URL specified by defaultUrlRedirect. If - // defaultUrlRedirect is specified, defaultService or defaultRouteAction - // must not be set. Not supported when the URL map is bound to a target - // gRPC proxy. - DefaultUrlRedirect *HttpRedirectAction `json:"defaultUrlRedirect,omitempty"` - // Description: An optional description of this resource. Provide this // property when you create the resource. Description string `json:"description,omitempty"` // Fingerprint: Fingerprint of this resource. A hash of the contents // stored in this object. This field is used in optimistic locking. This - // field is ignored when inserting a UrlMap. An up-to-date fingerprint - // must be provided in order to update the UrlMap, otherwise the request - // will fail with error 412 conditionNotMet. To see the latest - // fingerprint, make a get() request to retrieve a UrlMap. + // field will be ignored when inserting a TargetHttpsProxy. An + // up-to-date fingerprint must be provided in order to patch the + // TargetHttpsProxy; otherwise, the request will fail with error 412 + // conditionNotMet. To see the latest fingerprint, make a get() request + // to retrieve the TargetHttpsProxy. Fingerprint string `json:"fingerprint,omitempty"` - // HeaderAction: Specifies changes to request and response headers that - // need to take effect for the selected backendService. The headerAction - // specified here take effect after headerAction specified under - // pathMatcher. headerAction is not supported for load balancers that - // have their loadBalancingScheme set to EXTERNAL. Not supported when - // the URL map is bound to a target gRPC proxy that has - // validateForProxyless field set to true. - HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` - - // HostRules: The list of host rules to use against the URL. - HostRules []*HostRule `json:"hostRules,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always compute#urlMaps for - // url maps. + // Kind: [Output Only] Type of resource. Always compute#targetHttpsProxy + // for target HTTPS proxies. Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -49703,30 +50454,77 @@ type UrlMap struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // PathMatchers: The list of named PathMatchers to use against the URL. - PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` + // ProxyBind: This field only applies when the forwarding rule that + // references this target proxy has a loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. When this field is set to true, Envoy proxies + // set up inbound traffic interception and bind to the IP address and + // port specified in the forwarding rule. 
This is generally useful when + // using Traffic Director to configure Envoy as a gateway or middle + // proxy (in other words, not a sidecar proxy). The Envoy proxy listens + // for inbound requests and handles requests when it receives them. The + // default is false. + ProxyBind bool `json:"proxyBind,omitempty"` - // Region: [Output Only] URL of the region where the regional URL map - // resides. This field is not applicable to global URL maps. You must - // specify this field as part of the HTTP request URL. It is not - // settable as a field in the request body. + // QuicOverride: Specifies the QUIC override policy for this + // TargetHttpsProxy resource. This setting determines whether the load + // balancer attempts to negotiate QUIC with clients. You can specify + // NONE, ENABLE, or DISABLE. - When quic-override is set to NONE, Google + // manages whether QUIC is used. - When quic-override is set to ENABLE, + // the load balancer uses QUIC when possible. - When quic-override is + // set to DISABLE, the load balancer doesn't use QUIC. - If the + // quic-override flag is not specified, NONE is implied. + // + // Possible values: + // "DISABLE" - The load balancer will not attempt to negotiate QUIC + // with clients. + // "ENABLE" - The load balancer will attempt to negotiate QUIC with + // clients. + // "NONE" - No overrides to the default QUIC policy. This option is + // implicit if no QUIC override has been specified in the request. + QuicOverride string `json:"quicOverride,omitempty"` + + // Region: [Output Only] URL of the region where the regional + // TargetHttpsProxy resides. This field is not applicable to global + // TargetHttpsProxies. Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Tests: The list of expected URL mapping tests. Request to update the - // UrlMap succeeds only if all test cases pass. You can specify a - // maximum of 100 tests per UrlMap. Not supported when the URL map is - // bound to a target gRPC proxy that has validateForProxyless field set - // to true. - Tests []*UrlMapTest `json:"tests,omitempty"` + // ServerTlsPolicy: Optional. A URL referring to a + // networksecurity.ServerTlsPolicy resource that describes how the proxy + // should authenticate inbound traffic. serverTlsPolicy only applies to + // a global TargetHttpsProxy attached to globalForwardingRules with the + // loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, + // communications are not encrypted. Note: This field currently has no + // impact. + ServerTlsPolicy string `json:"serverTlsPolicy,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to + // authenticate connections between users and the load balancer. At + // least one SSL certificate must be specified. Currently, you may + // specify up to 15 SSL certificates. sslCertificates do not apply when + // the load balancing scheme is set to INTERNAL_SELF_MANAGED. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // SslPolicy: URL of SslPolicy resource that will be associated with the + // TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource + // has no SSL policy configured. + SslPolicy string `json:"sslPolicy,omitempty"` + + // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource + // that defines the mapping from URL to the BackendService. 
For example, + // the following are all valid URLs for specifying a URL map: - + // https://www.googleapis.compute/v1/projects/project/global/urlMaps/ + // url-map - projects/project/global/urlMaps/url-map - + // global/urlMaps/url-map + UrlMap string `json:"urlMap,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // ForceSendFields is a list of field names (e.g. "AuthorizationPolicy") // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -49734,7 +50532,7 @@ type UrlMap struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "AuthorizationPolicy") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -49744,22 +50542,23 @@ type UrlMap struct { NullFields []string `json:"-"` } -func (s *UrlMap) MarshalJSON() ([]byte, error) { - type NoMethod UrlMap +func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapList: Contains a list of UrlMap resources. -type UrlMapList struct { +type TargetHttpsProxyAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of UrlMap resources. - Items []*UrlMap `json:"items,omitempty"` + // Items: A list of TargetHttpsProxiesScopedList resources. + Items map[string]TargetHttpsProxiesScopedList `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource. Always + // compute#targetHttpsProxyAggregatedList for lists of Target HTTP + // Proxies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -49773,8 +50572,11 @@ type UrlMapList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *UrlMapListWarning `json:"warning,omitempty"` + Warning *TargetHttpsProxyAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -49797,14 +50599,15 @@ type UrlMapList struct { NullFields []string `json:"-"` } -func (s *UrlMapList) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapList +func (s *TargetHttpsProxyAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapListWarning: [Output Only] Informational warning message. -type UrlMapListWarning struct { +// TargetHttpsProxyAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetHttpsProxyAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. 
For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -49826,6 +50629,9 @@ type UrlMapListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -49833,6 +50639,9 @@ type UrlMapListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -49866,7 +50675,7 @@ type UrlMapListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*UrlMapListWarningData `json:"data,omitempty"` + Data []*TargetHttpsProxyAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -49889,13 +50698,13 @@ type UrlMapListWarning struct { NullFields []string `json:"-"` } -func (s *UrlMapListWarning) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapListWarning +func (s *TargetHttpsProxyAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapListWarningData struct { +type TargetHttpsProxyAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -49926,16 +50735,44 @@ type UrlMapListWarningData struct { NullFields []string `json:"-"` } -func (s *UrlMapListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapListWarningData +func (s *TargetHttpsProxyAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapReference struct { - UrlMap string `json:"urlMap,omitempty"` +// TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. +type TargetHttpsProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // ForceSendFields is a list of field names (e.g. "UrlMap") to + // Items: A list of TargetHttpsProxy resources. + Items []*TargetHttpsProxy `json:"items,omitempty"` + + // Kind: Type of resource. Always compute#targetHttpsProxyList for lists + // of target HTTPS proxies. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetHttpsProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -49943,7 +50780,7 @@ type UrlMapReference struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "UrlMap") to include in API + // NullFields is a list of field names (e.g. "Id") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -49952,56 +50789,89 @@ type UrlMapReference struct { NullFields []string `json:"-"` } -func (s *UrlMapReference) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapReference +func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapTest: Message for the expected URL mappings. -type UrlMapTest struct { - // Description: Description of this test case. - Description string `json:"description,omitempty"` - - // ExpectedOutputUrl: The expected output URL evaluated by the load - // balancer containing the scheme, host, path and query parameters. For - // rules that forward requests to backends, the test passes only when - // expectedOutputUrl matches the request forwarded by the load balancer - // to backends. For rules with urlRewrite, the test verifies that the - // forwarded request matches hostRewrite and pathPrefixRewrite in the - // urlRewrite action. When service is specified, expectedOutputUrl`s - // scheme is ignored. For rules with urlRedirect, the test passes only - // if expectedOutputUrl matches the URL in the load balancer's redirect - // response. If urlRedirect specifies https_redirect, the test passes - // only if the scheme in expectedOutputUrl is also set to HTTPS. If - // urlRedirect specifies strip_query, the test passes only if - // expectedOutputUrl does not contain any query parameters. - // expectedOutputUrl is optional when service is specified. - ExpectedOutputUrl string `json:"expectedOutputUrl,omitempty"` - - // ExpectedRedirectResponseCode: For rules with urlRedirect, the test - // passes only if expectedRedirectResponseCode matches the HTTP status - // code in load balancer's redirect response. - // expectedRedirectResponseCode cannot be set when service is set. - ExpectedRedirectResponseCode int64 `json:"expectedRedirectResponseCode,omitempty"` - - // Headers: HTTP headers for this request. If headers contains a host - // header, then host must also match the header value. 
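// Illustrative sketch (editorial) of the NextPageToken paging contract
// described above, using the Pages helper generated for List calls;
// "my-project" is a placeholder and Application Default Credentials are
// assumed to be available.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	err = svc.TargetHttpsProxies.List("my-project").Pages(ctx,
		func(page *compute.TargetHttpsProxyList) error {
			for _, p := range page.Items {
				log.Printf("proxy %s -> url map %s", p.Name, p.UrlMap)
			}
			// Pages keeps requesting with page.NextPageToken until it is empty.
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}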
- Headers []*UrlMapTestHeader `json:"headers,omitempty"` - - // Host: Host portion of the URL. If headers contains a host header, - // then host must also match the header value. - Host string `json:"host,omitempty"` +// TargetHttpsProxyListWarning: [Output Only] Informational warning +// message. +type TargetHttpsProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. 
+ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` - // Path: Path portion of the URL. - Path string `json:"path,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*TargetHttpsProxyListWarningData `json:"data,omitempty"` - // Service: Expected BackendService or BackendBucket resource the given - // URL should be mapped to. The service field cannot be set if - // expectedRedirectResponseCode is set. - Service string `json:"service,omitempty"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Description") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -50009,30 +50879,36 @@ type UrlMapTest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *UrlMapTest) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapTest +func (s *TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapTestHeader: HTTP headers used in UrlMapTests. -type UrlMapTestHeader struct { - // Name: Header name. - Name string `json:"name,omitempty"` +type TargetHttpsProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Value: Header value. + // Value: [Output Only] A warning data value corresponding to the key. Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Name") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -50040,7 +50916,7 @@ type UrlMapTestHeader struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Name") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -50049,58 +50925,108 @@ type UrlMapTestHeader struct { NullFields []string `json:"-"` } -func (s *UrlMapTestHeader) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapTestHeader +func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapValidationResult: Message representing the validation result -// for a UrlMap. -type UrlMapValidationResult struct { - LoadErrors []string `json:"loadErrors,omitempty"` +// TargetInstance: Represents a Target Instance resource. You can use a +// target instance to handle traffic for one or more forwarding rules, +// which is ideal for forwarding protocol traffic that is managed by a +// single source. For example, ESP, AH, TCP, or UDP. For more +// information, read Target instances. +type TargetInstance struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // LoadSucceeded: Whether the given UrlMap can be successfully loaded. - // If false, 'loadErrors' indicates the reasons. - LoadSucceeded bool `json:"loadSucceeded,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - TestFailures []*TestFailure `json:"testFailures,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` - // TestPassed: If successfully loaded, this field indicates whether the - // test passed. If false, 'testFailures's indicate the reason of - // failure. - TestPassed bool `json:"testPassed,omitempty"` + // Instance: A URL to the virtual machine instance that handles traffic + // for this target instance. When creating a target instance, you can + // provide the fully-qualified URL or a valid partial URL to the desired + // virtual machine. For example, the following are all valid URLs: - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone + // /instances/instance - projects/project/zones/zone/instances/instance + // - zones/zone/instances/instance + Instance string `json:"instance,omitempty"` - // ForceSendFields is a list of field names (e.g. "LoadErrors") to - // unconditionally include in API requests. By default, fields with + // Kind: [Output Only] The type of the resource. Always + // compute#targetInstance for target instances. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. 
Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // NatPolicy: Must have a value of NO_NAT. Protocol forwarding delivers + // packets while preserving the destination IP address of the forwarding + // rule referencing the target instance. + // + // Possible values: + // "NO_NAT" - No NAT performed. + NatPolicy string `json:"natPolicy,omitempty"` + + // Network: The URL of the network this target instance uses to forward + // traffic. If not specified, the traffic will be forwarded to the + // network that the default network interface belongs to. + Network string `json:"network,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Zone: [Output Only] URL of the zone where the target instance + // resides. You must specify this field as part of the HTTP request URL. + // It is not settable as a field in the request body. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LoadErrors") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapValidationResult +func (s *TargetInstance) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstance raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapsAggregatedList struct { +type TargetInstanceAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of UrlMapsScopedList resources. - Items map[string]UrlMapsScopedList `json:"items,omitempty"` + // Items: A list of TargetInstance resources. + Items map[string]TargetInstancesScopedList `json:"items,omitempty"` // Kind: Type of resource. 
Kind string `json:"kind,omitempty"` @@ -50120,7 +51046,7 @@ type UrlMapsAggregatedList struct { Unreachables []string `json:"unreachables,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *UrlMapsAggregatedListWarning `json:"warning,omitempty"` + Warning *TargetInstanceAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -50143,15 +51069,15 @@ type UrlMapsAggregatedList struct { NullFields []string `json:"-"` } -func (s *UrlMapsAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapsAggregatedList +func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapsAggregatedListWarning: [Output Only] Informational warning -// message. -type UrlMapsAggregatedListWarning struct { +// TargetInstanceAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetInstanceAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -50173,6 +51099,9 @@ type UrlMapsAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -50180,6 +51109,9 @@ type UrlMapsAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -50213,7 +51145,7 @@ type UrlMapsAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*UrlMapsAggregatedListWarningData `json:"data,omitempty"` + Data []*TargetInstanceAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -50236,13 +51168,13 @@ type UrlMapsAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *UrlMapsAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapsAggregatedListWarning +func (s *TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapsAggregatedListWarningData struct { +type TargetInstanceAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -50273,21 +51205,43 @@ type UrlMapsAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *UrlMapsAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapsAggregatedListWarningData +func (s *TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapsScopedList struct { - // UrlMaps: A list of UrlMaps contained in this scope. - UrlMaps []*UrlMap `json:"urlMaps,omitempty"` +// TargetInstanceList: Contains a list of TargetInstance resources. +type TargetInstanceList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Warning: Informational warning which replaces the list of backend - // services when the list is empty. - Warning *UrlMapsScopedListWarning `json:"warning,omitempty"` + // Items: A list of TargetInstance resources. + Items []*TargetInstance `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "UrlMaps") to + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetInstanceListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -50295,8 +51249,8 @@ type UrlMapsScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "UrlMaps") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
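Illustrative sketch (not part of the vendored patch): the structs in the hunks above all carry the generated ForceSendFields / NullFields hooks and a MarshalJSON that delegates to gensupport.MarshalJSON. Assuming the usual import path for this package, a caller might use them like this; the value "example-target-instance" and the choice of fields are hypothetical.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// ForceSendFields keeps an otherwise-empty field in the request body;
	// NullFields serializes a field as an explicit JSON null (useful in
	// Patch requests). Both are handled by the generated MarshalJSON shown
	// in the diff above.
	ti := &compute.TargetInstance{
		Name:            "example-target-instance", // hypothetical value
		Description:     "",                        // empty, normally omitted
		ForceSendFields: []string{"Description"},   // ...but force-sent here
		NullFields:      []string{"Network"},       // sent as "network": null
	}

	b, err := json.Marshal(ti) // dispatches to the generated MarshalJSON
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// Expected shape (key order may differ):
	// {"description":"","name":"example-target-instance","network":null}
}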
@@ -50304,15 +51258,15 @@ type UrlMapsScopedList struct { NullFields []string `json:"-"` } -func (s *UrlMapsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapsScopedList +func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapsScopedListWarning: Informational warning which replaces the -// list of backend services when the list is empty. -type UrlMapsScopedListWarning struct { +// TargetInstanceListWarning: [Output Only] Informational warning +// message. +type TargetInstanceListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -50334,6 +51288,9 @@ type UrlMapsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -50341,6 +51298,9 @@ type UrlMapsScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -50374,7 +51334,7 @@ type UrlMapsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*UrlMapsScopedListWarningData `json:"data,omitempty"` + Data []*TargetInstanceListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -50397,13 +51357,13 @@ type UrlMapsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *UrlMapsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapsScopedListWarning +func (s *TargetInstanceListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapsScopedListWarningData struct { +type TargetInstanceListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -50434,215 +51394,21 @@ type UrlMapsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *UrlMapsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapsScopedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type UrlMapsValidateRequest struct { - // Resource: Content of the UrlMap to be validated. 
- Resource *UrlMap `json:"resource,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Resource") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Resource") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapsValidateRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type UrlMapsValidateResponse struct { - Result *UrlMapValidationResult `json:"result,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Result") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Result") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapsValidateResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// UrlRewrite: The spec for modifying the path before sending the -// request to the matched backend service. -type UrlRewrite struct { - // HostRewrite: Before forwarding the request to the selected service, - // the request's host header is replaced with contents of hostRewrite. - // The value must be from 1 to 255 characters. - HostRewrite string `json:"hostRewrite,omitempty"` - - // PathPrefixRewrite: Before forwarding the request to the selected - // backend service, the matching portion of the request's path is - // replaced by pathPrefixRewrite. The value must be from 1 to 1024 - // characters. - PathPrefixRewrite string `json:"pathPrefixRewrite,omitempty"` - - // ForceSendFields is a list of field names (e.g. "HostRewrite") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. 
However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "HostRewrite") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UrlRewrite) MarshalJSON() ([]byte, error) { - type NoMethod UrlRewrite - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// UsableSubnetwork: Subnetwork which the current user has -// compute.subnetworks.use permission on. -type UsableSubnetwork struct { - // IpCidrRange: The range of internal addresses that are owned by this - // subnetwork. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // Network: Network URL. - Network string `json:"network,omitempty"` - - // SecondaryIpRanges: Secondary IP ranges. - SecondaryIpRanges []*UsableSubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` - - // Subnetwork: Subnetwork URL. - Subnetwork string `json:"subnetwork,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UsableSubnetwork) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetwork - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// UsableSubnetworkSecondaryRange: Secondary IP range of a usable -// subnetwork. -type UsableSubnetworkSecondaryRange struct { - // IpCidrRange: The range of IP addresses belonging to this subnetwork - // secondary range. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // RangeName: The name associated with this subnetwork secondary range, - // used when adding an alias IP range to a VM instance. The name must be - // 1-63 characters long, and comply with RFC1035. The name must be - // unique within the subnetwork. - RangeName string `json:"rangeName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. 
- // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UsableSubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetworkSecondaryRange +func (s *TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UsableSubnetworksAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output] A list of usable subnetwork URLs. - Items []*UsableSubnetwork `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#usableSubnetworksAggregatedList for aggregated lists of - // usable subnetworks. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. In special cases listUsable may return 0 subnetworks and - // nextPageToken which still should be used to get the next page of - // results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *UsableSubnetworksAggregatedListWarning `json:"warning,omitempty"` +type TargetInstancesScopedList struct { + // TargetInstances: A list of target instances contained in this scope. + TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "TargetInstances") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -50650,24 +51416,25 @@ type UsableSubnetworksAggregatedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "TargetInstances") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *UsableSubnetworksAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetworksAggregatedList +func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstancesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsableSubnetworksAggregatedListWarning: [Output Only] Informational -// warning message. -type UsableSubnetworksAggregatedListWarning struct { +// TargetInstancesScopedListWarning: Informational warning which +// replaces the list of addresses when the list is empty. +type TargetInstancesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -50689,6 +51456,9 @@ type UsableSubnetworksAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -50696,6 +51466,9 @@ type UsableSubnetworksAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -50729,7 +51502,7 @@ type UsableSubnetworksAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*UsableSubnetworksAggregatedListWarningData `json:"data,omitempty"` + Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
@@ -50752,13 +51525,13 @@ type UsableSubnetworksAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworksAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetworksAggregatedListWarning +func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstancesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UsableSubnetworksAggregatedListWarningData struct { +type TargetInstancesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -50789,153 +51562,179 @@ type UsableSubnetworksAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworksAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetworksAggregatedListWarningData +func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstancesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsageExportLocation: The location in Cloud Storage and naming method -// of the daily usage report. Contains bucket_name and report_name -// prefix. -type UsageExportLocation struct { - // BucketName: The name of an existing bucket in Cloud Storage where the - // usage report object is stored. The Google Service Account is granted - // write access to this bucket. This can either be the bucket name by - // itself, such as example-bucket, or the bucket name with gs:// or - // https://storage.googleapis.com/ in front of it, such as - // gs://example-bucket. - BucketName string `json:"bucketName,omitempty"` - - // ReportNamePrefix: An optional prefix for the name of the usage report - // object stored in bucketName. If not supplied, defaults to usage_gce. - // The report is stored as a CSV file named - // report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the - // usage according to Pacific Time. If you supply a prefix, it should - // conform to Cloud Storage object naming conventions. - ReportNamePrefix string `json:"reportNamePrefix,omitempty"` +// TargetPool: Represents a Target Pool resource. Target pools are used +// for network TCP/UDP load balancing. A target pool references member +// instances, an associated legacy HttpHealthCheck resource, and, +// optionally, a backup target pool. For more information, read Using +// target pools. +type TargetPool struct { + // BackupPool: The server-defined URL for the resource. This field is + // applicable only when the containing target pool is serving a + // forwarding rule as the primary pool, and its failoverRatio field is + // properly set to a value between [0, 1]. backupPool and failoverRatio + // together define the fallback behavior of the primary target pool: if + // the ratio of the healthy instances in the primary pool is at or below + // failoverRatio, traffic arriving at the load-balanced IP will be + // directed to the backup pool. In case where failoverRatio and + // backupPool are not set, or all the instances in the backup pool are + // unhealthy, the traffic will be directed back to the primary pool in + // the "force" mode, where traffic will be spread to the healthy + // instances with the best effort, or to all instances when no instance + // is healthy. 
+ BackupPool string `json:"backupPool,omitempty"` - // ForceSendFields is a list of field names (e.g. "BucketName") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // NullFields is a list of field names (e.g. "BucketName") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` -func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { - type NoMethod UsageExportLocation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // FailoverRatio: This field is applicable only when the containing + // target pool is serving a forwarding rule as the primary pool (i.e., + // not as a backup pool to some other target pool). The value of the + // field must be in [0, 1]. If set, backupPool must also be set. They + // together define the fallback behavior of the primary target pool: if + // the ratio of the healthy instances in the primary pool is at or below + // this number, traffic arriving at the load-balanced IP will be + // directed to the backup pool. In case where failoverRatio is not set + // or all the instances in the backup pool are unhealthy, the traffic + // will be directed back to the primary pool in the "force" mode, where + // traffic will be spread to the healthy instances with the best effort, + // or to all instances when no instance is healthy. + FailoverRatio float64 `json:"failoverRatio,omitempty"` -// VmEndpointNatMappings: Contain information of Nat mapping for a VM -// endpoint (i.e., NIC). -type VmEndpointNatMappings struct { - // InstanceName: Name of the VM instance which the endpoint belongs to - InstanceName string `json:"instanceName,omitempty"` + // HealthChecks: The URL of the HttpHealthCheck resource. A member + // instance in this pool is considered healthy if and only if the health + // checks pass. Only legacy HttpHealthChecks are supported. Only one + // health check may be specified. + HealthChecks []string `json:"healthChecks,omitempty"` - InterfaceNatMappings []*VmEndpointNatMappingsInterfaceNatMappings `json:"interfaceNatMappings,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` - // ForceSendFields is a list of field names (e.g. "InstanceName") to - // unconditionally include in API requests. By default, fields with + // Instances: A list of resource URLs to the virtual machine instances + // serving this pool. They must live in zones contained in the same + // region as this pool. 
+ Instances []string `json:"instances,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#targetPool + // for target pools. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Region: [Output Only] URL of the region where the target pool + // resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SessionAffinity: Session affinity option, must be one of the + // following values: NONE: Connections from the same client IP may go to + // any instance in the pool. CLIENT_IP: Connections from the same client + // IP will go to the same instance in the pool while that instance + // remains healthy. CLIENT_IP_PROTO: Connections from the same client IP + // with the same IP protocol will go to the same instance in the pool + // while that instance remains healthy. + // + // Possible values: + // "CLIENT_IP" - 2-tuple hash on packet's source and destination IP + // addresses. Connections from the same source IP address to the same + // destination IP address will be served by the same backend VM while + // that VM remains healthy. + // "CLIENT_IP_NO_DESTINATION" - 1-tuple hash only on packet's source + // IP address. Connections from the same source IP address will be + // served by the same backend VM while that VM remains healthy. This + // option can only be used for Internal TCP/UDP Load Balancing. + // "CLIENT_IP_PORT_PROTO" - 5-tuple hash on packet's source and + // destination IP addresses, IP protocol, and source and destination + // ports. Connections for the same IP protocol from the same source IP + // address and port to the same destination IP address and port will be + // served by the same backend VM while that VM remains healthy. This + // option cannot be used for HTTP(S) load balancing. + // "CLIENT_IP_PROTO" - 3-tuple hash on packet's source and destination + // IP addresses, and IP protocol. Connections for the same IP protocol + // from the same source IP address to the same destination IP address + // will be served by the same backend VM while that VM remains healthy. + // This option cannot be used for HTTP(S) load balancing. + // "GENERATED_COOKIE" - Hash based on a cookie generated by the L7 + // loadbalancer. Only valid for HTTP(S) load balancing. + // "HEADER_FIELD" - The hash is based on a user specified header + // field. + // "HTTP_COOKIE" - The hash is based on a user provided cookie. + // "NONE" - No session affinity. Connections from the same client IP + // may go to any instance in the pool. + SessionAffinity string `json:"sessionAffinity,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "BackupPool") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "InstanceName") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "BackupPool") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *VmEndpointNatMappings) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappings +func (s *TargetPool) MarshalJSON() ([]byte, error) { + type NoMethod TargetPool raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VmEndpointNatMappingsInterfaceNatMappings: Contain information of Nat -// mapping for an interface of this endpoint. -type VmEndpointNatMappingsInterfaceNatMappings struct { - // DrainNatIpPortRanges: List of all drain IP:port-range mappings - // assigned to this interface. These ranges are inclusive, that is, both - // the first and the last ports can be used for NAT. Example: - // ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. - DrainNatIpPortRanges []string `json:"drainNatIpPortRanges,omitempty"` - - // NatIpPortRanges: A list of all IP:port-range mappings assigned to - // this interface. These ranges are inclusive, that is, both the first - // and the last ports can be used for NAT. Example: - // ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. - NatIpPortRanges []string `json:"natIpPortRanges,omitempty"` - - // NumTotalDrainNatPorts: Total number of drain ports across all NAT IPs - // allocated to this interface. It equals to the aggregated port number - // in the field drain_nat_ip_port_ranges. - NumTotalDrainNatPorts int64 `json:"numTotalDrainNatPorts,omitempty"` - - // NumTotalNatPorts: Total number of ports across all NAT IPs allocated - // to this interface. It equals to the aggregated port number in the - // field nat_ip_port_ranges. - NumTotalNatPorts int64 `json:"numTotalNatPorts,omitempty"` - - // SourceAliasIpRange: Alias IP range for this interface endpoint. It - // will be a private (RFC 1918) IP range. Examples: "10.33.4.55/32", or - // "192.168.5.0/24". - SourceAliasIpRange string `json:"sourceAliasIpRange,omitempty"` - - // SourceVirtualIp: Primary IP of the VM for this NIC. - SourceVirtualIp string `json:"sourceVirtualIp,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "DrainNatIpPortRanges") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"DrainNatIpPortRanges") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *VmEndpointNatMappingsInterfaceNatMappings) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappingsInterfaceNatMappings - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +func (s *TargetPool) UnmarshalJSON(data []byte) error { + type NoMethod TargetPool + var s1 struct { + FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FailoverRatio = float64(s1.FailoverRatio) + return nil } -// VmEndpointNatMappingsList: Contains a list of VmEndpointNatMappings. -type VmEndpointNatMappingsList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. +type TargetPoolAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` + // Items: A list of TargetPool resources. + Items map[string]TargetPoolsScopedList `json:"items,omitempty"` + // Kind: [Output Only] Type of resource. Always - // compute#vmEndpointNatMappingsList for lists of Nat mappings of VM - // endpoints. + // compute#targetPoolAggregatedList for aggregated lists of target + // pools. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -50946,15 +51745,14 @@ type VmEndpointNatMappingsList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // Result: [Output Only] A list of Nat mapping information of VM - // endpoints. - Result []*VmEndpointNatMappings `json:"result,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *VmEndpointNatMappingsListWarning `json:"warning,omitempty"` + Warning *TargetPoolAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -50977,15 +51775,15 @@ type VmEndpointNatMappingsList struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsList) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappingsList +func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VmEndpointNatMappingsListWarning: [Output Only] Informational warning +// TargetPoolAggregatedListWarning: [Output Only] Informational warning // message. -type VmEndpointNatMappingsListWarning struct { +type TargetPoolAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -51007,6 +51805,9 @@ type VmEndpointNatMappingsListWarning struct { // overridden. Deprecated unused field. 
// "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -51014,6 +51815,9 @@ type VmEndpointNatMappingsListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -51047,7 +51851,7 @@ type VmEndpointNatMappingsListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*VmEndpointNatMappingsListWarningData `json:"data,omitempty"` + Data []*TargetPoolAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -51070,13 +51874,13 @@ type VmEndpointNatMappingsListWarning struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappingsListWarning +func (s *TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VmEndpointNatMappingsListWarningData struct { +type TargetPoolAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -51107,120 +51911,58 @@ type VmEndpointNatMappingsListWarningData struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappingsListWarningData +func (s *TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnGateway: Represents a HA VPN gateway. HA VPN is a -// high-availability (HA) Cloud VPN solution that lets you securely -// connect your on-premises network to your Google Cloud Virtual Private -// Cloud network through an IPsec VPN connection in a single region. For -// more information about Cloud HA VPN solutions, see Cloud VPN -// topologies . -type VpnGateway struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. 
- Id uint64 `json:"id,omitempty,string"` +type TargetPoolInstanceHealth struct { + HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#vpnGateway for - // VPN gateways. + // Kind: [Output Only] Type of resource. Always + // compute#targetPoolInstanceHealth when checking the health of an + // instance. Kind string `json:"kind,omitempty"` - // LabelFingerprint: A fingerprint for the labels being applied to this - // VpnGateway, which is essentially a hash of the labels set used for - // optimistic locking. The fingerprint is initially generated by Compute - // Engine and changes after every request to modify or update labels. - // You must always provide an up-to-date fingerprint hash in order to - // update or change labels, otherwise the request will fail with error - // 412 conditionNotMet. To see the latest fingerprint, make a get() - // request to retrieve an VpnGateway. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels for this resource. These can only be added or modified - // by the setLabels method. Each label key/value pair must comply with - // RFC1035. Label values may be empty. - Labels map[string]string `json:"labels,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: URL of the network to which this VPN gateway is attached. - // Provided by the client when the VPN gateway is created. - Network string `json:"network,omitempty"` - - // Region: [Output Only] URL of the region where the VPN gateway - // resides. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // StackType: The stack type for this VPN gateway to identify the IP - // protocols that are enabled. If not specified, IPV4_ONLY will be used. - // - // Possible values: - // "IPV4_IPV6" - Enable VPN gateway with both IPv4 and IPv6 protocols. - // "IPV4_ONLY" - Enable VPN gateway with only IPv4 protocol. - StackType string `json:"stackType,omitempty"` - - // VpnInterfaces: The list of VPN interfaces associated with this VPN - // gateway. - VpnInterfaces []*VpnGatewayVpnGatewayInterface `json:"vpnInterfaces,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "HealthStatus") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *VpnGateway) MarshalJSON() ([]byte, error) { - type NoMethod VpnGateway +func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolInstanceHealth raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnGatewayAggregatedList struct { +// TargetPoolList: Contains a list of TargetPool resources. +type TargetPoolList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of VpnGateway resources. - Items map[string]VpnGatewaysScopedList `json:"items,omitempty"` + // Items: A list of TargetPool resources. + Items []*TargetPool `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#vpnGateway for - // VPN gateways. + // Kind: [Output Only] Type of resource. Always compute#targetPoolList + // for lists of target pools. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -51234,11 +51976,8 @@ type VpnGatewayAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *VpnGatewayAggregatedListWarning `json:"warning,omitempty"` + Warning *TargetPoolListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -51261,15 +52000,14 @@ type VpnGatewayAggregatedList struct { NullFields []string `json:"-"` } -func (s *VpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewayAggregatedList +func (s *TargetPoolList) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnGatewayAggregatedListWarning: [Output Only] Informational warning -// message. -type VpnGatewayAggregatedListWarning struct { +// TargetPoolListWarning: [Output Only] Informational warning message. +type TargetPoolListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -51291,6 +52029,9 @@ type VpnGatewayAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -51298,6 +52039,9 @@ type VpnGatewayAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -51331,7 +52075,7 @@ type VpnGatewayAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*VpnGatewayAggregatedListWarningData `json:"data,omitempty"` + Data []*TargetPoolListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -51354,13 +52098,13 @@ type VpnGatewayAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewayAggregatedListWarning +func (s *TargetPoolListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnGatewayAggregatedListWarningData struct { +type TargetPoolListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -51391,44 +52135,82 @@ type VpnGatewayAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewayAggregatedListWarningData +func (s *TargetPoolListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnGatewayList: Contains a list of VpnGateway resources. -type VpnGatewayList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +type TargetPoolsAddHealthCheckRequest struct { + // HealthChecks: The HttpHealthCheck to add to the target pool. + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` - // Items: A list of VpnGateway resources. - Items []*VpnGateway `json:"items,omitempty"` + // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Kind: [Output Only] Type of resource. Always compute#vpnGateway for - // VPN gateways. 
- Kind string `json:"kind,omitempty"` + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` +func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsAddHealthCheckRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +type TargetPoolsAddInstanceRequest struct { + // Instances: A full or partial URL to an instance to add to this target + // pool. This can be a full or partial URL. For example, the following + // are valid URLs: - + // https://www.googleapis.com/compute/v1/projects/project-id/zones/zone + // /instances/instance-name - + // projects/project-id/zones/zone/instances/instance-name - + // zones/zone/instances/instance-name + Instances []*InstanceReference `json:"instances,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *VpnGatewayListWarning `json:"warning,omitempty"` + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // ForceSendFields is a list of field names (e.g. "Id") to +func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsAddInstanceRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsRemoveHealthCheckRequest struct { + // HealthChecks: Health check URL to be removed. This can be a full or + // valid partial URL. 
For example, the following are valid URLs: - + // https://www.googleapis.com/compute/beta/projects/project + // /global/httpHealthChecks/health-check - + // projects/project/global/httpHealthChecks/health-check - + // global/httpHealthChecks/health-check + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthChecks") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -51436,8 +52218,35 @@ type VpnGatewayList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsRemoveHealthCheckRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsRemoveInstanceRequest struct { + // Instances: URLs of the instances to be removed from target pool. + Instances []*InstanceReference `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -51445,14 +52254,46 @@ type VpnGatewayList struct { NullFields []string `json:"-"` } -func (s *VpnGatewayList) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewayList +func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsRemoveInstanceRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnGatewayListWarning: [Output Only] Informational warning message. -type VpnGatewayListWarning struct { +type TargetPoolsScopedList struct { + // TargetPools: A list of target pools contained in this scope. + TargetPools []*TargetPool `json:"targetPools,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetPools") to + // unconditionally include in API requests. 
By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetPools") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolsScopedListWarning: Informational warning which replaces +// the list of addresses when the list is empty. +type TargetPoolsScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -51474,6 +52315,9 @@ type VpnGatewayListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -51481,6 +52325,9 @@ type VpnGatewayListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -51514,7 +52361,7 @@ type VpnGatewayListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*VpnGatewayListWarningData `json:"data,omitempty"` + Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -51537,13 +52384,13 @@ type VpnGatewayListWarning struct { NullFields []string `json:"-"` } -func (s *VpnGatewayListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewayListWarning +func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnGatewayListWarningData struct { +type TargetPoolsScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -51574,17 +52421,16 @@ type VpnGatewayListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnGatewayListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewayListWarningData +func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnGatewayStatus struct { - // VpnConnections: List of VPN connection for this VpnGateway. - VpnConnections []*VpnGatewayStatusVpnConnection `json:"vpnConnections,omitempty"` +type TargetReference struct { + Target string `json:"target,omitempty"` - // ForceSendFields is a list of field names (e.g. "VpnConnections") to + // ForceSendFields is a list of field names (e.g. "Target") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -51592,7 +52438,63 @@ type VpnGatewayStatus struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "VpnConnections") to + // NullFields is a list of field names (e.g. "Target") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetReference) MarshalJSON() ([]byte, error) { + type NoMethod TargetReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetBackendServiceRequest struct { + // Service: The URL of the new BackendService resource for the + // targetSslProxy. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Service") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxiesSetBackendServiceRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this + // TargetSslProxy. 
+ CertificateMap string `json:"certificateMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CertificateMap") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CertificateMap") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -51602,42 +52504,22 @@ type VpnGatewayStatus struct { NullFields []string `json:"-"` } -func (s *VpnGatewayStatus) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewayStatus +func (s *TargetSslProxiesSetCertificateMapRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxiesSetCertificateMapRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnGatewayStatusHighAvailabilityRequirementState: Describes the high -// availability requirement state for the VPN connection between this -// Cloud VPN gateway and a peer gateway. -type VpnGatewayStatusHighAvailabilityRequirementState struct { - // State: Indicates the high availability requirement state for the VPN - // connection. Valid values are CONNECTION_REDUNDANCY_MET, - // CONNECTION_REDUNDANCY_NOT_MET. - // - // Possible values: - // "CONNECTION_REDUNDANCY_MET" - VPN tunnels are configured with - // adequate redundancy from Cloud VPN gateway to the peer VPN gateway. - // For both GCP-to-non-GCP and GCP-to-GCP connections, the adequate - // redundancy is a pre-requirement for users to get 99.99% availability - // on GCP side; please note that for any connection, end-to-end 99.99% - // availability is subject to proper configuration on the peer VPN - // gateway. - // "CONNECTION_REDUNDANCY_NOT_MET" - VPN tunnels are not configured - // with adequate redundancy from the Cloud VPN gateway to the peer - // gateway - State string `json:"state,omitempty"` - - // UnsatisfiedReason: Indicates the reason why the VPN connection does - // not meet the high availability redundancy criteria/requirement. Valid - // values is INCOMPLETE_TUNNELS_COVERAGE. +type TargetSslProxiesSetProxyHeaderRequest struct { + // ProxyHeader: The new type of proxy header to append before sending + // data to the backend. NONE or PROXY_V1 are allowed. // // Possible values: - // "INCOMPLETE_TUNNELS_COVERAGE" - UnsatisfiedReason string `json:"unsatisfiedReason,omitempty"` + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` - // ForceSendFields is a list of field names (e.g. "State") to + // ForceSendFields is a list of field names (e.g. "ProxyHeader") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -51645,45 +52527,36 @@ type VpnGatewayStatusHighAvailabilityRequirementState struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "State") to include in API - // requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "ProxyHeader") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *VpnGatewayStatusHighAvailabilityRequirementState) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewayStatusHighAvailabilityRequirementState +func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxiesSetProxyHeaderRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnGatewayStatusTunnel: Contains some information about a VPN tunnel. -type VpnGatewayStatusTunnel struct { - // LocalGatewayInterface: The VPN gateway interface this VPN tunnel is - // associated with. - LocalGatewayInterface int64 `json:"localGatewayInterface,omitempty"` - - // PeerGatewayInterface: The peer gateway interface this VPN tunnel is - // connected to, the peer gateway could either be an external VPN - // gateway or GCP VPN gateway. - PeerGatewayInterface int64 `json:"peerGatewayInterface,omitempty"` - - // TunnelUrl: URL reference to the VPN tunnel. - TunnelUrl string `json:"tunnelUrl,omitempty"` +type TargetSslProxiesSetSslCertificatesRequest struct { + // SslCertificates: New set of URLs to SslCertificate resources to + // associate with this TargetSslProxy. At least one SSL certificate must + // be specified. Currently, you may specify up to 15 SSL certificates. + SslCertificates []string `json:"sslCertificates,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "LocalGatewayInterface") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LocalGatewayInterface") to + // NullFields is a list of field names (e.g. "SslCertificates") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -51693,42 +52566,89 @@ type VpnGatewayStatusTunnel struct { NullFields []string `json:"-"` } -func (s *VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewayStatusTunnel +func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxiesSetSslCertificatesRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnGatewayStatusVpnConnection: A VPN connection contains all VPN -// tunnels connected from this VpnGateway to the same peer gateway. The -// peer gateway could either be a external VPN gateway or GCP VPN -// gateway. -type VpnGatewayStatusVpnConnection struct { - // PeerExternalGateway: URL reference to the peer external VPN gateways - // to which the VPN tunnels in this VPN connection are connected. This - // field is mutually exclusive with peer_gcp_gateway. - PeerExternalGateway string `json:"peerExternalGateway,omitempty"` +// TargetSslProxy: Represents a Target SSL Proxy resource. A target SSL +// proxy is a component of a SSL Proxy load balancer. Global forwarding +// rules reference a target SSL proxy, and the target proxy then +// references an external backend service. For more information, read +// Using Target Proxies. +type TargetSslProxy struct { + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates + // will be ignored. + CertificateMap string `json:"certificateMap,omitempty"` - // PeerGcpGateway: URL reference to the peer side VPN gateways to which - // the VPN tunnels in this VPN connection are connected. This field is - // mutually exclusive with peer_gcp_gateway. - PeerGcpGateway string `json:"peerGcpGateway,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // State: HighAvailabilityRequirementState for the VPN connection. - State *VpnGatewayStatusHighAvailabilityRequirementState `json:"state,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // Tunnels: List of VPN tunnels that are in this VPN connection. - Tunnels []*VpnGatewayStatusTunnel `json:"tunnels,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` - // ForceSendFields is a list of field names (e.g. "PeerExternalGateway") - // to unconditionally include in API requests. By default, fields with + // Kind: [Output Only] Type of the resource. Always + // compute#targetSslProxy for target SSL proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. 
+ Name string `json:"name,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Service: URL to the BackendService resource. + Service string `json:"service,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to + // authenticate connections to Backends. At least one SSL certificate + // must be specified. Currently, you may specify up to 15 SSL + // certificates. sslCertificates do not apply when the load balancing + // scheme is set to INTERNAL_SELF_MANAGED. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // SslPolicy: URL of SslPolicy resource that will be associated with the + // TargetSslProxy resource. If not set, the TargetSslProxy resource will + // not have any SSL policy configured. + SslPolicy string `json:"sslPolicy,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CertificateMap") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "PeerExternalGateway") to + // NullFields is a list of field names (e.g. "CertificateMap") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -51738,37 +52658,41 @@ type VpnGatewayStatusVpnConnection struct { NullFields []string `json:"-"` } -func (s *VpnGatewayStatusVpnConnection) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewayStatusVpnConnection +func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnGatewayVpnGatewayInterface: A VPN gateway interface. -type VpnGatewayVpnGatewayInterface struct { - // Id: [Output Only] Numeric identifier for this VPN interface - // associated with the VPN gateway. - Id int64 `json:"id,omitempty"` +// TargetSslProxyList: Contains a list of TargetSslProxy resources. +type TargetSslProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // InterconnectAttachment: URL of the VLAN attachment - // (interconnectAttachment) resource for this VPN gateway interface. - // When the value of this field is present, the VPN gateway is used for - // IPsec-encrypted Cloud Interconnect; all egress or ingress traffic for - // this VPN gateway interface goes through the specified VLAN attachment - // resource. Not currently available publicly. - InterconnectAttachment string `json:"interconnectAttachment,omitempty"` + // Items: A list of TargetSslProxy resources. 
+ Items []*TargetSslProxy `json:"items,omitempty"` - // IpAddress: [Output Only] IP address for this VPN interface associated - // with the VPN gateway. The IP address could be either a regional - // external IP address or a regional internal IP address. The two IP - // addresses for a VPN gateway must be all regional external or regional - // internal IP addresses. There cannot be a mix of regional external IP - // addresses and regional internal IP addresses. For IPsec-encrypted - // Cloud Interconnect, the IP addresses for both interfaces could either - // be regional internal IP addresses or regional external IP addresses. - // For regular (non IPsec-encrypted Cloud Interconnect) HA VPN tunnels, - // the IP address must be a regional external IP address. - IpAddress string `json:"ipAddress,omitempty"` + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetSslProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with @@ -51787,20 +52711,89 @@ type VpnGatewayVpnGatewayInterface struct { NullFields []string `json:"-"` } -func (s *VpnGatewayVpnGatewayInterface) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewayVpnGatewayInterface +func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnGatewaysGetStatusResponse struct { - Result *VpnGatewayStatus `json:"result,omitempty"` +// TargetSslProxyListWarning: [Output Only] Informational warning +// message. +type TargetSslProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*TargetSslProxyListWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. "Result") to + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -51808,7 +52801,7 @@ type VpnGatewaysGetStatusResponse struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Result") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -51817,22 +52810,27 @@ type VpnGatewaysGetStatusResponse struct { NullFields []string `json:"-"` } -func (s *VpnGatewaysGetStatusResponse) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewaysGetStatusResponse +func (s *TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnGatewaysScopedList struct { - // VpnGateways: [Output Only] A list of VPN gateways contained in this - // scope. - VpnGateways []*VpnGateway `json:"vpnGateways,omitempty"` +type TargetSslProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Warning: [Output Only] Informational warning which replaces the list - // of addresses when the list is empty. - Warning *VpnGatewaysScopedListWarning `json:"warning,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "VpnGateways") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -51840,24 +52838,56 @@ type VpnGatewaysScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "VpnGateways") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *VpnGatewaysScopedList) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewaysScopedList +func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnGatewaysScopedListWarning: [Output Only] Informational warning -// which replaces the list of addresses when the list is empty. -type VpnGatewaysScopedListWarning struct { +type TargetTcpProxiesScopedList struct { + // TargetTcpProxies: A list of TargetTcpProxies contained in this scope. 
+ TargetTcpProxies []*TargetTcpProxy `json:"targetTcpProxies,omitempty"` + + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *TargetTcpProxiesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetTcpProxies") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetTcpProxies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxiesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type TargetTcpProxiesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -51879,6 +52909,9 @@ type VpnGatewaysScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -51886,6 +52919,9 @@ type VpnGatewaysScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -51919,7 +52955,7 @@ type VpnGatewaysScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*VpnGatewaysScopedListWarningData `json:"data,omitempty"` + Data []*TargetTcpProxiesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
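The ForceSendFields/NullFields comments repeated throughout these hunks describe how the generated marshalers decide which fields to serialize. A minimal sketch of that convention, under the assumption that this diff corresponds to the google.golang.org/api/compute/v1 generated package, using the TargetSslProxiesSetBackendServiceRequest type added earlier in this file (not part of the vendored patch itself):

    package main

    import (
        "fmt"

        compute "google.golang.org/api/compute/v1"
    )

    func main() {
        // Service is empty, so the generated marshaler omits it entirely.
        req := &compute.TargetSslProxiesSetBackendServiceRequest{}
        b, _ := req.MarshalJSON()
        fmt.Println(string(b)) // {}

        // Listing "Service" in ForceSendFields makes the generated
        // gensupport.MarshalJSON call emit the empty value explicitly,
        // which is how a Patch-style request can clear the field.
        req.ForceSendFields = []string{"Service"}
        b, _ = req.MarshalJSON()
        fmt.Println(string(b)) // {"service":""}
    }

NullFields works the same way but sends the listed fields as JSON null instead of their empty values.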
@@ -51942,13 +52978,13 @@ type VpnGatewaysScopedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewaysScopedListWarning +func (s *TargetTcpProxiesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnGatewaysScopedListWarningData struct { +type TargetTcpProxiesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -51979,15 +53015,78 @@ type VpnGatewaysScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VpnGatewaysScopedListWarningData +func (s *TargetTcpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnel: Represents a Cloud VPN Tunnel resource. For more -// information about VPN, read the the Cloud VPN Overview. -type VpnTunnel struct { +type TargetTcpProxiesSetBackendServiceRequest struct { + // Service: The URL of the new BackendService resource for the + // targetTcpProxy. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Service") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesSetBackendServiceRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxiesSetProxyHeaderRequest struct { + // ProxyHeader: The new type of proxy header to append before sending + // data to the backend. NONE or PROXY_V1 are allowed. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"ProxyHeader") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesSetProxyHeaderRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxy: Represents a Target TCP Proxy resource. A target TCP +// proxy is a component of a TCP Proxy load balancer. Global forwarding +// rules reference target TCP proxy, and the target proxy then +// references an external backend service. For more information, read +// TCP Proxy Load Balancing overview. +type TargetTcpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -51996,29 +53095,14 @@ type VpnTunnel struct { // property when you create the resource. Description string `json:"description,omitempty"` - // DetailedStatus: [Output Only] Detailed status message for the VPN - // tunnel. - DetailedStatus string `json:"detailedStatus,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // IkeVersion: IKE protocol version to use when establishing the VPN - // tunnel with the peer VPN gateway. Acceptable IKE versions are 1 or 2. - // The default version is 2. - IkeVersion int64 `json:"ikeVersion,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. + // Kind: [Output Only] Type of the resource. Always + // compute#targetTcpProxy for target TCP proxies. Kind string `json:"kind,omitempty"` - // LocalTrafficSelector: Local traffic selector to use when establishing - // the VPN tunnel with the peer VPN gateway. The value should be a CIDR - // formatted string, for example: 192.168.0.0/16. The ranges must be - // disjoint. Only IPv4 is supported. - LocalTrafficSelector []string `json:"localTrafficSelector,omitempty"` - // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -52028,113 +53112,35 @@ type VpnTunnel struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // PeerExternalGateway: URL of the peer side external VPN gateway to - // which this VPN tunnel is connected. Provided by the client when the - // VPN tunnel is created. This field is exclusive with the field - // peerGcpGateway. - PeerExternalGateway string `json:"peerExternalGateway,omitempty"` - - // PeerExternalGatewayInterface: The interface ID of the external VPN - // gateway to which this VPN tunnel is connected. Provided by the client - // when the VPN tunnel is created. - PeerExternalGatewayInterface int64 `json:"peerExternalGatewayInterface,omitempty"` + // ProxyBind: This field only applies when the forwarding rule that + // references this target proxy has a loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. 
When this field is set to true, Envoy proxies + // set up inbound traffic interception and bind to the IP address and + // port specified in the forwarding rule. This is generally useful when + // using Traffic Director to configure Envoy as a gateway or middle + // proxy (in other words, not a sidecar proxy). The Envoy proxy listens + // for inbound requests and handles requests when it receives them. The + // default is false. + ProxyBind bool `json:"proxyBind,omitempty"` - // PeerGcpGateway: URL of the peer side HA GCP VPN gateway to which this - // VPN tunnel is connected. Provided by the client when the VPN tunnel - // is created. This field can be used when creating highly available VPN - // from VPC network to VPC network, the field is exclusive with the - // field peerExternalGateway. If provided, the VPN tunnel will - // automatically use the same vpnGatewayInterface ID in the peer GCP VPN - // gateway. - PeerGcpGateway string `json:"peerGcpGateway,omitempty"` - - // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. - PeerIp string `json:"peerIp,omitempty"` + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` - // Region: [Output Only] URL of the region where the VPN tunnel resides. - // You must specify this field as part of the HTTP request URL. It is - // not settable as a field in the request body. + // Region: [Output Only] URL of the region where the regional TCP proxy + // resides. This field is not applicable to global TCP proxy. Region string `json:"region,omitempty"` - // RemoteTrafficSelector: Remote traffic selectors to use when - // establishing the VPN tunnel with the peer VPN gateway. The value - // should be a CIDR formatted string, for example: 192.168.0.0/16. The - // ranges should be disjoint. Only IPv4 is supported. - RemoteTrafficSelector []string `json:"remoteTrafficSelector,omitempty"` - - // Router: URL of the router resource to be used for dynamic routing. - Router string `json:"router,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SharedSecret: Shared secret used to set the secure session between - // the Cloud VPN gateway and the peer VPN gateway. - SharedSecret string `json:"sharedSecret,omitempty"` - - // SharedSecretHash: Hash of the shared secret. - SharedSecretHash string `json:"sharedSecretHash,omitempty"` - - // Status: [Output Only] The status of the VPN tunnel, which can be one - // of the following: - PROVISIONING: Resource is being allocated for the - // VPN tunnel. - WAITING_FOR_FULL_CONFIG: Waiting to receive all - // VPN-related configs from the user. Network, TargetVpnGateway, - // VpnTunnel, ForwardingRule, and Route resources are needed to setup - // the VPN tunnel. - FIRST_HANDSHAKE: Successful first handshake with - // the peer VPN. - ESTABLISHED: Secure session is successfully - // established with the peer VPN. - NETWORK_ERROR: Deprecated, replaced - // by NO_INCOMING_PACKETS - AUTHORIZATION_ERROR: Auth error (for - // example, bad shared secret). - NEGOTIATION_FAILURE: Handshake failed. - // - DEPROVISIONING: Resources are being deallocated for the VPN tunnel. - // - FAILED: Tunnel creation has failed and the tunnel is not ready to - // be used. - NO_INCOMING_PACKETS: No incoming packets from peer. 
- - // REJECTED: Tunnel configuration was rejected, can be result of being - // denied access. - ALLOCATING_RESOURCES: Cloud VPN is in the process of - // allocating all required resources. - STOPPED: Tunnel is stopped due - // to its Forwarding Rules being deleted for Classic VPN tunnels or the - // project is in frozen state. - PEER_IDENTITY_MISMATCH: Peer identity - // does not match peer IP, probably behind NAT. - - // TS_NARROWING_NOT_ALLOWED: Traffic selector narrowing not allowed for - // an HA-VPN tunnel. - // - // Possible values: - // "ALLOCATING_RESOURCES" - Cloud VPN is in the process of allocating - // all required resources (specifically, a borg task). - // "AUTHORIZATION_ERROR" - Auth error (e.g. bad shared secret). - // "DEPROVISIONING" - Resources is being deallocated for the VPN - // tunnel. - // "ESTABLISHED" - Secure session is successfully established with - // peer VPN. - // "FAILED" - Tunnel creation has failed and the tunnel is not ready - // to be used. - // "FIRST_HANDSHAKE" - Successful first handshake with peer VPN. - // "NEGOTIATION_FAILURE" - Handshake failed. - // "NETWORK_ERROR" - Deprecated, replaced by NO_INCOMING_PACKETS - // "NO_INCOMING_PACKETS" - No incoming packets from peer - // "PROVISIONING" - Resource is being allocated for the VPN tunnel. - // "REJECTED" - Tunnel configuration was rejected, can be result of - // being denylisted. - // "STOPPED" - Tunnel is stopped due to its Forwarding Rules being - // deleted. - // "WAITING_FOR_FULL_CONFIG" - Waiting to receive all VPN-related - // configs from user. Network, TargetVpnGateway, VpnTunnel, - // ForwardingRule and Route resources are needed to setup VPN tunnel. - Status string `json:"status,omitempty"` - - // TargetVpnGateway: URL of the Target VPN gateway with which this VPN - // tunnel is associated. Provided by the client when the VPN tunnel is - // created. - TargetVpnGateway string `json:"targetVpnGateway,omitempty"` - - // VpnGateway: URL of the VPN gateway with which this VPN tunnel is - // associated. Provided by the client when the VPN tunnel is created. - // This must be used (instead of target_vpn_gateway) if a High - // Availability VPN gateway resource is created. - VpnGateway string `json:"vpnGateway,omitempty"` - - // VpnGatewayInterface: The interface ID of the VPN gateway with which - // this VPN tunnel is associated. - VpnGatewayInterface int64 `json:"vpnGatewayInterface,omitempty"` + // Service: URL to the BackendService resource. + Service string `json:"service,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -52158,22 +53164,22 @@ type VpnTunnel struct { NullFields []string `json:"-"` } -func (s *VpnTunnel) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnel +func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelAggregatedList struct { +type TargetTcpProxyAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of VpnTunnelsScopedList resources. - Items map[string]VpnTunnelsScopedList `json:"items,omitempty"` + // Items: A list of TargetTcpProxiesScopedList resources. + Items map[string]TargetTcpProxiesScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. + // Kind: [Output Only] Type of resource. 
Always + // compute#targetTcpProxyAggregatedList for lists of Target TCP Proxies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -52191,7 +53197,7 @@ type VpnTunnelAggregatedList struct { Unreachables []string `json:"unreachables,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *VpnTunnelAggregatedListWarning `json:"warning,omitempty"` + Warning *TargetTcpProxyAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -52214,15 +53220,15 @@ type VpnTunnelAggregatedList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelAggregatedList +func (s *TargetTcpProxyAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelAggregatedListWarning: [Output Only] Informational warning -// message. -type VpnTunnelAggregatedListWarning struct { +// TargetTcpProxyAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetTcpProxyAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -52244,6 +53250,9 @@ type VpnTunnelAggregatedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -52251,6 +53260,9 @@ type VpnTunnelAggregatedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -52284,7 +53296,7 @@ type VpnTunnelAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*VpnTunnelAggregatedListWarningData `json:"data,omitempty"` + Data []*TargetTcpProxyAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
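The NextPageToken comments above spell out the paging contract for these list results. A small sketch of that loop (an assumption-laden illustration, not part of the vendored file), using the standard generated TargetTcpProxies.List call and a previously constructed *compute.Service:

    package example

    import compute "google.golang.org/api/compute/v1"

    // listAllTargetTcpProxies pages through TargetTcpProxyList results by
    // feeding each response's NextPageToken back in as the pageToken query
    // parameter, as the field comments describe.
    func listAllTargetTcpProxies(svc *compute.Service, project string) ([]*compute.TargetTcpProxy, error) {
        var all []*compute.TargetTcpProxy
        pageToken := ""
        for {
            resp, err := svc.TargetTcpProxies.List(project).PageToken(pageToken).Do()
            if err != nil {
                return nil, err
            }
            all = append(all, resp.Items...)
            if resp.NextPageToken == "" {
                return all, nil
            }
            pageToken = resp.NextPageToken
        }
    }

The aggregated-list types added in this hunk follow the same convention, with Items keyed by scope instead of a flat slice.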
@@ -52307,13 +53319,13 @@ type VpnTunnelAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelAggregatedListWarning +func (s *TargetTcpProxyAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelAggregatedListWarningData struct { +type TargetTcpProxyAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -52344,23 +53356,22 @@ type VpnTunnelAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelAggregatedListWarningData +func (s *TargetTcpProxyAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelList: Contains a list of VpnTunnel resources. -type VpnTunnelList struct { +// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. +type TargetTcpProxyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of VpnTunnel resources. - Items []*VpnTunnel `json:"items,omitempty"` + // Items: A list of TargetTcpProxy resources. + Items []*TargetTcpProxy `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -52375,7 +53386,7 @@ type VpnTunnelList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *VpnTunnelListWarning `json:"warning,omitempty"` + Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -52398,14 +53409,15 @@ type VpnTunnelList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelList +func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelListWarning: [Output Only] Informational warning message. -type VpnTunnelListWarning struct { +// TargetTcpProxyListWarning: [Output Only] Informational warning +// message. +type TargetTcpProxyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -52427,6 +53439,9 @@ type VpnTunnelListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -52434,6 +53449,9 @@ type VpnTunnelListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -52467,7 +53485,7 @@ type VpnTunnelListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*VpnTunnelListWarningData `json:"data,omitempty"` + Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -52490,13 +53508,13 @@ type VpnTunnelListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelListWarning +func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelListWarningData struct { +type TargetTcpProxyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -52527,21 +53545,135 @@ type VpnTunnelListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelListWarningData +func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelsScopedList struct { - // VpnTunnels: A list of VPN tunnels contained in this scope. - VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` +// TargetVpnGateway: Represents a Target VPN Gateway resource. The +// target VPN gateway resource represents a Classic Cloud VPN gateway. +// For more information, read the the Cloud VPN Overview. +type TargetVpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *VpnTunnelsScopedListWarning `json:"warning,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // ForceSendFields is a list of field names (e.g. "VpnTunnels") to + // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule + // resources. ForwardingRules are created using + // compute.forwardingRules.insert and associated with a VPN gateway. + ForwardingRules []string `json:"forwardingRules,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. 
This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: URL of the network to which this VPN gateway is attached. + // Provided by the client when the VPN gateway is created. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URL of the region where the target VPN gateway + // resides. You must specify this field as part of the HTTP request URL. + // It is not settable as a field in the request body. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] The status of the VPN gateway, which can be one + // of the following: CREATING, READY, FAILED, or DELETING. + // + // Possible values: + // "CREATING" + // "DELETING" + // "FAILED" + // "READY" + Status string `json:"status,omitempty"` + + // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. + // VpnTunnels are created using the compute.vpntunnels.insert method and + // associated with a VPN gateway. + Tunnels []string `json:"tunnels,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGateway + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetVpnGateway resources. + Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. 
+ Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -52549,8 +53681,8 @@ type VpnTunnelsScopedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "VpnTunnels") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -52558,15 +53690,15 @@ type VpnTunnelsScopedList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelsScopedList +func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelsScopedListWarning: Informational warning which replaces the -// list of addresses when the list is empty. -type VpnTunnelsScopedListWarning struct { +// TargetVpnGatewayAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetVpnGatewayAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -52588,6 +53720,9 @@ type VpnTunnelsScopedListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -52595,6 +53730,9 @@ type VpnTunnelsScopedListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. 
+ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -52628,7 +53766,7 @@ type VpnTunnelsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*VpnTunnelsScopedListWarningData `json:"data,omitempty"` + Data []*TargetVpnGatewayAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -52651,13 +53789,13 @@ type VpnTunnelsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelsScopedListWarning +func (s *TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelsScopedListWarningData struct { +type TargetVpnGatewayAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -52688,28 +53826,44 @@ type VpnTunnelsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelsScopedListWarningData +func (s *TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type WafExpressionSet struct { - // Aliases: A list of alternate IDs. The format should be: - E.g. - // XSS-stable Generic suffix like "stable" is particularly useful if a - // policy likes to avail newer set of expressions without having to - // change the policy. A given alias name can't be used for more than one - // entity set. - Aliases []string `json:"aliases,omitempty"` +// TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. +type TargetVpnGatewayList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Expressions: List of available expressions. - Expressions []*WafExpressionSetExpression `json:"expressions,omitempty"` + // Items: A list of TargetVpnGateway resources. + Items []*TargetVpnGateway `json:"items,omitempty"` - // Id: Google specified expression set ID. The format should be: - E.g. - // XSS-20170329 required - Id string `json:"id,omitempty"` + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` - // ForceSendFields is a list of field names (e.g. "Aliases") to + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. 
Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -52717,8 +53871,8 @@ type WafExpressionSet struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Aliases") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -52726,22 +53880,89 @@ type WafExpressionSet struct { NullFields []string `json:"-"` } -func (s *WafExpressionSet) MarshalJSON() ([]byte, error) { - type NoMethod WafExpressionSet +func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type WafExpressionSetExpression struct { - // Id: Expression ID should uniquely identify the origin of the - // expression. E.g. owasp-crs-v020901-id973337 identifies Owasp core - // rule set version 2.9.1 rule id 973337. The ID could be used to - // determine the individual attack definition that has been detected. It - // could also be used to exclude it from the policy in case of false - // positive. required - Id string `json:"id,omitempty"` +// TargetVpnGatewayListWarning: [Output Only] Informational warning +// message. +type TargetVpnGatewayListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*TargetVpnGatewayListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -52749,7 +53970,7 @@ type WafExpressionSetExpression struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -52758,51 +53979,494 @@ type WafExpressionSetExpression struct { NullFields []string `json:"-"` } -func (s *WafExpressionSetExpression) MarshalJSON() ([]byte, error) { - type NoMethod WafExpressionSetExpression +func (s *TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// WeightedBackendService: In contrast to a single BackendService in -// HttpRouteAction to which all matching traffic is directed to, -// WeightedBackendService allows traffic to be split across multiple -// backend services. The volume of traffic for each backend service is -// proportional to the weight specified in each WeightedBackendService -type WeightedBackendService struct { - // BackendService: The full or partial URL to the default BackendService - // resource. Before forwarding the request to backendService, the load - // balancer applies any relevant headerActions specified as part of this - // backendServiceWeight. - BackendService string `json:"backendService,omitempty"` +type TargetVpnGatewayListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewaysScopedList struct { + // TargetVpnGateways: [Output Only] A list of target VPN gateways + // contained in this scope. + TargetVpnGateways []*TargetVpnGateway `json:"targetVpnGateways,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *TargetVpnGatewaysScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"TargetVpnGateways") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetVpnGateways") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewaysScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewaysScopedListWarning: [Output Only] Informational +// warning which replaces the list of addresses when the list is empty. +type TargetVpnGatewaysScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. 
We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*TargetVpnGatewaysScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewaysScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewaysScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewaysScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TestFailure struct { + // ActualOutputUrl: The actual output URL evaluated by a load balancer + // containing the scheme, host, path and query parameters. + ActualOutputUrl string `json:"actualOutputUrl,omitempty"` + + // ActualRedirectResponseCode: Actual HTTP status code for rule with + // `urlRedirect` calculated by load balancer + ActualRedirectResponseCode int64 `json:"actualRedirectResponseCode,omitempty"` + + // ActualService: BackendService or BackendBucket returned by load + // balancer. + ActualService string `json:"actualService,omitempty"` + + // ExpectedOutputUrl: The expected output URL evaluated by a load + // balancer containing the scheme, host, path and query parameters. + ExpectedOutputUrl string `json:"expectedOutputUrl,omitempty"` + + // ExpectedRedirectResponseCode: Expected HTTP status code for rule with + // `urlRedirect` calculated by load balancer + ExpectedRedirectResponseCode int64 `json:"expectedRedirectResponseCode,omitempty"` + + // ExpectedService: Expected BackendService or BackendBucket resource + // the given URL should be mapped to. + ExpectedService string `json:"expectedService,omitempty"` + + // Headers: HTTP headers of the request. + Headers []*UrlMapTestHeader `json:"headers,omitempty"` + + // Host: Host portion of the URL. + Host string `json:"host,omitempty"` + + // Path: Path portion including query parameters in the URL. + Path string `json:"path,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ActualOutputUrl") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ActualOutputUrl") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *TestFailure) MarshalJSON() ([]byte, error) { + type NoMethod TestFailure + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TestPermissionsRequest struct { + // Permissions: The set of permissions to check for the 'resource'. + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestPermissionsRequest) MarshalJSON() ([]byte, error) { + type NoMethod TestPermissionsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TestPermissionsResponse struct { + // Permissions: A subset of `TestPermissionsRequest.permissions` that + // the caller is allowed. + Permissions []string `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { + type NoMethod TestPermissionsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Uint128 struct { + High uint64 `json:"high,omitempty,string"` + + Low uint64 `json:"low,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "High") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. 
+ // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "High") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Uint128) MarshalJSON() ([]byte, error) { + type NoMethod Uint128 + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMap: Represents a URL Map resource. Compute Engine has two URL Map +// resources: * Global (/compute/docs/reference/rest/v1/urlMaps) * +// Regional (/compute/docs/reference/rest/v1/regionUrlMaps) A URL map +// resource is a component of certain types of cloud load balancers and +// Traffic Director: * urlMaps are used by external HTTP(S) load +// balancers and Traffic Director. * regionUrlMaps are used by internal +// HTTP(S) load balancers. For a list of supported URL map features by +// the load balancer type, see the Load balancing features: Routing and +// traffic management table. For a list of supported URL map features +// for Traffic Director, see the Traffic Director features: Routing and +// traffic management table. This resource defines mappings from +// hostnames and URL paths to either a backend service or a backend +// bucket. To use the global urlMaps resource, the backend service must +// have a loadBalancingScheme of either EXTERNAL or +// INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend +// service must have a loadBalancingScheme of INTERNAL_MANAGED. For more +// information, read URL Map Concepts. +type UrlMap struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // DefaultRouteAction: defaultRouteAction takes effect when none of the + // hostRules match. The load balancer performs advanced routing actions, + // such as URL rewrites and header transformations, before forwarding + // the request to the selected backend. If defaultRouteAction specifies + // any weightedBackendServices, defaultService must not be set. + // Conversely if defaultService is set, defaultRouteAction cannot + // contain any weightedBackendServices. Only one of defaultRouteAction + // or defaultUrlRedirect must be set. URL maps for Classic external + // HTTP(S) load balancers only support the urlRewrite action within + // defaultRouteAction. defaultRouteAction has no effect when the URL map + // is bound to a target gRPC proxy that has the validateForProxyless + // field set to true. + DefaultRouteAction *HttpRouteAction `json:"defaultRouteAction,omitempty"` + + // DefaultService: The full or partial URL of the defaultService + // resource to which traffic is directed if none of the hostRules match. + // If defaultRouteAction is also specified, advanced routing actions, + // such as URL rewrites, take effect before sending the request to the + // backend. However, if defaultService is specified, defaultRouteAction + // cannot contain any weightedBackendServices. Conversely, if + // routeAction specifies any weightedBackendServices, service must not + // be specified. 
Only one of defaultService, defaultUrlRedirect , or + // defaultRouteAction.weightedBackendService must be set. defaultService + // has no effect when the URL map is bound to a target gRPC proxy that + // has the validateForProxyless field set to true. + DefaultService string `json:"defaultService,omitempty"` + + // DefaultUrlRedirect: When none of the specified hostRules match, the + // request is redirected to a URL specified by defaultUrlRedirect. If + // defaultUrlRedirect is specified, defaultService or defaultRouteAction + // must not be set. Not supported when the URL map is bound to a target + // gRPC proxy. + DefaultUrlRedirect *HttpRedirectAction `json:"defaultUrlRedirect,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field is ignored when inserting a UrlMap. An up-to-date fingerprint + // must be provided in order to update the UrlMap, otherwise the request + // will fail with error 412 conditionNotMet. To see the latest + // fingerprint, make a get() request to retrieve a UrlMap. + Fingerprint string `json:"fingerprint,omitempty"` // HeaderAction: Specifies changes to request and response headers that - // need to take effect for the selected backendService. headerAction - // specified here take effect before headerAction in the enclosing - // HttpRouteRule, PathMatcher and UrlMap. headerAction is not supported - // for load balancers that have their loadBalancingScheme set to - // EXTERNAL. Not supported when the URL map is bound to a target gRPC - // proxy that has validateForProxyless field set to true. + // need to take effect for the selected backendService. The headerAction + // specified here take effect after headerAction specified under + // pathMatcher. headerAction is not supported for load balancers that + // have their loadBalancingScheme set to EXTERNAL. Not supported when + // the URL map is bound to a target gRPC proxy that has + // validateForProxyless field set to true. HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` - // Weight: Specifies the fraction of traffic sent to a backend service, - // computed as weight / (sum of all weightedBackendService weights in - // routeAction) . The selection of a backend service is determined only - // for new traffic. Once a user's request has been directed to a backend - // service, subsequent requests are sent to the same backend service as - // determined by the backend service's session affinity policy. The - // value must be from 0 to 1000. - Weight int64 `json:"weight,omitempty"` + // HostRules: The list of host rules to use against the URL. + HostRules []*HostRule `json:"hostRules,omitempty"` - // ForceSendFields is a list of field names (e.g. "BackendService") to - // unconditionally include in API requests. By default, fields with + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#urlMaps for + // url maps. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. 
Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PathMatchers: The list of named PathMatchers to use against the URL. + PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` + + // Region: [Output Only] URL of the region where the regional URL map + // resides. This field is not applicable to global URL maps. You must + // specify this field as part of the HTTP request URL. It is not + // settable as a field in the request body. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Tests: The list of expected URL mapping tests. Request to update the + // UrlMap succeeds only if all test cases pass. You can specify a + // maximum of 100 tests per UrlMap. Not supported when the URL map is + // bound to a target gRPC proxy that has validateForProxyless field set + // to true. + Tests []*UrlMapTest `json:"tests,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BackendService") to + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -52812,22 +54476,22 @@ type WeightedBackendService struct { NullFields []string `json:"-"` } -func (s *WeightedBackendService) MarshalJSON() ([]byte, error) { - type NoMethod WeightedBackendService +func (s *UrlMap) MarshalJSON() ([]byte, error) { + type NoMethod UrlMap raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type XpnHostList struct { +// UrlMapList: Contains a list of UrlMap resources. +type UrlMapList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: [Output Only] A list of shared VPC host project URLs. - Items []*Project `json:"items,omitempty"` + // Items: A list of UrlMap resources. + Items []*UrlMap `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#xpnHostList for - // lists of shared VPC hosts. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -52842,7 +54506,7 @@ type XpnHostList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. 
- Warning *XpnHostListWarning `json:"warning,omitempty"` + Warning *UrlMapListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -52865,14 +54529,14 @@ type XpnHostList struct { NullFields []string `json:"-"` } -func (s *XpnHostList) MarshalJSON() ([]byte, error) { - type NoMethod XpnHostList +func (s *UrlMapList) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// XpnHostListWarning: [Output Only] Informational warning message. -type XpnHostListWarning struct { +// UrlMapListWarning: [Output Only] Informational warning message. +type UrlMapListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -52894,6 +54558,9 @@ type XpnHostListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -52901,6 +54568,9 @@ type XpnHostListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -52934,7 +54604,7 @@ type XpnHostListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*XpnHostListWarningData `json:"data,omitempty"` + Data []*UrlMapListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -52957,13 +54627,13 @@ type XpnHostListWarning struct { NullFields []string `json:"-"` } -func (s *XpnHostListWarning) MarshalJSON() ([]byte, error) { - type NoMethod XpnHostListWarning +func (s *UrlMapListWarning) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type XpnHostListWarningData struct { +type UrlMapListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -52994,27 +54664,16 @@ type XpnHostListWarningData struct { NullFields []string `json:"-"` } -func (s *XpnHostListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod XpnHostListWarningData +func (s *UrlMapListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// XpnResourceId: Service resource (a.k.a service project) ID. 
-type XpnResourceId struct { - // Id: The ID of the service resource. In the case of projects, this - // field supports project id (e.g., my-project-123) and project number - // (e.g. 12345678). - Id string `json:"id,omitempty"` - - // Type: The type of the service resource. - // - // Possible values: - // "PROJECT" - // "XPN_RESOURCE_TYPE_UNSPECIFIED" - Type string `json:"type,omitempty"` +type UrlMapReference struct { + UrlMap string `json:"urlMap,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "UrlMap") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -53022,7 +54681,7 @@ type XpnResourceId struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "UrlMap") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -53031,97 +54690,155 @@ type XpnResourceId struct { NullFields []string `json:"-"` } -func (s *XpnResourceId) MarshalJSON() ([]byte, error) { - type NoMethod XpnResourceId +func (s *UrlMapReference) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapReference raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Zone: Represents a Zone resource. A zone is a deployment area. These -// deployment areas are subsets of a region. For example the zone -// us-east1-a is located in the us-east1 region. For more information, -// read Regions and Zones. -type Zone struct { - // AvailableCpuPlatforms: [Output Only] Available cpu/platform - // selections for the zone. - AvailableCpuPlatforms []string `json:"availableCpuPlatforms,omitempty"` +// UrlMapTest: Message for the expected URL mappings. +type UrlMapTest struct { + // Description: Description of this test case. + Description string `json:"description,omitempty"` - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` + // ExpectedOutputUrl: The expected output URL evaluated by the load + // balancer containing the scheme, host, path and query parameters. For + // rules that forward requests to backends, the test passes only when + // expectedOutputUrl matches the request forwarded by the load balancer + // to backends. For rules with urlRewrite, the test verifies that the + // forwarded request matches hostRewrite and pathPrefixRewrite in the + // urlRewrite action. When service is specified, expectedOutputUrl`s + // scheme is ignored. For rules with urlRedirect, the test passes only + // if expectedOutputUrl matches the URL in the load balancer's redirect + // response. If urlRedirect specifies https_redirect, the test passes + // only if the scheme in expectedOutputUrl is also set to HTTPS. If + // urlRedirect specifies strip_query, the test passes only if + // expectedOutputUrl does not contain any query parameters. + // expectedOutputUrl is optional when service is specified. 
+ ExpectedOutputUrl string `json:"expectedOutputUrl,omitempty"` - // Deprecated -- [Output Only] The deprecation status associated with - // this zone. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + // ExpectedRedirectResponseCode: For rules with urlRedirect, the test + // passes only if expectedRedirectResponseCode matches the HTTP status + // code in load balancer's redirect response. + // expectedRedirectResponseCode cannot be set when service is set. + ExpectedRedirectResponseCode int64 `json:"expectedRedirectResponseCode,omitempty"` - // Description: [Output Only] Textual description of the resource. - Description string `json:"description,omitempty"` + // Headers: HTTP headers for this request. If headers contains a host + // header, then host must also match the header value. + Headers []*UrlMapTestHeader `json:"headers,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // Host: Host portion of the URL. If headers contains a host header, + // then host must also match the header value. + Host string `json:"host,omitempty"` - // Kind: [Output Only] Type of the resource. Always compute#zone for - // zones. - Kind string `json:"kind,omitempty"` + // Path: Path portion of the URL. + Path string `json:"path,omitempty"` - // Name: [Output Only] Name of the resource. + // Service: Expected BackendService or BackendBucket resource the given + // URL should be mapped to. The service field cannot be set if + // expectedRedirectResponseCode is set. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapTest) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapTest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapTestHeader: HTTP headers used in UrlMapTests. +type UrlMapTestHeader struct { + // Name: Header name. Name string `json:"name,omitempty"` - // Region: [Output Only] Full URL reference to the region which hosts - // the zone. - Region string `json:"region,omitempty"` + // Value: Header value. + Value string `json:"value,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Status: [Output Only] Status of the zone, either UP or DOWN. - // - // Possible values: - // "DOWN" - // "UP" - Status string `json:"status,omitempty"` + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // SupportsPzs: [Output Only] Reserved for future use. - SupportsPzs bool `json:"supportsPzs,omitempty"` +func (s *UrlMapTestHeader) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapTestHeader + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +// UrlMapValidationResult: Message representing the validation result +// for a UrlMap. +type UrlMapValidationResult struct { + LoadErrors []string `json:"loadErrors,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "AvailableCpuPlatforms") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // LoadSucceeded: Whether the given UrlMap can be successfully loaded. + // If false, 'loadErrors' indicates the reasons. + LoadSucceeded bool `json:"loadSucceeded,omitempty"` + + TestFailures []*TestFailure `json:"testFailures,omitempty"` + + // TestPassed: If successfully loaded, this field indicates whether the + // test passed. If false, 'testFailures's indicate the reason of + // failure. + TestPassed bool `json:"testPassed,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LoadErrors") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AvailableCpuPlatforms") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "LoadErrors") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Zone) MarshalJSON() ([]byte, error) { - type NoMethod Zone +func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapValidationResult raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ZoneList: Contains a list of zone resources. -type ZoneList struct { +type UrlMapsAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Zone resources. - Items []*Zone `json:"items,omitempty"` + // Items: A list of UrlMapsScopedList resources. + Items map[string]UrlMapsScopedList `json:"items,omitempty"` // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -53137,8 +54854,11 @@ type ZoneList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *ZoneListWarning `json:"warning,omitempty"` + Warning *UrlMapsAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -53161,14 +54881,15 @@ type ZoneList struct { NullFields []string `json:"-"` } -func (s *ZoneList) MarshalJSON() ([]byte, error) { - type NoMethod ZoneList +func (s *UrlMapsAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ZoneListWarning: [Output Only] Informational warning message. -type ZoneListWarning struct { +// UrlMapsAggregatedListWarning: [Output Only] Informational warning +// message. +type UrlMapsAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -53190,6 +54911,9 @@ type ZoneListWarning struct { // overridden. Deprecated unused field. // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type @@ -53197,6 +54921,9 @@ type ZoneListWarning struct { // not assigned to an instance on the network. // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL // refers to an instance that does not exist. // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance @@ -53230,7 +54957,7 @@ type ZoneListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. 
// For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*ZoneListWarningData `json:"data,omitempty"` + Data []*UrlMapsAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -53253,13 +54980,13 @@ type ZoneListWarning struct { NullFields []string `json:"-"` } -func (s *ZoneListWarning) MarshalJSON() ([]byte, error) { - type NoMethod ZoneListWarning +func (s *UrlMapsAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ZoneListWarningData struct { +type UrlMapsAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -53290,25 +55017,21 @@ type ZoneListWarningData struct { NullFields []string `json:"-"` } -func (s *ZoneListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod ZoneListWarningData +func (s *UrlMapsAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ZoneSetLabelsRequest struct { - // LabelFingerprint: The fingerprint of the previous set of labels for - // this resource, used to detect conflicts. The fingerprint is initially - // generated by Compute Engine and changes after every request to modify - // or update labels. You must always provide an up-to-date fingerprint - // hash in order to update or change labels. Make a get() request to the - // resource to get the latest fingerprint. - LabelFingerprint string `json:"labelFingerprint,omitempty"` +type UrlMapsScopedList struct { + // UrlMaps: A list of UrlMaps contained in this scope. + UrlMaps []*UrlMap `json:"urlMaps,omitempty"` - // Labels: The labels to set for this resource. - Labels map[string]string `json:"labels,omitempty"` + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *UrlMapsScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to + // ForceSendFields is a list of field names (e.g. "UrlMaps") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -53316,38 +55039,98 @@ type ZoneSetLabelsRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LabelFingerprint") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "UrlMaps") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { - type NoMethod ZoneSetLabelsRequest +func (s *UrlMapsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ZoneSetPolicyRequest struct { - // Bindings: Flatten Policy to create a backwacd compatible wire-format. - // Deprecated. Use 'policy' to specify bindings. - Bindings []*Binding `json:"bindings,omitempty"` +// UrlMapsScopedListWarning: Informational warning which replaces the +// list of backend services when the list is empty. +type UrlMapsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. 
+ // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` - // Etag: Flatten Policy to create a backward compatible wire-format. - // Deprecated. Use 'policy' to specify the etag. - Etag string `json:"etag,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*UrlMapsScopedListWarningData `json:"data,omitempty"` - // Policy: REQUIRED: The complete policy to be applied to the - // 'resource'. The size of the policy is limited to a few 10s of KB. An - // empty policy is in general a valid policy but certain services (like - // Projects) might reject them. - Policy *Policy `json:"policy,omitempty"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Bindings") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -53355,8 +55138,8 @@ type ZoneSetPolicyRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bindings") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -53364,15 +55147,14912 @@ type ZoneSetPolicyRequest struct { NullFields []string `json:"-"` } -func (s *ZoneSetPolicyRequest) MarshalJSON() ([]byte, error) { - type NoMethod ZoneSetPolicyRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +func (s *UrlMapsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsValidateRequest struct { + // LoadBalancingSchemes: Specifies the load balancer type(s) this + // validation request is for. Use EXTERNAL_MANAGED for HTTP/HTTPS + // External Global Load Balancer with Advanced Traffic Management. Use + // EXTERNAL for Classic HTTP/HTTPS External Global Load Balancer. Other + // load balancer types are not supported. For more information, refer to + // Choosing a load balancer. If unspecified, the load balancing scheme + // will be inferred from the backend service resources this URL map + // references. If that can not be inferred (for example, this URL map + // only references backend buckets, or this Url map is for rewrites and + // redirects only and doesn't reference any backends), EXTERNAL will be + // used as the default type. If specified, the scheme(s) must not + // conflict with the load balancing scheme of the backend service + // resources this Url map references. + // + // Possible values: + // "EXTERNAL" - Signifies that this will be used for Classic L7 + // External Load Balancing. + // "EXTERNAL_MANAGED" - Signifies that this will be used for + // Envoy-based L7 External Load Balancing. + // "LOAD_BALANCING_SCHEME_UNSPECIFIED" - If unspecified, the + // validation will try to infer the scheme from the backend service + // resources this Url map references. If the inferrence is not possible, + // EXTERNAL will be used as the default type. + LoadBalancingSchemes []string `json:"loadBalancingSchemes,omitempty"` + + // Resource: Content of the UrlMap to be validated. + Resource *UrlMap `json:"resource,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "LoadBalancingSchemes") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LoadBalancingSchemes") to + // include in API requests with the JSON null value. 
By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsValidateRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsValidateResponse struct { + Result *UrlMapValidationResult `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsValidateResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlRewrite: The spec for modifying the path before sending the +// request to the matched backend service. +type UrlRewrite struct { + // HostRewrite: Before forwarding the request to the selected service, + // the request's host header is replaced with contents of hostRewrite. + // The value must be from 1 to 255 characters. + HostRewrite string `json:"hostRewrite,omitempty"` + + // PathPrefixRewrite: Before forwarding the request to the selected + // backend service, the matching portion of the request's path is + // replaced by pathPrefixRewrite. The value must be from 1 to 1024 + // characters. + PathPrefixRewrite string `json:"pathPrefixRewrite,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HostRewrite") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HostRewrite") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *UrlRewrite) MarshalJSON() ([]byte, error) { + type NoMethod UrlRewrite + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsableSubnetwork: Subnetwork which the current user has +// compute.subnetworks.use permission on. +type UsableSubnetwork struct { + // ExternalIpv6Prefix: [Output Only] The external IPv6 address range + // that is assigned to this subnetwork. + ExternalIpv6Prefix string `json:"externalIpv6Prefix,omitempty"` + + // InternalIpv6Prefix: [Output Only] The internal IPv6 address range + // that is assigned to this subnetwork. + InternalIpv6Prefix string `json:"internalIpv6Prefix,omitempty"` + + // IpCidrRange: The range of internal addresses that are owned by this + // subnetwork. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // Ipv6AccessType: The access type of IPv6 address this subnet holds. + // It's immutable and can only be specified during creation or the first + // time the subnet is updated into IPV4_IPV6 dual stack. + // + // Possible values: + // "EXTERNAL" - VMs on this subnet will be assigned IPv6 addresses + // that are accessible via the Internet, as well as the VPC network. + // "INTERNAL" - VMs on this subnet will be assigned IPv6 addresses + // that are only accessible over the VPC network. + Ipv6AccessType string `json:"ipv6AccessType,omitempty"` + + // Network: Network URL. + Network string `json:"network,omitempty"` + + // Purpose: The purpose of the resource. This field can be either + // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with + // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created + // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If + // unspecified, the purpose defaults to PRIVATE_RFC_1918. The + // enableFlowLogs field isn't supported with the purpose field set to + // INTERNAL_HTTPS_LOAD_BALANCER. + // + // Possible values: + // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal + // HTTP(S) Load Balancing. + // "PRIVATE" - Regular user created or automatically created subnet. + // "PRIVATE_RFC_1918" - Regular user created or automatically created + // subnet. + // "PRIVATE_SERVICE_CONNECT" - Subnetworks created for Private Service + // Connect in the producer network. + // "REGIONAL_MANAGED_PROXY" - Subnetwork used for Regional + // Internal/External HTTP(S) Load Balancing. + Purpose string `json:"purpose,omitempty"` + + // Role: The role of subnetwork. Currently, this field is only used when + // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to + // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being + // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // + // Possible values: + // "ACTIVE" - The ACTIVE subnet that is currently used. + // "BACKUP" - The BACKUP subnet that could be promoted to ACTIVE. + Role string `json:"role,omitempty"` + + // SecondaryIpRanges: Secondary IP ranges. + SecondaryIpRanges []*UsableSubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` + + // StackType: The stack type for the subnet. If set to IPV4_ONLY, new + // VMs in the subnet are assigned IPv4 addresses only. If set to + // IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 + // addresses. If not specified, IPV4_ONLY is used. 
This field can be + // both set at resource creation time and updated using patch. + // + // Possible values: + // "IPV4_IPV6" - New VMs in this subnet can have both IPv4 and IPv6 + // addresses. + // "IPV4_ONLY" - New VMs in this subnet will only be assigned IPv4 + // addresses. + StackType string `json:"stackType,omitempty"` + + // Subnetwork: Subnetwork URL. + Subnetwork string `json:"subnetwork,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExternalIpv6Prefix") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExternalIpv6Prefix") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetwork) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetwork + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsableSubnetworkSecondaryRange: Secondary IP range of a usable +// subnetwork. +type UsableSubnetworkSecondaryRange struct { + // IpCidrRange: The range of IP addresses belonging to this subnetwork + // secondary range. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // RangeName: The name associated with this subnetwork secondary range, + // used when adding an alias IP range to a VM instance. The name must be + // 1-63 characters long, and comply with RFC1035. The name must be + // unique within the subnetwork. + RangeName string `json:"rangeName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetworkSecondaryRange + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UsableSubnetworksAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output] A list of usable subnetwork URLs. 
+ Items []*UsableSubnetwork `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#usableSubnetworksAggregatedList for aggregated lists of + // usable subnetworks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. In special cases listUsable may return 0 subnetworks and + // nextPageToken which still should be used to get the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *UsableSubnetworksAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetworksAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetworksAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsableSubnetworksAggregatedListWarning: [Output Only] Informational +// warning message. +type UsableSubnetworksAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*UsableSubnetworksAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *UsableSubnetworksAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetworksAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UsableSubnetworksAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetworksAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetworksAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsageExportLocation: The location in Cloud Storage and naming method +// of the daily usage report. Contains bucket_name and report_name +// prefix. +type UsageExportLocation struct { + // BucketName: The name of an existing bucket in Cloud Storage where the + // usage report object is stored. The Google Service Account is granted + // write access to this bucket. This can either be the bucket name by + // itself, such as example-bucket, or the bucket name with gs:// or + // https://storage.googleapis.com/ in front of it, such as + // gs://example-bucket. + BucketName string `json:"bucketName,omitempty"` + + // ReportNamePrefix: An optional prefix for the name of the usage report + // object stored in bucketName. If not supplied, defaults to usage_gce. + // The report is stored as a CSV file named + // report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the + // usage according to Pacific Time. If you supply a prefix, it should + // conform to Cloud Storage object naming conventions. + ReportNamePrefix string `json:"reportNamePrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BucketName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { + type NoMethod UsageExportLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VmEndpointNatMappings: Contain information of Nat mapping for a VM +// endpoint (i.e., NIC). +type VmEndpointNatMappings struct { + // InstanceName: Name of the VM instance which the endpoint belongs to + InstanceName string `json:"instanceName,omitempty"` + + InterfaceNatMappings []*VmEndpointNatMappingsInterfaceNatMappings `json:"interfaceNatMappings,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappings) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VmEndpointNatMappingsInterfaceNatMappings: Contain information of Nat +// mapping for an interface of this endpoint. +type VmEndpointNatMappingsInterfaceNatMappings struct { + // DrainNatIpPortRanges: List of all drain IP:port-range mappings + // assigned to this interface. These ranges are inclusive, that is, both + // the first and the last ports can be used for NAT. Example: + // ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. + DrainNatIpPortRanges []string `json:"drainNatIpPortRanges,omitempty"` + + // NatIpPortRanges: A list of all IP:port-range mappings assigned to + // this interface. These ranges are inclusive, that is, both the first + // and the last ports can be used for NAT. Example: + // ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. + NatIpPortRanges []string `json:"natIpPortRanges,omitempty"` + + // NumTotalDrainNatPorts: Total number of drain ports across all NAT IPs + // allocated to this interface. It equals to the aggregated port number + // in the field drain_nat_ip_port_ranges. 
+ NumTotalDrainNatPorts int64 `json:"numTotalDrainNatPorts,omitempty"` + + // NumTotalNatPorts: Total number of ports across all NAT IPs allocated + // to this interface. It equals to the aggregated port number in the + // field nat_ip_port_ranges. + NumTotalNatPorts int64 `json:"numTotalNatPorts,omitempty"` + + // RuleMappings: Information about mappings provided by rules in this + // NAT. + RuleMappings []*VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings `json:"ruleMappings,omitempty"` + + // SourceAliasIpRange: Alias IP range for this interface endpoint. It + // will be a private (RFC 1918) IP range. Examples: "10.33.4.55/32", or + // "192.168.5.0/24". + SourceAliasIpRange string `json:"sourceAliasIpRange,omitempty"` + + // SourceVirtualIp: Primary IP of the VM for this NIC. + SourceVirtualIp string `json:"sourceVirtualIp,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "DrainNatIpPortRanges") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DrainNatIpPortRanges") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsInterfaceNatMappings) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsInterfaceNatMappings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings: Contains +// information of NAT Mappings provided by a NAT Rule. +type VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings struct { + // DrainNatIpPortRanges: List of all drain IP:port-range mappings + // assigned to this interface by this rule. These ranges are inclusive, + // that is, both the first and the last ports can be used for NAT. + // Example: ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. + DrainNatIpPortRanges []string `json:"drainNatIpPortRanges,omitempty"` + + // NatIpPortRanges: A list of all IP:port-range mappings assigned to + // this interface by this rule. These ranges are inclusive, that is, + // both the first and the last ports can be used for NAT. Example: + // ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. + NatIpPortRanges []string `json:"natIpPortRanges,omitempty"` + + // NumTotalDrainNatPorts: Total number of drain ports across all NAT IPs + // allocated to this interface by this rule. It equals the aggregated + // port number in the field drain_nat_ip_port_ranges. + NumTotalDrainNatPorts int64 `json:"numTotalDrainNatPorts,omitempty"` + + // NumTotalNatPorts: Total number of ports across all NAT IPs allocated + // to this interface by this rule. It equals the aggregated port number + // in the field nat_ip_port_ranges. + NumTotalNatPorts int64 `json:"numTotalNatPorts,omitempty"` + + // RuleNumber: Rule number of the NAT Rule. 
+ RuleNumber int64 `json:"ruleNumber,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "DrainNatIpPortRanges") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DrainNatIpPortRanges") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VmEndpointNatMappingsList: Contains a list of VmEndpointNatMappings. +type VmEndpointNatMappingsList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#vmEndpointNatMappingsList for lists of Nat mappings of VM + // endpoints. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Result: [Output Only] A list of Nat mapping information of VM + // endpoints. + Result []*VmEndpointNatMappings `json:"result,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VmEndpointNatMappingsListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsList) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VmEndpointNatMappingsListWarning: [Output Only] Informational warning +// message. +type VmEndpointNatMappingsListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. 
+ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*VmEndpointNatMappingsListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VmEndpointNatMappingsListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
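Every *Warning struct in this section shares the same Code / Message / Data shape, so callers usually just surface the code and flatten the key/value metadata. A small sketch of that pattern (the hand-built warning below is illustrative only; in practice it arrives inside a list response):

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated package
)

// logListWarning prints the warning attached to a VmEndpointNatMappingsList
// response, if the server returned one.
func logListWarning(list *compute.VmEndpointNatMappingsList) {
	if list == nil || list.Warning == nil {
		return
	}
	fmt.Printf("warning %s: %s\n", list.Warning.Code, list.Warning.Message)
	for _, d := range list.Warning.Data {
		fmt.Printf("  %s = %s\n", d.Key, d.Value) // e.g. scope = zones/us-east1-d
	}
}

func main() {
	logListWarning(&compute.VmEndpointNatMappingsList{
		Warning: &compute.VmEndpointNatMappingsListWarning{
			Code:    "NO_RESULTS_ON_PAGE",
			Message: "There are no results for this page.",
			Data: []*compute.VmEndpointNatMappingsListWarningData{
				{Key: "scope", Value: "zones/us-east1-d"},
			},
		},
	})
}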
+ NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGateway: Represents a HA VPN gateway. HA VPN is a +// high-availability (HA) Cloud VPN solution that lets you securely +// connect your on-premises network to your Google Cloud Virtual Private +// Cloud network through an IPsec VPN connection in a single region. For +// more information about Cloud HA VPN solutions, see Cloud VPN +// topologies . +type VpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#vpnGateway for + // VPN gateways. + Kind string `json:"kind,omitempty"` + + // LabelFingerprint: A fingerprint for the labels being applied to this + // VpnGateway, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels, otherwise the request will fail with error + // 412 conditionNotMet. To see the latest fingerprint, make a get() + // request to retrieve an VpnGateway. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels for this resource. These can only be added or modified + // by the setLabels method. Each label key/value pair must comply with + // RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: URL of the network to which this VPN gateway is attached. + // Provided by the client when the VPN gateway is created. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URL of the region where the VPN gateway + // resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // StackType: The stack type for this VPN gateway to identify the IP + // protocols that are enabled. Possible values are: IPV4_ONLY, + // IPV4_IPV6. If not specified, IPV4_ONLY will be used. + // + // Possible values: + // "IPV4_IPV6" - Enable VPN gateway with both IPv4 and IPv6 protocols. + // "IPV4_ONLY" - Enable VPN gateway with only IPv4 protocol. + StackType string `json:"stackType,omitempty"` + + // VpnInterfaces: The list of VPN interfaces associated with this VPN + // gateway. 
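The VpnGateway fields documented above (RFC1035 name, network URL, optional stackType) map directly onto an insert request. A hedged sketch; the VpnGateways.Insert wrapper is assumed to exist elsewhere in this generated package, and the project, region, and network values are placeholders:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated package
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	gw := &compute.VpnGateway{
		// Name must match [a-z]([-a-z0-9]*[a-z0-9])? per the field comment above.
		Name:      "ha-vpn-gw-1",
		Network:   "projects/my-project/global/networks/my-vpc", // placeholder URL
		StackType: "IPV4_ONLY",                                  // IPV4_ONLY is also the documented default when unset
	}

	// Insert returns a long-running Operation; production code would poll it until DONE.
	op, err := svc.VpnGateways.Insert("my-project", "us-central1", gw).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("insert started: operation %s, status %s", op.Name, op.Status)
}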
+ VpnInterfaces []*VpnGatewayVpnGatewayInterface `json:"vpnInterfaces,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnGateway) MarshalJSON() ([]byte, error) { + type NoMethod VpnGateway + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewayAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnGateway resources. + Items map[string]VpnGatewaysScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnGateway for + // VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnGatewayAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
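VpnGatewayAggregatedList keys Items by scope (for example "regions/us-central1"), and the generated aggregated-list call follows nextPageToken for you via Pages. A sketch under the same assumptions as above (the AggregatedList wrapper and the project name are assumed):

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated package
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Pages handles NextPageToken automatically and invokes the callback once per page.
	err = svc.VpnGateways.AggregatedList("my-project").Pages(ctx, func(page *compute.VpnGatewayAggregatedList) error {
		for scope, scoped := range page.Items { // map keyed by scope, e.g. "regions/us-central1"
			if len(scoped.VpnGateways) == 0 {
				continue // empty scopes carry only an informational warning
			}
			for _, gw := range scoped.VpnGateways {
				fmt.Printf("%s: %s\n", scope, gw.Name)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}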
+ NullFields []string `json:"-"` +} + +func (s *VpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayAggregatedListWarning: [Output Only] Informational warning +// message. +type VpnGatewayAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. 
+ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*VpnGatewayAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewayAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *VpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayList: Contains a list of VpnGateway resources. +type VpnGatewayList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnGateway resources. + Items []*VpnGateway `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnGateway for + // VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnGatewayListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayList) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayListWarning: [Output Only] Informational warning message. +type VpnGatewayListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. 
+ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*VpnGatewayListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewayListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewayStatus struct { + // VpnConnections: List of VPN connection for this VpnGateway. + VpnConnections []*VpnGatewayStatusVpnConnection `json:"vpnConnections,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VpnConnections") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VpnConnections") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *VpnGatewayStatus) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayStatusHighAvailabilityRequirementState: Describes the high +// availability requirement state for the VPN connection between this +// Cloud VPN gateway and a peer gateway. +type VpnGatewayStatusHighAvailabilityRequirementState struct { + // State: Indicates the high availability requirement state for the VPN + // connection. Valid values are CONNECTION_REDUNDANCY_MET, + // CONNECTION_REDUNDANCY_NOT_MET. + // + // Possible values: + // "CONNECTION_REDUNDANCY_MET" - VPN tunnels are configured with + // adequate redundancy from Cloud VPN gateway to the peer VPN gateway. + // For both GCP-to-non-GCP and GCP-to-GCP connections, the adequate + // redundancy is a pre-requirement for users to get 99.99% availability + // on GCP side; please note that for any connection, end-to-end 99.99% + // availability is subject to proper configuration on the peer VPN + // gateway. + // "CONNECTION_REDUNDANCY_NOT_MET" - VPN tunnels are not configured + // with adequate redundancy from the Cloud VPN gateway to the peer + // gateway + State string `json:"state,omitempty"` + + // UnsatisfiedReason: Indicates the reason why the VPN connection does + // not meet the high availability redundancy criteria/requirement. Valid + // values is INCOMPLETE_TUNNELS_COVERAGE. + // + // Possible values: + // "INCOMPLETE_TUNNELS_COVERAGE" + UnsatisfiedReason string `json:"unsatisfiedReason,omitempty"` + + // ForceSendFields is a list of field names (e.g. "State") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "State") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayStatusHighAvailabilityRequirementState) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayStatusHighAvailabilityRequirementState + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayStatusTunnel: Contains some information about a VPN tunnel. +type VpnGatewayStatusTunnel struct { + // LocalGatewayInterface: The VPN gateway interface this VPN tunnel is + // associated with. + LocalGatewayInterface int64 `json:"localGatewayInterface,omitempty"` + + // PeerGatewayInterface: The peer gateway interface this VPN tunnel is + // connected to, the peer gateway could either be an external VPN + // gateway or GCP VPN gateway. + PeerGatewayInterface int64 `json:"peerGatewayInterface,omitempty"` + + // TunnelUrl: URL reference to the VPN tunnel. + TunnelUrl string `json:"tunnelUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "LocalGatewayInterface") to unconditionally include in API requests. 
+ // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LocalGatewayInterface") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayStatusTunnel + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayStatusVpnConnection: A VPN connection contains all VPN +// tunnels connected from this VpnGateway to the same peer gateway. The +// peer gateway could either be an external VPN gateway or GCP VPN +// gateway. +type VpnGatewayStatusVpnConnection struct { + // PeerExternalGateway: URL reference to the peer external VPN gateways + // to which the VPN tunnels in this VPN connection are connected. This + // field is mutually exclusive with peer_gcp_gateway. + PeerExternalGateway string `json:"peerExternalGateway,omitempty"` + + // PeerGcpGateway: URL reference to the peer side VPN gateways to which + // the VPN tunnels in this VPN connection are connected. This field is + // mutually exclusive with peer_external_gateway. + PeerGcpGateway string `json:"peerGcpGateway,omitempty"` + + // State: HighAvailabilityRequirementState for the VPN connection. + State *VpnGatewayStatusHighAvailabilityRequirementState `json:"state,omitempty"` + + // Tunnels: List of VPN tunnels that are in this VPN connection. + Tunnels []*VpnGatewayStatusTunnel `json:"tunnels,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PeerExternalGateway") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PeerExternalGateway") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayStatusVpnConnection) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayStatusVpnConnection + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayVpnGatewayInterface: A VPN gateway interface. +type VpnGatewayVpnGatewayInterface struct { + // Id: [Output Only] Numeric identifier for this VPN interface + // associated with the VPN gateway.
+ Id int64 `json:"id,omitempty"` + + // InterconnectAttachment: URL of the VLAN attachment + // (interconnectAttachment) resource for this VPN gateway interface. + // When the value of this field is present, the VPN gateway is used for + // HA VPN over Cloud Interconnect; all egress or ingress traffic for + // this VPN gateway interface goes through the specified VLAN attachment + // resource. + InterconnectAttachment string `json:"interconnectAttachment,omitempty"` + + // IpAddress: [Output Only] IP address for this VPN interface associated + // with the VPN gateway. The IP address could be either a regional + // external IP address or a regional internal IP address. The two IP + // addresses for a VPN gateway must be all regional external or regional + // internal IP addresses. There cannot be a mix of regional external IP + // addresses and regional internal IP addresses. For HA VPN over Cloud + // Interconnect, the IP addresses for both interfaces could either be + // regional internal IP addresses or regional external IP addresses. For + // regular (non HA VPN over Cloud Interconnect) HA VPN tunnels, the IP + // address must be a regional external IP address. + IpAddress string `json:"ipAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayVpnGatewayInterface) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayVpnGatewayInterface + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewaysGetStatusResponse struct { + Result *VpnGatewayStatus `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
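VpnGatewaysGetStatusResponse wraps the per-connection status structs described above, so a health check typically walks Result.VpnConnections and compares State against CONNECTION_REDUNDANCY_MET. A sketch; the GetStatus wrapper is assumed to exist elsewhere in this generated package and the identifiers are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated package
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	resp, err := svc.VpnGateways.GetStatus("my-project", "us-central1", "ha-vpn-gw-1").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	if resp.Result == nil {
		return
	}

	for _, conn := range resp.Result.VpnConnections {
		peer := conn.PeerGcpGateway
		if peer == "" {
			peer = conn.PeerExternalGateway
		}
		// CONNECTION_REDUNDANCY_MET / CONNECTION_REDUNDANCY_NOT_MET per the enum above.
		if conn.State != nil && conn.State.State != "CONNECTION_REDUNDANCY_MET" {
			fmt.Printf("peer %s: redundancy not met (%s)\n", peer, conn.State.UnsatisfiedReason)
		}
		for _, t := range conn.Tunnels {
			fmt.Printf("  interface %d -> %s\n", t.LocalGatewayInterface, t.TunnelUrl)
		}
	}
}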
+ NullFields []string `json:"-"` +} + +func (s *VpnGatewaysGetStatusResponse) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewaysGetStatusResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewaysScopedList struct { + // VpnGateways: [Output Only] A list of VPN gateways contained in this + // scope. + VpnGateways []*VpnGateway `json:"vpnGateways,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *VpnGatewaysScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VpnGateways") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VpnGateways") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewaysScopedList) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewaysScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewaysScopedListWarning: [Output Only] Informational warning +// which replaces the list of addresses when the list is empty. +type VpnGatewaysScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. 
+ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*VpnGatewaysScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewaysScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewaysScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewaysScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnel: Represents a Cloud VPN Tunnel resource. For more +// information about VPN, read the Cloud VPN Overview. +type VpnTunnel struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // DetailedStatus: [Output Only] Detailed status message for the VPN + // tunnel. + DetailedStatus string `json:"detailedStatus,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // IkeVersion: IKE protocol version to use when establishing the VPN + // tunnel with the peer VPN gateway. Acceptable IKE versions are 1 or 2. + // The default version is 2. + IkeVersion int64 `json:"ikeVersion,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // LocalTrafficSelector: Local traffic selector to use when establishing + // the VPN tunnel with the peer VPN gateway. The value should be a CIDR + // formatted string, for example: 192.168.0.0/16. The ranges must be + // disjoint. Only IPv4 is supported. + LocalTrafficSelector []string `json:"localTrafficSelector,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash.
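The VpnTunnel fields being documented here correspond to a typical HA VPN tunnel insert. A hedged sketch (the VpnTunnels.Insert wrapper is assumed to exist elsewhere in this generated package; every name, URL, and secret below is a placeholder). Note how ForceSendFields is needed so that interface 0 survives omitempty:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated package
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	tunnel := &compute.VpnTunnel{
		Name:                "ha-vpn-tunnel-0",
		IkeVersion:          2, // 2 is also the documented default
		Router:              "projects/my-project/regions/us-central1/routers/my-router",
		VpnGateway:          "projects/my-project/regions/us-central1/vpnGateways/ha-vpn-gw-1",
		VpnGatewayInterface: 0,
		// PeerGcpGateway is exclusive with PeerExternalGateway per the field comments below.
		PeerGcpGateway: "projects/peer-project/regions/us-central1/vpnGateways/peer-gw",
		SharedSecret:   "a-strong-shared-secret", // placeholder
		// VpnGatewayInterface is 0 (the zero value), so it must be force-sent
		// or omitempty would drop it from the request body.
		ForceSendFields: []string{"VpnGatewayInterface"},
	}

	op, err := svc.VpnTunnels.Insert("my-project", "us-central1", tunnel).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("tunnel insert operation: %s", op.Name)
}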
+ Name string `json:"name,omitempty"` + + // PeerExternalGateway: URL of the peer side external VPN gateway to + // which this VPN tunnel is connected. Provided by the client when the + // VPN tunnel is created. This field is exclusive with the field + // peerGcpGateway. + PeerExternalGateway string `json:"peerExternalGateway,omitempty"` + + // PeerExternalGatewayInterface: The interface ID of the external VPN + // gateway to which this VPN tunnel is connected. Provided by the client + // when the VPN tunnel is created. Possible values are: `0`, `1`, `2`, + // `3`. The number of IDs in use depends on the external VPN gateway + // redundancy type. + PeerExternalGatewayInterface int64 `json:"peerExternalGatewayInterface,omitempty"` + + // PeerGcpGateway: URL of the peer side HA GCP VPN gateway to which this + // VPN tunnel is connected. Provided by the client when the VPN tunnel + // is created. This field can be used when creating highly available VPN + // from VPC network to VPC network, the field is exclusive with the + // field peerExternalGateway. If provided, the VPN tunnel will + // automatically use the same vpnGatewayInterface ID in the peer GCP VPN + // gateway. + PeerGcpGateway string `json:"peerGcpGateway,omitempty"` + + // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. + PeerIp string `json:"peerIp,omitempty"` + + // Region: [Output Only] URL of the region where the VPN tunnel resides. + // You must specify this field as part of the HTTP request URL. It is + // not settable as a field in the request body. + Region string `json:"region,omitempty"` + + // RemoteTrafficSelector: Remote traffic selectors to use when + // establishing the VPN tunnel with the peer VPN gateway. The value + // should be a CIDR formatted string, for example: 192.168.0.0/16. The + // ranges should be disjoint. Only IPv4 is supported. + RemoteTrafficSelector []string `json:"remoteTrafficSelector,omitempty"` + + // Router: URL of the router resource to be used for dynamic routing. + Router string `json:"router,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SharedSecret: Shared secret used to set the secure session between + // the Cloud VPN gateway and the peer VPN gateway. + SharedSecret string `json:"sharedSecret,omitempty"` + + // SharedSecretHash: Hash of the shared secret. + SharedSecretHash string `json:"sharedSecretHash,omitempty"` + + // Status: [Output Only] The status of the VPN tunnel, which can be one + // of the following: - PROVISIONING: Resource is being allocated for the + // VPN tunnel. - WAITING_FOR_FULL_CONFIG: Waiting to receive all + // VPN-related configs from the user. Network, TargetVpnGateway, + // VpnTunnel, ForwardingRule, and Route resources are needed to setup + // the VPN tunnel. - FIRST_HANDSHAKE: Successful first handshake with + // the peer VPN. - ESTABLISHED: Secure session is successfully + // established with the peer VPN. - NETWORK_ERROR: Deprecated, replaced + // by NO_INCOMING_PACKETS - AUTHORIZATION_ERROR: Auth error (for + // example, bad shared secret). - NEGOTIATION_FAILURE: Handshake failed. + // - DEPROVISIONING: Resources are being deallocated for the VPN tunnel. + // - FAILED: Tunnel creation has failed and the tunnel is not ready to + // be used. - NO_INCOMING_PACKETS: No incoming packets from peer. - + // REJECTED: Tunnel configuration was rejected, can be result of being + // denied access. 
- ALLOCATING_RESOURCES: Cloud VPN is in the process of + // allocating all required resources. - STOPPED: Tunnel is stopped due + // to its Forwarding Rules being deleted for Classic VPN tunnels or the + // project is in frozen state. - PEER_IDENTITY_MISMATCH: Peer identity + // does not match peer IP, probably behind NAT. - + // TS_NARROWING_NOT_ALLOWED: Traffic selector narrowing not allowed for + // an HA-VPN tunnel. + // + // Possible values: + // "ALLOCATING_RESOURCES" - Cloud VPN is in the process of allocating + // all required resources (specifically, a borg task). + // "AUTHORIZATION_ERROR" - Auth error (e.g. bad shared secret). + // "DEPROVISIONING" - Resources is being deallocated for the VPN + // tunnel. + // "ESTABLISHED" - Secure session is successfully established with + // peer VPN. + // "FAILED" - Tunnel creation has failed and the tunnel is not ready + // to be used. + // "FIRST_HANDSHAKE" - Successful first handshake with peer VPN. + // "NEGOTIATION_FAILURE" - Handshake failed. + // "NETWORK_ERROR" - Deprecated, replaced by NO_INCOMING_PACKETS + // "NO_INCOMING_PACKETS" - No incoming packets from peer + // "PROVISIONING" - Resource is being allocated for the VPN tunnel. + // "REJECTED" - Tunnel configuration was rejected, can be result of + // being denylisted. + // "STOPPED" - Tunnel is stopped due to its Forwarding Rules being + // deleted. + // "WAITING_FOR_FULL_CONFIG" - Waiting to receive all VPN-related + // configs from user. Network, TargetVpnGateway, VpnTunnel, + // ForwardingRule and Route resources are needed to setup VPN tunnel. + Status string `json:"status,omitempty"` + + // TargetVpnGateway: URL of the Target VPN gateway with which this VPN + // tunnel is associated. Provided by the client when the VPN tunnel is + // created. + TargetVpnGateway string `json:"targetVpnGateway,omitempty"` + + // VpnGateway: URL of the VPN gateway with which this VPN tunnel is + // associated. Provided by the client when the VPN tunnel is created. + // This must be used (instead of target_vpn_gateway) if a High + // Availability VPN gateway resource is created. + VpnGateway string `json:"vpnGateway,omitempty"` + + // VpnGatewayInterface: The interface ID of the VPN gateway with which + // this VPN tunnel is associated. Possible values are: `0`, `1`. + VpnGatewayInterface int64 `json:"vpnGatewayInterface,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
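Monitoring code usually switches on the Status values enumerated above after a vpnTunnels.get call. A short sketch under the same assumptions (the Get wrapper and the resource names are assumed):

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated package
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	t, err := svc.VpnTunnels.Get("my-project", "us-central1", "ha-vpn-tunnel-0").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	switch t.Status {
	case "ESTABLISHED":
		fmt.Println("tunnel up:", t.DetailedStatus)
	case "PROVISIONING", "ALLOCATING_RESOURCES", "WAITING_FOR_FULL_CONFIG", "FIRST_HANDSHAKE":
		fmt.Println("tunnel still coming up:", t.Status)
	case "AUTHORIZATION_ERROR", "NEGOTIATION_FAILURE", "NO_INCOMING_PACKETS", "FAILED", "REJECTED":
		fmt.Println("tunnel unhealthy:", t.Status, t.DetailedStatus)
	default:
		fmt.Println("tunnel state:", t.Status)
	}
}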
+ NullFields []string `json:"-"` +} + +func (s *VpnTunnel) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnel + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnTunnelsScopedList resources. + Items map[string]VpnTunnelsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnTunnelAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelAggregatedListWarning: [Output Only] Informational warning +// message. +type VpnTunnelAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. 
+ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*VpnTunnelAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelList: Contains a list of VpnTunnel resources. +type VpnTunnelList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnTunnel resources. + Items []*VpnTunnel `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. 
+ Warning *VpnTunnelListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelListWarning: [Output Only] Informational warning message. +type VpnTunnelListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. 
+ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*VpnTunnelListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelsScopedList struct { + // VpnTunnels: A list of VPN tunnels contained in this scope. + VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *VpnTunnelsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VpnTunnels") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VpnTunnels") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelsScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelsScopedListWarning: Informational warning which replaces the +// list of addresses when the list is empty. +type VpnTunnelsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. 
+ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*VpnTunnelsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelsScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelsScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type WafExpressionSet struct { + // Aliases: A list of alternate IDs. The format should be: - E.g. + // XSS-stable Generic suffix like "stable" is particularly useful if a + // policy likes to avail newer set of expressions without having to + // change the policy. A given alias name can't be used for more than one + // entity set. + Aliases []string `json:"aliases,omitempty"` + + // Expressions: List of available expressions. + Expressions []*WafExpressionSetExpression `json:"expressions,omitempty"` + + // Id: Google specified expression set ID. The format should be: - E.g. + // XSS-20170329 required + Id string `json:"id,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Aliases") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Aliases") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *WafExpressionSet) MarshalJSON() ([]byte, error) { + type NoMethod WafExpressionSet + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type WafExpressionSetExpression struct { + // Id: Expression ID should uniquely identify the origin of the + // expression. E.g. owasp-crs-v020901-id973337 identifies Owasp core + // rule set version 2.9.1 rule id 973337. The ID could be used to + // determine the individual attack definition that has been detected. It + // could also be used to exclude it from the policy in case of false + // positive. required + Id string `json:"id,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *WafExpressionSetExpression) MarshalJSON() ([]byte, error) { + type NoMethod WafExpressionSetExpression + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// WeightedBackendService: In contrast to a single BackendService in +// HttpRouteAction to which all matching traffic is directed to, +// WeightedBackendService allows traffic to be split across multiple +// backend services. The volume of traffic for each backend service is +// proportional to the weight specified in each WeightedBackendService +type WeightedBackendService struct { + // BackendService: The full or partial URL to the default BackendService + // resource. Before forwarding the request to backendService, the load + // balancer applies any relevant headerActions specified as part of this + // backendServiceWeight. + BackendService string `json:"backendService,omitempty"` + + // HeaderAction: Specifies changes to request and response headers that + // need to take effect for the selected backendService. headerAction + // specified here take effect before headerAction in the enclosing + // HttpRouteRule, PathMatcher and UrlMap. headerAction is not supported + // for load balancers that have their loadBalancingScheme set to + // EXTERNAL. Not supported when the URL map is bound to a target gRPC + // proxy that has validateForProxyless field set to true. + HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` + + // Weight: Specifies the fraction of traffic sent to a backend service, + // computed as weight / (sum of all weightedBackendService weights in + // routeAction) . The selection of a backend service is determined only + // for new traffic. 
Once a user's request has been directed to a backend + // service, subsequent requests are sent to the same backend service as + // determined by the backend service's session affinity policy. The + // value must be from 0 to 1000. + Weight int64 `json:"weight,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackendService") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackendService") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *WeightedBackendService) MarshalJSON() ([]byte, error) { + type NoMethod WeightedBackendService + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type XpnHostList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of shared VPC host project URLs. + Items []*Project `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#xpnHostList for + // lists of shared VPC hosts. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *XpnHostListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
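As described above, the share of traffic a WeightedBackendService receives is its weight divided by the sum of all weights in the routeAction, with each weight between 0 and 1000. A small illustrative helper, under the same import assumption as the earlier sketches, that computes those effective fractions:

import compute "google.golang.org/api/compute/v1"

// trafficShares returns the effective traffic fraction per backend service,
// computed as weight / (sum of all weights in the routeAction).
func trafficShares(ws []*compute.WeightedBackendService) map[string]float64 {
	var total int64
	for _, w := range ws {
		total += w.Weight
	}
	shares := make(map[string]float64, len(ws))
	if total == 0 {
		return shares
	}
	for _, w := range ws {
		shares[w.BackendService] = float64(w.Weight) / float64(total)
	}
	return shares
}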
+ NullFields []string `json:"-"` +} + +func (s *XpnHostList) MarshalJSON() ([]byte, error) { + type NoMethod XpnHostList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// XpnHostListWarning: [Output Only] Informational warning message. +type XpnHostListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. 
+ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*XpnHostListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostListWarning) MarshalJSON() ([]byte, error) { + type NoMethod XpnHostListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type XpnHostListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod XpnHostListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// XpnResourceId: Service resource (a.k.a service project) ID. 
+type XpnResourceId struct { + // Id: The ID of the service resource. In the case of projects, this + // field supports project id (e.g., my-project-123) and project number + // (e.g. 12345678). + Id string `json:"id,omitempty"` + + // Type: The type of the service resource. + // + // Possible values: + // "PROJECT" + // "XPN_RESOURCE_TYPE_UNSPECIFIED" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnResourceId) MarshalJSON() ([]byte, error) { + type NoMethod XpnResourceId + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Zone: Represents a Zone resource. A zone is a deployment area. These +// deployment areas are subsets of a region. For example the zone +// us-east1-a is located in the us-east1 region. For more information, +// read Regions and Zones. +type Zone struct { + // AvailableCpuPlatforms: [Output Only] Available cpu/platform + // selections for the zone. + AvailableCpuPlatforms []string `json:"availableCpuPlatforms,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated -- [Output Only] The deprecation status associated with + // this zone. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] Textual description of the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#zone for + // zones. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // Region: [Output Only] Full URL reference to the region which hosts + // the zone. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] Status of the zone, either UP or DOWN. + // + // Possible values: + // "DOWN" + // "UP" + Status string `json:"status,omitempty"` + + // SupportsPzs: [Output Only] Reserved for future use. + SupportsPzs bool `json:"supportsPzs,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "AvailableCpuPlatforms") to unconditionally include in API requests. 
+ // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AvailableCpuPlatforms") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Zone) MarshalJSON() ([]byte, error) { + type NoMethod Zone + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ZoneList: Contains a list of zone resources. +type ZoneList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Zone resources. + Items []*Zone `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *ZoneListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneList) MarshalJSON() ([]byte, error) { + type NoMethod ZoneList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ZoneListWarning: [Output Only] Informational warning message. +type ZoneListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*ZoneListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ZoneListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ZoneListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneSetLabelsRequest struct { + // LabelFingerprint: The fingerprint of the previous set of labels for + // this resource, used to detect conflicts. The fingerprint is initially + // generated by Compute Engine and changes after every request to modify + // or update labels. You must always provide an up-to-date fingerprint + // hash in order to update or change labels. Make a get() request to the + // resource to get the latest fingerprint. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: The labels to set for this resource. + Labels map[string]string `json:"labels,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"LabelFingerprint") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LabelFingerprint") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { + type NoMethod ZoneSetLabelsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneSetPolicyRequest struct { + // Bindings: Flatten Policy to create a backwacd compatible wire-format. + // Deprecated. Use 'policy' to specify bindings. + Bindings []*Binding `json:"bindings,omitempty"` + + // Etag: Flatten Policy to create a backward compatible wire-format. + // Deprecated. Use 'policy' to specify the etag. + Etag string `json:"etag,omitempty"` + + // Policy: REQUIRED: The complete policy to be applied to the + // 'resource'. The size of the policy is limited to a few 10s of KB. An + // empty policy is in general a valid policy but certain services (like + // Projects) might reject them. + Policy *Policy `json:"policy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Bindings") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bindings") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneSetPolicyRequest) MarshalJSON() ([]byte, error) { + type NoMethod ZoneSetPolicyRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "compute.acceleratorTypes.aggregatedList": + +type AcceleratorTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of accelerator types. +// +// - project: Project ID for this request. 
+func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall { + c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. 
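Filter strings are passed verbatim to the call builder, so the two syntaxes described above are chosen purely by how the string is written. A small sketch, assuming the generated Service exposes this group as an AcceleratorTypes field and using the Context and Do methods defined further down:

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// listNvidiaAcceleratorTypes uses the regular-expression filter form; an
// AIP-160 style substring match would instead be written `name:nvidia`.
func listNvidiaAcceleratorTypes(ctx context.Context, svc *compute.Service, project string) (*compute.AcceleratorTypeAggregatedList, error) {
	return svc.AcceleratorTypes.AggregatedList(project).
		Filter(`name eq .*nvidia.*`).
		Context(ctx).
		Do()
}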
+func (c *AcceleratorTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *AcceleratorTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *AcceleratorTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
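
A sketch of the Fields and IfNoneMatch options described above, combined in one conditional, partial-response request. The cached value and ETag are assumed to have been kept from an earlier response; only methods shown in this file plus googleapi.IsNotModified are used.

func exampleConditionalPartialList(ctx context.Context, svc *Service, cached *AcceleratorTypeAggregatedList, etag string) (*AcceleratorTypeAggregatedList, error) {
    resp, err := svc.AcceleratorTypes.AggregatedList("my-project").
        Fields("items", "nextPageToken"). // partial response: only these top-level fields
        IfNoneMatch(etag).                // ETag remembered from an earlier response
        Context(ctx).
        Do()
    if googleapi.IsNotModified(err) {
        return cached, nil // HTTP 304: nothing changed, reuse the cached copy
    }
    return resp, err
}
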
+func (c *AcceleratorTypesAggregatedListCall) Context(ctx context.Context) *AcceleratorTypesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/acceleratorTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.aggregatedList" call. +// Exactly one of *AcceleratorTypeAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *AcceleratorTypeAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AcceleratorTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of accelerator types.", + // "flatPath": "projects/{project}/aggregated/acceleratorTypes", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/acceleratorTypes", + // "response": { + // "$ref": "AcceleratorTypeAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(*AcceleratorTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.acceleratorTypes.get": + +type AcceleratorTypesGetCall struct { + s *Service + project string + zone string + acceleratorType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified accelerator type. +// +// - acceleratorType: Name of the accelerator type to return. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { + c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.acceleratorType = acceleratorType + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
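
The Pages helper defined above drives maxResults/pageToken automatically, invoking the callback once per page. A sketch (project ID is a placeholder; the Items map and its scoped-list element type are defined earlier in this generated file):

func examplePageThroughAcceleratorTypes(ctx context.Context, svc *Service) error {
    return svc.AcceleratorTypes.AggregatedList("my-project").
        MaxResults(100).
        Pages(ctx, func(page *AcceleratorTypeAggregatedList) error {
            for scope, scoped := range page.Items {
                for _, at := range scoped.AcceleratorTypes {
                    fmt.Printf("%s: %s\n", scope, at.Name)
                }
            }
            return nil // returning a non-nil error stops the iteration early
        })
}
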
+func (c *AcceleratorTypesGetCall) Context(ctx context.Context) *AcceleratorTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AcceleratorTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "acceleratorType": c.acceleratorType, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.get" call. +// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AcceleratorType.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AcceleratorType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified accelerator type.", + // "flatPath": "projects/{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "acceleratorType" + // ], + // "parameters": { + // "acceleratorType": { + // "description": "Name of the accelerator type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + // "response": { + // "$ref": "AcceleratorType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.acceleratorTypes.list": + +type AcceleratorTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of accelerator types that are available to the +// specified project. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { + c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. 
The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
+func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *AcceleratorTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AcceleratorTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesListCall) Context(ctx context.Context) *AcceleratorTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AcceleratorTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/acceleratorTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.list" call. +// Exactly one of *AcceleratorTypeList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *AcceleratorTypeList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AcceleratorTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of accelerator types that are available to the specified project.", + // "flatPath": "projects/{project}/zones/{zone}/acceleratorTypes", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/acceleratorTypes", + // "response": { + // "$ref": "AcceleratorTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*AcceleratorTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.aggregatedList": + +type AddressesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of addresses. +// +// - project: Project ID for this request. +func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { + c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *AddressesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *AddressesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. 
Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *AddressesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AddressesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
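
Header, documented above, exposes the live header map for a call, so anything set on it is sent with the underlying HTTP request. A sketch; the quota-project header name and billing project are assumptions, not taken from this file.

func exampleQuotaProjectHeader(ctx context.Context, svc *Service) (*AddressAggregatedList, error) {
    call := svc.Addresses.AggregatedList("my-project").Context(ctx)
    // Attach a per-request header before executing the call.
    call.Header().Set("X-Goog-User-Project", "my-billing-project")
    return call.Do()
}
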
+func (c *AddressesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/addresses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.aggregatedList" call. +// Exactly one of *AddressAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *AddressAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AddressAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of addresses.", + // "flatPath": "projects/{project}/aggregated/addresses", + // "httpMethod": "GET", + // "id": "compute.addresses.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/addresses", + // "response": { + // "$ref": "AddressAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.delete": + +type AddressesDeleteCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified address resource. +// +// - address: Name of the address resource to delete. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { + c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
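
The requestId semantics above are what make a retry of this Delete safe. A sketch, assuming the github.com/google/uuid package (any RFC 4122 generator would do) and placeholder resource names:

func exampleIdempotentDelete(ctx context.Context, svc *Service) (*Operation, error) {
    // Reuse the same UUID if the call has to be retried so the server can
    // detect the duplicate instead of acting twice.
    requestID := uuid.New().String()
    return svc.Addresses.Delete("my-project", "us-central1", "my-static-ip").
        RequestId(requestID).
        Context(ctx).
        Do()
}
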
+func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified address resource.", + // "flatPath": "projects/{project}/regions/{region}/addresses/{address}", + // "httpMethod": "DELETE", + // "id": "compute.addresses.delete", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.get": + +type AddressesGetCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified address resource. +// +// - address: Name of the address resource to return. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { + c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Address{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified address resource.", + // "flatPath": "projects/{project}/regions/{region}/addresses/{address}", + // "httpMethod": "GET", + // "id": "compute.addresses.get", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Address" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.addresses.insert": + +type AddressesInsertCall struct { + s *Service + project string + region string + address *Address + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an address resource in the specified project by using +// the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { + c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). 
+func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an address resource in the specified project by using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/addresses", + // "httpMethod": "POST", + // "id": "compute.addresses.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/addresses", + // "request": { + // "$ref": "Address" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.list": + +type AddressesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of addresses contained within the specified +// region. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *AddressesService) List(project string, region string) *AddressesListCall { + c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. 
If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *AddressesListCall) Filter(filter string) *AddressesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. 
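As a minimal sketch of the Filter, OrderBy and MaxResults parameters documented above (editorial, with the same assumptions as the earlier sketch: package imported as compute, svc from compute.NewService, a ctx, and placeholder project and region names):

func listAddressesFiltered(ctx context.Context, svc *compute.Service) (*compute.AddressList, error) {
	return svc.Addresses.List("my-project", "us-central1").
		Filter("name != example-address").  // AIP-160 style expression, as in the comment above
		OrderBy("creationTimestamp desc").  // newest first
		MaxResults(100).
		Context(ctx).
		Do()
}

This fetches a single page; a caller that wants every result would follow nextPageToken, or use the Pages helper shown further below.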
+func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *AddressesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AddressesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.list" call. +// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AddressList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned.
+func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AddressList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of addresses contained within the specified region.", + // "flatPath": "projects/{project}/regions/{region}/addresses", + // "httpMethod": "GET", + // "id": "compute.addresses.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/addresses", + // "response": { + // "$ref": "AddressList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.setLabels": + +type AddressesSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an Address. To learn more about labels, +// read the Labeling Resources documentation. 
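The Pages helper defined above for AddressesListCall drives the pageToken / nextPageToken loop on the caller's behalf. A hypothetical sketch under the same assumptions as the earlier ones (compute import alias, svc from compute.NewService, a ctx, fmt for output, and the Items field being the list type's usual results slice):

func visitAllAddresses(ctx context.Context, svc *compute.Service) error {
	return svc.Addresses.List("my-project", "us-central1").
		MaxResults(500).
		Pages(ctx, func(page *compute.AddressList) error {
			for _, addr := range page.Items {
				fmt.Println(addr.Name)
			}
			return nil // returning a non-nil error stops the iteration
		})
}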
+// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. +func (r *AddressesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *AddressesSetLabelsCall { + c := &AddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *AddressesSetLabelsCall) RequestId(requestId string) *AddressesSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesSetLabelsCall) Fields(s ...googleapi.Field) *AddressesSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesSetLabelsCall) Context(ctx context.Context) *AddressesSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.addresses.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.aggregatedList": + +type AutoscalersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of autoscalers. +// +// - project: Project ID for this request. +func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { + c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *AutoscalersAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *AutoscalersAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *AutoscalersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AutoscalersAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. 
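A hedged sketch of the aggregated call with ReturnPartialSuccess, under the same assumptions as the earlier sketches; the Items map and the Autoscalers field on each scoped entry follow this package's usual aggregated-list shape but are not shown in this hunk, so treat those names as assumptions:

func listAutoscalersEverywhere(ctx context.Context, svc *compute.Service) error {
	agg, err := svc.Autoscalers.AggregatedList("my-project").
		ReturnPartialSuccess(true). // tolerate individual scopes failing
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// Items maps a scope key (for example "zones/us-central1-a") to the
	// autoscalers found in that scope.
	for scope, scoped := range agg.Items {
		for _, as := range scoped.Autoscalers {
			fmt.Println(scope, as.Name)
		}
	}
	return nil
}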
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.aggregatedList" call. +// Exactly one of *AutoscalerAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AutoscalerAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of autoscalers.", + // "flatPath": "projects/{project}/aggregated/autoscalers", + // "httpMethod": "GET", + // "id": "compute.autoscalers.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. 
For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/autoscalers", + // "response": { + // "$ref": "AutoscalerAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.delete": + +type AutoscalersDeleteCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified autoscaler. +// +// - autoscaler: Name of the autoscaler to delete. 
+// - project: Project ID for this request. +// - zone: Name of the zone for this request. +func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { + c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified autoscaler.", + // "flatPath": "projects/{project}/zones/{zone}/autoscalers/{autoscaler}", + // "httpMethod": "DELETE", + // "id": "compute.autoscalers.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.get": + +type AutoscalersGetCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified autoscaler resource. Gets a list of +// available autoscalers by making a list() request. +// +// - autoscaler: Name of the autoscaler to return. +// - project: Project ID for this request. +// - zone: Name of the zone for this request. 
+func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { + c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.get" call. +// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Autoscaler.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Autoscaler{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", + // "flatPath": "projects/{project}/zones/{zone}/autoscalers/{autoscaler}", + // "httpMethod": "GET", + // "id": "compute.autoscalers.get", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Autoscaler" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.insert": + +type AutoscalersInsertCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an autoscaler in the specified project using the data +// included in the request. +// +// - project: Project ID for this request. +// - zone: Name of the zone for this request. +func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { + c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. 
The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/zones/{zone}/autoscalers", + // "httpMethod": "POST", + // "id": "compute.autoscalers.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.list": + +type AutoscalersListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of autoscalers contained within the specified +// zone. +// +// - project: Project ID for this request. +// - zone: Name of the zone for this request. +func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { + c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. 
If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. 
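+
+// exampleFilteredAutoscalerList is an illustrative sketch, not generated
+// code: it combines the Filter, MaxResults and OrderBy parameters described
+// above on a single list call. The project, zone and filter expression are
+// placeholder values.
+func exampleFilteredAutoscalerList(ctx context.Context, svc *AutoscalersService) (*AutoscalerList, error) {
+	// Exclude one autoscaler by name, cap the page size, and return the
+	// newest resources first.
+	return svc.List("my-project", "us-central1-a").
+		Filter("name != example-autoscaler").
+		MaxResults(50).
+		OrderBy("creationTimestamp desc").
+		Context(ctx).
+		Do()
+}
+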
+func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall {
+	c.urlParams_.Set("orderBy", orderBy)
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set `pageToken` to the `nextPageToken` returned by a
+// previous list request to get the next page of results.
+func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// ReturnPartialSuccess sets the optional parameter
+// "returnPartialSuccess": Opt-in for partial success behavior which
+// provides partial results in case of failure. The default value is
+// false.
+func (c *AutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AutoscalersListCall {
+	c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *AutoscalersListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/autoscalers")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+		"zone":    c.zone,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.autoscalers.list" call.
+// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *AutoscalerList.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
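+
+// examplePageThroughAutoscalers is an illustrative sketch, not generated
+// code: it consumes every page of results from the Do method that follows
+// by using the Pages helper defined later in this file, which re-issues the
+// call with each nextPageToken. The project/zone values are placeholders,
+// and the AutoscalerList.Items field is assumed from the Compute API surface.
+func examplePageThroughAutoscalers(ctx context.Context, svc *AutoscalersService) ([]*Autoscaler, error) {
+	var all []*Autoscaler
+	err := svc.List("my-project", "us-central1-a").Pages(ctx, func(page *AutoscalerList) error {
+		// Collect the autoscalers returned for this page.
+		all = append(all, page.Items...)
+		return nil
+	})
+	return all, err
+}
+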
+func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AutoscalerList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of autoscalers contained within the specified zone.", + // "flatPath": "projects/{project}/zones/{zone}/autoscalers", + // "httpMethod": "GET", + // "id": "compute.autoscalers.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/autoscalers", + // "response": { + // "$ref": "AutoscalerList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.patch": + +type AutoscalersPatchCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an autoscaler in the specified project using the data +// included in the request. 
This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +// +// - project: Project ID for this request. +// - zone: Name of the zone for this request. +func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { + c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to patch. +func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.patch" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/zones/{zone}/autoscalers", + // "httpMethod": "PATCH", + // "id": "compute.autoscalers.patch", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to patch.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.update": + +type AutoscalersUpdateCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an autoscaler in the specified project using the data +// included in the request. +// +// - project: Project ID for this request. +// - zone: Name of the zone for this request. +func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { + c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to update. +func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
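+
+// exampleUpdateAutoscaler is an illustrative sketch, not generated code: it
+// issues the Update call described above with a client-chosen RequestId so
+// that a retried request is de-duplicated by the server. The resource names
+// and the UUID are placeholder values.
+func exampleUpdateAutoscaler(ctx context.Context, svc *AutoscalersService, a *Autoscaler) (*Operation, error) {
+	return svc.Update("my-project", "us-central1-a", a).
+		Autoscaler("my-autoscaler").
+		RequestId("3c4f1fbb-5f3a-4f0e-9a4e-d2a3b5c6d7e8").
+		Context(ctx).
+		Do()
+}
+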
+func (c *AutoscalersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/zones/{zone}/autoscalers", + // "httpMethod": "PUT", + // "id": "compute.autoscalers.update", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.addSignedUrlKey": + +type BackendBucketsAddSignedUrlKeyCall struct { + s *Service + project string + backendBucket string + signedurlkey *SignedUrlKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddSignedUrlKey: Adds a key for validating requests with signed URLs +// for this backend bucket. +// +// - backendBucket: Name of the BackendBucket resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - project: Project ID for this request. +func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { + c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.signedurlkey = signedurlkey + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsAddSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsAddSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsAddSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsAddSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
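+
+// exampleAddSignedUrlKey is an illustrative sketch, not generated code: it
+// shows the AddSignedUrlKey call described above attaching a signing key to
+// a backend bucket. The names and base64 key value are placeholders, and the
+// SignedUrlKey field names are assumptions about the Compute API surface.
+func exampleAddSignedUrlKey(ctx context.Context, svc *BackendBucketsService) (*Operation, error) {
+	key := &SignedUrlKey{
+		KeyName:  "my-signing-key",
+		KeyValue: "pPsVemX8GM46QVeVrKg3nQ==",
+	}
+	return svc.AddSignedUrlKey("my-project", "my-backend-bucket", key).
+		Context(ctx).
+		Do()
+}
+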
+func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.addSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a key for validating requests with signed URLs for this backend bucket.", + // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.addSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + // "request": { + // "$ref": "SignedUrlKey" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.delete": + +type BackendBucketsDeleteCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified BackendBucket resource. +// +// - backendBucket: Name of the BackendBucket resource to delete. +// - project: Project ID for this request. +func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { + c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BackendBucketsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified BackendBucket resource.", + // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}", + // "httpMethod": "DELETE", + // "id": "compute.backendBuckets.delete", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendBuckets/{backendBucket}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.deleteSignedUrlKey": + +type BackendBucketsDeleteSignedUrlKeyCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteSignedUrlKey: Deletes a key for validating requests with signed +// URLs for this backend bucket. +// +// - backendBucket: Name of the BackendBucket resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - keyName: The name of the Signed URL Key to delete. +// - project: Project ID for this request. +func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { + c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.urlParams_.Set("keyName", keyName) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsDeleteSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsDeleteSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.deleteSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a key for validating requests with signed URLs for this backend bucket.", + // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.deleteSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendBucket", + // "keyName" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "keyName": { + // "description": "The name of the Signed URL Key to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "projects/{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey",
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute"
+	//   ]
+	// }
+
+}
+
+// method id "compute.backendBuckets.get":
+
+type BackendBucketsGetCall struct {
+	s             *Service
+	project       string
+	backendBucket string
+	urlParams_    gensupport.URLParams
+	ifNoneMatch_  string
+	ctx_          context.Context
+	header_       http.Header
+}
+
+// Get: Returns the specified BackendBucket resource. Gets a list of
+// available backend buckets by making a list() request.
+//
+// - backendBucket: Name of the BackendBucket resource to return.
+// - project: Project ID for this request.
+func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall {
+	c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.backendBucket = backendBucket
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
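+
+// exampleGetBackendBucketIfChanged is an illustrative sketch, not generated
+// code: it pairs the IfNoneMatch parameter described above with
+// googleapi.IsNotModified so the BackendBucket is only re-fetched when its
+// ETag differs from lastETag. The resource names are placeholder values.
+func exampleGetBackendBucketIfChanged(ctx context.Context, svc *BackendBucketsService, lastETag string) (*BackendBucket, error) {
+	bb, err := svc.Get("my-project", "my-backend-bucket").
+		IfNoneMatch(lastETag).
+		Context(ctx).
+		Do()
+	if googleapi.IsNotModified(err) {
+		// The previously fetched copy is still current; no new data returned.
+		return nil, nil
+	}
+	return bb, err
+}
+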
+func (c *BackendBucketsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.get" call. +// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *BackendBucket.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BackendBucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified BackendBucket resource. 
Gets a list of available backend buckets by making a list() request.", + // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}", + // "httpMethod": "GET", + // "id": "compute.backendBuckets.get", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendBuckets/{backendBucket}", + // "response": { + // "$ref": "BackendBucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendBuckets.insert": + +type BackendBucketsInsertCall struct { + s *Service + project string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a BackendBucket resource in the specified project +// using the data included in the request. +// +// - project: Project ID for this request. +func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { + c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendbucket = backendbucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BackendBucketsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/backendBuckets", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendBuckets", + // "request": { + // "$ref": "BackendBucket" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.list": + +type BackendBucketsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of BackendBucket resources available to the +// specified project. +// +// - project: Project ID for this request. +func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { + c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. 
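A hedged sketch of the filter syntax described above applied to the List call. The enableCdn field name and the Items slice on BackendBucketList are assumptions; the client setup (compute.NewService) is defined elsewhere in this file, not in this hunk.

// Sketch for illustration only; not part of the generated code.
func listCdnBuckets(ctx context.Context, svc *compute.Service, project string) ([]*compute.BackendBucket, error) {
	// AIP-160-style expression per the Filter documentation above: clauses are
	// ANDed by default; enableCdn is an assumed BackendBucket field name.
	filter := `(name != example-bucket) (enableCdn = true)`
	resp, err := svc.BackendBuckets.List(project).
		Filter(filter).
		MaxResults(100). // acceptable values 0-500; default 500
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	// Only the first page is returned here; the Pages helper defined below walks all pages.
	return resp.Items, nil // Items assumed, as on other generated *List types
}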
+func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *BackendBucketsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendBucketsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BackendBucketsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.list" call. +// Exactly one of *BackendBucketList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendBucketList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BackendBucketList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of BackendBucket resources available to the specified project.", + // "flatPath": "projects/{project}/global/backendBuckets", + // "httpMethod": "GET", + // "id": "compute.backendBuckets.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/global/backendBuckets", + // "response": { + // "$ref": "BackendBucketList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.backendBuckets.patch": + +type BackendBucketsPatchCall struct { + s *Service + project string + backendBucket string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified BackendBucket resource with the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +// +// - backendBucket: Name of the BackendBucket resource to patch. +// - project: Project ID for this request. +func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { + c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.backendbucket = backendbucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
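A short sketch of the Pages helper shown above, which drives the pageToken/nextPageToken loop on the caller's behalf. The Items field on BackendBucketList is an assumption.

// Sketch for illustration only; not part of the generated code.
func countBackendBuckets(ctx context.Context, svc *compute.Service, project string) (int, error) {
	total := 0
	err := svc.BackendBuckets.List(project).MaxResults(50).Pages(ctx, func(page *compute.BackendBucketList) error {
		total += len(page.Items) // Items assumed, as on other generated *List types
		return nil               // a non-nil error here stops the iteration
	})
	return total, err
}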
+func (c *BackendBucketsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}", + // "httpMethod": "PATCH", + // "id": "compute.backendBuckets.patch", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendBuckets/{backendBucket}", + // "request": { + // "$ref": "BackendBucket" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.setEdgeSecurityPolicy": + +type BackendBucketsSetEdgeSecurityPolicyCall struct { + s *Service + project string + backendBucket string + securitypolicyreference *SecurityPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetEdgeSecurityPolicy: Sets the edge security policy for the +// specified backend bucket. +// +// - backendBucket: Name of the BackendService resource to which the +// security policy should be set. The name should conform to RFC1035. +// - project: Project ID for this request. +func (r *BackendBucketsService) SetEdgeSecurityPolicy(project string, backendBucket string, securitypolicyreference *SecurityPolicyReference) *BackendBucketsSetEdgeSecurityPolicyCall { + c := &BackendBucketsSetEdgeSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.securitypolicyreference = securitypolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsSetEdgeSecurityPolicyCall) RequestId(requestId string) *BackendBucketsSetEdgeSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsSetEdgeSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendBucketsSetEdgeSecurityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
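A sketch of the SetEdgeSecurityPolicy call declared above. The SecurityPolicy field on SecurityPolicyReference is an assumption based on the Compute API schema; the type itself is defined elsewhere in this file.

// Sketch for illustration only; not part of the generated code.
func attachEdgePolicyToBucket(ctx context.Context, svc *compute.Service, project, bucket, policyURL string) (*compute.Operation, error) {
	ref := &compute.SecurityPolicyReference{
		SecurityPolicy: policyURL, // assumed field: URL or name of the edge security policy
	}
	return svc.BackendBuckets.SetEdgeSecurityPolicy(project, bucket, ref).Context(ctx).Do()
}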
+func (c *BackendBucketsSetEdgeSecurityPolicyCall) Context(ctx context.Context) *BackendBucketsSetEdgeSecurityPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsSetEdgeSecurityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsSetEdgeSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}/setEdgeSecurityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.setEdgeSecurityPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the edge security policy for the specified backend bucket.", + // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}/setEdgeSecurityPolicy", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.setEdgeSecurityPolicy", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendService resource to which the security policy should be set. 
The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendBuckets/{backendBucket}/setEdgeSecurityPolicy", + // "request": { + // "$ref": "SecurityPolicyReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.update": + +type BackendBucketsUpdateCall struct { + s *Service + project string + backendBucket string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the specified BackendBucket resource with the data +// included in the request. +// +// - backendBucket: Name of the BackendBucket resource to update. +// - project: Project ID for this request. +func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { + c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.backendbucket = backendbucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
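A sketch of the read-modify-write pattern for the Update call declared above (a PUT that replaces the whole resource, unlike Patch's merge semantics). The EnableCdn field and the operation-polling step are assumptions not shown in this hunk.

// Sketch for illustration only; not part of the generated code.
func enableCdnOnBucket(ctx context.Context, svc *compute.Service, project, name, requestID string) (*compute.Operation, error) {
	bb, err := svc.BackendBuckets.Get(project, name).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	bb.EnableCdn = true // assumed BackendBucket field, defined elsewhere in this file
	// requestID must be a unique, valid UUID; reusing it on retries keeps the call idempotent.
	// The returned Operation still has to be polled for completion (not shown).
	return svc.BackendBuckets.Update(project, name, bb).
		RequestId(requestID).
		Context(ctx).
		Do()
}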
+func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified BackendBucket resource with the data included in the request.", + // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}", + // "httpMethod": "PUT", + // "id": "compute.backendBuckets.update", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendBuckets/{backendBucket}", + // "request": { + // "$ref": "BackendBucket" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.addSignedUrlKey": + +type BackendServicesAddSignedUrlKeyCall struct { + s *Service + project string + backendService string + signedurlkey *SignedUrlKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddSignedUrlKey: Adds a key for validating requests with signed URLs +// for this backend service. +// +// - backendService: Name of the BackendService resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - project: Project ID for this request. 
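A sketch of the AddSignedUrlKey call documented above. The SignedUrlKey field names and the base64url key encoding are assumptions drawn from the Cloud CDN signed-URL feature; they are not shown in this hunk.

// Sketch for illustration only; not part of the generated code.
func addSignedURLKey(ctx context.Context, svc *compute.Service, project, backendService, keyName, keyValue string) (*compute.Operation, error) {
	key := &compute.SignedUrlKey{
		KeyName:  keyName,  // assumed field: RFC1035-style key name
		KeyValue: keyValue, // assumed field: base64url-encoded 128-bit key material
	}
	return svc.BackendServices.AddSignedUrlKey(project, backendService, key).Context(ctx).Do()
}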
+func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { + c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.signedurlkey = signedurlkey + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *BackendServicesAddSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesAddSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesAddSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesAddSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}/addSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.addSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a key for validating requests with signed URLs for this backend service.", + // "flatPath": "projects/{project}/global/backendServices/{backendService}/addSignedUrlKey", + // "httpMethod": "POST", + // "id": "compute.backendServices.addSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{backendService}/addSignedUrlKey", + // "request": { + // "$ref": "SignedUrlKey" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.aggregatedList": + +type BackendServicesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all BackendService resources, +// regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. 
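A hedged sketch of the AggregatedList call declared just below, which returns backend services grouped by scope (global plus each region). The Items map on BackendServiceAggregatedList and the Service.BackendServices field are assumptions defined elsewhere in this file.

// Sketch for illustration only; not part of the generated code.
func listScopesWithBackendServices(ctx context.Context, svc *compute.Service, project string) ([]string, error) {
	var scopes []string
	err := svc.BackendServices.AggregatedList(project).
		ReturnPartialSuccess(true). // tolerate individually unreachable scopes
		Pages(ctx, func(page *compute.BackendServiceAggregatedList) error {
			for scope := range page.Items { // Items assumed to be the usual per-scope map
				scopes = append(scopes, scope)
			}
			return nil
		})
	return scopes, err
}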
+func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { + c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. 
+func (c *BackendServicesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *BackendServicesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *BackendServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendServicesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/backendServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.aggregatedList" call. +// Exactly one of *BackendServiceAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *BackendServiceAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BackendServiceAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/backendServices", + // "httpMethod": "GET", + // "id": "compute.backendServices.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/backendServices", + // "response": { + // "$ref": "BackendServiceAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.backendServices.delete": + +type BackendServicesDeleteCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified BackendService resource. +// +// - backendService: Name of the BackendService resource to delete. +// - project: Project ID for this request. +func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { + c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
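+//
+// For example, to trim the returned Operation down to a few fields (a
+// sketch; svc and the resource names are placeholders, and the field
+// names assume the usual Operation schema):
+//
+//	op, err := svc.BackendServices.Delete("my-project", "my-backend").
+//		Fields("name", "status").
+//		Do()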
+func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified BackendService resource.", + // "flatPath": "projects/{project}/global/backendServices/{backendService}", + // "httpMethod": "DELETE", + // "id": "compute.backendServices.delete", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{backendService}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.deleteSignedUrlKey": + +type BackendServicesDeleteSignedUrlKeyCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteSignedUrlKey: Deletes a key for validating requests with signed +// URLs for this backend service. +// +// - backendService: Name of the BackendService resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - keyName: The name of the Signed URL Key to delete. +// - project: Project ID for this request. 
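+//
+// A minimal usage sketch (svc and the names below are placeholders):
+//
+//	op, err := svc.BackendServices.
+//		DeleteSignedUrlKey("my-project", "my-backend", "my-signing-key").
+//		Do()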
+func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { + c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.urlParams_.Set("keyName", keyName) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendServicesDeleteSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesDeleteSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesDeleteSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}/deleteSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.deleteSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
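+//
+// For example, API-level failures can be distinguished from transport
+// errors roughly as follows (a sketch; c is a prepared call and the log
+// package is assumed):
+//
+//	op, err := c.Do()
+//	if gerr, ok := err.(*googleapi.Error); ok {
+//		log.Printf("compute API error %d: %s", gerr.Code, gerr.Message)
+//	} else if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = op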
+func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a key for validating requests with signed URLs for this backend service.", + // "flatPath": "projects/{project}/global/backendServices/{backendService}/deleteSignedUrlKey", + // "httpMethod": "POST", + // "id": "compute.backendServices.deleteSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendService", + // "keyName" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "keyName": { + // "description": "The name of the Signed URL Key to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{backendService}/deleteSignedUrlKey", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.get": + +type BackendServicesGetCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified BackendService resource. Gets a list of +// available backend services. +// +// - backendService: Name of the BackendService resource to return. +// - project: Project ID for this request. 
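+//
+// A minimal usage sketch (svc and the names are placeholders; Name is
+// assumed from the BackendService schema):
+//
+//	bs, err := svc.BackendServices.Get("my-project", "my-backend").Do()
+//	if err == nil {
+//		fmt.Println(bs.Name)
+//	}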
+func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall {
+ c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.backendService = backendService
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BackendServicesGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("GET", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "backendService": c.backendService,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.backendServices.get" call.
+// Exactly one of *BackendService or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *BackendService.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BackendService{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified BackendService resource. Gets a list of available backend services.", + // "flatPath": "projects/{project}/global/backendServices/{backendService}", + // "httpMethod": "GET", + // "id": "compute.backendServices.get", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{backendService}", + // "response": { + // "$ref": "BackendService" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendServices.getHealth": + +type BackendServicesGetHealthCall struct { + s *Service + project string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetHealth: Gets the most recent health check results for this +// BackendService. Example request body: { "group": +// "/zones/us-east1-b/instanceGroups/lb-backend-example" } +// +// - backendService: Name of the BackendService resource to which the +// queried instance belongs. +// - project: . +func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { + c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
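+//
+// For example (a sketch; the group path mirrors the example request body
+// above, ctx is a caller-supplied context, and svc and the names are
+// placeholders):
+//
+//	ref := &ResourceGroupReference{
+//		Group: "/zones/us-east1-b/instanceGroups/lb-backend-example",
+//	}
+//	health, err := svc.BackendServices.
+//		GetHealth("my-project", "my-backend", ref).
+//		Context(ctx).
+//		Do()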
+func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesGetHealthCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}/getHealth") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BackendServiceGroupHealth{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the most recent health check results for this BackendService. 
Example request body: { \"group\": \"/zones/us-east1-b/instanceGroups/lb-backend-example\" }",
+ // "flatPath": "projects/{project}/global/backendServices/{backendService}/getHealth",
+ // "httpMethod": "POST",
+ // "id": "compute.backendServices.getHealth",
+ // "parameterOrder": [
+ // "project",
+ // "backendService"
+ // ],
+ // "parameters": {
+ // "backendService": {
+ // "description": "Name of the BackendService resource to which the queried instance belongs.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{project}/global/backendServices/{backendService}/getHealth",
+ // "request": {
+ // "$ref": "ResourceGroupReference"
+ // },
+ // "response": {
+ // "$ref": "BackendServiceGroupHealth"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.backendServices.getIamPolicy":
+
+type BackendServicesGetIamPolicyCall struct {
+ s *Service
+ project string
+ resource string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// GetIamPolicy: Gets the access control policy for a resource. May be
+// empty if no such policy or resource exists.
+//
+// - project: Project ID for this request.
+// - resource: Name or id of the resource for this request.
+func (r *BackendServicesService) GetIamPolicy(project string, resource string) *BackendServicesGetIamPolicyCall {
+ c := &BackendServicesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.resource = resource
+ return c
+}
+
+// OptionsRequestedPolicyVersion sets the optional parameter
+// "optionsRequestedPolicyVersion": Requested IAM Policy version.
+func (c *BackendServicesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BackendServicesGetIamPolicyCall {
+ c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackendServicesGetIamPolicyCall) Fields(s ...googleapi.Field) *BackendServicesGetIamPolicyCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BackendServicesGetIamPolicyCall) IfNoneMatch(entityTag string) *BackendServicesGetIamPolicyCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
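+//
+// A minimal usage sketch (svc and the names are placeholders; requesting
+// policy version 3 is an assumption about the desired policy schema
+// version):
+//
+//	policy, err := svc.BackendServices.
+//		GetIamPolicy("my-project", "my-backend").
+//		OptionsRequestedPolicyVersion(3).
+//		Do()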
+func (c *BackendServicesGetIamPolicyCall) Context(ctx context.Context) *BackendServicesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BackendServicesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/global/backendServices/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.backendServices.getIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendServices.insert": + +type BackendServicesInsertCall struct { + s *Service + project string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a BackendService resource in the specified project +// using the data included in the request. For more information, see +// Backend services overview . +// +// - project: Project ID for this request. +func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { + c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendservice = backendservice + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
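+//
+// For example (a sketch; svc, the field values and requestID are
+// placeholders, and the BackendService field names are assumed from the
+// schema):
+//
+//	// requestID should be a caller-generated UUID string (see RequestId above).
+//	op, err := svc.BackendServices.Insert("my-project", &BackendService{
+//		Name:     "my-backend",
+//		Protocol: "HTTP",
+//	}).RequestId(requestID).Do()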
+func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview .", + // "flatPath": "projects/{project}/global/backendServices", + // "httpMethod": "POST", + // "id": "compute.backendServices.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.list": + +type BackendServicesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of BackendService resources available to the +// specified project. +// +// - project: Project ID for this request. +func (r *BackendServicesService) List(project string) *BackendServicesListCall { + c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname
+// eq unquoted literal` `fieldname eq 'single quoted literal'`
+// `fieldname eq "double quoted literal"` `(fieldname1 eq literal)
+// (fieldname2 ne "literal")` The literal value is interpreted as a
+// regular expression using Google RE2 library syntax. The literal value
+// must match the entire field. For example, to filter for instances
+// that do not end with name "instance", you would use `name ne
+// .*instance`.
+func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than `maxResults`, Compute Engine returns
+// a `nextPageToken` that can be used to get the next page of results in
+// subsequent list requests. Acceptable values are `0` to `500`,
+// inclusive. (Default: `500`)
+func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// OrderBy sets the optional parameter "orderBy": Sorts list results by
+// a certain order. By default, results are returned in alphanumerical
+// order based on the resource name. You can also sort results in
+// descending order based on the creation timestamp using
+// `orderBy="creationTimestamp desc"`. This sorts results based on the
+// `creationTimestamp` field in reverse chronological order (newest
+// result first). Use this to sort resources like operations so that the
+// newest operation is returned first. Currently, only sorting by `name`
+// or `creationTimestamp desc` is supported.
+func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall {
+ c.urlParams_.Set("orderBy", orderBy)
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set `pageToken` to the `nextPageToken` returned by a
+// previous list request to get the next page of results.
+func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// ReturnPartialSuccess sets the optional parameter
+// "returnPartialSuccess": Opt-in for partial success behavior which
+// provides partial results in case of failure. The default value is
+// false.
+func (c *BackendServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendServicesListCall {
+ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendServiceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BackendServiceList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of BackendService resources available to the specified project.", + // "flatPath": "projects/{project}/global/backendServices", + // "httpMethod": "GET", + // "id": "compute.backendServices.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/global/backendServices", + // "response": { + // "$ref": "BackendServiceList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.backendServices.patch": + +type BackendServicesPatchCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified BackendService resource with the data +// included in the request. For more information, see Backend services +// overview. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. +// +// - backendService: Name of the BackendService resource to patch. +// - project: Project ID for this request. +func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { + c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.backendservice = backendservice + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified BackendService resource with the data included in the request. For more information, see Backend services overview. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/global/backendServices/{backendService}", + // "httpMethod": "PATCH", + // "id": "compute.backendServices.patch", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.setEdgeSecurityPolicy": + +type BackendServicesSetEdgeSecurityPolicyCall struct { + s *Service + project string + backendService string + securitypolicyreference *SecurityPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetEdgeSecurityPolicy: Sets the edge security policy for the +// specified backend service. +// +// - backendService: Name of the BackendService resource to which the +// edge security policy should be set. The name should conform to +// RFC1035. +// - project: Project ID for this request. +func (r *BackendServicesService) SetEdgeSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetEdgeSecurityPolicyCall { + c := &BackendServicesSetEdgeSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.securitypolicyreference = securitypolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. 
This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendServicesSetEdgeSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetEdgeSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesSetEdgeSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetEdgeSecurityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesSetEdgeSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetEdgeSecurityPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesSetEdgeSecurityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesSetEdgeSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.setEdgeSecurityPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the edge security policy for the specified backend service.", + // "flatPath": "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy", + // "httpMethod": "POST", + // "id": "compute.backendServices.setEdgeSecurityPolicy", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the edge security policy should be set. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy", + // "request": { + // "$ref": "SecurityPolicyReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.setIamPolicy": + +type BackendServicesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. 
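As a usage illustration for the SetIamPolicy call documented just above, here is a minimal sketch of replacing a backend service's IAM policy. It assumes a *compute.Service built with compute.NewService and Application Default Credentials; the project name, resource name, role, and member are placeholders rather than values from this patch, and SetIamPolicy replaces the entire policy, so every binding to keep must be resent.

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumes Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Replace the backend service's IAM policy with a single binding.
	policy, err := svc.BackendServices.SetIamPolicy("my-project", "my-backend-service",
		&compute.GlobalSetPolicyRequest{
			Policy: &compute.Policy{
				Bindings: []*compute.Binding{{
					Role:    "roles/compute.admin", // placeholder role
					Members: []string{"group:net-admins@example.com"},
				}},
			},
		}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("policy etag after update:", policy.Etag)
}
```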
+func (r *BackendServicesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *BackendServicesSetIamPolicyCall { + c := &BackendServicesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesSetIamPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesSetIamPolicyCall) Context(ctx context.Context) *BackendServicesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BackendServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/global/backendServices/{resource}/setIamPolicy", + // "httpMethod": "POST", + // "id": "compute.backendServices.setIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{resource}/setIamPolicy", + // "request": { + // "$ref": "GlobalSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.setSecurityPolicy": + +type BackendServicesSetSecurityPolicyCall struct { + s *Service + project string + backendService string + securitypolicyreference *SecurityPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetSecurityPolicy: Sets the Google Cloud Armor security policy for +// the specified backend service. For more information, see Google Cloud +// Armor Overview +// +// - backendService: Name of the BackendService resource to which the +// security policy should be set. The name should conform to RFC1035. +// - project: Project ID for this request. +func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { + c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.securitypolicyreference = securitypolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. 
This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetSecurityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetSecurityPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.setSecurityPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", + // "flatPath": "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy", + // "httpMethod": "POST", + // "id": "compute.backendServices.setSecurityPolicy", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy", + // "request": { + // "$ref": "SecurityPolicyReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.update": + +type BackendServicesUpdateCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the specified BackendService resource with the data +// included in the request. For more information, see Backend services +// overview. +// +// - backendService: Name of the BackendService resource to update. +// - project: Project ID for this request. 
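The Update call introduced above is a full PUT, so the usual pattern is read-modify-write, and the RequestId parameter lets the server discard an accidental duplicate when a timed-out request is retried. A rough sketch, continuing with the svc and ctx from the earlier example; it relies on BackendServices.Get (part of this generated package, though not in this hunk) and pulls in github.com/google/uuid purely as one assumed way to mint a request ID.

```go
// Update is a full PUT, so read the resource, modify it, and write it back.
bs, err := svc.BackendServices.Get("my-project", "my-backend-service").Context(ctx).Do()
if err != nil {
	log.Fatal(err)
}
bs.TimeoutSec = 60 // illustrative change only

// A client-chosen UUID lets the server ignore a duplicate if this
// request times out and is retried with the same ID.
op, err := svc.BackendServices.Update("my-project", "my-backend-service", bs).
	RequestId(uuid.NewString()). // github.com/google/uuid, an assumed helper
	Context(ctx).
	Do()
if err != nil {
	log.Fatal(err)
}
log.Printf("update accepted: operation %s is %s", op.Name, op.Status)
```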
+func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { + c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.backendservice = backendservice + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
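Because Do returns exactly one of *Operation or error, callers generally need to tell three outcomes apart: a transport or context failure, a non-2xx HTTP status (surfaced as *googleapi.Error with its headers), and an Operation that completed but reports its own errors. A hypothetical helper, sketched under the assumption that it receives the pair returned by a Do call such as the one below:

```go
package computeexample

import (
	"errors"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// checkOp is a hypothetical helper, not part of the generated client, for
// inspecting the (*Operation, error) pair returned by Operation-producing calls.
func checkOp(op *compute.Operation, err error) error {
	if err != nil {
		var gerr *googleapi.Error
		if errors.As(err, &gerr) {
			// Non-2xx HTTP status: code, message and headers are preserved.
			return fmt.Errorf("compute API returned HTTP %d: %s", gerr.Code, gerr.Message)
		}
		return fmt.Errorf("transport or context error: %w", err)
	}
	// A returned Operation can still carry an asynchronous failure.
	if op.Error != nil {
		for _, e := range op.Error.Errors {
			log.Printf("operation error %s at %s: %s", e.Code, e.Location, e.Message)
		}
		return fmt.Errorf("operation %s finished with errors", op.Name)
	}
	return nil
}
```

In the Update sketch above, the trailing error handling could then collapse into a single checkOp(op, err) call.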
+func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified BackendService resource with the data included in the request. For more information, see Backend services overview.", + // "flatPath": "projects/{project}/global/backendServices/{backendService}", + // "httpMethod": "PUT", + // "id": "compute.backendServices.update", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.diskTypes.aggregatedList": + +type DiskTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of disk types. +// +// - project: Project ID for this request. +func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { + c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *DiskTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *DiskTypesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. 
Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc"`. This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *DiskTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DiskTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
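Tying the optional parameters above together, here is a sketch of combining the documented filter grammar, MaxResults, and the Pages helper (defined a little further down for this call) to walk every zone's disk types. The project name and filter string are placeholders, and the example assumes the aggregated-list shape used throughout this package, where Items maps a scope name to a scoped list of disk types.

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumes Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// AIP-160 style filter, as described on the "filter" parameter above.
	call := svc.DiskTypes.AggregatedList("my-project").
		Filter(`name = "pd-ssd"`).
		MaxResults(100)

	// Pages follows nextPageToken automatically; returning a non-nil error
	// from the callback stops the iteration early.
	err = call.Pages(ctx, func(page *compute.DiskTypeAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, dt := range scoped.DiskTypes {
				fmt.Printf("%s: %s\n", scope, dt.Name)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```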
+func (c *DiskTypesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/diskTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.diskTypes.aggregatedList" call. +// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &DiskTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of disk types.", + // "flatPath": "projects/{project}/aggregated/diskTypes", + // "httpMethod": "GET", + // "id": "compute.diskTypes.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/diskTypes", + // "response": { + // "$ref": "DiskTypeAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.diskTypes.get": + +type DiskTypesGetCall struct { + s *Service + project string + zone string + diskType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified disk type. Gets a list of available disk +// types by making a list() request. +// +// - diskType: Name of the disk type to return. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { + c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.diskType = diskType + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
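The IfNoneMatch comment above describes a conditional-fetch pattern: remember the ETag from a first Get, send it back on later calls, and treat an http.StatusNotModified reply (detected with googleapi.IsNotModified) as permission to reuse the cached copy. A sketch that reuses svc and ctx from the earlier examples, imports google.golang.org/api/googleapi as in the helper above, and assumes the server actually returns an ETag header for this resource:

```go
// First fetch: keep the ETag exposed through the embedded googleapi.ServerResponse.
dt, err := svc.DiskTypes.Get("my-project", "us-central1-a", "pd-ssd").Context(ctx).Do()
if err != nil {
	log.Fatal(err)
}
etag := dt.Header.Get("Etag") // assumes an ETag header was sent

// Later fetch: ask for a full body only if the resource has changed.
fresh, err := svc.DiskTypes.Get("my-project", "us-central1-a", "pd-ssd").
	IfNoneMatch(etag).Context(ctx).Do()
switch {
case googleapi.IsNotModified(err):
	fmt.Println("unchanged, reusing cached copy of", dt.Name)
case err != nil:
	log.Fatal(err)
default:
	fmt.Println("changed, new description:", fresh.Description)
}
```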
+func (c *DiskTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/diskTypes/{diskType}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "diskType": c.diskType, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.diskTypes.get" call. +// Exactly one of *DiskType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &DiskType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified disk type. 
Gets a list of available disk types by making a list() request.", + // "flatPath": "projects/{project}/zones/{zone}/diskTypes/{diskType}", + // "httpMethod": "GET", + // "id": "compute.diskTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "diskType" + // ], + // "parameters": { + // "diskType": { + // "description": "Name of the disk type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/diskTypes/{diskType}", + // "response": { + // "$ref": "DiskType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.diskTypes.list": + +type DiskTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of disk types available to the specified +// project. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { + c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. 
However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *DiskTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DiskTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DiskTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/diskTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.diskTypes.list" call. +// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *DiskTypeList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &DiskTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of disk types available to the specified project.", + // "flatPath": "projects/{project}/zones/{zone}/diskTypes", + // "httpMethod": "GET", + // "id": "compute.diskTypes.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/diskTypes", + // "response": { + // "$ref": "DiskTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.disks.addResourcePolicies": + +type DisksAddResourcePoliciesCall struct { + s *Service + project string + zone string + disk string + disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddResourcePolicies: Adds existing resource policies to a disk. You +// can only add one policy which will be applied to this disk for +// scheduling snapshot creation. +// +// - disk: The disk name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *DisksService) AddResourcePolicies(project string, zone string, disk string, disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest) *DisksAddResourcePoliciesCall { + c := &DisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.disksaddresourcepoliciesrequest = disksaddresourcepoliciesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. 
The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *DisksAddResourcePoliciesCall) RequestId(requestId string) *DisksAddResourcePoliciesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksAddResourcePoliciesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksAddResourcePoliciesCall) Context(ctx context.Context) *DisksAddResourcePoliciesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksAddResourcePoliciesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksaddresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/addResourcePolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.addResourcePolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds existing resource policies to a disk. 
You can only add one policy which will be applied to this disk for scheduling snapshot creation.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/addResourcePolicies", + // "httpMethod": "POST", + // "id": "compute.disks.addResourcePolicies", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{disk}/addResourcePolicies", + // "request": { + // "$ref": "DisksAddResourcePoliciesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.aggregatedList": + +type DisksAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of persistent disks. +// +// - project: Project ID for this request. +func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { + c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. 
For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *DisksAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *DisksAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). 
Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *DisksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DisksAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/disks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.aggregatedList" call. +// Exactly one of *DiskAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *DiskAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &DiskAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of persistent disks.", + // "flatPath": "projects/{project}/aggregated/disks", + // "httpMethod": "GET", + // "id": "compute.disks.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. 
The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/disks", + // "response": { + // "$ref": "DiskAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
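For orientation, a minimal sketch of how a consumer of this generated package might drive the Pages helper defined just below, combining the Filter and MaxResults options documented above. The compute.NewService constructor, the Service.Disks field, the import path, the project ID, and the filter field name are assumptions not shown in this hunk; credentials are taken from the environment.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this vendored package
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor; uses application default credentials
	if err != nil {
		log.Fatal(err)
	}
	call := svc.Disks.AggregatedList("my-project"). // hypothetical project ID
							Filter(`status = "READY"`). // AIP-160 style expression, per the Filter doc above
							MaxResults(100)
	pages := 0
	err = call.Pages(ctx, func(page *compute.DiskAggregatedList) error {
		pages++
		fmt.Printf("received page %d (next token %q)\n", pages, page.NextPageToken)
		return nil // a non-nil error here halts the iteration, per the Pages doc
	})
	if err != nil {
		log.Fatal(err)
	}
}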
+func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.disks.createSnapshot": + +type DisksCreateSnapshotCall struct { + s *Service + project string + zone string + disk string + snapshot *Snapshot + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// CreateSnapshot: Creates a snapshot of a specified persistent disk. +// For regular snapshot creation, consider using snapshots.insert +// instead, as that method supports more features, such as creating +// snapshots in a project different from the source disk project. +// +// - disk: Name of the persistent disk to snapshot. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { + c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.snapshot = snapshot + return c +} + +// GuestFlush sets the optional parameter "guestFlush": [Input Only] +// Whether to attempt an application consistent snapshot by informing +// the OS to prepare for the snapshot process. +func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { + c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
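The RequestId comment above describes retry idempotency at some length; a short sketch of what that looks like from the caller's side may help. It reuses a *compute.Service built as in the earlier sketch; the Snapshot.Name field, the zone, the disk name, and the literal UUID are assumptions for illustration only.

func snapshotDisk(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	// The request ID lets the server ignore a duplicate retry of the same call.
	// A fresh random UUID would normally be generated; this literal is illustrative.
	reqID := "f47ac10b-58cc-4372-a567-0e02b2c3d479"
	snap := &compute.Snapshot{Name: "nightly-backup"} // Name field assumed; the Snapshot struct is not in this hunk
	return svc.Disks.CreateSnapshot("my-project", "us-central1-a", "data-disk", snap).
		RequestId(reqID).
		GuestFlush(true). // ask the guest OS to quiesce first, per the GuestFlush doc above
		Context(ctx).
		Do()
}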
+func (c *DisksCreateSnapshotCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.createSnapshot" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a snapshot of a specified persistent disk. 
For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/createSnapshot", + // "httpMethod": "POST", + // "id": "compute.disks.createSnapshot", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to snapshot.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "guestFlush": { + // "description": "[Input Only] Whether to attempt an application consistent snapshot by informing the OS to prepare for the snapshot process.", + // "location": "query", + // "type": "boolean" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{disk}/createSnapshot", + // "request": { + // "$ref": "Snapshot" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.delete": + +type DisksDeleteCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified persistent disk. Deleting a disk +// removes its data permanently and is irreversible. However, deleting a +// disk does not delete any snapshots previously made from the disk. You +// must separately delete snapshots. +// +// - disk: Name of the persistent disk to delete. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { + c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}", + // "httpMethod": "DELETE", + // "id": "compute.disks.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{disk}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.get": + +type DisksGetCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns a specified persistent disk. Gets a list of available +// persistent disks by making a list() request. +// +// - disk: Name of the persistent disk to return. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
+func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { + c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.get" call. +// Exactly one of *Disk or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Disk.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Disk{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}", + // "httpMethod": "GET", + // "id": "compute.disks.get", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{disk}", + // "response": { + // "$ref": "Disk" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.disks.getIamPolicy": + +type DisksGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. +func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { + c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *DisksGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *DisksGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
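The IfNoneMatch/ETag behavior documented on the read calls above (and on DisksGetCall in particular) is easiest to see in a caller-side sketch. It assumes the googleapi package at google.golang.org/api/googleapi and an "Etag" response header name, neither confirmed by this hunk; the raw headers are reachable through the embedded ServerResponse shown in the Do method above.

func getDiskIfChanged(ctx context.Context, svc *compute.Service, lastETag string) (*compute.Disk, string, error) {
	call := svc.Disks.Get("my-project", "us-central1-a", "data-disk").Context(ctx)
	if lastETag != "" {
		call = call.IfNoneMatch(lastETag) // ask for 304 if the ETag still matches
	}
	disk, err := call.Do()
	if googleapi.IsNotModified(err) {
		// Nothing changed since lastETag; keep the cached copy.
		return nil, lastETag, nil
	}
	if err != nil {
		return nil, "", err
	}
	// "Etag" header name is an assumption; only the raw headers are exposed here.
	return disk, disk.Header.Get("Etag"), nil
}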
+func (c *DisksGetIamPolicyCall) Fields(s ...googleapi.Field) *DisksGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksGetIamPolicyCall) IfNoneMatch(entityTag string) *DisksGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksGetIamPolicyCall) Context(ctx context.Context) *DisksGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.disks.getIamPolicy", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.disks.insert": + +type DisksInsertCall struct { + s *Service + project string + zone string + disk *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a persistent disk in the specified project using the +// data in the request. You can create a disk from a source +// (sourceImage, sourceSnapshot, or sourceDisk) or create an empty 500 +// GB data disk by omitting all properties. You can also create a disk +// that is larger than the default size by specifying the sizeGb +// property. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { + c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// SourceImage sets the optional parameter "sourceImage": Source image +// to restore onto a disk. This field is optional. 
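A small sketch of the Insert flow described above, creating a disk from a source image rather than an empty default-size disk. The Disk.Name and Disk.SizeGb fields, the zone, and the image path are assumptions not shown in this hunk; polling the returned Operation to completion is omitted.

func createBootDisk(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	disk := &compute.Disk{Name: "example-disk", SizeGb: 50} // fields assumed; Disk struct not in this hunk
	return svc.Disks.Insert("my-project", "us-central1-a", disk).
		SourceImage("projects/debian-cloud/global/images/family/debian-11"). // hypothetical image path
		Context(ctx).
		Do()
}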
+func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { + c.urlParams_.Set("sourceImage", sourceImage) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk from a source (sourceImage, sourceSnapshot, or sourceDisk) or create an empty 500 GB data disk by omitting all properties. 
You can also create a disk that is larger than the default size by specifying the sizeGb property.", + // "flatPath": "projects/{project}/zones/{zone}/disks", + // "httpMethod": "POST", + // "id": "compute.disks.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sourceImage": { + // "description": "Source image to restore onto a disk. This field is optional.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks", + // "request": { + // "$ref": "Disk" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.list": + +type DisksListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of persistent disks contained within the +// specified zone. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *DisksService) List(project string, zone string) *DisksListCall { + c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. 
The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *DisksListCall) Filter(filter string) *DisksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. 
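+
+// A minimal sketch of combining the Filter, OrderBy and MaxResults setters
+// above on a single Disks.List call. The filter string follows the expression
+// syntax documented for "filter"; the Service.Disks field and the project and
+// zone literals are assumptions/placeholders.
+func exampleBuildDisksList(ctx context.Context, svc *Service) (*DiskList, error) {
+	// Only READY disks, newest first, at most 50 results per page.
+	return svc.Disks.List("my-project", "us-central1-a").
+		Filter("status = READY").
+		OrderBy("creationTimestamp desc").
+		MaxResults(50).
+		Context(ctx).
+		Do()
+}
+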
+func (c *DisksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DisksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.list" call. +// Exactly one of *DiskList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &DiskList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of persistent disks contained within the specified zone.", + // "flatPath": "projects/{project}/zones/{zone}/disks", + // "httpMethod": "GET", + // "id": "compute.disks.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks", + // "response": { + // "$ref": "DiskList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.disks.removeResourcePolicies": + +type DisksRemoveResourcePoliciesCall struct { + s *Service + project string + zone string + disk string + disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RemoveResourcePolicies: Removes resource policies from a disk. +// +// - disk: The disk name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
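+
+// A minimal sketch of walking every page of list results with the Pages
+// helper defined above, instead of managing nextPageToken by hand. It assumes
+// DiskList exposes an Items slice of *Disk with Name and SizeGb fields, as in
+// the published compute/v1 types; project and zone are placeholders.
+func exampleListAllDisks(ctx context.Context, svc *Service) error {
+	return svc.Disks.List("my-project", "us-central1-a").Pages(ctx, func(page *DiskList) error {
+		for _, d := range page.Items {
+			fmt.Println(d.Name, d.SizeGb)
+		}
+		// Returning a non-nil error here would halt the iteration.
+		return nil
+	})
+}
+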
+func (r *DisksService) RemoveResourcePolicies(project string, zone string, disk string, disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest) *DisksRemoveResourcePoliciesCall { + c := &DisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.disksremoveresourcepoliciesrequest = disksremoveresourcepoliciesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *DisksRemoveResourcePoliciesCall) RequestId(requestId string) *DisksRemoveResourcePoliciesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksRemoveResourcePoliciesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksRemoveResourcePoliciesCall) Context(ctx context.Context) *DisksRemoveResourcePoliciesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksRemoveResourcePoliciesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksremoveresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/removeResourcePolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.removeResourcePolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes resource policies from a disk.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/removeResourcePolicies", + // "httpMethod": "POST", + // "id": "compute.disks.removeResourcePolicies", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{disk}/removeResourcePolicies", + // "request": { + // "$ref": "DisksRemoveResourcePoliciesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.resize": + +type DisksResizeCall struct { + s *Service + project string + zone string + disk string + disksresizerequest *DisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Resize: Resizes the specified persistent disk. You can only increase +// the size of the disk. +// +// - disk: The name of the persistent disk. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { + c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.disksresizerequest = disksresizerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
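+
+// A minimal sketch of the requestId idempotency pattern documented above,
+// applied to Disks.RemoveResourcePolicies: the caller supplies one UUID and
+// reuses it on retries so the server can discard duplicates. It assumes
+// DisksRemoveResourcePoliciesRequest lists the policy URLs in a
+// ResourcePolicies slice, as in the published compute/v1 types; all literals
+// are placeholders.
+func exampleRemoveResourcePolicy(ctx context.Context, svc *Service, requestID string) (*Operation, error) {
+	req := &DisksRemoveResourcePoliciesRequest{
+		ResourcePolicies: []string{
+			"https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/resourcePolicies/my-schedule",
+		},
+	}
+	return svc.Disks.RemoveResourcePolicies("my-project", "us-central1-a", "my-disk", req).
+		RequestId(requestID). // reuse the same ID if this call is retried
+		Context(ctx).
+		Do()
+}
+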
+func (c *DisksResizeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/resize") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/resize", + // "httpMethod": "POST", + // "id": "compute.disks.resize", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The name of the persistent disk.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{disk}/resize", + // "request": { + // "$ref": "DisksResizeRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.setIamPolicy": + +type DisksSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. +func (r *DisksService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *DisksSetIamPolicyCall { + c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksSetIamPolicyCall) Context(ctx context.Context) *DisksSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{resource}/setIamPolicy") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/setIamPolicy", + // "httpMethod": "POST", + // "id": "compute.disks.setIamPolicy", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{resource}/setIamPolicy", + // "request": { + // "$ref": "ZoneSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.setLabels": + +type DisksSetLabelsCall struct { + s *Service + project string + zone string + resource string + zonesetlabelsrequest *ZoneSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a disk. To learn more about labels, +// read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. 
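+
+// A minimal sketch of an IAM read-modify-write built from the GetIamPolicy and
+// SetIamPolicy calls above: fetch the current policy, append a binding, write
+// it back. It assumes Policy.Bindings, Binding.Role/Members and the
+// ZoneSetPolicyRequest.Policy field match the published compute/v1 types; the
+// role and member are placeholders.
+func exampleGrantDiskRole(ctx context.Context, svc *Service) (*Policy, error) {
+	policy, err := svc.Disks.GetIamPolicy("my-project", "us-central1-a", "my-disk").Context(ctx).Do()
+	if err != nil {
+		return nil, err
+	}
+	// The policy's embedded etag guards the write against concurrent edits.
+	policy.Bindings = append(policy.Bindings, &Binding{
+		Role:    "roles/compute.storageAdmin",
+		Members: []string{"user:alice@example.com"},
+	})
+	return svc.Disks.SetIamPolicy("my-project", "us-central1-a", "my-disk",
+		&ZoneSetPolicyRequest{Policy: policy}).Context(ctx).Do()
+}
+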
+func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { + c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.zonesetlabelsrequest = zonesetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
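+
+// A minimal sketch of the error handling these Do doc comments describe: a
+// non-2xx response surfaces as *googleapi.Error, which keeps the status code,
+// message and response headers. Treating 429 and 5xx as retryable is only an
+// illustrative assumption, not guidance from this package.
+func exampleHandleDoError(err error) (retryable bool) {
+	if err == nil {
+		return false
+	}
+	if gerr, ok := err.(*googleapi.Error); ok {
+		// Status code and response headers are preserved on the error value.
+		fmt.Println("compute API error:", gerr.Code, gerr.Message)
+		return gerr.Code == http.StatusTooManyRequests || gerr.Code >= 500
+	}
+	return false
+}
+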
+func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.disks.setLabels", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{resource}/setLabels", + // "request": { + // "$ref": "ZoneSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.testIamPermissions": + +type DisksTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. 
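+
+// A minimal sketch of Disks.SetLabels as documented above. The label
+// fingerprint would normally come from the disk's current labelFingerprint
+// (e.g. from a prior get); ZoneSetLabelsRequest's LabelFingerprint and Labels
+// fields are assumed from the published compute/v1 types, and the literals are
+// placeholders.
+func exampleSetDiskLabels(ctx context.Context, svc *Service, fingerprint string) (*Operation, error) {
+	req := &ZoneSetLabelsRequest{
+		LabelFingerprint: fingerprint, // guards against concurrent label edits
+		Labels: map[string]string{
+			"env":  "staging",
+			"team": "storage",
+		},
+	}
+	return svc.Disks.SetLabels("my-project", "us-central1-a", "my-disk", req).Context(ctx).Do()
+}
+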
+func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { + c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.disks.testIamPermissions", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.externalVpnGateways.delete": + +type ExternalVpnGatewaysDeleteCall struct { + s *Service + project string + externalVpnGateway string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified externalVpnGateway. +// +// - externalVpnGateway: Name of the externalVpnGateways to delete. +// - project: Project ID for this request. +func (r *ExternalVpnGatewaysService) Delete(project string, externalVpnGateway string) *ExternalVpnGatewaysDeleteCall { + c := &ExternalVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.externalVpnGateway = externalVpnGateway + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. 
The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *ExternalVpnGatewaysDeleteCall) RequestId(requestId string) *ExternalVpnGatewaysDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ExternalVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysDeleteCall) Context(ctx context.Context) *ExternalVpnGatewaysDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/externalVpnGateways/{externalVpnGateway}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "externalVpnGateway": c.externalVpnGateway, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified externalVpnGateway.", + // "flatPath": "projects/{project}/global/externalVpnGateways/{externalVpnGateway}", + // "httpMethod": "DELETE", + // "id": "compute.externalVpnGateways.delete", + // "parameterOrder": [ + // "project", + // "externalVpnGateway" + // ], + // "parameters": { + // "externalVpnGateway": { + // "description": "Name of the externalVpnGateways to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/externalVpnGateways/{externalVpnGateway}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.externalVpnGateways.get": + +type ExternalVpnGatewaysGetCall struct { + s *Service + project string + externalVpnGateway string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified externalVpnGateway. Get a list of +// available externalVpnGateways by making a list() request. +// +// - externalVpnGateway: Name of the externalVpnGateway to return. +// - project: Project ID for this request. +func (r *ExternalVpnGatewaysService) Get(project string, externalVpnGateway string) *ExternalVpnGatewaysGetCall { + c := &ExternalVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.externalVpnGateway = externalVpnGateway + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
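+
+// A minimal sketch of ExternalVpnGateways.Delete as defined above. The call
+// returns a global Operation; polling it (via the GlobalOperations service,
+// not shown here) is left to the caller, so only the operation name is handed
+// back. Service.ExternalVpnGateways, Operation.Name and the literals are
+// assumptions/placeholders.
+func exampleDeleteExternalVpnGateway(ctx context.Context, svc *Service, requestID string) (string, error) {
+	op, err := svc.ExternalVpnGateways.Delete("my-project", "my-external-gateway").
+		RequestId(requestID).
+		Context(ctx).
+		Do()
+	if err != nil {
+		return "", err
+	}
+	return op.Name, nil
+}
+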
+func (c *ExternalVpnGatewaysGetCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ExternalVpnGatewaysGetCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysGetCall) Context(ctx context.Context) *ExternalVpnGatewaysGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/externalVpnGateways/{externalVpnGateway}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "externalVpnGateway": c.externalVpnGateway, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.get" call. +// Exactly one of *ExternalVpnGateway or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ExternalVpnGateway.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGateway, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ExternalVpnGateway{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified externalVpnGateway. 
Get a list of available externalVpnGateways by making a list() request.", + // "flatPath": "projects/{project}/global/externalVpnGateways/{externalVpnGateway}", + // "httpMethod": "GET", + // "id": "compute.externalVpnGateways.get", + // "parameterOrder": [ + // "project", + // "externalVpnGateway" + // ], + // "parameters": { + // "externalVpnGateway": { + // "description": "Name of the externalVpnGateway to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/externalVpnGateways/{externalVpnGateway}", + // "response": { + // "$ref": "ExternalVpnGateway" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.externalVpnGateways.insert": + +type ExternalVpnGatewaysInsertCall struct { + s *Service + project string + externalvpngateway *ExternalVpnGateway + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a ExternalVpnGateway in the specified project using +// the data included in the request. +// +// - project: Project ID for this request. +func (r *ExternalVpnGatewaysService) Insert(project string, externalvpngateway *ExternalVpnGateway) *ExternalVpnGatewaysInsertCall { + c := &ExternalVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.externalvpngateway = externalvpngateway + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *ExternalVpnGatewaysInsertCall) RequestId(requestId string) *ExternalVpnGatewaysInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ExternalVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysInsertCall) Context(ctx context.Context) *ExternalVpnGatewaysInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
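+//
+// Usage sketch (illustrative, not generated): attaching a custom header to an
+// insert call before executing it. The header name and value, the svc value,
+// and the gateway literal are assumptions for this example only.
+//
+//	call := svc.ExternalVpnGateways.Insert("my-project", &compute.ExternalVpnGateway{
+//		Name: "my-gateway", // hypothetical resource
+//	})
+//	call.Header().Set("X-Debug-Trace", "on") // hypothetical header
+//	op, err := call.Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(op.Name)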
+func (c *ExternalVpnGatewaysInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.externalvpngateway) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/externalVpnGateways") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a ExternalVpnGateway in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/externalVpnGateways", + // "httpMethod": "POST", + // "id": "compute.externalVpnGateways.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/externalVpnGateways", + // "request": { + // "$ref": "ExternalVpnGateway" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + } -// method id "compute.acceleratorTypes.aggregatedList": +// method id "compute.externalVpnGateways.list": -type AcceleratorTypesAggregatedListCall struct { +type ExternalVpnGatewaysListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -53381,29 +70061,33 @@ type AcceleratorTypesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of accelerator types. +// List: Retrieves the list of ExternalVpnGateway available to the +// specified project. // // - project: Project ID for this request. -func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall { - c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ExternalVpnGatewaysService) List(project string) *ExternalVpnGatewaysListCall { + c := &ExternalVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. 
+// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -53411,32 +70095,29 @@ func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTyp // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *ExternalVpnGatewaysListCall) Filter(filter string) *ExternalVpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c } -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *AcceleratorTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than `maxResults`, Compute Engine returns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) MaxResults(maxResults int64) *ExternalVpnGatewaysListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -53450,7 +70131,7 @@ func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *Accel // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. 
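+//
+// Usage sketch (illustrative, not generated): combining the optional list
+// parameters described above. The svc value, project name, filter literal,
+// and the list type's Items field are assumptions for this example only.
+//
+//	list, err := svc.ExternalVpnGateways.List("my-project").
+//		Filter(`name != example-gateway`).
+//		OrderBy("creationTimestamp desc").
+//		MaxResults(50).
+//		Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(len(list.Items))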
-func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) OrderBy(orderBy string) *ExternalVpnGatewaysListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -53458,7 +70139,7 @@ func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *Accelerato // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) PageToken(pageToken string) *ExternalVpnGatewaysListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -53467,7 +70148,7 @@ func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *Accele // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *AcceleratorTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ExternalVpnGatewaysListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -53475,7 +70156,7 @@ func (c *AcceleratorTypesAggregatedListCall) ReturnPartialSuccess(returnPartialS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53485,7 +70166,7 @@ func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *Accel // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysListCall { c.ifNoneMatch_ = entityTag return c } @@ -53493,21 +70174,21 @@ func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *Acce // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesAggregatedListCall) Context(ctx context.Context) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) Context(ctx context.Context) *ExternalVpnGatewaysListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { +func (c *ExternalVpnGatewaysListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -53520,7 +70201,7 @@ func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/acceleratorTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/externalVpnGateways") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -53533,33 +70214,33 @@ func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.aggregatedList" call. -// Exactly one of *AcceleratorTypeAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *AcceleratorTypeAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.externalVpnGateways.list" call. +// Exactly one of *ExternalVpnGatewayList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ExternalVpnGatewayList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeAggregatedList, error) { +func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGatewayList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &AcceleratorTypeAggregatedList{ + ret := &ExternalVpnGatewayList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53571,24 +70252,19 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of accelerator types.", - // "flatPath": "projects/{project}/aggregated/acceleratorTypes", + // "description": "Retrieves the list of ExternalVpnGateway available to the specified project.", + // "flatPath": "projects/{project}/global/externalVpnGateways", // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.aggregatedList", + // "id": "compute.externalVpnGateways.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -53620,9 +70296,9 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/acceleratorTypes", + // "path": "projects/{project}/global/externalVpnGateways", // "response": { - // "$ref": "AcceleratorTypeAggregatedList" + // "$ref": "ExternalVpnGatewayList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -53636,7 +70312,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
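+//
+// Usage sketch (illustrative, not generated): Pages drives the
+// pageToken/nextPageToken loop on the caller's behalf. The svc value, project
+// name, and the list type's Items field are assumptions for this example only.
+//
+//	err := svc.ExternalVpnGateways.List("my-project").Pages(ctx, func(page *compute.ExternalVpnGatewayList) error {
+//		for _, gw := range page.Items {
+//			fmt.Println(gw.Name)
+//		}
+//		return nil // a non-nil error stops the iteration
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}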
-func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(*AcceleratorTypeAggregatedList) error) error { +func (c *ExternalVpnGatewaysListCall) Pages(ctx context.Context, f func(*ExternalVpnGatewayList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -53654,122 +70330,112 @@ func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(* } } -// method id "compute.acceleratorTypes.get": +// method id "compute.externalVpnGateways.setLabels": -type AcceleratorTypesGetCall struct { - s *Service - project string - zone string - acceleratorType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ExternalVpnGatewaysSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified accelerator type. +// SetLabels: Sets the labels on an ExternalVpnGateway. To learn more +// about labels, read the Labeling Resources documentation. // -// - acceleratorType: Name of the accelerator type to return. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { - c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *ExternalVpnGatewaysService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ExternalVpnGatewaysSetLabelsCall { + c := &ExternalVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.acceleratorType = acceleratorType + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypesGetCall { +func (c *ExternalVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesGetCall) Context(ctx context.Context) *AcceleratorTypesGetCall { +func (c *ExternalVpnGatewaysSetLabelsCall) Context(ctx context.Context) *ExternalVpnGatewaysSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AcceleratorTypesGetCall) Header() http.Header { +func (c *ExternalVpnGatewaysSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/externalVpnGateways/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "acceleratorType": c.acceleratorType, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.get" call. -// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx +// Do executes the "compute.externalVpnGateways.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *AcceleratorType.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &AcceleratorType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53781,23 +70447,171 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator } return ret, nil // { - // "description": "Returns the specified accelerator type.", - // "flatPath": "projects/{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", - // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.get", + // "description": "Sets the labels on an ExternalVpnGateway. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/global/externalVpnGateways/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.externalVpnGateways.setLabels", // "parameterOrder": [ // "project", - // "zone", - // "acceleratorType" + // "resource" // ], // "parameters": { - // "acceleratorType": { - // "description": "Name of the accelerator type to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/externalVpnGateways/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.externalVpnGateways.testIamPermissions": + +type ExternalVpnGatewaysTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *ExternalVpnGatewaysService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ExternalVpnGatewaysTestIamPermissionsCall { + c := &ExternalVpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
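+//
+// Usage sketch (illustrative, not generated): checking which of a set of
+// permissions the caller holds on a gateway. The permission string, the svc
+// value, and the identifiers are assumptions for this example only.
+//
+//	req := &compute.TestPermissionsRequest{
+//		Permissions: []string{"compute.externalVpnGateways.get"}, // hypothetical permission name
+//	}
+//	resp, err := svc.ExternalVpnGateways.TestIamPermissions("my-project", "my-gateway", req).Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(resp.Permissions) // the subset that is granted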
+func (c *ExternalVpnGatewaysTestIamPermissionsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Context(ctx context.Context) *ExternalVpnGatewaysTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/externalVpnGateways/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/global/externalVpnGateways/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.externalVpnGateways.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -53805,17 +70619,20 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + // "path": "projects/{project}/global/externalVpnGateways/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "AcceleratorType" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -53826,188 +70643,626 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator } -// method id "compute.acceleratorTypes.list": +// method id "compute.firewallPolicies.addAssociation": -type AcceleratorTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FirewallPoliciesAddAssociationCall struct { + s *Service + firewallPolicy string + firewallpolicyassociation *FirewallPolicyAssociation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of accelerator types that are available to the -// specified project. +// AddAssociation: Inserts an association for the specified firewall +// policy. // -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { - c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone +// - firewallPolicy: Name of the firewall policy to update. 
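+//
+// Usage sketch (illustrative, not generated): attaching an association to a
+// firewall policy. The association field values, the numeric policy name, and
+// the svc value are assumptions for this example only.
+//
+//	assoc := &compute.FirewallPolicyAssociation{
+//		Name:             "my-association",           // hypothetical
+//		AttachmentTarget: "organizations/0123456789", // hypothetical target
+//	}
+//	op, err := svc.FirewallPolicies.AddAssociation("123456789012", assoc).
+//		ReplaceExistingAssociation(true).
+//		Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = op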
+func (r *FirewallPoliciesService) AddAssociation(firewallPolicy string, firewallpolicyassociation *FirewallPolicyAssociation) *FirewallPoliciesAddAssociationCall { + c := &FirewallPoliciesAddAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.firewallPolicy = firewallPolicy + c.firewallpolicyassociation = firewallpolicyassociation return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { - c.urlParams_.Set("filter", filter) +// ReplaceExistingAssociation sets the optional parameter +// "replaceExistingAssociation": Indicates whether or not to replace it +// if an association of the attachment already exists. This is false by +// default, in which case an error will be returned if an association +// already exists. +func (c *FirewallPoliciesAddAssociationCall) ReplaceExistingAssociation(replaceExistingAssociation bool) *FirewallPoliciesAddAssociationCall { + c.urlParams_.Set("replaceExistingAssociation", fmt.Sprint(replaceExistingAssociation)) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. 
If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *FirewallPoliciesAddAssociationCall) RequestId(requestId string) *FirewallPoliciesAddAssociationCall { + c.urlParams_.Set("requestId", requestId) return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesListCall { - c.urlParams_.Set("orderBy", orderBy) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallPoliciesAddAssociationCall) Fields(s ...googleapi.Field) *FirewallPoliciesAddAssociationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypesListCall { - c.urlParams_.Set("pageToken", pageToken) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FirewallPoliciesAddAssociationCall) Context(ctx context.Context) *FirewallPoliciesAddAssociationCall { + c.ctx_ = ctx return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *AcceleratorTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AcceleratorTypesListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *FirewallPoliciesAddAssociationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FirewallPoliciesAddAssociationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyassociation) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/addAssociation") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "firewallPolicy": c.firewallPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.firewallPolicies.addAssociation" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Inserts an association for the specified firewall policy.", + // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/addAssociation", + // "httpMethod": "POST", + // "id": "compute.firewallPolicies.addAssociation", + // "parameterOrder": [ + // "firewallPolicy" + // ], + // "parameters": { + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", + // "location": "path", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "required": true, + // "type": "string" + // }, + // "replaceExistingAssociation": { + // "description": "Indicates whether or not to replace it if an association of the attachment already exists. This is false by default, in which case an error will be returned if an association already exists.", + // "location": "query", + // "type": "boolean" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "locations/global/firewallPolicies/{firewallPolicy}/addAssociation", + // "request": { + // "$ref": "FirewallPolicyAssociation" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.firewallPolicies.addRule": + +type FirewallPoliciesAddRuleCall struct { + s *Service + firewallPolicy string + firewallpolicyrule *FirewallPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddRule: Inserts a rule into a firewall policy. +// +// - firewallPolicy: Name of the firewall policy to update. +func (r *FirewallPoliciesService) AddRule(firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *FirewallPoliciesAddRuleCall { + c := &FirewallPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.firewallPolicy = firewallPolicy + c.firewallpolicyrule = firewallpolicyrule + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *FirewallPoliciesAddRuleCall) RequestId(requestId string) *FirewallPoliciesAddRuleCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorTypesListCall { +func (c *FirewallPoliciesAddRuleCall) Fields(s ...googleapi.Field) *FirewallPoliciesAddRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTypesListCall { - c.ifNoneMatch_ = entityTag +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
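+//
+// Usage sketch (illustrative, not generated): bounding the add-rule call with
+// a deadline via Context. The svc value and policy name are assumptions, and
+// rule stands for a *FirewallPolicyRule constructed elsewhere.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	op, err := svc.FirewallPolicies.AddRule("123456789012", rule).
+//		Context(ctx).
+//		Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = op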
+func (c *FirewallPoliciesAddRuleCall) Context(ctx context.Context) *FirewallPoliciesAddRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FirewallPoliciesAddRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FirewallPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/addRule") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "firewallPolicy": c.firewallPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.firewallPolicies.addRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Inserts a rule into a firewall policy.", + // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/addRule", + // "httpMethod": "POST", + // "id": "compute.firewallPolicies.addRule", + // "parameterOrder": [ + // "firewallPolicy" + // ], + // "parameters": { + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", + // "location": "path", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "locations/global/firewallPolicies/{firewallPolicy}/addRule", + // "request": { + // "$ref": "FirewallPolicyRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.firewallPolicies.cloneRules": + +type FirewallPoliciesCloneRulesCall struct { + s *Service + firewallPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// CloneRules: Copies rules to the specified firewall policy. +// +// - firewallPolicy: Name of the firewall policy to update. +func (r *FirewallPoliciesService) CloneRules(firewallPolicy string) *FirewallPoliciesCloneRulesCall { + c := &FirewallPoliciesCloneRulesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.firewallPolicy = firewallPolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *FirewallPoliciesCloneRulesCall) RequestId(requestId string) *FirewallPoliciesCloneRulesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// SourceFirewallPolicy sets the optional parameter +// "sourceFirewallPolicy": The firewall policy from which to copy rules. +func (c *FirewallPoliciesCloneRulesCall) SourceFirewallPolicy(sourceFirewallPolicy string) *FirewallPoliciesCloneRulesCall { + c.urlParams_.Set("sourceFirewallPolicy", sourceFirewallPolicy) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallPoliciesCloneRulesCall) Fields(s ...googleapi.Field) *FirewallPoliciesCloneRulesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesListCall) Context(ctx context.Context) *AcceleratorTypesListCall { +func (c *FirewallPoliciesCloneRulesCall) Context(ctx context.Context) *FirewallPoliciesCloneRulesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AcceleratorTypesListCall) Header() http.Header { +func (c *FirewallPoliciesCloneRulesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesCloneRulesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/acceleratorTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/cloneRules") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.list" call. -// Exactly one of *AcceleratorTypeList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *AcceleratorTypeList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeList, error) { +// Do executes the "compute.firewallPolicies.cloneRules" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallPoliciesCloneRulesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Copies rules to the specified firewall policy.", + // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/cloneRules", + // "httpMethod": "POST", + // "id": "compute.firewallPolicies.cloneRules", + // "parameterOrder": [ + // "firewallPolicy" + // ], + // "parameters": { + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", + // "location": "path", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sourceFirewallPolicy": { + // "description": "The firewall policy from which to copy rules.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "locations/global/firewallPolicies/{firewallPolicy}/cloneRules", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.firewallPolicies.delete": + +type FirewallPoliciesDeleteCall struct { + s *Service + firewallPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified policy. +// +// - firewallPolicy: Name of the firewall policy to delete. +func (r *FirewallPoliciesService) Delete(firewallPolicy string) *FirewallPoliciesDeleteCall { + c := &FirewallPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.firewallPolicy = firewallPolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. 
This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *FirewallPoliciesDeleteCall) RequestId(requestId string) *FirewallPoliciesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallPoliciesDeleteCall) Fields(s ...googleapi.Field) *FirewallPoliciesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FirewallPoliciesDeleteCall) Context(ctx context.Context) *FirewallPoliciesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FirewallPoliciesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FirewallPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "firewallPolicy": c.firewallPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.firewallPolicies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &AcceleratorTypeList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54019,198 +71274,63 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato } return ret, nil // { - // "description": "Retrieves a list of accelerator types that are available to the specified project.", - // "flatPath": "projects/{project}/zones/{zone}/acceleratorTypes", - // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.list", + // "description": "Deletes the specified policy.", + // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}", + // "httpMethod": "DELETE", + // "id": "compute.firewallPolicies.delete", // "parameterOrder": [ - // "project", - // "zone" + // "firewallPolicy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. 
You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to delete.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/acceleratorTypes", + // "path": "locations/global/firewallPolicies/{firewallPolicy}", // "response": { - // "$ref": "AcceleratorTypeList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*AcceleratorTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.addresses.aggregatedList": +// method id "compute.firewallPolicies.get": -type AddressesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FirewallPoliciesGetCall struct { + s *Service + firewallPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of addresses. +// Get: Returns the specified firewall policy. // -// - project: Project ID for this request. -func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { - c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. 
-func (c *AddressesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *AddressesAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *AddressesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AddressesAggregatedListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// - firewallPolicy: Name of the firewall policy to get. +func (r *FirewallPoliciesService) Get(firewallPolicy string) *FirewallPoliciesGetCall { + c := &FirewallPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.firewallPolicy = firewallPolicy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { +func (c *FirewallPoliciesGetCall) Fields(s ...googleapi.Field) *FirewallPoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54220,7 +71340,7 @@ func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { +func (c *FirewallPoliciesGetCall) IfNoneMatch(entityTag string) *FirewallPoliciesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -54228,21 +71348,21 @@ func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { +func (c *FirewallPoliciesGetCall) Context(ctx context.Context) *FirewallPoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesAggregatedListCall) Header() http.Header { +func (c *FirewallPoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -54255,7 +71375,7 @@ func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -54263,38 +71383,38 @@ func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.aggregatedList" call. -// Exactly one of *AddressAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *AddressAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.firewallPolicies.get" call. +// Exactly one of *FirewallPolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *FirewallPolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { +func (c *FirewallPoliciesGetCall) Do(opts ...googleapi.CallOption) (*FirewallPolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &AddressAggregatedList{ + ret := &FirewallPolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54306,58 +71426,25 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address } return ret, nil // { - // "description": "Retrieves an aggregated list of addresses.", - // "flatPath": "projects/{project}/aggregated/addresses", + // "description": "Returns the specified firewall policy.", + // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}", // "httpMethod": "GET", - // "id": "compute.addresses.aggregatedList", + // "id": "compute.firewallPolicies.get", // "parameterOrder": [ - // "project" + // "firewallPolicy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to get.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", // "required": true, // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/addresses", + // "path": "locations/global/firewallPolicies/{firewallPolicy}", // "response": { - // "$ref": "AddressAggregatedList" + // "$ref": "FirewallPolicy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -54368,145 +71455,122 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.addresses.delete": +// method id "compute.firewallPolicies.getAssociation": -type AddressesDeleteCall struct { - s *Service - project string - region string - address string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallPoliciesGetAssociationCall struct { + s *Service + firewallPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified address resource. +// GetAssociation: Gets an association with the specified name. // -// - address: Name of the address resource to delete. -// - project: Project ID for this request. 
-// - region: Name of the region for this request. -func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { - c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.address = address +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. +func (r *FirewallPoliciesService) GetAssociation(firewallPolicy string) *FirewallPoliciesGetAssociationCall { + c := &FirewallPoliciesGetAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.firewallPolicy = firewallPolicy return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// Name sets the optional parameter "name": The name of the association +// to get from the firewall policy. +func (c *FirewallPoliciesGetAssociationCall) Name(name string) *FirewallPoliciesGetAssociationCall { + c.urlParams_.Set("name", name) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { +func (c *FirewallPoliciesGetAssociationCall) Fields(s ...googleapi.Field) *FirewallPoliciesGetAssociationCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FirewallPoliciesGetAssociationCall) IfNoneMatch(entityTag string) *FirewallPoliciesGetAssociationCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { +func (c *FirewallPoliciesGetAssociationCall) Context(ctx context.Context) *FirewallPoliciesGetAssociationCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AddressesDeleteCall) Header() http.Header { +func (c *FirewallPoliciesGetAssociationCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesGetAssociationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/getAssociation") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.firewallPolicies.getAssociation" call. +// Exactly one of *FirewallPolicyAssociation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *FirewallPolicyAssociation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *FirewallPoliciesGetAssociationCall) Do(opts ...googleapi.CallOption) (*FirewallPolicyAssociation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &FirewallPolicyAssociation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54518,85 +71582,72 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified address resource.", - // "flatPath": "projects/{project}/regions/{region}/addresses/{address}", - // "httpMethod": "DELETE", - // "id": "compute.addresses.delete", + // "description": "Gets an association with the specified name.", + // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/getAssociation", + // "httpMethod": "GET", + // "id": "compute.firewallPolicies.getAssociation", // "parameterOrder": [ - // "project", - // "region", - // "address" + // "firewallPolicy" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to which the queried rule belongs.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "name": { + // "description": "The name of the association to get from the firewall policy.", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/addresses/{address}", + // "path": "locations/global/firewallPolicies/{firewallPolicy}/getAssociation", // "response": { - // "$ref": "Operation" + // "$ref": "FirewallPolicyAssociation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.addresses.get": +// method id "compute.firewallPolicies.getIamPolicy": -type AddressesGetCall struct { +type FirewallPoliciesGetIamPolicyCall struct { s *Service - project string - region string - address string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified address resource. +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. // -// - address: Name of the address resource to return. -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { - c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.address = address +// - resource: Name or id of the resource for this request. +func (r *FirewallPoliciesService) GetIamPolicy(resource string) *FirewallPoliciesGetIamPolicyCall { + c := &FirewallPoliciesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *FirewallPoliciesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *FirewallPoliciesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { +func (c *FirewallPoliciesGetIamPolicyCall) Fields(s ...googleapi.Field) *FirewallPoliciesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54606,7 +71657,7 @@ func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { +func (c *FirewallPoliciesGetIamPolicyCall) IfNoneMatch(entityTag string) *FirewallPoliciesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -54614,21 +71665,21 @@ func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { +func (c *FirewallPoliciesGetIamPolicyCall) Context(ctx context.Context) *FirewallPoliciesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesGetCall) Header() http.Header { +func (c *FirewallPoliciesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -54641,7 +71692,7 @@ func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -54649,40 +71700,38 @@ func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status +// Do executes the "compute.firewallPolicies.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) +// *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { +func (c *FirewallPoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Address{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54694,41 +71743,31 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { } return ret, nil // { - // "description": "Returns the specified address resource.", - // "flatPath": "projects/{project}/regions/{region}/addresses/{address}", + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + // "flatPath": "locations/global/firewallPolicies/{resource}/getIamPolicy", // "httpMethod": "GET", - // "id": "compute.addresses.get", + // "id": "compute.firewallPolicies.getIamPolicy", // "parameterOrder": [ - // "project", - // "region", - // "address" + // "resource" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" // }, - // "region": { - // "description": "Name of the region for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/addresses/{address}", + // "path": "locations/global/firewallPolicies/{resource}/getIamPolicy", // "response": { - // "$ref": "Address" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -54739,128 +71778,122 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { } -// method id "compute.addresses.insert": +// method id "compute.firewallPolicies.getRule": -type AddressesInsertCall struct { - s *Service - project string - region string - address *Address - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallPoliciesGetRuleCall struct { + s *Service + firewallPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates an address resource in the specified project by using -// the data included in the request. +// GetRule: Gets a rule of the specified priority. // -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { - c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.address = address +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. +func (r *FirewallPoliciesService) GetRule(firewallPolicy string) *FirewallPoliciesGetRuleCall { + c := &FirewallPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.firewallPolicy = firewallPolicy return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. 
If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { - c.urlParams_.Set("requestId", requestId) +// Priority sets the optional parameter "priority": The priority of the +// rule to get from the firewall policy. +func (c *FirewallPoliciesGetRuleCall) Priority(priority int64) *FirewallPoliciesGetRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { +func (c *FirewallPoliciesGetRuleCall) Fields(s ...googleapi.Field) *FirewallPoliciesGetRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FirewallPoliciesGetRuleCall) IfNoneMatch(entityTag string) *FirewallPoliciesGetRuleCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { +func (c *FirewallPoliciesGetRuleCall) Context(ctx context.Context) *FirewallPoliciesGetRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesInsertCall) Header() http.Header { +func (c *FirewallPoliciesGetRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/getRule") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.firewallPolicies.getRule" call. +// Exactly one of *FirewallPolicyRule or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *FirewallPolicyRule.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *FirewallPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*FirewallPolicyRule, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &FirewallPolicyRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54872,232 +71905,161 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an address resource in the specified project by using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/addresses", - // "httpMethod": "POST", - // "id": "compute.addresses.insert", + // "description": "Gets a rule of the specified priority.", + // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/getRule", + // "httpMethod": "GET", + // "id": "compute.firewallPolicies.getRule", // "parameterOrder": [ - // "project", - // "region" + // "firewallPolicy" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to which the queried rule belongs.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "priority": { + // "description": "The priority of the rule to get from the firewall policy.", + // "format": "int32", // "location": "query", - // "type": "string" + // "type": "integer" // } // }, - // "path": "projects/{project}/regions/{region}/addresses", - // "request": { - // "$ref": "Address" - // }, + // "path": "locations/global/firewallPolicies/{firewallPolicy}/getRule", // "response": { - // "$ref": "Operation" + // "$ref": "FirewallPolicyRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.addresses.list": - -type AddressesListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of addresses contained within the specified -// region. -// -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *AddressesService) List(project string, region string) *AddressesListCall { - c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. 
For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *AddressesListCall) Filter(filter string) *AddressesListCall { - c.urlParams_.Set("filter", filter) - return c -} +// method id "compute.firewallPolicies.insert": -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c +type FirewallPoliciesInsertCall struct { + s *Service + firewallpolicy *FirewallPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { - c.urlParams_.Set("orderBy", orderBy) +// Insert: Creates a new policy in the specified project using the data +// included in the request. +func (r *FirewallPoliciesService) Insert(firewallpolicy *FirewallPolicy) *FirewallPoliciesInsertCall { + c := &FirewallPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.firewallpolicy = firewallpolicy return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { - c.urlParams_.Set("pageToken", pageToken) +// ParentId sets the optional parameter "parentId": Parent ID for this +// request. The ID can be either be "folders/[FOLDER_ID]" if the parent +// is a folder or "organizations/[ORGANIZATION_ID]" if the parent is an +// organization. +func (c *FirewallPoliciesInsertCall) ParentId(parentId string) *FirewallPoliciesInsertCall { + c.urlParams_.Set("parentId", parentId) return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *AddressesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AddressesListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. 
If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *FirewallPoliciesInsertCall) RequestId(requestId string) *FirewallPoliciesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { +func (c *FirewallPoliciesInsertCall) Fields(s ...googleapi.Field) *FirewallPoliciesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { +func (c *FirewallPoliciesInsertCall) Context(ctx context.Context) *FirewallPoliciesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesListCall) Header() http.Header { +func (c *FirewallPoliciesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// Do executes the "compute.firewallPolicies.insert" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { +func (c *FirewallPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &AddressList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55109,126 +72071,71 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro } return ret, nil // { - // "description": "Retrieves a list of addresses contained within the specified region.", - // "flatPath": "projects/{project}/regions/{region}/addresses", - // "httpMethod": "GET", - // "id": "compute.addresses.list", - // "parameterOrder": [ - // "project", - // "region" - // ], + // "description": "Creates a new policy in the specified project using the data included in the request.", + // "flatPath": "locations/global/firewallPolicies", + // "httpMethod": "POST", + // "id": "compute.firewallPolicies.insert", // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "parentId": { + // "description": "Parent ID for this request. The ID can be either be \"folders/[FOLDER_ID]\" if the parent is a folder or \"organizations/[ORGANIZATION_ID]\" if the parent is an organization.", // "location": "query", // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/addresses", + // "path": "locations/global/firewallPolicies", + // "request": { + // "$ref": "FirewallPolicy" + // }, // "response": { - // "$ref": "AddressList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.autoscalers.aggregatedList": +// method id "compute.firewallPolicies.list": -type AutoscalersAggregatedListCall struct { +type FirewallPoliciesListCall struct { s *Service - project string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of autoscalers. -// -// - project: Project ID for this request. -func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { - c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project +// List: Lists all the policies that have been configured for the +// specified folder or organization. +func (r *FirewallPoliciesService) List() *FirewallPoliciesListCall { + c := &FirewallPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. 
To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -55236,32 +72143,29 @@ func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregat // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *FirewallPoliciesListCall) Filter(filter string) *FirewallPoliciesListCall { c.urlParams_.Set("filter", filter) return c } -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *AutoscalersAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *AutoscalersAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than `maxResults`, Compute Engine returns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { +func (c *FirewallPoliciesListCall) MaxResults(maxResults int64) *FirewallPoliciesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -55275,7 +72179,7 @@ func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *Autoscaler // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { +func (c *FirewallPoliciesListCall) OrderBy(orderBy string) *FirewallPoliciesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -55283,16 +72187,23 @@ func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggr // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { +func (c *FirewallPoliciesListCall) PageToken(pageToken string) *FirewallPoliciesListCall { c.urlParams_.Set("pageToken", pageToken) return c } +// ParentId sets the optional parameter "parentId": Parent ID for this +// request. +func (c *FirewallPoliciesListCall) ParentId(parentId string) *FirewallPoliciesListCall { + c.urlParams_.Set("parentId", parentId) + return c +} + // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *AutoscalersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AutoscalersAggregatedListCall { +func (c *FirewallPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *FirewallPoliciesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -55300,7 +72211,7 @@ func (c *AutoscalersAggregatedListCall) ReturnPartialSuccess(returnPartialSucces // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { +func (c *FirewallPoliciesListCall) Fields(s ...googleapi.Field) *FirewallPoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55310,7 +72221,7 @@ func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *Autoscaler // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { +func (c *FirewallPoliciesListCall) IfNoneMatch(entityTag string) *FirewallPoliciesListCall { c.ifNoneMatch_ = entityTag return c } @@ -55318,21 +72229,21 @@ func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *Autoscale // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { +func (c *FirewallPoliciesListCall) Context(ctx context.Context) *FirewallPoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AutoscalersAggregatedListCall) Header() http.Header { +func (c *FirewallPoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -55345,46 +72256,43 @@ func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.aggregatedList" call. -// Exactly one of *AutoscalerAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was +// Do executes the "compute.firewallPolicies.list" call. +// Exactly one of *FirewallPolicyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *FirewallPolicyList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { +func (c *FirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPolicyList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &AutoscalerAggregatedList{ + ret := &FirewallPolicyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55396,24 +72304,16 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos } return ret, nil // { - // "description": "Retrieves an aggregated list of autoscalers.", - // "flatPath": "projects/{project}/aggregated/autoscalers", + // "description": "Lists all the policies that have been configured for the specified folder or organization.", + // "flatPath": "locations/global/firewallPolicies", // "httpMethod": "GET", - // "id": "compute.autoscalers.aggregatedList", - // "parameterOrder": [ - // "project" - // ], + // "id": "compute.firewallPolicies.list", // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -55432,11 +72332,9 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "parentId": { + // "description": "Parent ID for this request.", + // "location": "query", // "type": "string" // }, // "returnPartialSuccess": { @@ -55445,9 +72343,9 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/autoscalers", + // "path": "locations/global/firewallPolicies", // "response": { - // "$ref": "AutoscalerAggregatedList" + // "$ref": "FirewallPolicyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -55461,7 +72359,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { +func (c *FirewallPoliciesListCall) Pages(ctx context.Context, f func(*FirewallPolicyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -55479,124 +72377,118 @@ func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*Autos } } -// method id "compute.autoscalers.delete": - -type AutoscalersDeleteCall struct { - s *Service - project string - zone string - autoscaler string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +// method id "compute.firewallPolicies.listAssociations": -// Delete: Deletes the specified autoscaler. -// -// - autoscaler: Name of the autoscaler to delete. -// - project: Project ID for this request. -// - zone: Name of the zone for this request. 
-func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { - c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.autoscaler = autoscaler +type FirewallPoliciesListAssociationsCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListAssociations: Lists associations of a specified target, i.e., +// organization or folder. +func (r *FirewallPoliciesService) ListAssociations() *FirewallPoliciesListAssociationsCall { + c := &FirewallPoliciesListAssociationsCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { - c.urlParams_.Set("requestId", requestId) +// TargetResource sets the optional parameter "targetResource": The +// target resource to list associations. It is an organization, or a +// folder. +func (c *FirewallPoliciesListAssociationsCall) TargetResource(targetResource string) *FirewallPoliciesListAssociationsCall { + c.urlParams_.Set("targetResource", targetResource) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { +func (c *FirewallPoliciesListAssociationsCall) Fields(s ...googleapi.Field) *FirewallPoliciesListAssociationsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FirewallPoliciesListAssociationsCall) IfNoneMatch(entityTag string) *FirewallPoliciesListAssociationsCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { +func (c *FirewallPoliciesListAssociationsCall) Context(ctx context.Context) *FirewallPoliciesListAssociationsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AutoscalersDeleteCall) Header() http.Header { +func (c *FirewallPoliciesListAssociationsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesListAssociationsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/listAssociations") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, - }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.firewallPolicies.listAssociations" call. +// Exactly one of *FirewallPoliciesListAssociationsResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *FirewallPoliciesListAssociationsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *FirewallPoliciesListAssociationsCall) Do(opts ...googleapi.CallOption) (*FirewallPoliciesListAssociationsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &FirewallPoliciesListAssociationsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55608,172 +72500,147 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified autoscaler.", - // "flatPath": "projects/{project}/zones/{zone}/autoscalers/{autoscaler}", - // "httpMethod": "DELETE", - // "id": "compute.autoscalers.delete", - // "parameterOrder": [ - // "project", - // "zone", - // "autoscaler" - // ], + // "description": "Lists associations of a specified target, i.e., organization or folder.", + // "flatPath": "locations/global/firewallPolicies/listAssociations", + // "httpMethod": "GET", + // "id": "compute.firewallPolicies.listAssociations", // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "targetResource": { + // "description": "The target resource to list associations. 
It is an organization, or a folder.", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "locations/global/firewallPolicies/listAssociations", // "response": { - // "$ref": "Operation" + // "$ref": "FirewallPoliciesListAssociationsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.autoscalers.get": +// method id "compute.firewallPolicies.move": -type AutoscalersGetCall struct { - s *Service - project string - zone string - autoscaler string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FirewallPoliciesMoveCall struct { + s *Service + firewallPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified autoscaler resource. Gets a list of -// available autoscalers by making a list() request. +// Move: Moves the specified firewall policy. // -// - autoscaler: Name of the autoscaler to return. -// - project: Project ID for this request. -// - zone: Name of the zone for this request. -func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { - c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.autoscaler = autoscaler +// - firewallPolicy: Name of the firewall policy to update. +func (r *FirewallPoliciesService) Move(firewallPolicy string) *FirewallPoliciesMoveCall { + c := &FirewallPoliciesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.firewallPolicy = firewallPolicy + return c +} + +// ParentId sets the optional parameter "parentId": The new parent of +// the firewall policy. +func (c *FirewallPoliciesMoveCall) ParentId(parentId string) *FirewallPoliciesMoveCall { + c.urlParams_.Set("parentId", parentId) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *FirewallPoliciesMoveCall) RequestId(requestId string) *FirewallPoliciesMoveCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { +func (c *FirewallPoliciesMoveCall) Fields(s ...googleapi.Field) *FirewallPoliciesMoveCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { +func (c *FirewallPoliciesMoveCall) Context(ctx context.Context) *FirewallPoliciesMoveCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersGetCall) Header() http.Header { +func (c *FirewallPoliciesMoveCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesMoveCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/move") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// Do executes the "compute.firewallPolicies.move" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { +func (c *FirewallPoliciesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Autoscaler{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55785,73 +72652,63 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } return ret, nil // { - // "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", - // "flatPath": "projects/{project}/zones/{zone}/autoscalers/{autoscaler}", - // "httpMethod": "GET", - // "id": "compute.autoscalers.get", + // "description": "Moves the specified firewall policy.", + // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/move", + // "httpMethod": "POST", + // "id": "compute.firewallPolicies.move", // "parameterOrder": [ - // "project", - // "zone", - // "autoscaler" + // "firewallPolicy" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "parentId": { + // "description": "The new parent of the firewall policy.", + // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "locations/global/firewallPolicies/{firewallPolicy}/move", // "response": { - // "$ref": "Autoscaler" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.autoscalers.insert": +// method id "compute.firewallPolicies.patch": -type AutoscalersInsertCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallPoliciesPatchCall struct { + s *Service + firewallPolicy string + firewallpolicy *FirewallPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. +// Patch: Patches the specified policy with the data included in the +// request. // -// - project: Project ID for this request. -// - zone: Name of the zone for this request. -func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { - c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.autoscaler = autoscaler +// - firewallPolicy: Name of the firewall policy to update. +func (r *FirewallPoliciesService) Patch(firewallPolicy string, firewallpolicy *FirewallPolicy) *FirewallPoliciesPatchCall { + c := &FirewallPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.firewallPolicy = firewallPolicy + c.firewallpolicy = firewallpolicy return c } @@ -55866,7 +72723,7 @@ func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Aut // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { +func (c *FirewallPoliciesPatchCall) RequestId(requestId string) *FirewallPoliciesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -55874,7 +72731,7 @@ func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { +func (c *FirewallPoliciesPatchCall) Fields(s ...googleapi.Field) *FirewallPoliciesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55882,21 +72739,21 @@ func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { +func (c *FirewallPoliciesPatchCall) Context(ctx context.Context) *FirewallPoliciesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersInsertCall) Header() http.Header { +func (c *FirewallPoliciesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -55904,52 +72761,51 @@ func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.insert" call. +// Do executes the "compute.firewallPolicies.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -55963,19 +72819,18 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/zones/{zone}/autoscalers", - // "httpMethod": "POST", - // "id": "compute.autoscalers.insert", + // "description": "Patches the specified policy with the data included in the request.", + // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}", + // "httpMethod": "PATCH", + // "id": "compute.firewallPolicies.patch", // "parameterOrder": [ - // "project", - // "zone" + // "firewallPolicy" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", // "required": true, // "type": "string" // }, @@ -55983,18 +72838,11 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/autoscalers", + // "path": "locations/global/firewallPolicies/{firewallPolicy}", // "request": { - // "$ref": "Autoscaler" + // "$ref": "FirewallPolicy" // }, // "response": { // "$ref": "Operation" @@ -56007,188 +72855,130 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.autoscalers.list": +// method id "compute.firewallPolicies.patchRule": -type AutoscalersListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FirewallPoliciesPatchRuleCall struct { + s *Service + firewallPolicy string + firewallpolicyrule *FirewallPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of autoscalers contained within the specified -// zone. +// PatchRule: Patches a rule of the specified priority. // -// - project: Project ID for this request. -// - zone: Name of the zone for this request. -func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { - c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { - c.urlParams_.Set("orderBy", orderBy) +// - firewallPolicy: Name of the firewall policy to update. +func (r *FirewallPoliciesService) PatchRule(firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *FirewallPoliciesPatchRuleCall { + c := &FirewallPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.firewallPolicy = firewallPolicy + c.firewallpolicyrule = firewallpolicyrule return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { - c.urlParams_.Set("pageToken", pageToken) +// Priority sets the optional parameter "priority": The priority of the +// rule to patch. +func (c *FirewallPoliciesPatchRuleCall) Priority(priority int64) *FirewallPoliciesPatchRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *AutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AutoscalersListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *FirewallPoliciesPatchRuleCall) RequestId(requestId string) *FirewallPoliciesPatchRuleCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { +func (c *FirewallPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *FirewallPoliciesPatchRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { +func (c *FirewallPoliciesPatchRuleCall) Context(ctx context.Context) *FirewallPoliciesPatchRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersListCall) Header() http.Header { +func (c *FirewallPoliciesPatchRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/patchRule") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.list" call. -// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx +// Do executes the "compute.firewallPolicies.patchRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *AutoscalerList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &AutoscalerList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56200,122 +72990,72 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified zone.", - // "flatPath": "projects/{project}/zones/{zone}/autoscalers", - // "httpMethod": "GET", - // "id": "compute.autoscalers.list", + // "description": "Patches a rule of the specified priority.", + // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/patchRule", + // "httpMethod": "POST", + // "id": "compute.firewallPolicies.patchRule", // "parameterOrder": [ - // "project", - // "zone" + // "firewallPolicy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "priority": { + // "description": "The priority of the rule to patch.", + // "format": "int32", // "location": "query", - // "type": "boolean" + // "type": "integer" // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/autoscalers", + // "path": "locations/global/firewallPolicies/{firewallPolicy}/patchRule", + // "request": { + // "$ref": "FirewallPolicyRule" + // }, // "response": { - // "$ref": "AutoscalerList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.autoscalers.patch": +// method id "compute.firewallPolicies.removeAssociation": -type AutoscalersPatchCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallPoliciesRemoveAssociationCall struct { + s *Service + firewallPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates an autoscaler in the specified project using the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. +// RemoveAssociation: Removes an association for the specified firewall +// policy. // -// - project: Project ID for this request. -// - zone: Name of the zone for this request. -func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { - c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.autoscaler = autoscaler +// - firewallPolicy: Name of the firewall policy to update. +func (r *FirewallPoliciesService) RemoveAssociation(firewallPolicy string) *FirewallPoliciesRemoveAssociationCall { + c := &FirewallPoliciesRemoveAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.firewallPolicy = firewallPolicy return c } -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) +// Name sets the optional parameter "name": Name for the attachment that +// will be removed. +func (c *FirewallPoliciesRemoveAssociationCall) Name(name string) *FirewallPoliciesRemoveAssociationCall { + c.urlParams_.Set("name", name) return c } @@ -56330,7 +73070,7 @@ func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCa // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { +func (c *FirewallPoliciesRemoveAssociationCall) RequestId(requestId string) *FirewallPoliciesRemoveAssociationCall { c.urlParams_.Set("requestId", requestId) return c } @@ -56338,7 +73078,7 @@ func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { +func (c *FirewallPoliciesRemoveAssociationCall) Fields(s ...googleapi.Field) *FirewallPoliciesRemoveAssociationCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56346,21 +73086,21 @@ func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCal // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { +func (c *FirewallPoliciesRemoveAssociationCall) Context(ctx context.Context) *FirewallPoliciesRemoveAssociationCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersPatchCall) Header() http.Header { +func (c *FirewallPoliciesRemoveAssociationCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesRemoveAssociationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -56368,52 +73108,46 @@ func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/removeAssociation") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.patch" call. +// Do executes the "compute.firewallPolicies.removeAssociation" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallPoliciesRemoveAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -56427,45 +73161,33 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/zones/{zone}/autoscalers", - // "httpMethod": "PATCH", - // "id": "compute.autoscalers.patch", + // "description": "Removes an association for the specified firewall policy.", + // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/removeAssociation", + // "httpMethod": "POST", + // "id": "compute.firewallPolicies.removeAssociation", // "parameterOrder": [ - // "project", - // "zone" + // "firewallPolicy" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "name": { + // "description": "Name for the attachment that will be removed.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, + // "path": "locations/global/firewallPolicies/{firewallPolicy}/removeAssociation", // "response": { // "$ref": "Operation" // }, @@ -56477,35 +73199,29 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.autoscalers.update": +// method id "compute.firewallPolicies.removeRule": -type AutoscalersUpdateCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallPoliciesRemoveRuleCall struct { + s *Service + firewallPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates an autoscaler in the specified project using the data -// included in the request. +// RemoveRule: Deletes a rule of the specified priority. // -// - project: Project ID for this request. -// - zone: Name of the zone for this request. -func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { - c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.autoscaler = autoscaler +// - firewallPolicy: Name of the firewall policy to update. +func (r *FirewallPoliciesService) RemoveRule(firewallPolicy string) *FirewallPoliciesRemoveRuleCall { + c := &FirewallPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.firewallPolicy = firewallPolicy return c } -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. -func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) +// Priority sets the optional parameter "priority": The priority of the +// rule to remove from the firewall policy. +func (c *FirewallPoliciesRemoveRuleCall) Priority(priority int64) *FirewallPoliciesRemoveRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) return c } @@ -56520,7 +73236,7 @@ func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdate // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { +func (c *FirewallPoliciesRemoveRuleCall) RequestId(requestId string) *FirewallPoliciesRemoveRuleCall { c.urlParams_.Set("requestId", requestId) return c } @@ -56528,7 +73244,7 @@ func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { +func (c *FirewallPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *FirewallPoliciesRemoveRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56536,21 +73252,21 @@ func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateC // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { +func (c *FirewallPoliciesRemoveRuleCall) Context(ctx context.Context) *FirewallPoliciesRemoveRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersUpdateCall) Header() http.Header { +func (c *FirewallPoliciesRemoveRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -56558,52 +73274,46 @@ func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/removeRule") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.update" call. +// Do executes the "compute.firewallPolicies.removeRule" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -56617,45 +73327,34 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/zones/{zone}/autoscalers", - // "httpMethod": "PUT", - // "id": "compute.autoscalers.update", + // "description": "Deletes a rule of the specified priority.", + // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/removeRule", + // "httpMethod": "POST", + // "id": "compute.firewallPolicies.removeRule", // "parameterOrder": [ - // "project", - // "zone" + // "firewallPolicy" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", // "required": true, // "type": "string" // }, + // "priority": { + // "description": "The priority of the rule to remove from the firewall policy.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, + // "path": "locations/global/firewallPolicies/{firewallPolicy}/removeRule", // "response": { // "$ref": "Operation" // }, @@ -56667,52 +73366,32 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.backendBuckets.addSignedUrlKey": +// method id "compute.firewallPolicies.setIamPolicy": -type BackendBucketsAddSignedUrlKeyCall struct { - s *Service - project string - backendBucket string - signedurlkey *SignedUrlKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallPoliciesSetIamPolicyCall struct { + s *Service + resource string + globalorganizationsetpolicyrequest *GlobalOrganizationSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddSignedUrlKey: Adds a key for validating requests with signed URLs -// for this backend bucket. +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. // -// - backendBucket: Name of the BackendBucket resource to which the -// Signed URL Key should be added. The name should conform to RFC1035. -// - project: Project ID for this request. -func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { - c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendBucket = backendBucket - c.signedurlkey = signedurlkey - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsAddSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) +// - resource: Name or id of the resource for this request. +func (r *FirewallPoliciesService) SetIamPolicy(resource string, globalorganizationsetpolicyrequest *GlobalOrganizationSetPolicyRequest) *FirewallPoliciesSetIamPolicyCall { + c := &FirewallPoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.globalorganizationsetpolicyrequest = globalorganizationsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsAddSignedUrlKeyCall { +func (c *FirewallPoliciesSetIamPolicyCall) Fields(s ...googleapi.Field) *FirewallPoliciesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56720,21 +73399,21 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *Backen // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsAddSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsAddSignedUrlKeyCall { +func (c *FirewallPoliciesSetIamPolicyCall) Context(ctx context.Context) *FirewallPoliciesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { +func (c *FirewallPoliciesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -56742,14 +73421,14 @@ func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalorganizationsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -56757,39 +73436,38 @@ func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.addSignedUrlKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.firewallPolicies.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *FirewallPoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56801,40 +73479,28 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Adds a key for validating requests with signed URLs for this backend bucket.", - // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "locations/global/firewallPolicies/{resource}/setIamPolicy", // "httpMethod": "POST", - // "id": "compute.backendBuckets.addSignedUrlKey", + // "id": "compute.firewallPolicies.setIamPolicy", // "parameterOrder": [ - // "project", - // "backendBucket" + // "resource" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + // "path": "locations/global/firewallPolicies/{resource}/setIamPolicy", // "request": { - // "$ref": "SignedUrlKey" + // "$ref": "GlobalOrganizationSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -56844,48 +73510,32 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.backendBuckets.delete": +// method id "compute.firewallPolicies.testIamPermissions": -type BackendBucketsDeleteCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallPoliciesTestIamPermissionsCall struct { + s *Service + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified BackendBucket resource. +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. // -// - backendBucket: Name of the BackendBucket resource to delete. -// - project: Project ID for this request. -func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { - c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendBucket = backendBucket - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { - c.urlParams_.Set("requestId", requestId) +// - resource: Name or id of the resource for this request. +func (r *FirewallPoliciesService) TestIamPermissions(resource string, testpermissionsrequest *TestPermissionsRequest) *FirewallPoliciesTestIamPermissionsCall { + c := &FirewallPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { +func (c *FirewallPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *FirewallPoliciesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56893,21 +73543,21 @@ func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { +func (c *FirewallPoliciesTestIamPermissionsCall) Context(ctx context.Context) *FirewallPoliciesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsDeleteCall) Header() http.Header { +func (c *FirewallPoliciesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -56915,49 +73565,53 @@ func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.firewallPolicies.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *FirewallPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56969,70 +73623,57 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified BackendBucket resource.", - // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}", - // "httpMethod": "DELETE", - // "id": "compute.backendBuckets.delete", + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "locations/global/firewallPolicies/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.firewallPolicies.testIamPermissions", // "parameterOrder": [ - // "project", - // "backendBucket" + // "resource" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "(firewallPolicies/)?[0-9]{0,20}", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/global/backendBuckets/{backendBucket}", + // "path": "locations/global/firewallPolicies/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendBuckets.deleteSignedUrlKey": +// method id "compute.firewalls.delete": -type BackendBucketsDeleteSignedUrlKeyCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsDeleteCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteSignedUrlKey: Deletes a key for validating requests with signed -// URLs for this backend bucket. +// Delete: Deletes the specified firewall. // -// - backendBucket: Name of the BackendBucket resource to which the -// Signed URL Key should be added. The name should conform to RFC1035. -// - keyName: The name of the Signed URL Key to delete. +// - firewall: Name of the firewall rule to delete. // - project: Project ID for this request. -func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { - c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { + c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.urlParams_.Set("keyName", keyName) + c.firewall = firewall return c } @@ -57047,7 +73688,7 @@ func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsDeleteSignedUrlKeyCall { +func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -57055,7 +73696,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *Back // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteSignedUrlKeyCall { +func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57063,21 +73704,21 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *Bac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
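Because the testIamPermissions request and response are plain permission lists, a caller can probe its own access before attempting a mutation. A minimal sketch: the constructor and result type come from this hunk, while the Permissions field name and the permission string are assumptions about types defined elsewhere in this file.

// Sketch: check whether the caller may update the given firewall policy.
// Assumes TestPermissionsRequest/TestPermissionsResponse expose a
// Permissions []string field; the permission name is illustrative.
func canUpdateFirewallPolicy(ctx context.Context, svc *compute.Service, resource string) (bool, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.firewallPolicies.update"},
	}
	resp, err := svc.FirewallPolicies.TestIamPermissions(resource, req).Context(ctx).Do()
	if err != nil {
		return false, err
	}
	for _, p := range resp.Permissions {
		if p == "compute.firewallPolicies.update" {
			return true, nil
		}
	}
	return false, nil
}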
-func (c *BackendBucketsDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsDeleteSignedUrlKeyCall { +func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { +func (c *FirewallsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -57087,45 +73728,45 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Resp var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.deleteSignedUrlKey" call. +// Do executes the "compute.firewalls.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -57139,25 +73780,19 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a key for validating requests with signed URLs for this backend bucket.", - // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", - // "httpMethod": "POST", - // "id": "compute.backendBuckets.deleteSignedUrlKey", + // "description": "Deletes the specified firewall.", + // "flatPath": "projects/{project}/global/firewalls/{firewall}", + // "httpMethod": "DELETE", + // "id": "compute.firewalls.delete", // "parameterOrder": [ // "project", - // "backendBucket", - // "keyName" + // "firewall" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "firewall": { + // "description": "Name of the firewall rule to delete.", // "location": "path", - // "required": true, - // "type": "string" - // }, - // "keyName": { - // "description": "The name of the Signed URL Key to delete.", - // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -57174,7 +73809,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "projects/{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", + // "path": "projects/{project}/global/firewalls/{firewall}", // "response": { // "$ref": "Operation" // }, @@ -57186,34 +73821,33 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } -// method id "compute.backendBuckets.get": +// method id "compute.firewalls.get": -type BackendBucketsGetCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FirewallsGetCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified BackendBucket resource. Gets a list of -// available backend buckets by making a list() request. +// Get: Returns the specified firewall. // -// - backendBucket: Name of the BackendBucket resource to return. +// - firewall: Name of the firewall rule to return. // - project: Project ID for this request. -func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { - c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { + c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket + c.firewall = firewall return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { +func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57223,7 +73857,7 @@ func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { +func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -57231,21 +73865,21 @@ func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { +func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsGetCall) Header() http.Header { +func (c *FirewallsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -57258,7 +73892,7 @@ func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -57266,39 +73900,39 @@ func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.get" call. -// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendBucket.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { +// Do executes the "compute.firewalls.get" call. +// Exactly one of *Firewall or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Firewall.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &BackendBucket{ + ret := &Firewall{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57310,17 +73944,17 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } return ret, nil // { - // "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request.", - // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}", + // "description": "Returns the specified firewall.", + // "flatPath": "projects/{project}/global/firewalls/{firewall}", // "httpMethod": "GET", - // "id": "compute.backendBuckets.get", + // "id": "compute.firewalls.get", // "parameterOrder": [ // "project", - // "backendBucket" + // "firewall" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to return.", + // "firewall": { + // "description": "Name of the firewall rule to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -57334,9 +73968,9 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket // "type": "string" // } // }, - // "path": "projects/{project}/global/backendBuckets/{backendBucket}", + // "path": "projects/{project}/global/firewalls/{firewall}", // "response": { - // "$ref": "BackendBucket" + // "$ref": "Firewall" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -57347,25 +73981,25 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } -// method id "compute.backendBuckets.insert": +// method id "compute.firewalls.insert": -type BackendBucketsInsertCall struct { - s *Service - project string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsInsertCall struct { + s *Service + project string + firewall *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a BackendBucket resource in the specified project -// using the data included in the request. +// Insert: Creates a firewall rule in the specified project using the +// data included in the request. // // - project: Project ID for this request. 
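FirewallsGetCall supports IfNoneMatch, so a caller holding an entity tag from an earlier response can make the read conditional and treat a 304 as "keep the cached copy". A small sketch; how the etag was obtained, and whether the Compute API returns one for this resource, is outside this hunk.

// Sketch: conditional read of a firewall rule. Per the Do doc comment above,
// googleapi.IsNotModified recognizes the 304 that Do reports as an error value.
func getFirewallIfChanged(ctx context.Context, svc *compute.Service, project, name, etag string) (*compute.Firewall, error) {
	fw, err := svc.Firewalls.Get(project, name).IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged since etag; caller keeps what it already has
	}
	if err != nil {
		return nil, err
	}
	return fw, nil
}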
-func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { - c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { + c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendbucket = backendbucket + c.firewall = firewall return c } @@ -57380,7 +74014,7 @@ func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBuc // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { +func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -57388,7 +74022,7 @@ func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsIn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { +func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57396,21 +74030,21 @@ func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { +func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsInsertCall) Header() http.Header { +func (c *FirewallsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -57418,14 +74052,14 @@ func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -57438,31 +74072,31 @@ func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.insert" call. 
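The RequestId option is what makes a retried insert safe: the server ignores a second request that reuses the same UUID. A sketch of creating a rule idempotently; the Firewall field names are assumptions from the wider API, not from this hunk.

// Sketch: idempotent creation of an "allow SSH" rule. requestID must be a
// valid non-zero UUID, as the RequestId doc comment above requires; the
// returned *Operation still has to be polled or waited on elsewhere.
func createAllowSSH(ctx context.Context, svc *compute.Service, project, requestID string) (*compute.Operation, error) {
	fw := &compute.Firewall{
		Name:         "allow-ssh",
		Network:      "global/networks/default",
		SourceRanges: []string{"0.0.0.0/0"},
		Allowed: []*compute.FirewallAllowed{
			{IPProtocol: "tcp", Ports: []string{"22"}},
		},
	}
	return svc.Firewalls.Insert(project, fw).RequestId(requestID).Context(ctx).Do()
}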
+// Do executes the "compute.firewalls.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -57476,10 +74110,10 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/global/backendBuckets", + // "description": "Creates a firewall rule in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/firewalls", // "httpMethod": "POST", - // "id": "compute.backendBuckets.insert", + // "id": "compute.firewalls.insert", // "parameterOrder": [ // "project" // ], @@ -57497,9 +74131,9 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "projects/{project}/global/backendBuckets", + // "path": "projects/{project}/global/firewalls", // "request": { - // "$ref": "BackendBucket" + // "$ref": "Firewall" // }, // "response": { // "$ref": "Operation" @@ -57512,9 +74146,9 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.list": +// method id "compute.firewalls.list": -type BackendBucketsListCall struct { +type FirewallsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -57523,30 +74157,33 @@ type BackendBucketsListCall struct { header_ http.Header } -// List: Retrieves the list of BackendBucket resources available to the -// specified project. +// List: Retrieves the list of firewall rules available to the specified +// project. // // - project: Project ID for this request. -func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { - c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *FirewallsService) List(project string) *FirewallsListCall { + c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. 
For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -57554,8 +74191,18 @@ func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { c.urlParams_.Set("filter", filter) return c } @@ -57566,7 +74213,7 @@ func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { +func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -57580,7 +74227,7 @@ func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsLis // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { +func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -57588,7 +74235,7 @@ func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { +func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -57597,7 +74244,7 @@ func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsList // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *BackendBucketsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendBucketsListCall { +func (c *FirewallsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *FirewallsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -57605,7 +74252,7 @@ func (c *BackendBucketsListCall) ReturnPartialSuccess(returnPartialSuccess bool) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { +func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57615,7 +74262,7 @@ func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsLis // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { +func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { c.ifNoneMatch_ = entityTag return c } @@ -57623,21 +74270,21 @@ func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsLi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { +func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
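These list options combine with the Pages helper defined further down for this call, which re-issues the request with each nextPageToken. A sketch that uses the `eq` regular-expression operator described in the Filter doc above; the FirewallList Items and Name fields are defined elsewhere in this file.

// Sketch: collect the names of firewall rules whose name starts with
// "allow-", 100 results per page, letting Pages drive the pageToken loop.
func listAllowRules(ctx context.Context, svc *compute.Service, project string) ([]string, error) {
	var names []string
	// The `eq` filter takes a regular expression that must match the entire
	// field, per the Filter doc comment above.
	call := svc.Firewalls.List(project).
		Filter(`name eq "allow-.*"`).
		MaxResults(100)
	err := call.Pages(ctx, func(page *compute.FirewallList) error {
		for _, fw := range page.Items {
			names = append(names, fw.Name)
		}
		return nil
	})
	return names, err
}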
-func (c *BackendBucketsListCall) Header() http.Header { +func (c *FirewallsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -57650,7 +74297,7 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -57663,33 +74310,33 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.list" call. -// Exactly one of *BackendBucketList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BackendBucketList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { +// Do executes the "compute.firewalls.list" call. +// Exactly one of *FirewallList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *FirewallList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &BackendBucketList{ + ret := &FirewallList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57701,16 +74348,16 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke } return ret, nil // { - // "description": "Retrieves the list of BackendBucket resources available to the specified project.", - // "flatPath": "projects/{project}/global/backendBuckets", + // "description": "Retrieves the list of firewall rules available to the specified project.", + // "flatPath": "projects/{project}/global/firewalls", // "httpMethod": "GET", - // "id": "compute.backendBuckets.list", + // "id": "compute.firewalls.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -57745,9 +74392,9 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // "type": "boolean" // } // }, - // "path": "projects/{project}/global/backendBuckets", + // "path": "projects/{project}/global/firewalls", // "response": { - // "$ref": "BackendBucketList" + // "$ref": "FirewallList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -57761,7 +74408,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { +func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -57779,29 +74426,29 @@ func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucke } } -// method id "compute.backendBuckets.patch": +// method id "compute.firewalls.patch": -type BackendBucketsPatchCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsPatchCall struct { + s *Service + project string + firewall string + firewall2 *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified BackendBucket resource with the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. +// Patch: Updates the specified firewall rule with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. // -// - backendBucket: Name of the BackendBucket resource to patch. +// - firewall: Name of the firewall rule to patch. // - project: Project ID for this request. -func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { - c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { + c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket + c.firewall = firewall + c.firewall2 = firewall2 return c } @@ -57816,7 +74463,7 @@ func (r *BackendBucketsService) Patch(project string, backendBucket string, back // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { +func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -57824,7 +74471,7 @@ func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPat // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { +func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57832,21 +74479,21 @@ func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { +func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsPatchCall) Header() http.Header { +func (c *FirewallsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -57854,14 +74501,14 @@ func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -57869,37 +74516,37 @@ func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.patch" call. +// Do executes the "compute.firewalls.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -57913,17 +74560,17 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}", + // "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/global/firewalls/{firewall}", // "httpMethod": "PATCH", - // "id": "compute.backendBuckets.patch", + // "id": "compute.firewalls.patch", // "parameterOrder": [ // "project", - // "backendBucket" + // "firewall" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to patch.", + // "firewall": { + // "description": "Name of the firewall rule to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -57942,186 +74589,9 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "projects/{project}/global/backendBuckets/{backendBucket}", - // "request": { - // "$ref": "BackendBucket" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendBuckets.setEdgeSecurityPolicy": - -type BackendBucketsSetEdgeSecurityPolicyCall struct { - s *Service - project string - backendBucket string - securitypolicyreference *SecurityPolicyReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetEdgeSecurityPolicy: Sets the edge security policy for the -// specified backend bucket. -// -// - backendBucket: Name of the BackendService resource to which the -// security policy should be set. The name should conform to RFC1035. -// - project: Project ID for this request. -func (r *BackendBucketsService) SetEdgeSecurityPolicy(project string, backendBucket string, securitypolicyreference *SecurityPolicyReference) *BackendBucketsSetEdgeSecurityPolicyCall { - c := &BackendBucketsSetEdgeSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendBucket = backendBucket - c.securitypolicyreference = securitypolicyreference - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. 
If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsSetEdgeSecurityPolicyCall) RequestId(requestId string) *BackendBucketsSetEdgeSecurityPolicyCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendBucketsSetEdgeSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendBucketsSetEdgeSecurityPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendBucketsSetEdgeSecurityPolicyCall) Context(ctx context.Context) *BackendBucketsSetEdgeSecurityPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendBucketsSetEdgeSecurityPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendBucketsSetEdgeSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}/setEdgeSecurityPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendBuckets.setEdgeSecurityPolicy" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the edge security policy for the specified backend bucket.", - // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}/setEdgeSecurityPolicy", - // "httpMethod": "POST", - // "id": "compute.backendBuckets.setEdgeSecurityPolicy", - // "parameterOrder": [ - // "project", - // "backendBucket" - // ], - // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{project}/global/backendBuckets/{backendBucket}/setEdgeSecurityPolicy", + // "path": "projects/{project}/global/firewalls/{firewall}", // "request": { - // "$ref": "SecurityPolicyReference" + // "$ref": "Firewall" // }, // "response": { // "$ref": "Operation" @@ -58134,28 +74604,30 @@ func (c *BackendBucketsSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOptio } -// method id "compute.backendBuckets.update": +// method id "compute.firewalls.update": -type BackendBucketsUpdateCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsUpdateCall struct { + s *Service + project string + firewall string + firewall2 *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified BackendBucket resource with the data -// included in the request. +// Update: Updates the specified firewall rule with the data included in +// the request. Note that all fields will be updated if using PUT, even +// fields that are not specified. To update individual fields, please +// use PATCH instead. 
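As the Update doc comment above notes, PUT replaces every field while Patch merges only the fields you send. A sketch of the lighter-weight Patch path; the Description field and the ForceSendFields behavior are assumptions about the generated Firewall struct, which lives elsewhere in this file.

// Sketch: change only the description of an existing rule. With Patch, fields
// left at their zero value are simply omitted from the request body (to force
// an explicit zero, the generated structs expose ForceSendFields). A full
// Update (PUT) would instead require sending the complete desired Firewall.
func describeFirewall(ctx context.Context, svc *compute.Service, project, name, desc string) (*compute.Operation, error) {
	patch := &compute.Firewall{Description: desc}
	return svc.Firewalls.Patch(project, name, patch).Context(ctx).Do()
}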
// -// - backendBucket: Name of the BackendBucket resource to update. +// - firewall: Name of the firewall rule to update. // - project: Project ID for this request. -func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { - c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { + c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket + c.firewall = firewall + c.firewall2 = firewall2 return c } @@ -58170,7 +74642,7 @@ func (r *BackendBucketsService) Update(project string, backendBucket string, bac // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { +func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -58178,7 +74650,7 @@ func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUp // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { +func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58186,21 +74658,21 @@ func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsU // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { +func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsUpdateCall) Header() http.Header { +func (c *FirewallsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -58208,14 +74680,14 @@ func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewalls/{firewall}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("PUT", urls, body) if err != nil { @@ -58223,37 +74695,37 @@ func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.update" call. +// Do executes the "compute.firewalls.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -58267,17 +74739,17 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request.", - // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}", + // "description": "Updates the specified firewall rule with the data included in the request. Note that all fields will be updated if using PUT, even fields that are not specified. 
To update individual fields, please use PATCH instead.", + // "flatPath": "projects/{project}/global/firewalls/{firewall}", // "httpMethod": "PUT", - // "id": "compute.backendBuckets.update", + // "id": "compute.firewalls.update", // "parameterOrder": [ // "project", - // "backendBucket" + // "firewall" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to update.", + // "firewall": { + // "description": "Name of the firewall rule to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -58296,186 +74768,9 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "projects/{project}/global/backendBuckets/{backendBucket}", - // "request": { - // "$ref": "BackendBucket" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendServices.addSignedUrlKey": - -type BackendServicesAddSignedUrlKeyCall struct { - s *Service - project string - backendService string - signedurlkey *SignedUrlKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AddSignedUrlKey: Adds a key for validating requests with signed URLs -// for this backend service. -// -// - backendService: Name of the BackendService resource to which the -// Signed URL Key should be added. The name should conform to RFC1035. -// - project: Project ID for this request. -func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { - c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.signedurlkey = signedurlkey - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *BackendServicesAddSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesAddSignedUrlKeyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *BackendServicesAddSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesAddSignedUrlKeyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}/addSignedUrlKey") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.addSignedUrlKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Adds a key for validating requests with signed URLs for this backend service.", - // "flatPath": "projects/{project}/global/backendServices/{backendService}/addSignedUrlKey", - // "httpMethod": "POST", - // "id": "compute.backendServices.addSignedUrlKey", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{project}/global/backendServices/{backendService}/addSignedUrlKey", + // "path": "projects/{project}/global/firewalls/{firewall}", // "request": { - // "$ref": "SignedUrlKey" + // "$ref": "Firewall" // }, // "response": { // "$ref": "Operation" @@ -58488,9 +74783,9 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.backendServices.aggregatedList": +// method id "compute.forwardingRules.aggregatedList": -type BackendServicesAggregatedListCall struct { +type ForwardingRulesAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -58499,30 +74794,32 @@ type BackendServicesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves the list of all BackendService resources, -// regional and global, available to the specified project. +// AggregatedList: Retrieves an aggregated list of forwarding rules. // -// - project: Name of the project scoping this request. -func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { - c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { + c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. 
For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -58530,8 +74827,18 @@ func (r *BackendServicesService) AggregatedList(project string) *BackendServices // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -58544,7 +74851,7 @@ func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServic // response. For resource types which predate this field, if this flag // is omitted or false, only scopes of the scope types where the // resource type is expected to be found will be included. 
-func (c *BackendServicesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) return c } @@ -58555,7 +74862,7 @@ func (c *BackendServicesAggregatedListCall) IncludeAllScopes(includeAllScopes bo // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -58569,7 +74876,7 @@ func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *Backen // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -58577,7 +74884,7 @@ func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServ // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -58586,7 +74893,7 @@ func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *Backend // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *BackendServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -58594,7 +74901,7 @@ func (c *BackendServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSu // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58604,7 +74911,7 @@ func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *Backen // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -58612,21 +74919,21 @@ func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *Backe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesAggregatedListCall) Header() http.Header { +func (c *ForwardingRulesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -58639,7 +74946,7 @@ func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Respons var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/forwardingRules") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -58652,33 +74959,33 @@ func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.aggregatedList" call. -// Exactly one of *BackendServiceAggregatedList or error will be +// Do executes the "compute.forwardingRules.aggregatedList" call. +// Exactly one of *ForwardingRuleAggregatedList or error will be // non-nil. Any non-2xx status code is an error. Response headers are in -// either *BackendServiceAggregatedList.ServerResponse.Header or (if a +// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a // response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { +func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &BackendServiceAggregatedList{ + ret := &ForwardingRuleAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58690,16 +74997,16 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B } return ret, nil // { - // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", - // "flatPath": "projects/{project}/aggregated/backendServices", + // "description": "Retrieves an aggregated list of forwarding rules.", + // "flatPath": "projects/{project}/aggregated/forwardingRules", // "httpMethod": "GET", - // "id": "compute.backendServices.aggregatedList", + // "id": "compute.forwardingRules.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -58727,7 +75034,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // "type": "string" // }, // "project": { - // "description": "Name of the project scoping this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, @@ -58739,9 +75046,9 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/backendServices", + // "path": "projects/{project}/aggregated/forwardingRules", // "response": { - // "$ref": "BackendServiceAggregatedList" + // "$ref": "ForwardingRuleAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -58755,7 +75062,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { +func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -58773,195 +75080,28 @@ func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*B } } -// method id "compute.backendServices.delete": - -type BackendServicesDeleteCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified BackendService resource. 
-// -// - backendService: Name of the BackendService resource to delete. -// - project: Project ID for this request. -func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { - c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified BackendService resource.", - // "flatPath": "projects/{project}/global/backendServices/{backendService}", - // "httpMethod": "DELETE", - // "id": "compute.backendServices.delete", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{project}/global/backendServices/{backendService}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendServices.deleteSignedUrlKey": +// method id "compute.forwardingRules.delete": -type BackendServicesDeleteSignedUrlKeyCall struct { +type ForwardingRulesDeleteCall struct { s *Service project string - backendService string + region string + forwardingRule string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// DeleteSignedUrlKey: Deletes a key for validating requests with signed -// URLs for this backend service. +// Delete: Deletes the specified ForwardingRule resource. // -// - backendService: Name of the BackendService resource to which the -// Signed URL Key should be added. The name should conform to RFC1035. -// - keyName: The name of the Signed URL Key to delete. +// - forwardingRule: Name of the ForwardingRule resource to delete. // - project: Project ID for this request. 
-func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { - c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { + c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.urlParams_.Set("keyName", keyName) + c.region = region + c.forwardingRule = forwardingRule return c } @@ -58976,7 +75116,7 @@ func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendServi // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendServicesDeleteSignedUrlKeyCall { +func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRulesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -58984,7 +75124,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *Bac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesDeleteSignedUrlKeyCall { +func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58992,21 +75132,21 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *Ba // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesDeleteSignedUrlKeyCall { +func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { +func (c *ForwardingRulesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -59016,45 +75156,46 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Res var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}/deleteSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "backendService": c.backendService, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.deleteSignedUrlKey" call. +// Do executes the "compute.forwardingRules.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -59068,25 +75209,20 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a key for validating requests with signed URLs for this backend service.", - // "flatPath": "projects/{project}/global/backendServices/{backendService}/deleteSignedUrlKey", - // "httpMethod": "POST", - // "id": "compute.backendServices.deleteSignedUrlKey", + // "description": "Deletes the specified ForwardingRule resource.", + // "flatPath": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "httpMethod": "DELETE", + // "id": "compute.forwardingRules.delete", // "parameterOrder": [ // "project", - // "backendService", - // "keyName" + // "region", + // "forwardingRule" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", // "location": "path", - // "required": true, - // "type": "string" - // }, - // "keyName": { - // "description": "The name of the Signed URL Key to delete.", - // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -59097,13 +75233,20 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/global/backendServices/{backendService}/deleteSignedUrlKey", + // "path": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}", // "response": { // "$ref": "Operation" // }, @@ -59115,34 +75258,36 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } -// method id "compute.backendServices.get": +// method id "compute.forwardingRules.get": -type BackendServicesGetCall struct { +type ForwardingRulesGetCall struct { s *Service project string - backendService string + region string + forwardingRule string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified BackendService resource. Gets a list of -// available backend services. +// Get: Returns the specified ForwardingRule resource. // -// - backendService: Name of the BackendService resource to return. +// - forwardingRule: Name of the ForwardingRule resource to return. // - project: Project ID for this request. -func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { - c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { + c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService + c.region = region + c.forwardingRule = forwardingRule return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { +func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59152,7 +75297,7 @@ func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { +func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -59160,21 +75305,21 @@ func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { +func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesGetCall) Header() http.Header { +func (c *ForwardingRulesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -59187,7 +75332,7 @@ func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -59196,38 +75341,39 @@ func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "backendService": c.backendService, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.get" call. -// Exactly one of *BackendService or error will be non-nil. Any non-2xx +// Do executes the "compute.forwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned +// *ForwardingRule.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { +func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &BackendService{ + ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59239,17 +75385,18 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi } return ret, nil // { - // "description": "Returns the specified BackendService resource. 
Gets a list of available backend services.", - // "flatPath": "projects/{project}/global/backendServices/{backendService}", + // "description": "Returns the specified ForwardingRule resource.", + // "flatPath": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}", // "httpMethod": "GET", - // "id": "compute.backendServices.get", + // "id": "compute.forwardingRules.get", // "parameterOrder": [ // "project", - // "backendService" + // "region", + // "forwardingRule" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -59261,169 +75408,18 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // } - // }, - // "path": "projects/{project}/global/backendServices/{backendService}", - // "response": { - // "$ref": "BackendService" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.backendServices.getHealth": - -type BackendServicesGetHealthCall struct { - s *Service - project string - backendService string - resourcegroupreference *ResourceGroupReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// GetHealth: Gets the most recent health check results for this -// BackendService. Example request body: { "group": -// "/zones/us-east1-b/instanceGroups/lb-backend-example" } -// -// - backendService: Name of the BackendService resource to which the -// queried instance belongs. -// - project: . -func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { - c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *BackendServicesGetHealthCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}/getHealth") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.getHealth" call. -// Exactly one of *BackendServiceGroupHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendServiceGroupHealth{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the most recent health check results for this BackendService. 
Example request body: { \"group\": \"/zones/us-east1-b/instanceGroups/lb-backend-example\" }", - // "flatPath": "projects/{project}/global/backendServices/{backendService}/getHealth", - // "httpMethod": "POST", - // "id": "compute.backendServices.getHealth", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the queried instance belongs.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // }, - // "project": { + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/backendServices/{backendService}/getHealth", - // "request": { - // "$ref": "ResourceGroupReference" - // }, + // "path": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "BackendServiceGroupHealth" + // "$ref": "ForwardingRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -59434,26 +75430,28 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen } -// method id "compute.backendServices.insert": +// method id "compute.forwardingRules.insert": -type BackendServicesInsertCall struct { +type ForwardingRulesInsertCall struct { s *Service project string - backendservice *BackendService + region string + forwardingrule *ForwardingRule urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a BackendService resource in the specified project -// using the data included in the request. For more information, see -// Backend services overview . +// Insert: Creates a ForwardingRule resource in the specified project +// and region using the data included in the request. // // - project: Project ID for this request. -func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { - c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { + c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendservice = backendservice + c.region = region + c.forwardingrule = forwardingrule return c } @@ -59468,7 +75466,7 @@ func (r *BackendServicesService) Insert(project string, backendservice *BackendS // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { +func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRulesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59476,7 +75474,7 @@ func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { +func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59484,21 +75482,21 @@ func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { +func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesInsertCall) Header() http.Header { +func (c *ForwardingRulesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -59506,14 +75504,14 @@ func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -59522,35 +75520,36 @@ func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.insert" call. +// Do executes the "compute.forwardingRules.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -59564,12 +75563,13 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview .", - // "flatPath": "projects/{project}/global/backendServices", + // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/forwardingRules", // "httpMethod": "POST", - // "id": "compute.backendServices.insert", + // "id": "compute.forwardingRules.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "project": { @@ -59579,15 +75579,22 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/global/backendServices", + // "path": "projects/{project}/regions/{region}/forwardingRules", // "request": { - // "$ref": "BackendService" + // "$ref": "ForwardingRule" // }, // "response": { // "$ref": "Operation" @@ -59600,41 +75607,47 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.backendServices.list": +// method id "compute.forwardingRules.list": -type BackendServicesListCall struct { +type ForwardingRulesListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of BackendService resources available to the -// specified project. +// List: Retrieves a list of ForwardingRule resources available to the +// specified project and region. // // - project: Project ID for this request. 
-func (r *BackendServicesService) List(project string) *BackendServicesListCall { - c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { + c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -59642,8 +75655,18 @@ func (r *BackendServicesService) List(project string) *BackendServicesListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { c.urlParams_.Set("filter", filter) return c } @@ -59654,7 +75677,7 @@ func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { +func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -59668,7 +75691,7 @@ func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesL // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { +func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -59676,7 +75699,7 @@ func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { +func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -59685,7 +75708,7 @@ func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesLi // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *BackendServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendServicesListCall { +func (c *ForwardingRulesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ForwardingRulesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -59693,7 +75716,7 @@ func (c *BackendServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { +func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59703,7 +75726,7 @@ func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { +func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { c.ifNoneMatch_ = entityTag return c } @@ -59711,21 +75734,21 @@ func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServices // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { +func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesListCall) Header() http.Header { +func (c *ForwardingRulesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -59738,7 +75761,7 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -59747,37 +75770,38 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.list" call. -// Exactly one of *BackendServiceList or error will be non-nil. Any +// Do executes the "compute.forwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was +// *ForwardingRuleList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { +func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &BackendServiceList{ + ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59789,16 +75813,17 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ } return ret, nil // { - // "description": "Retrieves the list of BackendService resources available to the specified project.", - // "flatPath": "projects/{project}/global/backendServices", + // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", + // "flatPath": "projects/{project}/regions/{region}/forwardingRules", // "httpMethod": "GET", - // "id": "compute.backendServices.list", + // "id": "compute.forwardingRules.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -59827,15 +75852,22 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "returnPartialSuccess": { // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } // }, - // "path": "projects/{project}/global/backendServices", + // "path": "projects/{project}/regions/{region}/forwardingRules", // "response": { - // "$ref": "BackendServiceList" + // "$ref": "ForwardingRuleList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -59849,7 +75881,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { +func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -59867,30 +75899,33 @@ func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServ } } -// method id "compute.backendServices.patch": +// method id "compute.forwardingRules.patch": -type BackendServicesPatchCall struct { +type ForwardingRulesPatchCall struct { s *Service project string - backendService string - backendservice *BackendService + region string + forwardingRule string + forwardingrule *ForwardingRule urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Patches the specified BackendService resource with the data -// included in the request. For more information, see Backend services -// overview. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. +// Patch: Updates the specified forwarding rule with the data included +// in the request. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. Currently, you can only +// patch the network_tier field. // -// - backendService: Name of the BackendService resource to patch. +// - forwardingRule: Name of the ForwardingRule resource to patch. // - project: Project ID for this request. -func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { - c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *ForwardingRulesService) Patch(project string, region string, forwardingRule string, forwardingrule *ForwardingRule) *ForwardingRulesPatchCall { + c := &ForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.backendservice = backendservice + c.region = region + c.forwardingRule = forwardingRule + c.forwardingrule = forwardingrule return c } @@ -59905,7 +75940,7 @@ func (r *BackendServicesService) Patch(project string, backendService string, ba // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { +func (c *ForwardingRulesPatchCall) RequestId(requestId string) *ForwardingRulesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59913,7 +75948,7 @@ func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { +func (c *ForwardingRulesPatchCall) Fields(s ...googleapi.Field) *ForwardingRulesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59921,21 +75956,21 @@ func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServices // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
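Illustrative usage sketch (not part of the vendored diff): how a caller might combine the regional ForwardingRules List call, its Filter parameter, and the Pages helper shown in the hunks above. The project and region names are placeholders, and the compute.NewService constructor with Application Default Credentials is an assumption for the example.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// Assumption for this sketch: NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// List forwarding rules in one region, excluding a name, 100 results per page,
	// using the filter syntax described in the doc comment above.
	call := svc.ForwardingRules.List("my-project", "us-central1").
		Filter("name != example-rule").
		MaxResults(100)
	// Pages invokes the callback once per page and resets the page token afterwards.
	err = call.Pages(ctx, func(page *compute.ForwardingRuleList) error {
		for _, rule := range page.Items {
			fmt.Println(rule.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
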
-func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { +func (c *ForwardingRulesPatchCall) Context(ctx context.Context) *ForwardingRulesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesPatchCall) Header() http.Header { +func (c *ForwardingRulesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -59943,14 +75978,14 @@ func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -59959,36 +75994,37 @@ func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "backendService": c.backendService, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.patch" call. +// Do executes the "compute.forwardingRules.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -60002,17 +76038,18 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Patches the specified BackendService resource with the data included in the request. For more information, see Backend services overview. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/global/backendServices/{backendService}", + // "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", + // "flatPath": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}", // "httpMethod": "PATCH", - // "id": "compute.backendServices.patch", + // "id": "compute.forwardingRules.patch", // "parameterOrder": [ // "project", - // "backendService" + // "region", + // "forwardingRule" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -60025,15 +76062,22 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/global/backendServices/{backendService}", + // "path": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}", // "request": { - // "$ref": "BackendService" + // "$ref": "ForwardingRule" // }, // "response": { // "$ref": "Operation" @@ -60046,30 +76090,31 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendServices.setEdgeSecurityPolicy": +// method id "compute.forwardingRules.setLabels": -type BackendServicesSetEdgeSecurityPolicyCall struct { - s *Service - project string - backendService string - securitypolicyreference *SecurityPolicyReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetEdgeSecurityPolicy: Sets the edge security policy for the -// specified backend service. +// SetLabels: Sets the labels on the specified resource. To learn more +// about labels, read the Labeling Resources documentation. // -// - backendService: Name of the BackendService resource to which the -// edge security policy should be set. The name should conform to -// RFC1035. 
// - project: Project ID for this request. -func (r *BackendServicesService) SetEdgeSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetEdgeSecurityPolicyCall { - c := &BackendServicesSetEdgeSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The region for this request. +// - resource: Name or id of the resource for this request. +func (r *ForwardingRulesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *ForwardingRulesSetLabelsCall { + c := &ForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.securitypolicyreference = securitypolicyreference + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest return c } @@ -60084,7 +76129,7 @@ func (r *BackendServicesService) SetEdgeSecurityPolicy(project string, backendSe // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendServicesSetEdgeSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetEdgeSecurityPolicyCall { +func (c *ForwardingRulesSetLabelsCall) RequestId(requestId string) *ForwardingRulesSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -60092,7 +76137,7 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) RequestId(requestId string) * // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesSetEdgeSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetEdgeSecurityPolicyCall { +func (c *ForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *ForwardingRulesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60100,21 +76145,21 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesSetEdgeSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetEdgeSecurityPolicyCall { +func (c *ForwardingRulesSetLabelsCall) Context(ctx context.Context) *ForwardingRulesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesSetEdgeSecurityPolicyCall) Header() http.Header { +func (c *ForwardingRulesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesSetEdgeSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -60122,14 +76167,14 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) doRequest(alt string) (*http. 
} reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -60137,37 +76182,38 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) doRequest(alt string) (*http. } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.setEdgeSecurityPolicy" call. +// Do executes the "compute.forwardingRules.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -60181,25 +76227,27 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Sets the edge security policy for the specified backend service.", - // "flatPath": "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy", + // "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/regions/{region}/forwardingRules/{resource}/setLabels", // "httpMethod": "POST", - // "id": "compute.backendServices.setEdgeSecurityPolicy", + // "id": "compute.forwardingRules.setLabels", // "parameterOrder": [ // "project", - // "backendService" + // "region", + // "resource" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the edge security policy should be set. 
The name should conform to RFC1035.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "The region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -60207,11 +76255,18 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOpti // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy", + // "path": "projects/{project}/regions/{region}/forwardingRules/{resource}/setLabels", // "request": { - // "$ref": "SecurityPolicyReference" + // "$ref": "RegionSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -60224,30 +76279,32 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOpti } -// method id "compute.backendServices.setSecurityPolicy": +// method id "compute.forwardingRules.setTarget": -type BackendServicesSetSecurityPolicyCall struct { - s *Service - project string - backendService string - securitypolicyreference *SecurityPolicyReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesSetTargetCall struct { + s *Service + project string + region string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetSecurityPolicy: Sets the Google Cloud Armor security policy for -// the specified backend service. For more information, see Google Cloud -// Armor Overview +// SetTarget: Changes target URL for forwarding rule. The new target +// should be of the same type as the old target. // -// - backendService: Name of the BackendService resource to which the -// security policy should be set. The name should conform to RFC1035. -// - project: Project ID for this request. 
-func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { - c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - forwardingRule: Name of the ForwardingRule resource in which target +// is to be set. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { + c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.securitypolicyreference = securitypolicyreference + c.region = region + c.forwardingRule = forwardingRule + c.targetreference = targetreference return c } @@ -60262,7 +76319,7 @@ func (r *BackendServicesService) SetSecurityPolicy(project string, backendServic // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetSecurityPolicyCall { +func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { c.urlParams_.Set("requestId", requestId) return c } @@ -60270,7 +76327,7 @@ func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *Back // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetSecurityPolicyCall { +func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60278,21 +76335,21 @@ func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *Bac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetSecurityPolicyCall { +func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { +func (c *ForwardingRulesSetTargetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -60300,14 +76357,14 @@ func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -60316,36 +76373,37 @@ func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Resp req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "backendService": c.backendService, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.setSecurityPolicy" call. +// Do executes the "compute.forwardingRules.setTarget" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -60359,18 +76417,20 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", - // "flatPath": "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy", + // "description": "Changes target URL for forwarding rule. 
The new target should be of the same type as the old target.", + // "flatPath": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", // "httpMethod": "POST", - // "id": "compute.backendServices.setSecurityPolicy", + // "id": "compute.forwardingRules.setTarget", // "parameterOrder": [ // "project", - // "backendService" + // "region", + // "forwardingRule" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -60381,15 +76441,22 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy", + // "path": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", // "request": { - // "$ref": "SecurityPolicyReference" + // "$ref": "TargetReference" // }, // "response": { // "$ref": "Operation" @@ -60402,29 +76469,25 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) } -// method id "compute.backendServices.update": +// method id "compute.globalAddresses.delete": -type BackendServicesUpdateCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalAddressesDeleteCall struct { + s *Service + project string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified BackendService resource with the data -// included in the request. For more information, see Backend services -// overview. +// Delete: Deletes the specified address resource. // -// - backendService: Name of the BackendService resource to update. +// - address: Name of the address resource to delete. // - project: Project ID for this request. 
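Another illustrative sketch, not part of the patch: pointing an existing regional forwarding rule at a new target with the forwardingRules.setTarget call shown above. The resource names and target URL are placeholders; compute.NewService is assumed as in the previous sketch.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Per the method doc, the new target must be of the same type as the old one.
	ref := &compute.TargetReference{
		Target: "https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/targetPools/my-pool",
	}
	op, err := svc.ForwardingRules.
		SetTarget("my-project", "us-central1", "my-rule", ref).
		RequestId("00000000-0000-4000-8000-000000000001"). // idempotency token; any valid non-zero UUID
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, "status:", op.Status)
}
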
-func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { - c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { + c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.backendservice = backendservice + c.address = address return c } @@ -60439,7 +76502,7 @@ func (r *BackendServicesService) Update(project string, backendService string, b // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { +func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -60447,7 +76510,7 @@ func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { +func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60455,21 +76518,21 @@ func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { +func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesUpdateCall) Header() http.Header { +func (c *GlobalAddressesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -60477,52 +76540,47 @@ func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.update" call. +// Do executes the "compute.globalAddresses.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -60536,17 +76594,17 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the specified BackendService resource with the data included in the request. For more information, see Backend services overview.", - // "flatPath": "projects/{project}/global/backendServices/{backendService}", - // "httpMethod": "PUT", - // "id": "compute.backendServices.update", + // "description": "Deletes the specified address resource.", + // "flatPath": "projects/{project}/global/addresses/{address}", + // "httpMethod": "DELETE", + // "id": "compute.globalAddresses.delete", // "parameterOrder": [ // "project", - // "backendService" + // "address" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", + // "address": { + // "description": "Name of the address resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -60565,10 +76623,7 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "projects/{project}/global/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, + // "path": "projects/{project}/global/addresses/{address}", // "response": { // "$ref": "Operation" // }, @@ -60580,112 +76635,34 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.diskTypes.aggregatedList": +// method id "compute.globalAddresses.get": -type DiskTypesAggregatedListCall struct { +type GlobalAddressesGetCall struct { s *Service project string + address string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of disk types. 
+// Get: Returns the specified address resource. Gets a list of available +// addresses by making a list() request. // +// - address: Name of the address resource to return. // - project: Project ID for this request. -func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { - c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { + c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *DiskTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *DiskTypesAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. 
(Default: `500`) -func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *DiskTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DiskTypesAggregatedListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + c.address = address return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { +func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60695,7 +76672,7 @@ func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { +func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -60703,21 +76680,21 @@ func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { +func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DiskTypesAggregatedListCall) Header() http.Header { +func (c *GlobalAddressesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -60730,7 +76707,7 @@ func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -60739,37 +76716,38 @@ func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, err req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.aggregatedList" call. -// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { +// Do executes the "compute.globalAddresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &DiskTypeAggregatedList{ + ret := &Address{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60781,40 +76759,20 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of disk types.", - // "flatPath": "projects/{project}/aggregated/diskTypes", + // "description": "Returns the specified address resource. 
Gets a list of available addresses by making a list() request.", + // "flatPath": "projects/{project}/global/addresses/{address}", // "httpMethod": "GET", - // "id": "compute.diskTypes.aggregatedList", + // "id": "compute.globalAddresses.get", // "parameterOrder": [ - // "project" + // "project", + // "address" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "address": { + // "description": "Name of the address resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -60823,16 +76781,11 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/diskTypes", + // "path": "projects/{project}/global/addresses/{address}", // "response": { - // "$ref": "DiskTypeAggregatedList" + // "$ref": "Address" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -60843,144 +76796,124 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.diskTypes.get": +// method id "compute.globalAddresses.insert": -type DiskTypesGetCall struct { - s *Service - project string - zone string - diskType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalAddressesInsertCall struct { + s *Service + project string + address *Address + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified disk type. Gets a list of available disk -// types by making a list() request. +// Insert: Creates an address resource in the specified project by using +// the data included in the request. // -// - diskType: Name of the disk type to return. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { - c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { + c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.diskType = diskType + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { +func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { +func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesGetCall) Header() http.Header { +func (c *GlobalAddressesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/diskTypes/{diskType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "diskType": c.diskType, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.get" call. -// Exactly one of *DiskType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.globalAddresses.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { +func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &DiskType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60992,23 +76925,14 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { } return ret, nil // { - // "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request.", - // "flatPath": "projects/{project}/zones/{zone}/diskTypes/{diskType}", - // "httpMethod": "GET", - // "id": "compute.diskTypes.get", + // "description": "Creates an address resource in the specified project by using the data included in the request.", + // "flatPath": "projects/{project}/global/addresses", + // "httpMethod": "POST", + // "id": "compute.globalAddresses.insert", // "parameterOrder": [ - // "project", - // "zone", - // "diskType" + // "project" // ], // "parameters": { - // "diskType": { - // "description": "Name of the disk type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -61016,65 +76940,64 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/diskTypes/{diskType}", + // "path": "projects/{project}/global/addresses", + // "request": { + // "$ref": "Address" + // }, // "response": { - // "$ref": "DiskType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.diskTypes.list": +// method id "compute.globalAddresses.list": -type DiskTypesListCall struct { +type GlobalAddressesListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of disk types available to the specified -// project. +// List: Retrieves a list of global addresses. // // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { - c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { + c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. 
The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -61082,8 +77005,18 @@ func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { c.urlParams_.Set("filter", filter) return c } @@ -61094,7 +77027,7 @@ func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { +func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -61108,7 +77041,7 @@ func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { +func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -61116,7 +77049,7 @@ func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
-func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { +func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -61125,7 +77058,7 @@ func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *DiskTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DiskTypesListCall { +func (c *GlobalAddressesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalAddressesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -61133,7 +77066,7 @@ func (c *DiskTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *Dis // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { +func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61143,7 +77076,7 @@ func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { +func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall { c.ifNoneMatch_ = entityTag return c } @@ -61151,21 +77084,21 @@ func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { +func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesListCall) Header() http.Header { +func (c *GlobalAddressesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -61178,7 +77111,7 @@ func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -61187,38 +77120,37 @@ func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.list" call. -// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx +// Do executes the "compute.globalAddresses.list" call. +// Exactly one of *AddressList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *DiskTypeList.ServerResponse.Header or (if a response was returned at +// *AddressList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { +func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &DiskTypeList{ + ret := &AddressList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61230,17 +77162,16 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err } return ret, nil // { - // "description": "Retrieves a list of disk types available to the specified project.", - // "flatPath": "projects/{project}/zones/{zone}/diskTypes", + // "description": "Retrieves a list of global addresses.", + // "flatPath": "projects/{project}/global/addresses", // "httpMethod": "GET", - // "id": "compute.diskTypes.list", + // "id": "compute.globalAddresses.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -61273,18 +77204,11 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/diskTypes", + // "path": "projects/{project}/global/addresses", // "response": { - // "$ref": "DiskTypeList" + // "$ref": "AddressList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -61298,7 +77222,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { +func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -61316,55 +77240,35 @@ func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) err } } -// method id "compute.disks.addResourcePolicies": +// method id "compute.globalAddresses.setLabels": -type DisksAddResourcePoliciesCall struct { - s *Service - project string - zone string - disk string - disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalAddressesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddResourcePolicies: Adds existing resource policies to a disk. You -// can only add one policy which will be applied to this disk for -// scheduling snapshot creation. +// SetLabels: Sets the labels on a GlobalAddress. To learn more about +// labels, read the Labeling Resources documentation. // -// - disk: The disk name for this request. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *DisksService) AddResourcePolicies(project string, zone string, disk string, disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest) *DisksAddResourcePoliciesCall { - c := &DisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *GlobalAddressesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalAddressesSetLabelsCall { + c := &GlobalAddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksaddresourcepoliciesrequest = disksaddresourcepoliciesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. 
This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *DisksAddResourcePoliciesCall) RequestId(requestId string) *DisksAddResourcePoliciesCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksAddResourcePoliciesCall { +func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddressesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61372,21 +77276,21 @@ func (c *DisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksAddRes // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksAddResourcePoliciesCall) Context(ctx context.Context) *DisksAddResourcePoliciesCall { +func (c *GlobalAddressesSetLabelsCall) Context(ctx context.Context) *GlobalAddressesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksAddResourcePoliciesCall) Header() http.Header { +func (c *GlobalAddressesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -61394,14 +77298,14 @@ func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksaddresourcepoliciesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/addResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -61409,38 +77313,37 @@ func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.addResourcePolicies" call. +// Do executes the "compute.globalAddresses.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -61454,23 +77357,15 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Adds existing resource policies to a disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", - // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/addResourcePolicies", + // "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", // "httpMethod": "POST", - // "id": "compute.disks.addResourcePolicies", + // "id": "compute.globalAddresses.setLabels", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "resource" // ], // "parameters": { - // "disk": { - // "description": "The disk name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -61478,22 +77373,17 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/disks/{disk}/addResourcePolicies", + // "path": "projects/{project}/global/addresses/{resource}/setLabels", // "request": { - // "$ref": "DisksAddResourcePoliciesRequest" + // "$ref": "GlobalSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -61506,112 +77396,200 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.disks.aggregatedList": +// method id "compute.globalForwardingRules.delete": -type DisksAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesDeleteCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of persistent disks. +// Delete: Deletes the specified GlobalForwardingRule resource. // +// - forwardingRule: Name of the ForwardingRule resource to delete. // - project: Project ID for this request. -func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { - c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { + c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.forwardingRule = forwardingRule return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. 
For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { - c.urlParams_.Set("filter", filter) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *DisksAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *DisksAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { + c.ctx_ = ctx return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). 
Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c +func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules/{forwardingRule}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "forwardingRule": c.forwardingRule, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *DisksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DisksAggregatedListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// Do executes the "compute.globalForwardingRules.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified GlobalForwardingRule resource.", + // "flatPath": "projects/{project}/global/forwardingRules/{forwardingRule}", + // "httpMethod": "DELETE", + // "id": "compute.globalForwardingRules.delete", + // "parameterOrder": [ + // "project", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/forwardingRules/{forwardingRule}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalForwardingRules.get": + +type GlobalForwardingRulesGetCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified GlobalForwardingRule resource. Gets a list +// of available forwarding rules by making a list() request. +// +// - forwardingRule: Name of the ForwardingRule resource to return. +// - project: Project ID for this request. +func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { + c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.forwardingRule = forwardingRule return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { +func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61621,7 +77599,7 @@ func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { +func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -61629,21 +77607,21 @@ func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregated // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { +func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksAggregatedListCall) Header() http.Header { +func (c *GlobalForwardingRulesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -61656,7 +77634,7 @@ func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -61664,38 +77642,39 @@ func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.aggregatedList" call. -// Exactly one of *DiskAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalForwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
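A minimal usage sketch of the GlobalForwardingRules Get call generated above. It is illustrative only and not part of the vendored file or of this patch: it assumes Application Default Credentials are available in the environment, and "my-project" and "my-rule" are placeholder names.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials from the environment.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Fetch one global forwarding rule; both names are placeholders.
	rule, err := svc.GlobalForwardingRules.Get("my-project", "my-rule").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("rule %s targets %s", rule.Name, rule.Target)
}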
-func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { +func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &DiskAggregatedList{ + ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61707,40 +77686,20 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega } return ret, nil // { - // "description": "Retrieves an aggregated list of persistent disks.", - // "flatPath": "projects/{project}/aggregated/disks", + // "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request.", + // "flatPath": "projects/{project}/global/forwardingRules/{forwardingRule}", // "httpMethod": "GET", - // "id": "compute.disks.aggregatedList", + // "id": "compute.globalForwardingRules.get", // "parameterOrder": [ - // "project" + // "project", + // "forwardingRule" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -61749,16 +77708,11 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/disks", + // "path": "projects/{project}/global/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "DiskAggregatedList" + // "$ref": "ForwardingRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -61769,63 +77723,25 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.createSnapshot": +// method id "compute.globalForwardingRules.insert": -type DisksCreateSnapshotCall struct { - s *Service - project string - zone string - disk string - snapshot *Snapshot - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesInsertCall struct { + s *Service + project string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// CreateSnapshot: Creates a snapshot of a specified persistent disk. -// For regular snapshot creation, consider using snapshots.insert -// instead, as that method supports more features, such as creating -// snapshots in a project different from the source disk project. +// Insert: Creates a GlobalForwardingRule resource in the specified +// project using the data included in the request. // -// - disk: Name of the persistent disk to snapshot. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { - c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { + c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.snapshot = snapshot - return c -} - -// GuestFlush sets the optional parameter "guestFlush": [Input Only] -// Whether to attempt an application consistent snapshot by informing -// the OS to prepare for the snapshot process. Currently only supported -// on Windows instances using the Volume Shadow Copy Service (VSS). -func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { - c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) + c.forwardingrule = forwardingrule return c } @@ -61840,7 +77756,7 @@ func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapsh // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { +func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalForwardingRulesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -61848,7 +77764,7 @@ func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapsh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { +func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61856,21 +77772,21 @@ func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnaps // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { +func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksCreateSnapshotCall) Header() http.Header { +func (c *GlobalForwardingRulesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -61878,14 +77794,14 @@ func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -61894,37 +77810,35 @@ func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.createSnapshot" call. +// Do executes the "compute.globalForwardingRules.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -61938,28 +77852,14 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a snapshot of a specified persistent disk. For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", - // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/createSnapshot", + // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/forwardingRules", // "httpMethod": "POST", - // "id": "compute.disks.createSnapshot", + // "id": "compute.globalForwardingRules.insert", // "parameterOrder": [ - // "project", - // "zone", - // "disk" + // "project" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to snapshot.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "guestFlush": { - // "description": "[Input Only] Whether to attempt an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", - // "location": "query", - // "type": "boolean" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -61971,18 +77871,11 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/disks/{disk}/createSnapshot", + // "path": "projects/{project}/global/forwardingRules", // "request": { - // "$ref": "Snapshot" + // "$ref": "ForwardingRule" // }, // "response": { // "$ref": "Operation" @@ -61995,127 +77888,197 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.disks.delete": +// method id "compute.globalForwardingRules.list": -type DisksDeleteCall struct { - s *Service - project string - zone string - disk string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified persistent disk. Deleting a disk -// removes its data permanently and is irreversible. However, deleting a -// disk does not delete any snapshots previously made from the disk. You -// must separately delete snapshots. +// List: Retrieves a list of GlobalForwardingRule resources available to +// the specified project. // -// - disk: Name of the persistent disk to delete. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { - c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { + c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. 
For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
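An illustrative follow-up to the sketch above (again not part of the patch), showing how the optional Filter, MaxResults, and page-token parameters documented here combine with the Pages helper added further below. The project name and filter value are placeholders, and the imports are the same as in the earlier sketch.

func listRules(ctx context.Context, svc *compute.Service) error {
	// Iterate every page of global forwarding rules matching the filter;
	// Pages resets the page token and calls f once per page.
	return svc.GlobalForwardingRules.List("my-project").
		Filter(`name != "example-rule"`).
		MaxResults(100).
		Pages(ctx, func(page *compute.ForwardingRuleList) error {
			for _, fr := range page.Items {
				log.Printf("%s -> %s", fr.Name, fr.IPAddress)
			}
			return nil
		})
}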
+func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *GlobalForwardingRulesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalForwardingRulesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { +func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { +func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksDeleteCall) Header() http.Header { +func (c *GlobalForwardingRulesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.globalForwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ForwardingRuleList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62127,171 +78090,208 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", - // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}", - // "httpMethod": "DELETE", - // "id": "compute.disks.delete", + // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", + // "flatPath": "projects/{project}/global/forwardingRules", + // "httpMethod": "GET", + // "id": "compute.globalForwardingRules.list", // "parameterOrder": [ - // "project", - // "zone", - // "disk" + // "project" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to delete.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/disks/{disk}", + // "path": "projects/{project}/global/forwardingRules", // "response": { - // "$ref": "Operation" + // "$ref": "ForwardingRuleList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type DisksGetCall struct { - s *Service - project string - zone string - disk string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.globalForwardingRules.patch": + +type GlobalForwardingRulesPatchCall struct { + s *Service + project string + forwardingRule string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns a specified persistent disk. Gets a list of available -// persistent disks by making a list() request. +// Patch: Updates the specified forwarding rule with the data included +// in the request. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. Currently, you can only +// patch the network_tier field. // -// - disk: Name of the persistent disk to return. +// - forwardingRule: Name of the ForwardingRule resource to patch. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { - c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalForwardingRulesService) Patch(project string, forwardingRule string, forwardingrule *ForwardingRule) *GlobalForwardingRulesPatchCall { + c := &GlobalForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + c.forwardingRule = forwardingRule + c.forwardingrule = forwardingrule + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *GlobalForwardingRulesPatchCall) RequestId(requestId string) *GlobalForwardingRulesPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { +func (c *GlobalForwardingRulesPatchCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { +func (c *GlobalForwardingRulesPatchCall) Context(ctx context.Context) *GlobalForwardingRulesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksGetCall) Header() http.Header { +func (c *GlobalForwardingRulesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules/{forwardingRule}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.get" call. -// Exactly one of *Disk or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Disk.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { +// Do executes the "compute.globalForwardingRules.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Disk{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62303,18 +78303,17 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", - // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}", - // "httpMethod": "GET", - // "id": "compute.disks.get", + // "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. 
Currently, you can only patch the network_tier field.", + // "flatPath": "projects/{project}/global/forwardingRules/{forwardingRule}", + // "httpMethod": "PATCH", + // "id": "compute.globalForwardingRules.patch", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "forwardingRule" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to return.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -62327,151 +78326,133 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/disks/{disk}", + // "path": "projects/{project}/global/forwardingRules/{forwardingRule}", + // "request": { + // "$ref": "ForwardingRule" + // }, // "response": { - // "$ref": "Disk" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.disks.getIamPolicy": +// method id "compute.globalForwardingRules.setLabels": -type DisksGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// SetLabels: Sets the labels on the specified resource. To learn more +// about labels, read the Labeling resources documentation. // // - project: Project ID for this request. // - resource: Name or id of the resource for this request. -// - zone: The name of the zone for this request. 
-func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { - c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalForwardingRulesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalForwardingRulesSetLabelsCall { + c := &GlobalForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone c.resource = resource - return c -} - -// OptionsRequestedPolicyVersion sets the optional parameter -// "optionsRequestedPolicyVersion": Requested IAM Policy version. -func (c *DisksGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *DisksGetIamPolicyCall { - c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksGetIamPolicyCall) Fields(s ...googleapi.Field) *DisksGetIamPolicyCall { +func (c *GlobalForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksGetIamPolicyCall) IfNoneMatch(entityTag string) *DisksGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksGetIamPolicyCall) Context(ctx context.Context) *DisksGetIamPolicyCall { +func (c *GlobalForwardingRulesSetLabelsCall) Context(ctx context.Context) *GlobalForwardingRulesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksGetIamPolicyCall) Header() http.Header { +func (c *GlobalForwardingRulesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules/{resource}/setLabels") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.globalForwardingRules.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62483,22 +78464,15 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/getIamPolicy", - // "httpMethod": "GET", - // "id": "compute.disks.getIamPolicy", + // "description": "Sets the labels on the specified resource. 
To learn more about labels, read the Labeling resources documentation.", + // "flatPath": "projects/{project}/global/forwardingRules/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.setLabels", // "parameterOrder": [ // "project", - // "zone", // "resource" // ], // "parameters": { - // "optionsRequestedPolicyVersion": { - // "description": "Requested IAM Policy version.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -62509,57 +78483,49 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/disks/{resource}/getIamPolicy", + // "path": "projects/{project}/global/forwardingRules/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.disks.insert": +// method id "compute.globalForwardingRules.setTarget": -type DisksInsertCall struct { - s *Service - project string - zone string - disk *Disk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesSetTargetCall struct { + s *Service + project string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a persistent disk in the specified project using the -// data in the request. You can create a disk from a source -// (sourceImage, sourceSnapshot, or sourceDisk) or create an empty 500 -// GB data disk by omitting all properties. You can also create a disk -// that is larger than the default size by specifying the sizeGb -// property. +// SetTarget: Changes target URL for the GlobalForwardingRule resource. +// The new target should be of the same type as the old target. // -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { - c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - forwardingRule: Name of the ForwardingRule resource in which target +// is to be set. +// - project: Project ID for this request. 
+func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { + c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + c.forwardingRule = forwardingRule + c.targetreference = targetreference return c } @@ -62574,22 +78540,15 @@ func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksIns // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { +func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *GlobalForwardingRulesSetTargetCall { c.urlParams_.Set("requestId", requestId) return c } -// SourceImage sets the optional parameter "sourceImage": Source image -// to restore onto a disk. This field is optional. -func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { - c.urlParams_.Set("sourceImage", sourceImage) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { +func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62597,21 +78556,21 @@ func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { +func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksInsertCall) Header() http.Header { +func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -62619,14 +78578,14 @@ func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules/{forwardingRule}/setTarget") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -62634,37 +78593,37 @@ func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.insert" call. +// Do executes the "compute.globalForwardingRules.setTarget" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -62678,15 +78637,22 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk from a source (sourceImage, sourceSnapshot, or sourceDisk) or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", - // "flatPath": "projects/{project}/zones/{zone}/disks", + // "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", + // "flatPath": "projects/{project}/global/forwardingRules/{forwardingRule}/setTarget", // "httpMethod": "POST", - // "id": "compute.disks.insert", + // "id": "compute.globalForwardingRules.setTarget", // "parameterOrder": [ // "project", - // "zone" + // "forwardingRule" // ], // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -62698,23 +78664,11 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "sourceImage": { - // "description": "Source image to restore onto a disk. This field is optional.", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/disks", + // "path": "projects/{project}/global/forwardingRules/{forwardingRule}/setTarget", // "request": { - // "$ref": "Disk" + // "$ref": "TargetReference" // }, // "response": { // "$ref": "Operation" @@ -62727,188 +78681,130 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.list": +// method id "compute.globalNetworkEndpointGroups.attachNetworkEndpoints": -type DisksListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall struct { + s *Service + project string + networkEndpointGroup string + globalnetworkendpointgroupsattachendpointsrequest *GlobalNetworkEndpointGroupsAttachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of persistent disks contained within the -// specified zone. +// AttachNetworkEndpoints: Attach a network endpoint to the specified +// network endpoint group. // -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *DisksService) List(project string, zone string) *DisksListCall { - c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkEndpointGroup: The name of the network endpoint group where +// you are attaching network endpoints to. It should comply with +// RFC1035. +// - project: Project ID for this request. +func (r *GlobalNetworkEndpointGroupsService) AttachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsattachendpointsrequest *GlobalNetworkEndpointGroupsAttachEndpointsRequest) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { + c := &GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. 
The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *DisksListCall) Filter(filter string) *DisksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { - c.urlParams_.Set("pageToken", pageToken) + c.networkEndpointGroup = networkEndpointGroup + c.globalnetworkendpointgroupsattachendpointsrequest = globalnetworkendpointgroupsattachendpointsrequest return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *DisksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DisksListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. 
If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksListCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalnetworkendpointgroupsattachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.list" call. -// Exactly one of *DiskList or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskList.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.globalNetworkEndpointGroups.attachNetworkEndpoints" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &DiskList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62920,36 +78816,19 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { } return ret, nil // { - // "description": "Retrieves a list of persistent disks contained within the specified zone.", - // "flatPath": "projects/{project}/zones/{zone}/disks", - // "httpMethod": "GET", - // "id": "compute.disks.list", + // "description": "Attach a network endpoint to the specified network endpoint group.", + // "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "httpMethod": "POST", + // "id": "compute.globalNetworkEndpointGroups.attachNetworkEndpoints", // "parameterOrder": [ // "project", - // "zone" + // "networkEndpointGroup" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -62959,77 +78838,48 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/disks", + // "path": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "request": { + // "$ref": "GlobalNetworkEndpointGroupsAttachEndpointsRequest" + // }, // "response": { - // "$ref": "DiskList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.removeResourcePolicies": +// method id "compute.globalNetworkEndpointGroups.delete": -type DisksRemoveResourcePoliciesCall struct { - s *Service - project string - zone string - disk string - disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsDeleteCall struct { + s *Service + project string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveResourcePolicies: Removes resource policies from a disk. +// Delete: Deletes the specified network endpoint group.Note that the +// NEG cannot be deleted if there are backend services referencing it. // -// - disk: The disk name for this request. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *DisksService) RemoveResourcePolicies(project string, zone string, disk string, disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest) *DisksRemoveResourcePoliciesCall { - c := &DisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkEndpointGroup: The name of the network endpoint group to +// delete. It should comply with RFC1035. +// - project: Project ID for this request. +func (r *GlobalNetworkEndpointGroupsService) Delete(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsDeleteCall { + c := &GlobalNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksremoveresourcepoliciesrequest = disksremoveresourcepoliciesrequest + c.networkEndpointGroup = networkEndpointGroup return c } @@ -63044,7 +78894,7 @@ func (r *DisksService) RemoveResourcePolicies(project string, zone string, disk // clients from accidentally creating duplicate commitments. 
The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *DisksRemoveResourcePoliciesCall) RequestId(requestId string) *DisksRemoveResourcePoliciesCall { +func (c *GlobalNetworkEndpointGroupsDeleteCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -63052,7 +78902,7 @@ func (c *DisksRemoveResourcePoliciesCall) RequestId(requestId string) *DisksRemo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksRemoveResourcePoliciesCall { +func (c *GlobalNetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63060,21 +78910,21 @@ func (c *DisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksRem // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksRemoveResourcePoliciesCall) Context(ctx context.Context) *DisksRemoveResourcePoliciesCall { +func (c *GlobalNetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksRemoveResourcePoliciesCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -63082,53 +78932,47 @@ func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksremoveresourcepoliciesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/removeResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.removeResourcePolicies" call. +// Do executes the "compute.globalNetworkEndpointGroups.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -63142,20 +78986,18 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Removes resource policies from a disk.", - // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/removeResourcePolicies", - // "httpMethod": "POST", - // "id": "compute.disks.removeResourcePolicies", + // "description": "Deletes the specified network endpoint group.Note that the NEG cannot be deleted if there are backend services referencing it.", + // "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}", + // "httpMethod": "DELETE", + // "id": "compute.globalNetworkEndpointGroups.delete", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "networkEndpointGroup" // ], // "parameters": { - // "disk": { - // "description": "The disk name for this request.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -63170,19 +79012,9 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/disks/{disk}/removeResourcePolicies", - // "request": { - // "$ref": "DisksRemoveResourcePoliciesRequest" - // }, + // "path": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}", // "response": { // "$ref": "Operation" // }, @@ -63194,31 +79026,29 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.disks.resize": +// method id "compute.globalNetworkEndpointGroups.detachNetworkEndpoints": -type DisksResizeCall struct { - s *Service - project string - zone string - disk string - disksresizerequest *DisksResizeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall struct { + s *Service + project string + networkEndpointGroup string + globalnetworkendpointgroupsdetachendpointsrequest *GlobalNetworkEndpointGroupsDetachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the specified persistent disk. You can only increase -// the size of the disk. +// DetachNetworkEndpoints: Detach the network endpoint from the +// specified network endpoint group. // -// - disk: The name of the persistent disk. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { - c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkEndpointGroup: The name of the network endpoint group where +// you are removing network endpoints. It should comply with RFC1035. +// - project: Project ID for this request. +func (r *GlobalNetworkEndpointGroupsService) DetachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsdetachendpointsrequest *GlobalNetworkEndpointGroupsDetachEndpointsRequest) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { + c := &GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksresizerequest = disksresizerequest + c.networkEndpointGroup = networkEndpointGroup + c.globalnetworkendpointgroupsdetachendpointsrequest = globalnetworkendpointgroupsdetachendpointsrequest return c } @@ -63233,7 +79063,7 @@ func (r *DisksService) Resize(project string, zone string, disk string, disksres // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -63241,7 +79071,7 @@ func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63249,21 +79079,21 @@ func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksResizeCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -63271,14 +79101,14 @@ func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalnetworkendpointgroupsdetachendpointsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -63286,38 +79116,37 @@ func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.resize" call. +// Do executes the "compute.globalNetworkEndpointGroups.detachNetworkEndpoints" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -63331,20 +79160,18 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", - // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/resize", + // "description": "Detach the network endpoint from the specified network endpoint group.", + // "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", // "httpMethod": "POST", - // "id": "compute.disks.resize", + // "id": "compute.globalNetworkEndpointGroups.detachNetworkEndpoints", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "networkEndpointGroup" // ], // "parameters": { - // "disk": { - // "description": "The name of the persistent disk.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -63359,18 +79186,11 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/disks/{disk}/resize", + // "path": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", // "request": { - // "$ref": "DisksResizeRequest" + // "$ref": "GlobalNetworkEndpointGroupsDetachEndpointsRequest" // }, // "response": { // "$ref": "Operation" @@ -63383,116 +79203,120 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.setIamPolicy": +// method id "compute.globalNetworkEndpointGroups.get": -type DisksSetIamPolicyCall struct { +type GlobalNetworkEndpointGroupsGetCall struct { s *Service project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest + networkEndpointGroup string urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// Get: Returns the specified network endpoint group. Gets a list of +// available network endpoint groups by making a list() request. // -// - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -// - zone: The name of the zone for this request. -func (r *DisksService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *DisksSetIamPolicyCall { - c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkEndpointGroup: The name of the network endpoint group. It +// should comply with RFC1035. +// - project: Project ID for this request. +func (r *GlobalNetworkEndpointGroupsService) Get(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsGetCall { + c := &GlobalNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + c.networkEndpointGroup = networkEndpointGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyCall { +func (c *GlobalNetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalNetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *GlobalNetworkEndpointGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *DisksSetIamPolicyCall) Context(ctx context.Context) *DisksSetIamPolicyCall { +func (c *GlobalNetworkEndpointGroupsGetCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksSetIamPolicyCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.globalNetworkEndpointGroups.get" call. +// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroup.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63504,78 +79328,61 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/setIamPolicy", - // "httpMethod": "POST", - // "id": "compute.disks.setIamPolicy", + // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + // "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}", + // "httpMethod": "GET", + // "id": "compute.globalNetworkEndpointGroups.get", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "networkEndpointGroup" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/disks/{resource}/setIamPolicy", - // "request": { - // "$ref": "ZoneSetPolicyRequest" - // }, + // "path": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}", // "response": { - // "$ref": "Policy" + // "$ref": "NetworkEndpointGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.setLabels": +// method id "compute.globalNetworkEndpointGroups.insert": -type DisksSetLabelsCall struct { +type GlobalNetworkEndpointGroupsInsertCall struct { s *Service project string - zone string - resource string - zonesetlabelsrequest *ZoneSetLabelsRequest + networkendpointgroup *NetworkEndpointGroup urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetLabels: Sets the labels on a disk. To learn more about labels, -// read the Labeling Resources documentation. 
+// Insert: Creates a network endpoint group in the specified project +// using the parameters that are included in the request. // // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -// - zone: The name of the zone for this request. -func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { - c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalNetworkEndpointGroupsService) Insert(project string, networkendpointgroup *NetworkEndpointGroup) *GlobalNetworkEndpointGroupsInsertCall { + c := &GlobalNetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetlabelsrequest = zonesetlabelsrequest + c.networkendpointgroup = networkendpointgroup return c } @@ -63590,7 +79397,7 @@ func (r *DisksService) SetLabels(project string, zone string, resource string, z // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { +func (c *GlobalNetworkEndpointGroupsInsertCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -63598,7 +79405,7 @@ func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { +func (c *GlobalNetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63606,21 +79413,21 @@ func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { +func (c *GlobalNetworkEndpointGroupsInsertCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
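// As an illustrative, non-authoritative sketch of the insert call defined
// above (it assumes the generated Service.GlobalNetworkEndpointGroups field,
// the package's NewService constructor, and the github.com/google/uuid
// helper, none of which appear in this hunk), a caller could create a global
// NEG and pass a request ID so that a retried request is deduplicated by the
// server:
//
//	ctx := context.Background()
//	svc, err := compute.NewService(ctx) // assumed generated constructor
//	if err != nil {
//		log.Fatal(err)
//	}
//	neg := &compute.NetworkEndpointGroup{
//		Name:                "my-neg",               // hypothetical resource name
//		NetworkEndpointType: "INTERNET_FQDN_PORT",   // assumed enum value for a global NEG
//	}
//	op, err := svc.GlobalNetworkEndpointGroups.Insert("my-project", neg).
//		RequestId(uuid.NewString()). // same ID on retry lets the server ignore duplicates
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("operation:", op.Name)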
-func (c *DisksSetLabelsCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -63628,14 +79435,14 @@ func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -63643,38 +79450,36 @@ func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.setLabels" call. +// Do executes the "compute.globalNetworkEndpointGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -63688,14 +79493,12 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Sets the labels on a disk. 
To learn more about labels, read the Labeling Resources documentation.", - // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/setLabels", + // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + // "flatPath": "projects/{project}/global/networkEndpointGroups", // "httpMethod": "POST", - // "id": "compute.disks.setLabels", + // "id": "compute.globalNetworkEndpointGroups.insert", // "parameterOrder": [ - // "project", - // "zone", - // "resource" + // "project" // ], // "parameters": { // "project": { @@ -63709,69 +79512,414 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // } + // }, + // "path": "projects/{project}/global/networkEndpointGroups", + // "request": { + // "$ref": "NetworkEndpointGroup" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalNetworkEndpointGroups.list": + +type GlobalNetworkEndpointGroupsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of network endpoint groups that are located +// in the specified project. +// +// - project: Project ID for this request. +func (r *GlobalNetworkEndpointGroupsService) List(project string) *GlobalNetworkEndpointGroupsListCall { + c := &GlobalNetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. 
+// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *GlobalNetworkEndpointGroupsListCall) Filter(filter string) *GlobalNetworkEndpointGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *GlobalNetworkEndpointGroupsListCall) MaxResults(maxResults int64) *GlobalNetworkEndpointGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *GlobalNetworkEndpointGroupsListCall) OrderBy(orderBy string) *GlobalNetworkEndpointGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *GlobalNetworkEndpointGroupsListCall) PageToken(pageToken string) *GlobalNetworkEndpointGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. 
+func (c *GlobalNetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalNetworkEndpointGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalNetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalNetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *GlobalNetworkEndpointGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalNetworkEndpointGroupsListCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalNetworkEndpointGroupsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GlobalNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.globalNetworkEndpointGroups.list" call. +// Exactly one of *NetworkEndpointGroupList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NetworkEndpointGroupList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of network endpoint groups that are located in the specified project.", + // "flatPath": "projects/{project}/global/networkEndpointGroups", + // "httpMethod": "GET", + // "id": "compute.globalNetworkEndpointGroups.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/disks/{resource}/setLabels", - // "request": { - // "$ref": "ZoneSetLabelsRequest" - // }, + // "path": "projects/{project}/global/networkEndpointGroups", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkEndpointGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
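// For example, a caller could walk every page of global network endpoint
// groups in a project. This is an illustrative sketch only: it assumes the
// package's generated NewService constructor and the
// Service.GlobalNetworkEndpointGroups field, neither of which appears in
// this hunk.
//
//	ctx := context.Background()
//	svc, err := compute.NewService(ctx) // assumed generated constructor
//	if err != nil {
//		log.Fatal(err)
//	}
//	call := svc.GlobalNetworkEndpointGroups.List("my-project").MaxResults(100)
//	err = call.Pages(ctx, func(page *compute.NetworkEndpointGroupList) error {
//		for _, neg := range page.Items { // Items assumed to hold *NetworkEndpointGroup values
//			fmt.Println(neg.Name)
//		}
//		return nil // returning an error here would halt the iteration
//	})
//	if err != nil {
//		log.Fatal(err)
//	}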
+func (c *GlobalNetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.globalNetworkEndpointGroups.listNetworkEndpoints": + +type GlobalNetworkEndpointGroupsListNetworkEndpointsCall struct { + s *Service + project string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ListNetworkEndpoints: Lists the network endpoints in the specified +// network endpoint group. +// +// - networkEndpointGroup: The name of the network endpoint group from +// which you want to generate a list of included network endpoints. It +// should comply with RFC1035. +// - project: Project ID for this request. +func (r *GlobalNetworkEndpointGroupsService) ListNetworkEndpoints(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c := &GlobalNetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.networkEndpointGroup = networkEndpointGroup + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} -type DisksTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("pageToken", pageToken) + return c } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// -// - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -// - zone: The name of the zone for this request. -func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { - c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. 
+func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63779,21 +79927,21 @@ func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIam // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksTestIamPermissionsCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -63801,14 +79949,9 @@ func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -63816,40 +79959,41 @@ func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.globalNetworkEndpointGroups.listNetworkEndpoints" call. +// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TestPermissionsResponse{ + ret := &NetworkEndpointGroupsListNetworkEndpoints{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63861,44 +80005,60 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "description": "Lists the network endpoints in the specified network endpoint group.", + // "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", // "httpMethod": "POST", - // "id": "compute.disks.testIamPermissions", + // "id": "compute.globalNetworkEndpointGroups.listNetworkEndpoints", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "networkEndpointGroup" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -63909,120 +80069,230 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } -// method id "compute.externalVpnGateways.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ExternalVpnGatewaysDeleteCall struct { - s *Service - project string - externalVpnGateway string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.globalOperations.aggregatedList": + +type GlobalOperationsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified externalVpnGateway. +// AggregatedList: Retrieves an aggregated list of all operations. // -// - externalVpnGateway: Name of the externalVpnGateways to delete. // - project: Project ID for this request. -func (r *ExternalVpnGatewaysService) Delete(project string, externalVpnGateway string) *ExternalVpnGatewaysDeleteCall { - c := &ExternalVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { + c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.externalVpnGateway = externalVpnGateway return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. 
If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *ExternalVpnGatewaysDeleteCall) RequestId(requestId string) *ExternalVpnGatewaysDeleteCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. 
For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *GlobalOperationsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *GlobalOperationsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExternalVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysDeleteCall { +func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ExternalVpnGatewaysDeleteCall) Context(ctx context.Context) *ExternalVpnGatewaysDeleteCall { +func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ExternalVpnGatewaysDeleteCall) Header() http.Header { +func (c *GlobalOperationsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExternalVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/externalVpnGateways/{externalVpnGateway}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/operations") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "externalVpnGateway": c.externalVpnGateway, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.externalVpnGateways.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.globalOperations.aggregatedList" call. +// Exactly one of *OperationAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *OperationAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &OperationAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64034,20 +80304,40 @@ func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Deletes the specified externalVpnGateway.", - // "flatPath": "projects/{project}/global/externalVpnGateways/{externalVpnGateway}", - // "httpMethod": "DELETE", - // "id": "compute.externalVpnGateways.delete", + // "description": "Retrieves an aggregated list of all operations.", + // "flatPath": "projects/{project}/aggregated/operations", + // "httpMethod": "GET", + // "id": "compute.globalOperations.aggregatedList", // "parameterOrder": [ - // "project", - // "externalVpnGateway" + // "project" // ], // "parameters": { - // "externalVpnGateway": { - // "description": "Name of the externalVpnGateways to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -64057,161 +80347,143 @@ func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Opera // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // } // }, - // "path": "projects/{project}/global/externalVpnGateways/{externalVpnGateway}", + // "path": "projects/{project}/aggregated/operations", // "response": { - // "$ref": "Operation" + // "$ref": "OperationAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.externalVpnGateways.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ExternalVpnGatewaysGetCall struct { - s *Service - project string - externalVpnGateway string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.globalOperations.delete": + +type GlobalOperationsDeleteCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified externalVpnGateway. Get a list of -// available externalVpnGateways by making a list() request. +// Delete: Deletes the specified Operations resource. // -// - externalVpnGateway: Name of the externalVpnGateway to return. +// - operation: Name of the Operations resource to delete. // - project: Project ID for this request. -func (r *ExternalVpnGatewaysService) Get(project string, externalVpnGateway string) *ExternalVpnGatewaysGetCall { - c := &ExternalVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { + c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.externalVpnGateway = externalVpnGateway + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExternalVpnGatewaysGetCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysGetCall { +func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ExternalVpnGatewaysGetCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ExternalVpnGatewaysGetCall) Context(ctx context.Context) *ExternalVpnGatewaysGetCall { +func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ExternalVpnGatewaysGetCall) Header() http.Header { +func (c *GlobalOperationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExternalVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/externalVpnGateways/{externalVpnGateway}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "externalVpnGateway": c.externalVpnGateway, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.externalVpnGateways.get" call. -// Exactly one of *ExternalVpnGateway or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ExternalVpnGateway.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGateway, error) { +// Do executes the "compute.globalOperations.delete" call. +func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ExternalVpnGateway{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Returns the specified externalVpnGateway. 
Get a list of available externalVpnGateways by making a list() request.", - // "flatPath": "projects/{project}/global/externalVpnGateways/{externalVpnGateway}", - // "httpMethod": "GET", - // "id": "compute.externalVpnGateways.get", + // "description": "Deletes the specified Operations resource.", + // "flatPath": "projects/{project}/global/operations/{operation}", + // "httpMethod": "DELETE", + // "id": "compute.globalOperations.delete", // "parameterOrder": [ // "project", - // "externalVpnGateway" + // "operation" // ], // "parameters": { - // "externalVpnGateway": { - // "description": "Name of the externalVpnGateway to return.", + // "operation": { + // "description": "Name of the Operations resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -64223,135 +80495,125 @@ func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*External // "type": "string" // } // }, - // "path": "projects/{project}/global/externalVpnGateways/{externalVpnGateway}", - // "response": { - // "$ref": "ExternalVpnGateway" - // }, + // "path": "projects/{project}/global/operations/{operation}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.externalVpnGateways.insert": +// method id "compute.globalOperations.get": -type ExternalVpnGatewaysInsertCall struct { - s *Service - project string - externalvpngateway *ExternalVpnGateway - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOperationsGetCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a ExternalVpnGateway in the specified project using -// the data included in the request. +// Get: Retrieves the specified Operations resource. // +// - operation: Name of the Operations resource to return. // - project: Project ID for this request. -func (r *ExternalVpnGatewaysService) Insert(project string, externalvpngateway *ExternalVpnGateway) *ExternalVpnGatewaysInsertCall { - c := &ExternalVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { + c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.externalvpngateway = externalvpngateway - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *ExternalVpnGatewaysInsertCall) RequestId(requestId string) *ExternalVpnGatewaysInsertCall { - c.urlParams_.Set("requestId", requestId) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExternalVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysInsertCall { +func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ExternalVpnGatewaysInsertCall) Context(ctx context.Context) *ExternalVpnGatewaysInsertCall { +func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ExternalVpnGatewaysInsertCall) Header() http.Header { +func (c *GlobalOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExternalVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.externalvpngateway) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/externalVpnGateways") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.externalVpnGateways.insert" call. +// Do executes the "compute.globalOperations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *ExternalVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -64365,45 +80627,46 @@ func (c *ExternalVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Creates a ExternalVpnGateway in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/global/externalVpnGateways", - // "httpMethod": "POST", - // "id": "compute.externalVpnGateways.insert", + // "description": "Retrieves the specified Operations resource.", + // "flatPath": "projects/{project}/global/operations/{operation}", + // "httpMethod": "GET", + // "id": "compute.globalOperations.get", // "parameterOrder": [ - // "project" + // "project", + // "operation" // ], // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/global/externalVpnGateways", - // "request": { - // "$ref": "ExternalVpnGateway" - // }, + // "path": "projects/{project}/global/operations/{operation}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.externalVpnGateways.list": +// method id "compute.globalOperations.list": -type ExternalVpnGatewaysListCall struct { +type GlobalOperationsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -64412,30 +80675,33 @@ type ExternalVpnGatewaysListCall struct { header_ http.Header } -// List: Retrieves the list of ExternalVpnGateway available to the +// List: Retrieves a list of Operation resources contained within the // specified project. // // - project: Project ID for this request. -func (r *ExternalVpnGatewaysService) List(project string) *ExternalVpnGatewaysListCall { - c := &ExternalVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { + c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. 
+// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -64443,8 +80709,18 @@ func (r *ExternalVpnGatewaysService) List(project string) *ExternalVpnGatewaysLi // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *ExternalVpnGatewaysListCall) Filter(filter string) *ExternalVpnGatewaysListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { c.urlParams_.Set("filter", filter) return c } @@ -64455,7 +80731,7 @@ func (c *ExternalVpnGatewaysListCall) Filter(filter string) *ExternalVpnGateways // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *ExternalVpnGatewaysListCall) MaxResults(maxResults int64) *ExternalVpnGatewaysListCall { +func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -64469,7 +80745,7 @@ func (c *ExternalVpnGatewaysListCall) MaxResults(maxResults int64) *ExternalVpnG // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *ExternalVpnGatewaysListCall) OrderBy(orderBy string) *ExternalVpnGatewaysListCall { +func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -64477,7 +80753,7 @@ func (c *ExternalVpnGatewaysListCall) OrderBy(orderBy string) *ExternalVpnGatewa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *ExternalVpnGatewaysListCall) PageToken(pageToken string) *ExternalVpnGatewaysListCall { +func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -64486,7 +80762,7 @@ func (c *ExternalVpnGatewaysListCall) PageToken(pageToken string) *ExternalVpnGa // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. 
-func (c *ExternalVpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ExternalVpnGatewaysListCall { +func (c *GlobalOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOperationsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -64494,7 +80770,7 @@ func (c *ExternalVpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExternalVpnGatewaysListCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysListCall { +func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64504,7 +80780,7 @@ func (c *ExternalVpnGatewaysListCall) Fields(s ...googleapi.Field) *ExternalVpnG // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ExternalVpnGatewaysListCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysListCall { +func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -64512,21 +80788,21 @@ func (c *ExternalVpnGatewaysListCall) IfNoneMatch(entityTag string) *ExternalVpn // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ExternalVpnGatewaysListCall) Context(ctx context.Context) *ExternalVpnGatewaysListCall { +func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ExternalVpnGatewaysListCall) Header() http.Header { +func (c *GlobalOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExternalVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -64539,7 +80815,7 @@ func (c *ExternalVpnGatewaysListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/externalVpnGateways") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/operations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -64552,33 +80828,33 @@ func (c *ExternalVpnGatewaysListCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.externalVpnGateways.list" call. -// Exactly one of *ExternalVpnGatewayList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ExternalVpnGatewayList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalOperations.list" call. 
+// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGatewayList, error) { +func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ExternalVpnGatewayList{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64590,16 +80866,16 @@ func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*Externa } return ret, nil // { - // "description": "Retrieves the list of ExternalVpnGateway available to the specified project.", - // "flatPath": "projects/{project}/global/externalVpnGateways", + // "description": "Retrieves a list of Operation resources contained within the specified project.", + // "flatPath": "projects/{project}/global/operations", // "httpMethod": "GET", - // "id": "compute.externalVpnGateways.list", + // "id": "compute.globalOperations.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -64634,9 +80910,9 @@ func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*Externa // "type": "boolean" // } // }, - // "path": "projects/{project}/global/externalVpnGateways", + // "path": "projects/{project}/global/operations", // "response": { - // "$ref": "ExternalVpnGatewayList" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -64650,7 +80926,7 @@ func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*Externa // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *ExternalVpnGatewaysListCall) Pages(ctx context.Context, f func(*ExternalVpnGatewayList) error) error { +func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -64668,35 +80944,43 @@ func (c *ExternalVpnGatewaysListCall) Pages(ctx context.Context, f func(*Externa } } -// method id "compute.externalVpnGateways.setLabels": +// method id "compute.globalOperations.wait": -type ExternalVpnGatewaysSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOperationsWaitCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an ExternalVpnGateway. To learn more -// about labels, read the Labeling Resources documentation. +// Wait: Waits for the specified Operation resource to return as `DONE` +// or for the request to approach the 2 minute deadline, and retrieves +// the specified Operation resource. This method differs from the `GET` +// method in that it waits for no more than the default deadline (2 +// minutes) and then returns the current state of the operation, which +// might be `DONE` or still in progress. This method is called on a +// best-effort basis. Specifically: - In uncommon cases, when the server +// is overloaded, the request might return before the default deadline +// is reached, or might return after zero seconds. - If the default +// deadline is reached, there is no guarantee that the operation is +// actually done when the method returns. Be prepared to retry if the +// operation is not `DONE`. // +// - operation: Name of the Operations resource to return. // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -func (r *ExternalVpnGatewaysService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ExternalVpnGatewaysSetLabelsCall { - c := &ExternalVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalOperationsService) Wait(project string, operation string) *GlobalOperationsWaitCall { + c := &GlobalOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ExternalVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysSetLabelsCall { +func (c *GlobalOperationsWaitCall) Fields(s ...googleapi.Field) *GlobalOperationsWaitCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64704,21 +80988,21 @@ func (c *ExternalVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *Externa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ExternalVpnGatewaysSetLabelsCall) Context(ctx context.Context) *ExternalVpnGatewaysSetLabelsCall { +func (c *GlobalOperationsWaitCall) Context(ctx context.Context) *GlobalOperationsWaitCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ExternalVpnGatewaysSetLabelsCall) Header() http.Header { +func (c *GlobalOperationsWaitCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ExternalVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -64726,14 +81010,9 @@ func (c *ExternalVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/externalVpnGateways/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/operations/{operation}/wait") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -64741,37 +81020,37 @@ func (c *ExternalVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.externalVpnGateways.setLabels" call. +// Do executes the "compute.globalOperations.wait" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ExternalVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -64785,192 +81064,33 @@ func (c *ExternalVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Sets the labels on an ExternalVpnGateway. 
To learn more about labels, read the Labeling Resources documentation.", - // "flatPath": "projects/{project}/global/externalVpnGateways/{resource}/setLabels", + // "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress. This method is called on a best-effort basis. Specifically: - In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. - If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`. ", + // "flatPath": "projects/{project}/global/operations/{operation}/wait", // "httpMethod": "POST", - // "id": "compute.externalVpnGateways.setLabels", + // "id": "compute.globalOperations.wait", // "parameterOrder": [ // "project", - // "resource" + // "operation" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/global/externalVpnGateways/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.externalVpnGateways.testIamPermissions": - -type ExternalVpnGatewaysTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// -// - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -func (r *ExternalVpnGatewaysService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ExternalVpnGatewaysTestIamPermissionsCall { - c := &ExternalVpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ExternalVpnGatewaysTestIamPermissionsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ExternalVpnGatewaysTestIamPermissionsCall) Context(ctx context.Context) *ExternalVpnGatewaysTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ExternalVpnGatewaysTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ExternalVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/externalVpnGateways/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.externalVpnGateways.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ExternalVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "flatPath": "projects/{project}/global/externalVpnGateways/{resource}/testIamPermissions", - // "httpMethod": "POST", - // "id": "compute.externalVpnGateways.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/global/externalVpnGateways/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "projects/{project}/global/operations/{operation}/wait", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -64981,58 +81101,36 @@ func (c *ExternalVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOpt } -// method id "compute.firewallPolicies.addAssociation": +// method id "compute.globalOrganizationOperations.delete": -type FirewallPoliciesAddAssociationCall struct { - s *Service - firewallPolicy string - firewallpolicyassociation *FirewallPolicyAssociation - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOrganizationOperationsDeleteCall struct { + s *Service + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddAssociation: Inserts an association for the specified firewall -// policy. +// Delete: Deletes the specified Operations resource. // -// - firewallPolicy: Name of the firewall policy to update. -func (r *FirewallPoliciesService) AddAssociation(firewallPolicy string, firewallpolicyassociation *FirewallPolicyAssociation) *FirewallPoliciesAddAssociationCall { - c := &FirewallPoliciesAddAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.firewallPolicy = firewallPolicy - c.firewallpolicyassociation = firewallpolicyassociation - return c -} - -// ReplaceExistingAssociation sets the optional parameter -// "replaceExistingAssociation": Indicates whether or not to replace it -// if an association of the attachment already exists. This is false by -// default, in which case an error will be returned if an association -// already exists. 
-func (c *FirewallPoliciesAddAssociationCall) ReplaceExistingAssociation(replaceExistingAssociation bool) *FirewallPoliciesAddAssociationCall { - c.urlParams_.Set("replaceExistingAssociation", fmt.Sprint(replaceExistingAssociation)) +// - operation: Name of the Operations resource to delete. +func (r *GlobalOrganizationOperationsService) Delete(operation string) *GlobalOrganizationOperationsDeleteCall { + c := &GlobalOrganizationOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.operation = operation return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallPoliciesAddAssociationCall) RequestId(requestId string) *FirewallPoliciesAddAssociationCall { - c.urlParams_.Set("requestId", requestId) +// ParentId sets the optional parameter "parentId": Parent ID for this +// request. +func (c *GlobalOrganizationOperationsDeleteCall) ParentId(parentId string) *GlobalOrganizationOperationsDeleteCall { + c.urlParams_.Set("parentId", parentId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallPoliciesAddAssociationCall) Fields(s ...googleapi.Field) *FirewallPoliciesAddAssociationCall { +func (c *GlobalOrganizationOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65040,21 +81138,21 @@ func (c *FirewallPoliciesAddAssociationCall) Fields(s ...googleapi.Field) *Firew // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesAddAssociationCall) Context(ctx context.Context) *FirewallPoliciesAddAssociationCall { +func (c *GlobalOrganizationOperationsDeleteCall) Context(ctx context.Context) *GlobalOrganizationOperationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *FirewallPoliciesAddAssociationCall) Header() http.Header { +func (c *GlobalOrganizationOperationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesAddAssociationCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOrganizationOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -65062,97 +81160,56 @@ func (c *FirewallPoliciesAddAssociationCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyassociation) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/addAssociation") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "firewallPolicy": c.firewallPolicy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewallPolicies.addAssociation" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + "operation": c.operation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.globalOrganizationOperations.delete" call. +func (c *GlobalOrganizationOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Inserts an association for the specified firewall policy.", - // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/addAssociation", - // "httpMethod": "POST", - // "id": "compute.firewallPolicies.addAssociation", + // "description": "Deletes the specified Operations resource.", + // "flatPath": "locations/global/operations/{operation}", + // "httpMethod": "DELETE", + // "id": "compute.globalOrganizationOperations.delete", // "parameterOrder": [ - // "firewallPolicy" + // "operation" // ], // "parameters": { - // "firewallPolicy": { - // "description": "Name of the firewall policy to update.", + // "operation": { + // "description": "Name of the Operations resource to delete.", // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "replaceExistingAssociation": { - // "description": "Indicates whether or not to replace it if an association of the attachment already exists. This is false by default, in which case an error will be returned if an association already exists.", - // "location": "query", - // "type": "boolean" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "parentId": { + // "description": "Parent ID for this request.", // "location": "query", // "type": "string" // } // }, - // "path": "locations/global/firewallPolicies/{firewallPolicy}/addAssociation", - // "request": { - // "$ref": "FirewallPolicyAssociation" - // }, - // "response": { - // "$ref": "Operation" - // }, + // "path": "locations/global/operations/{operation}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute" @@ -65161,121 +81218,120 @@ func (c *FirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.firewallPolicies.addRule": +// method id "compute.globalOrganizationOperations.get": -type FirewallPoliciesAddRuleCall struct { - s *Service - firewallPolicy string - firewallpolicyrule *FirewallPolicyRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOrganizationOperationsGetCall struct { + s *Service + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AddRule: Inserts a rule into a firewall policy. +// Get: Retrieves the specified Operations resource. Gets a list of +// operations by making a `list()` request. // -// - firewallPolicy: Name of the firewall policy to update. -func (r *FirewallPoliciesService) AddRule(firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *FirewallPoliciesAddRuleCall { - c := &FirewallPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.firewallPolicy = firewallPolicy - c.firewallpolicyrule = firewallpolicyrule +// - operation: Name of the Operations resource to return. +func (r *GlobalOrganizationOperationsService) Get(operation string) *GlobalOrganizationOperationsGetCall { + c := &GlobalOrganizationOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.operation = operation return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallPoliciesAddRuleCall) RequestId(requestId string) *FirewallPoliciesAddRuleCall { - c.urlParams_.Set("requestId", requestId) +// ParentId sets the optional parameter "parentId": Parent ID for this +// request. +func (c *GlobalOrganizationOperationsGetCall) ParentId(parentId string) *GlobalOrganizationOperationsGetCall { + c.urlParams_.Set("parentId", parentId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *FirewallPoliciesAddRuleCall) Fields(s ...googleapi.Field) *FirewallPoliciesAddRuleCall { +func (c *GlobalOrganizationOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalOrganizationOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOrganizationOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesAddRuleCall) Context(ctx context.Context) *FirewallPoliciesAddRuleCall { +func (c *GlobalOrganizationOperationsGetCall) Context(ctx context.Context) *GlobalOrganizationOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallPoliciesAddRuleCall) Header() http.Header { +func (c *GlobalOrganizationOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOrganizationOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyrule) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/addRule") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "firewallPolicy": c.firewallPolicy, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.addRule" call. +// Do executes the "compute.globalOrganizationOperations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalOrganizationOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -65289,159 +81345,231 @@ func (c *FirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Inserts a rule into a firewall policy.", - // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/addRule", - // "httpMethod": "POST", - // "id": "compute.firewallPolicies.addRule", + // "description": "Retrieves the specified Operations resource. Gets a list of operations by making a `list()` request.", + // "flatPath": "locations/global/operations/{operation}", + // "httpMethod": "GET", + // "id": "compute.globalOrganizationOperations.get", // "parameterOrder": [ - // "firewallPolicy" + // "operation" // ], // "parameters": { - // "firewallPolicy": { - // "description": "Name of the firewall policy to update.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "parentId": { + // "description": "Parent ID for this request.", // "location": "query", // "type": "string" // } // }, - // "path": "locations/global/firewallPolicies/{firewallPolicy}/addRule", - // "request": { - // "$ref": "FirewallPolicyRule" - // }, + // "path": "locations/global/operations/{operation}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.firewallPolicies.cloneRules": +// method id "compute.globalOrganizationOperations.list": -type FirewallPoliciesCloneRulesCall struct { - s *Service - firewallPolicy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOrganizationOperationsListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// CloneRules: Copies rules to the specified firewall policy. -// -// - firewallPolicy: Name of the firewall policy to update. 
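// A minimal usage sketch for the generated GlobalOrganizationOperations Get call,
// assuming the surrounding code is the generated Compute client (for example
// google.golang.org/api/compute/v1) and that Application Default Credentials are
// available. The organization ID, operation name, and ETag below are hypothetical
// placeholders; IfNoneMatch plus googleapi.IsNotModified is the conditional-read
// pattern the doc comments above describe.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}
	op, err := svc.GlobalOrganizationOperations.
		Get("operation-1234567890").             // hypothetical operation name
		ParentId("123456789012").                // hypothetical parent organization ID
		IfNoneMatch(`"etag-from-previous-get"`). // hypothetical ETag from an earlier read
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		log.Println("operation has not changed since the last read")
		return
	}
	if err != nil {
		log.Fatalf("globalOrganizationOperations.get: %v", err)
	}
	log.Printf("operation %s: status=%s", op.Name, op.Status)
}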
-func (r *FirewallPoliciesService) CloneRules(firewallPolicy string) *FirewallPoliciesCloneRulesCall { - c := &FirewallPoliciesCloneRulesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.firewallPolicy = firewallPolicy +// List: Retrieves a list of Operation resources contained within the +// specified organization. +func (r *GlobalOrganizationOperationsService) List() *GlobalOrganizationOperationsListCall { + c := &GlobalOrganizationOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallPoliciesCloneRulesCall) RequestId(requestId string) *FirewallPoliciesCloneRulesCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *GlobalOrganizationOperationsListCall) Filter(filter string) *GlobalOrganizationOperationsListCall { + c.urlParams_.Set("filter", filter) return c } -// SourceFirewallPolicy sets the optional parameter -// "sourceFirewallPolicy": The firewall policy from which to copy rules. -func (c *FirewallPoliciesCloneRulesCall) SourceFirewallPolicy(sourceFirewallPolicy string) *FirewallPoliciesCloneRulesCall { - c.urlParams_.Set("sourceFirewallPolicy", sourceFirewallPolicy) +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *GlobalOrganizationOperationsListCall) MaxResults(maxResults int64) *GlobalOrganizationOperationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *GlobalOrganizationOperationsListCall) OrderBy(orderBy string) *GlobalOrganizationOperationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *GlobalOrganizationOperationsListCall) PageToken(pageToken string) *GlobalOrganizationOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ParentId sets the optional parameter "parentId": Parent ID for this +// request. +func (c *GlobalOrganizationOperationsListCall) ParentId(parentId string) *GlobalOrganizationOperationsListCall { + c.urlParams_.Set("parentId", parentId) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *GlobalOrganizationOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOrganizationOperationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
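// Illustrative filter strings for the Filter option documented above, built from
// the grammar examples in that doc comment; the field names are hypothetical for
// any particular list call and are shown only to contrast the AIP-160 style with
// the RE2 `eq`/`ne` style.
const (
	// Exclude a resource by name.
	filterByName = `name != example-instance`
	// Nested field plus a label-presence test; adjacent expressions are ANDed.
	filterNestedAndLabel = `(scheduling.automaticRestart = false) (labels.owner:*)`
	// RE2 regular-expression form using the `ne` operator.
	filterRegexp = `name ne .*instance`
)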
-func (c *FirewallPoliciesCloneRulesCall) Fields(s ...googleapi.Field) *FirewallPoliciesCloneRulesCall { +func (c *GlobalOrganizationOperationsListCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalOrganizationOperationsListCall) IfNoneMatch(entityTag string) *GlobalOrganizationOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesCloneRulesCall) Context(ctx context.Context) *FirewallPoliciesCloneRulesCall { +func (c *GlobalOrganizationOperationsListCall) Context(ctx context.Context) *GlobalOrganizationOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallPoliciesCloneRulesCall) Header() http.Header { +func (c *GlobalOrganizationOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesCloneRulesCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOrganizationOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/cloneRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "firewallPolicy": c.firewallPolicy, - }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.cloneRules" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.globalOrganizationOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallPoliciesCloneRulesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65453,60 +81581,99 @@ func (c *FirewallPoliciesCloneRulesCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Copies rules to the specified firewall policy.", - // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/cloneRules", - // "httpMethod": "POST", - // "id": "compute.firewallPolicies.cloneRules", - // "parameterOrder": [ - // "firewallPolicy" - // ], + // "description": "Retrieves a list of Operation resources contained within the specified organization.", + // "flatPath": "locations/global/operations", + // "httpMethod": "GET", + // "id": "compute.globalOrganizationOperations.list", // "parameters": { - // "firewallPolicy": { - // "description": "Name of the firewall policy to update.", - // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, - // "sourceFirewallPolicy": { - // "description": "The firewall policy from which to copy rules.", + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "parentId": { + // "description": "Parent ID for this request.", // "location": "query", // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "locations/global/firewallPolicies/{firewallPolicy}/cloneRules", + // "path": "locations/global/operations", // "response": { - // "$ref": "Operation" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.firewallPolicies.delete": +// Pages invokes f for each page of results. 
+// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalOrganizationOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type FirewallPoliciesDeleteCall struct { - s *Service - firewallPolicy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.globalPublicDelegatedPrefixes.delete": + +type GlobalPublicDelegatedPrefixesDeleteCall struct { + s *Service + project string + publicDelegatedPrefix string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified policy. +// Delete: Deletes the specified global PublicDelegatedPrefix. // -// - firewallPolicy: Name of the firewall policy to delete. -func (r *FirewallPoliciesService) Delete(firewallPolicy string) *FirewallPoliciesDeleteCall { - c := &FirewallPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.firewallPolicy = firewallPolicy +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to delete. +func (r *GlobalPublicDelegatedPrefixesService) Delete(project string, publicDelegatedPrefix string) *GlobalPublicDelegatedPrefixesDeleteCall { + c := &GlobalPublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.publicDelegatedPrefix = publicDelegatedPrefix return c } @@ -65521,7 +81688,7 @@ func (r *FirewallPoliciesService) Delete(firewallPolicy string) *FirewallPolicie // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallPoliciesDeleteCall) RequestId(requestId string) *FirewallPoliciesDeleteCall { +func (c *GlobalPublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *GlobalPublicDelegatedPrefixesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -65529,7 +81696,7 @@ func (c *FirewallPoliciesDeleteCall) RequestId(requestId string) *FirewallPolici // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallPoliciesDeleteCall) Fields(s ...googleapi.Field) *FirewallPoliciesDeleteCall { +func (c *GlobalPublicDelegatedPrefixesDeleteCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65537,21 +81704,21 @@ func (c *FirewallPoliciesDeleteCall) Fields(s ...googleapi.Field) *FirewallPolic // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesDeleteCall) Context(ctx context.Context) *FirewallPoliciesDeleteCall { +func (c *GlobalPublicDelegatedPrefixesDeleteCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
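// A sketch of paging through organization operations with the generated Pages
// helper shown above, assuming the same compute client setup and imports as in the
// earlier sketch. The parent ID, filter, and page size are hypothetical.
func listOrganizationOperations(ctx context.Context, svc *compute.Service) error {
	call := svc.GlobalOrganizationOperations.List().
		ParentId("123456789012").           // hypothetical organization ID
		Filter(`operationType = "insert"`). // hypothetical filter; see the grammar above
		OrderBy("creationTimestamp desc").  // newest operations first
		MaxResults(100)
	// Pages invokes the callback once per page until NextPageToken is empty.
	return call.Pages(ctx, func(page *compute.OperationList) error {
		for _, op := range page.Items {
			log.Printf("%s\t%s", op.Name, op.Status)
		}
		return nil
	})
}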
-func (c *FirewallPoliciesDeleteCall) Header() http.Header { +func (c *GlobalPublicDelegatedPrefixesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalPublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -65561,7 +81728,7 @@ func (c *FirewallPoliciesDeleteCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -65569,36 +81736,37 @@ func (c *FirewallPoliciesDeleteCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "firewallPolicy": c.firewallPolicy, + "project": c.project, + "publicDelegatedPrefix": c.publicDelegatedPrefix, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.delete" call. +// Do executes the "compute.globalPublicDelegatedPrefixes.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalPublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -65612,18 +81780,26 @@ func (c *FirewallPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified policy.", - // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}", + // "description": "Deletes the specified global PublicDelegatedPrefix.", + // "flatPath": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "httpMethod": "DELETE", - // "id": "compute.firewallPolicies.delete", + // "id": "compute.globalPublicDelegatedPrefixes.delete", // "parameterOrder": [ - // "firewallPolicy" + // "project", + // "publicDelegatedPrefix" // ], // "parameters": { - // "firewallPolicy": { - // "description": "Name of the firewall policy to delete.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "publicDelegatedPrefix": { + // "description": "Name of the PublicDelegatedPrefix resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -65633,7 +81809,7 @@ func (c *FirewallPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "locations/global/firewallPolicies/{firewallPolicy}", + // "path": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "response": { // "$ref": "Operation" // }, @@ -65645,30 +81821,34 @@ func (c *FirewallPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.firewallPolicies.get": +// method id "compute.globalPublicDelegatedPrefixes.get": -type FirewallPoliciesGetCall struct { - s *Service - firewallPolicy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalPublicDelegatedPrefixesGetCall struct { + s *Service + project string + publicDelegatedPrefix string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified firewall policy. +// Get: Returns the specified global PublicDelegatedPrefix resource. // -// - firewallPolicy: Name of the firewall policy to get. -func (r *FirewallPoliciesService) Get(firewallPolicy string) *FirewallPoliciesGetCall { - c := &FirewallPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.firewallPolicy = firewallPolicy +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to return. 
+func (r *GlobalPublicDelegatedPrefixesService) Get(project string, publicDelegatedPrefix string) *GlobalPublicDelegatedPrefixesGetCall { + c := &GlobalPublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.publicDelegatedPrefix = publicDelegatedPrefix return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallPoliciesGetCall) Fields(s ...googleapi.Field) *FirewallPoliciesGetCall { +func (c *GlobalPublicDelegatedPrefixesGetCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65678,7 +81858,7 @@ func (c *FirewallPoliciesGetCall) Fields(s ...googleapi.Field) *FirewallPolicies // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *FirewallPoliciesGetCall) IfNoneMatch(entityTag string) *FirewallPoliciesGetCall { +func (c *GlobalPublicDelegatedPrefixesGetCall) IfNoneMatch(entityTag string) *GlobalPublicDelegatedPrefixesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -65686,21 +81866,21 @@ func (c *FirewallPoliciesGetCall) IfNoneMatch(entityTag string) *FirewallPolicie // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesGetCall) Context(ctx context.Context) *FirewallPoliciesGetCall { +func (c *GlobalPublicDelegatedPrefixesGetCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallPoliciesGetCall) Header() http.Header { +func (c *GlobalPublicDelegatedPrefixesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalPublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -65713,7 +81893,7 @@ func (c *FirewallPoliciesGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -65721,38 +81901,39 @@ func (c *FirewallPoliciesGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "firewallPolicy": c.firewallPolicy, + "project": c.project, + "publicDelegatedPrefix": c.publicDelegatedPrefix, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.get" call. -// Exactly one of *FirewallPolicy or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *FirewallPolicy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalPublicDelegatedPrefixes.get" call. +// Exactly one of *PublicDelegatedPrefix or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *PublicDelegatedPrefix.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *FirewallPoliciesGetCall) Do(opts ...googleapi.CallOption) (*FirewallPolicy, error) { +func (c *GlobalPublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefix, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &FirewallPolicy{ + ret := &PublicDelegatedPrefix{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65764,25 +81945,33 @@ func (c *FirewallPoliciesGetCall) Do(opts ...googleapi.CallOption) (*FirewallPol } return ret, nil // { - // "description": "Returns the specified firewall policy.", - // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}", + // "description": "Returns the specified global PublicDelegatedPrefix resource.", + // "flatPath": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "httpMethod": "GET", - // "id": "compute.firewallPolicies.get", + // "id": "compute.globalPublicDelegatedPrefixes.get", // "parameterOrder": [ - // "firewallPolicy" + // "project", + // "publicDelegatedPrefix" // ], // "parameters": { - // "firewallPolicy": { - // "description": "Name of the firewall policy to get.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "publicDelegatedPrefix": { + // "description": "Name of the PublicDelegatedPrefix resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "locations/global/firewallPolicies/{firewallPolicy}", + // "path": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "response": { - // "$ref": "FirewallPolicy" + // "$ref": "PublicDelegatedPrefix" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -65793,122 +81982,124 @@ func (c *FirewallPoliciesGetCall) Do(opts ...googleapi.CallOption) (*FirewallPol } -// method id "compute.firewallPolicies.getAssociation": +// method id "compute.globalPublicDelegatedPrefixes.insert": -type FirewallPoliciesGetAssociationCall struct { - s *Service - firewallPolicy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ 
http.Header +type GlobalPublicDelegatedPrefixesInsertCall struct { + s *Service + project string + publicdelegatedprefix *PublicDelegatedPrefix + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetAssociation: Gets an association with the specified name. +// Insert: Creates a global PublicDelegatedPrefix in the specified +// project using the parameters that are included in the request. // -// - firewallPolicy: Name of the firewall policy to which the queried -// rule belongs. -func (r *FirewallPoliciesService) GetAssociation(firewallPolicy string) *FirewallPoliciesGetAssociationCall { - c := &FirewallPoliciesGetAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.firewallPolicy = firewallPolicy +// - project: Project ID for this request. +func (r *GlobalPublicDelegatedPrefixesService) Insert(project string, publicdelegatedprefix *PublicDelegatedPrefix) *GlobalPublicDelegatedPrefixesInsertCall { + c := &GlobalPublicDelegatedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.publicdelegatedprefix = publicdelegatedprefix return c } -// Name sets the optional parameter "name": The name of the association -// to get from the firewall policy. -func (c *FirewallPoliciesGetAssociationCall) Name(name string) *FirewallPoliciesGetAssociationCall { - c.urlParams_.Set("name", name) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *GlobalPublicDelegatedPrefixesInsertCall) RequestId(requestId string) *GlobalPublicDelegatedPrefixesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallPoliciesGetAssociationCall) Fields(s ...googleapi.Field) *FirewallPoliciesGetAssociationCall { +func (c *GlobalPublicDelegatedPrefixesInsertCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *FirewallPoliciesGetAssociationCall) IfNoneMatch(entityTag string) *FirewallPoliciesGetAssociationCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
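// A sketch of the request-ID idempotency pattern described above, applied to the
// GlobalPublicDelegatedPrefixes Insert call and assuming the same client setup as
// in the earlier sketches. The project ID, resource name, and UUID are hypothetical
// placeholders; reusing the same request ID on a retry lets the server ignore the
// duplicate request.
func insertPublicDelegatedPrefix(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	prefix := &compute.PublicDelegatedPrefix{
		Name: "example-pdp", // hypothetical resource name
	}
	return svc.GlobalPublicDelegatedPrefixes.
		Insert("my-project", prefix).                      // hypothetical project ID
		RequestId("c7b3f2a0-4a2e-4c8e-9f1d-0d2f6a5e8b11"). // caller-generated UUID; reuse on retry
		Context(ctx).
		Do()
}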
-func (c *FirewallPoliciesGetAssociationCall) Context(ctx context.Context) *FirewallPoliciesGetAssociationCall { +func (c *GlobalPublicDelegatedPrefixesInsertCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallPoliciesGetAssociationCall) Header() http.Header { +func (c *GlobalPublicDelegatedPrefixesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesGetAssociationCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalPublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/getAssociation") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "firewallPolicy": c.firewallPolicy, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.getAssociation" call. -// Exactly one of *FirewallPolicyAssociation or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *FirewallPolicyAssociation.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *FirewallPoliciesGetAssociationCall) Do(opts ...googleapi.CallOption) (*FirewallPolicyAssociation, error) { +// Do executes the "compute.globalPublicDelegatedPrefixes.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalPublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &FirewallPolicyAssociation{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65920,234 +82111,148 @@ func (c *FirewallPoliciesGetAssociationCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets an association with the specified name.", - // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/getAssociation", - // "httpMethod": "GET", - // "id": "compute.firewallPolicies.getAssociation", + // "description": "Creates a global PublicDelegatedPrefix in the specified project using the parameters that are included in the request.", + // "flatPath": "projects/{project}/global/publicDelegatedPrefixes", + // "httpMethod": "POST", + // "id": "compute.globalPublicDelegatedPrefixes.insert", // "parameterOrder": [ - // "firewallPolicy" + // "project" // ], // "parameters": { - // "firewallPolicy": { - // "description": "Name of the firewall policy to which the queried rule belongs.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "name": { - // "description": "The name of the association to get from the firewall policy.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "locations/global/firewallPolicies/{firewallPolicy}/getAssociation", + // "path": "projects/{project}/global/publicDelegatedPrefixes", + // "request": { + // "$ref": "PublicDelegatedPrefix" + // }, // "response": { - // "$ref": "FirewallPolicyAssociation" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.firewallPolicies.getIamPolicy": +// method id "compute.globalPublicDelegatedPrefixes.list": -type FirewallPoliciesGetIamPolicyCall struct { +type GlobalPublicDelegatedPrefixesListCall struct { s *Service - resource string + project string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// List: Lists the global PublicDelegatedPrefixes for a project. // -// - resource: Name or id of the resource for this request. -func (r *FirewallPoliciesService) GetIamPolicy(resource string) *FirewallPoliciesGetIamPolicyCall { - c := &FirewallPoliciesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - return c -} - -// OptionsRequestedPolicyVersion sets the optional parameter -// "optionsRequestedPolicyVersion": Requested IAM Policy version. -func (c *FirewallPoliciesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *FirewallPoliciesGetIamPolicyCall { - c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) +// - project: Project ID for this request. +func (r *GlobalPublicDelegatedPrefixesService) List(project string) *GlobalPublicDelegatedPrefixesListCall { + c := &GlobalPublicDelegatedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project return c } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FirewallPoliciesGetIamPolicyCall) Fields(s ...googleapi.Field) *FirewallPoliciesGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. 
For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *GlobalPublicDelegatedPrefixesListCall) Filter(filter string) *GlobalPublicDelegatedPrefixesListCall { + c.urlParams_.Set("filter", filter) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *FirewallPoliciesGetIamPolicyCall) IfNoneMatch(entityTag string) *FirewallPoliciesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *GlobalPublicDelegatedPrefixesListCall) MaxResults(maxResults int64) *GlobalPublicDelegatedPrefixesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FirewallPoliciesGetIamPolicyCall) Context(ctx context.Context) *FirewallPoliciesGetIamPolicyCall { - c.ctx_ = ctx +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. 
Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *GlobalPublicDelegatedPrefixesListCall) OrderBy(orderBy string) *GlobalPublicDelegatedPrefixesListCall { + c.urlParams_.Set("orderBy", orderBy) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *FirewallPoliciesGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FirewallPoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{resource}/getIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewallPolicies.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *FirewallPoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - // "flatPath": "locations/global/firewallPolicies/{resource}/getIamPolicy", - // "httpMethod": "GET", - // "id": "compute.firewallPolicies.getIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "optionsRequestedPolicyVersion": { - // "description": "Requested IAM Policy version.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "locations/global/firewallPolicies/{resource}/getIamPolicy", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.firewallPolicies.getRule": - -type FirewallPoliciesGetRuleCall struct { - s *Service - firewallPolicy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetRule: Gets a rule of the specified priority. -// -// - firewallPolicy: Name of the firewall policy to which the queried -// rule belongs. -func (r *FirewallPoliciesService) GetRule(firewallPolicy string) *FirewallPoliciesGetRuleCall { - c := &FirewallPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.firewallPolicy = firewallPolicy +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *GlobalPublicDelegatedPrefixesListCall) PageToken(pageToken string) *GlobalPublicDelegatedPrefixesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } -// Priority sets the optional parameter "priority": The priority of the -// rule to get from the firewall policy. -func (c *FirewallPoliciesGetRuleCall) Priority(priority int64) *FirewallPoliciesGetRuleCall { - c.urlParams_.Set("priority", fmt.Sprint(priority)) +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *GlobalPublicDelegatedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalPublicDelegatedPrefixesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallPoliciesGetRuleCall) Fields(s ...googleapi.Field) *FirewallPoliciesGetRuleCall { +func (c *GlobalPublicDelegatedPrefixesListCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66157,7 +82262,7 @@ func (c *FirewallPoliciesGetRuleCall) Fields(s ...googleapi.Field) *FirewallPoli // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
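A minimal sketch of driving the generated list call with the filter, paging, and ordering parameters documented above for `compute.globalPublicDelegatedPrefixes.list`. The project ID and filter value are placeholders, `compute.NewService` is assumed to pick up Application Default Credentials, and the `Items`/`Name` fields follow the usual naming of the generated list structs rather than anything shown in this hunk.

```
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	call := svc.GlobalPublicDelegatedPrefixes.List("my-project").
		Filter(`name != example-prefix`).  // filter grammar as described above
		MaxResults(100).                   // 0..500, default 500
		OrderBy("creationTimestamp desc")  // newest first
	resp, err := call.Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	// Items is the conventional payload field on generated *List responses.
	for _, p := range resp.Items {
		fmt.Println(p.Name)
	}
}
```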
-func (c *FirewallPoliciesGetRuleCall) IfNoneMatch(entityTag string) *FirewallPoliciesGetRuleCall { +func (c *GlobalPublicDelegatedPrefixesListCall) IfNoneMatch(entityTag string) *GlobalPublicDelegatedPrefixesListCall { c.ifNoneMatch_ = entityTag return c } @@ -66165,21 +82270,21 @@ func (c *FirewallPoliciesGetRuleCall) IfNoneMatch(entityTag string) *FirewallPol // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesGetRuleCall) Context(ctx context.Context) *FirewallPoliciesGetRuleCall { +func (c *GlobalPublicDelegatedPrefixesListCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallPoliciesGetRuleCall) Header() http.Header { +func (c *GlobalPublicDelegatedPrefixesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalPublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -66192,7 +82297,7 @@ func (c *FirewallPoliciesGetRuleCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/getRule") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -66200,38 +82305,38 @@ func (c *FirewallPoliciesGetRuleCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "firewallPolicy": c.firewallPolicy, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.getRule" call. -// Exactly one of *FirewallPolicyRule or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *FirewallPolicyRule.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalPublicDelegatedPrefixes.list" call. +// Exactly one of *PublicDelegatedPrefixList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *PublicDelegatedPrefixList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *FirewallPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*FirewallPolicyRule, error) { +func (c *GlobalPublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &FirewallPolicyRule{ + ret := &PublicDelegatedPrefixList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66243,31 +82348,53 @@ func (c *FirewallPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Firewal } return ret, nil // { - // "description": "Gets a rule of the specified priority.", - // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/getRule", + // "description": "Lists the global PublicDelegatedPrefixes for a project.", + // "flatPath": "projects/{project}/global/publicDelegatedPrefixes", // "httpMethod": "GET", - // "id": "compute.firewallPolicies.getRule", + // "id": "compute.globalPublicDelegatedPrefixes.list", // "parameterOrder": [ - // "firewallPolicy" + // "project" // ], // "parameters": { - // "firewallPolicy": { - // "description": "Name of the firewall policy to which the queried rule belongs.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "priority": { - // "description": "The priority of the rule to get from the firewall policy.", - // "format": "int32", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", - // "type": "integer" + // "type": "boolean" // } // }, - // "path": "locations/global/firewallPolicies/{firewallPolicy}/getRule", + // "path": "projects/{project}/global/publicDelegatedPrefixes", // "response": { - // "$ref": "FirewallPolicyRule" + // "$ref": "PublicDelegatedPrefixList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -66278,30 +82405,51 @@ func (c *FirewallPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Firewal } -// method id "compute.firewallPolicies.insert": - -type FirewallPoliciesInsertCall struct { - s *Service - firewallpolicy *FirewallPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
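The `Pages` helper defined just below handles the `nextPageToken` loop that the `PageToken` documentation describes, invoking the callback once per page until the token is empty or the callback returns an error. A small usage sketch, again with a placeholder project ID and assuming the conventional `Items` field on the response:

```
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	err = svc.GlobalPublicDelegatedPrefixes.List("my-project").
		Pages(ctx, func(page *compute.PublicDelegatedPrefixList) error {
			for _, p := range page.Items {
				fmt.Println(p.Name)
			}
			return nil // a non-nil error here halts the iteration
		})
	if err != nil {
		log.Fatal(err)
	}
}
```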
+func (c *GlobalPublicDelegatedPrefixesListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Insert: Creates a new policy in the specified project using the data -// included in the request. -func (r *FirewallPoliciesService) Insert(firewallpolicy *FirewallPolicy) *FirewallPoliciesInsertCall { - c := &FirewallPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.firewallpolicy = firewallpolicy - return c +// method id "compute.globalPublicDelegatedPrefixes.patch": + +type GlobalPublicDelegatedPrefixesPatchCall struct { + s *Service + project string + publicDelegatedPrefix string + publicdelegatedprefix *PublicDelegatedPrefix + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ParentId sets the optional parameter "parentId": Parent ID for this -// request. The ID can be either be "folders/[FOLDER_ID]" if the parent -// is a folder or "organizations/[ORGANIZATION_ID]" if the parent is an -// organization. -func (c *FirewallPoliciesInsertCall) ParentId(parentId string) *FirewallPoliciesInsertCall { - c.urlParams_.Set("parentId", parentId) +// Patch: Patches the specified global PublicDelegatedPrefix resource +// with the data included in the request. This method supports PATCH +// semantics and uses JSON merge patch format and processing rules. +// +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to patch. +func (r *GlobalPublicDelegatedPrefixesService) Patch(project string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *GlobalPublicDelegatedPrefixesPatchCall { + c := &GlobalPublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.publicDelegatedPrefix = publicDelegatedPrefix + c.publicdelegatedprefix = publicdelegatedprefix return c } @@ -66316,7 +82464,7 @@ func (c *FirewallPoliciesInsertCall) ParentId(parentId string) *FirewallPolicies // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallPoliciesInsertCall) RequestId(requestId string) *FirewallPoliciesInsertCall { +func (c *GlobalPublicDelegatedPrefixesPatchCall) RequestId(requestId string) *GlobalPublicDelegatedPrefixesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -66324,7 +82472,7 @@ func (c *FirewallPoliciesInsertCall) RequestId(requestId string) *FirewallPolici // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallPoliciesInsertCall) Fields(s ...googleapi.Field) *FirewallPoliciesInsertCall { +func (c *GlobalPublicDelegatedPrefixesPatchCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66332,21 +82480,21 @@ func (c *FirewallPoliciesInsertCall) Fields(s ...googleapi.Field) *FirewallPolic // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesInsertCall) Context(ctx context.Context) *FirewallPoliciesInsertCall { +func (c *GlobalPublicDelegatedPrefixesPatchCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallPoliciesInsertCall) Header() http.Header { +func (c *GlobalPublicDelegatedPrefixesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalPublicDelegatedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -66354,48 +82502,52 @@ func (c *FirewallPoliciesInsertCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "publicDelegatedPrefix": c.publicDelegatedPrefix, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.insert" call. +// Do executes the "compute.globalPublicDelegatedPrefixes.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalPublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -66409,14 +82561,27 @@ func (c *FirewallPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a new policy in the specified project using the data included in the request.", - // "flatPath": "locations/global/firewallPolicies", - // "httpMethod": "POST", - // "id": "compute.firewallPolicies.insert", + // "description": "Patches the specified global PublicDelegatedPrefix resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", + // "httpMethod": "PATCH", + // "id": "compute.globalPublicDelegatedPrefixes.patch", + // "parameterOrder": [ + // "project", + // "publicDelegatedPrefix" + // ], // "parameters": { - // "parentId": { - // "description": "Parent ID for this request. The ID can be either be \"folders/[FOLDER_ID]\" if the parent is a folder or \"organizations/[ORGANIZATION_ID]\" if the parent is an organization.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "publicDelegatedPrefix": { + // "description": "Name of the PublicDelegatedPrefix resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "requestId": { @@ -66425,9 +82590,9 @@ func (c *FirewallPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "locations/global/firewallPolicies", + // "path": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "request": { - // "$ref": "FirewallPolicy" + // "$ref": "PublicDelegatedPrefix" // }, // "response": { // "$ref": "Operation" @@ -66440,37 +82605,44 @@ func (c *FirewallPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.firewallPolicies.list": +// method id "compute.healthChecks.aggregatedList": -type FirewallPoliciesListCall struct { +type HealthChecksAggregatedListCall struct { s *Service + project string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists all the policies that have been configured for the -// specified folder or organization. -func (r *FirewallPoliciesService) List() *FirewallPoliciesListCall { - c := &FirewallPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of all HealthCheck resources, +// regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. 
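The `compute.healthChecks.aggregatedList` method introduced below returns one response covering every scope (global plus each region). A hedged sketch of calling it: the project ID is a placeholder, and the `Items` map keyed by scope with a `HealthChecks` slice per scope follows the naming convention of the generated aggregated-list structs, so treat those field names as assumptions.

```
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := svc.HealthChecks.AggregatedList("my-project").
		IncludeAllScopes(true). // also list scopes where no health checks exist
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	// Aggregated responses are keyed by scope, e.g. "global" or "regions/us-central1".
	for scope, scoped := range resp.Items {
		for _, hc := range scoped.HealthChecks {
			fmt.Printf("%s: %s\n", scope, hc.Name)
		}
	}
}
```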
+func (r *HealthChecksService) AggregatedList(project string) *HealthChecksAggregatedListCall { + c := &HealthChecksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -66478,19 +82650,42 @@ func (r *FirewallPoliciesService) List() *FirewallPoliciesListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *FirewallPoliciesListCall) Filter(filter string) *FirewallPoliciesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. 
The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *HealthChecksAggregatedListCall) Filter(filter string) *HealthChecksAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *HealthChecksAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *HealthChecksAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than `maxResults`, Compute Engine returns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *FirewallPoliciesListCall) MaxResults(maxResults int64) *FirewallPoliciesListCall { +func (c *HealthChecksAggregatedListCall) MaxResults(maxResults int64) *HealthChecksAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -66504,7 +82699,7 @@ func (c *FirewallPoliciesListCall) MaxResults(maxResults int64) *FirewallPolicie // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *FirewallPoliciesListCall) OrderBy(orderBy string) *FirewallPoliciesListCall { +func (c *HealthChecksAggregatedListCall) OrderBy(orderBy string) *HealthChecksAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -66512,23 +82707,16 @@ func (c *FirewallPoliciesListCall) OrderBy(orderBy string) *FirewallPoliciesList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *FirewallPoliciesListCall) PageToken(pageToken string) *FirewallPoliciesListCall { +func (c *HealthChecksAggregatedListCall) PageToken(pageToken string) *HealthChecksAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// ParentId sets the optional parameter "parentId": Parent ID for this -// request. -func (c *FirewallPoliciesListCall) ParentId(parentId string) *FirewallPoliciesListCall { - c.urlParams_.Set("parentId", parentId) - return c -} - // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. 
-func (c *FirewallPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *FirewallPoliciesListCall { +func (c *HealthChecksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HealthChecksAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -66536,7 +82724,7 @@ func (c *FirewallPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess boo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallPoliciesListCall) Fields(s ...googleapi.Field) *FirewallPoliciesListCall { +func (c *HealthChecksAggregatedListCall) Fields(s ...googleapi.Field) *HealthChecksAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66546,7 +82734,7 @@ func (c *FirewallPoliciesListCall) Fields(s ...googleapi.Field) *FirewallPolicie // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *FirewallPoliciesListCall) IfNoneMatch(entityTag string) *FirewallPoliciesListCall { +func (c *HealthChecksAggregatedListCall) IfNoneMatch(entityTag string) *HealthChecksAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -66554,21 +82742,21 @@ func (c *FirewallPoliciesListCall) IfNoneMatch(entityTag string) *FirewallPolici // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesListCall) Context(ctx context.Context) *FirewallPoliciesListCall { +func (c *HealthChecksAggregatedListCall) Context(ctx context.Context) *HealthChecksAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallPoliciesListCall) Header() http.Header { +func (c *HealthChecksAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -66581,43 +82769,46 @@ func (c *FirewallPoliciesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/healthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.list" call. -// Exactly one of *FirewallPolicyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *FirewallPolicyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.healthChecks.aggregatedList" call. 
+// Exactly one of *HealthChecksAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *HealthChecksAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *FirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPolicyList, error) { +func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*HealthChecksAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &FirewallPolicyList{ + ret := &HealthChecksAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66629,16 +82820,24 @@ func (c *FirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPo } return ret, nil // { - // "description": "Lists all the policies that have been configured for the specified folder or organization.", - // "flatPath": "locations/global/firewallPolicies", + // "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/healthChecks", // "httpMethod": "GET", - // "id": "compute.firewallPolicies.list", + // "id": "compute.healthChecks.aggregatedList", + // "parameterOrder": [ + // "project" + // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", @@ -66657,9 +82856,11 @@ func (c *FirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPo // "location": "query", // "type": "string" // }, - // "parentId": { - // "description": "Parent ID for this request.", - // "location": "query", + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, // "returnPartialSuccess": { @@ -66668,9 +82869,9 @@ func (c *FirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPo // "type": "boolean" // } // }, - // "path": "locations/global/firewallPolicies", + // "path": "projects/{project}/aggregated/healthChecks", // "response": { - // "$ref": "FirewallPolicyList" + // "$ref": "HealthChecksAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -66684,7 +82885,7 @@ func (c *FirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPo // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *FirewallPoliciesListCall) Pages(ctx context.Context, f func(*FirewallPolicyList) error) error { +func (c *HealthChecksAggregatedListCall) Pages(ctx context.Context, f func(*HealthChecksAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -66702,176 +82903,25 @@ func (c *FirewallPoliciesListCall) Pages(ctx context.Context, f func(*FirewallPo } } -// method id "compute.firewallPolicies.listAssociations": - -type FirewallPoliciesListAssociationsCall struct { - s *Service - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// ListAssociations: Lists associations of a specified target, i.e., -// organization or folder. -func (r *FirewallPoliciesService) ListAssociations() *FirewallPoliciesListAssociationsCall { - c := &FirewallPoliciesListAssociationsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - return c -} - -// TargetResource sets the optional parameter "targetResource": The -// target resource to list associations. It is an organization, or a -// folder. -func (c *FirewallPoliciesListAssociationsCall) TargetResource(targetResource string) *FirewallPoliciesListAssociationsCall { - c.urlParams_.Set("targetResource", targetResource) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FirewallPoliciesListAssociationsCall) Fields(s ...googleapi.Field) *FirewallPoliciesListAssociationsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *FirewallPoliciesListAssociationsCall) IfNoneMatch(entityTag string) *FirewallPoliciesListAssociationsCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FirewallPoliciesListAssociationsCall) Context(ctx context.Context) *FirewallPoliciesListAssociationsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *FirewallPoliciesListAssociationsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FirewallPoliciesListAssociationsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/listAssociations") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewallPolicies.listAssociations" call. -// Exactly one of *FirewallPoliciesListAssociationsResponse or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *FirewallPoliciesListAssociationsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *FirewallPoliciesListAssociationsCall) Do(opts ...googleapi.CallOption) (*FirewallPoliciesListAssociationsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &FirewallPoliciesListAssociationsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists associations of a specified target, i.e., organization or folder.", - // "flatPath": "locations/global/firewallPolicies/listAssociations", - // "httpMethod": "GET", - // "id": "compute.firewallPolicies.listAssociations", - // "parameters": { - // "targetResource": { - // "description": "The target resource to list associations. 
It is an organization, or a folder.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "locations/global/firewallPolicies/listAssociations", - // "response": { - // "$ref": "FirewallPoliciesListAssociationsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.firewallPolicies.move": +// method id "compute.healthChecks.delete": -type FirewallPoliciesMoveCall struct { - s *Service - firewallPolicy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksDeleteCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Move: Moves the specified firewall policy. +// Delete: Deletes the specified HealthCheck resource. // -// - firewallPolicy: Name of the firewall policy to update. -func (r *FirewallPoliciesService) Move(firewallPolicy string) *FirewallPoliciesMoveCall { - c := &FirewallPoliciesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.firewallPolicy = firewallPolicy - return c -} - -// ParentId sets the optional parameter "parentId": The new parent of -// the firewall policy. -func (c *FirewallPoliciesMoveCall) ParentId(parentId string) *FirewallPoliciesMoveCall { - c.urlParams_.Set("parentId", parentId) +// - healthCheck: Name of the HealthCheck resource to delete. +// - project: Project ID for this request. +func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { + c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.healthCheck = healthCheck return c } @@ -66886,7 +82936,7 @@ func (c *FirewallPoliciesMoveCall) ParentId(parentId string) *FirewallPoliciesMo // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallPoliciesMoveCall) RequestId(requestId string) *FirewallPoliciesMoveCall { +func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -66894,7 +82944,7 @@ func (c *FirewallPoliciesMoveCall) RequestId(requestId string) *FirewallPolicies // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallPoliciesMoveCall) Fields(s ...googleapi.Field) *FirewallPoliciesMoveCall { +func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66902,21 +82952,21 @@ func (c *FirewallPoliciesMoveCall) Fields(s ...googleapi.Field) *FirewallPolicie // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesMoveCall) Context(ctx context.Context) *FirewallPoliciesMoveCall { +func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
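The `RequestId` parameter documented here makes mutating calls such as `compute.healthChecks.delete` safe to retry: resending the same UUID lets the server ignore the duplicate. A sketch under stated assumptions: the project and health check names are placeholders, and `github.com/google/uuid` is an assumed helper dependency, not something this patch adds.

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/uuid"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Generate the request ID once and reuse it verbatim on any retry, per the
	// RequestId documentation above (the zero UUID is not accepted).
	reqID := uuid.New().String()
	op, err := svc.HealthChecks.Delete("my-project", "my-health-check").
		RequestId(reqID).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started operation:", op.Name)
}
```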
-func (c *FirewallPoliciesMoveCall) Header() http.Header { +func (c *HealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesMoveCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -66926,44 +82976,45 @@ func (c *FirewallPoliciesMoveCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/move") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "firewallPolicy": c.firewallPolicy, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.move" call. +// Do executes the "compute.healthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallPoliciesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -66977,24 +83028,27 @@ func (c *FirewallPoliciesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Moves the specified firewall policy.", - // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/move", - // "httpMethod": "POST", - // "id": "compute.firewallPolicies.move", + // "description": "Deletes the specified HealthCheck resource.", + // "flatPath": "projects/{project}/global/healthChecks/{healthCheck}", + // "httpMethod": "DELETE", + // "id": "compute.healthChecks.delete", // "parameterOrder": [ - // "firewallPolicy" + // "project", + // "healthCheck" // ], // "parameters": { - // "firewallPolicy": { - // "description": "Name of the firewall policy to update.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to delete.", // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "parentId": { - // "description": "The new parent of the firewall policy.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, // "requestId": { @@ -67003,7 +83057,7 @@ func (c *FirewallPoliciesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "locations/global/firewallPolicies/{firewallPolicy}/move", + // "path": "projects/{project}/global/healthChecks/{healthCheck}", // "response": { // "$ref": "Operation" // }, @@ -67015,124 +83069,119 @@ func (c *FirewallPoliciesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.firewallPolicies.patch": +// method id "compute.healthChecks.get": -type FirewallPoliciesPatchCall struct { - s *Service - firewallPolicy string - firewallpolicy *FirewallPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksGetCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified policy with the data included in the -// request. +// Get: Returns the specified HealthCheck resource. Gets a list of +// available health checks by making a list() request. // -// - firewallPolicy: Name of the firewall policy to update. -func (r *FirewallPoliciesService) Patch(firewallPolicy string, firewallpolicy *FirewallPolicy) *FirewallPoliciesPatchCall { - c := &FirewallPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.firewallPolicy = firewallPolicy - c.firewallpolicy = firewallpolicy - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallPoliciesPatchCall) RequestId(requestId string) *FirewallPoliciesPatchCall { - c.urlParams_.Set("requestId", requestId) +// - healthCheck: Name of the HealthCheck resource to return. +// - project: Project ID for this request. +func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { + c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.healthCheck = healthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallPoliciesPatchCall) Fields(s ...googleapi.Field) *FirewallPoliciesPatchCall { +func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesPatchCall) Context(ctx context.Context) *FirewallPoliciesPatchCall { +func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
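The `IfNoneMatch` setter on read calls such as `compute.healthChecks.get` implements the conditional-fetch pattern the doc comments describe: pass a previously seen ETag, and check the error from `Do` with `googleapi.IsNotModified`. A hedged sketch; the resource names are placeholders, and reading the ETag via the promoted `Header` field of the embedded `googleapi.ServerResponse` is an assumption about the generated struct layout.

```
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// First fetch: remember the ETag the server returned.
	hc, err := svc.HealthChecks.Get("my-project", "my-health-check").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	etag := hc.Header.Get("Etag")

	// Later fetch: only succeeds with a body if the resource changed.
	_, err = svc.HealthChecks.Get("my-project", "my-health-check").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	switch {
	case googleapi.IsNotModified(err):
		fmt.Println("health check unchanged since last fetch")
	case err != nil:
		log.Fatal(err)
	}
}
```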
-func (c *FirewallPoliciesPatchCall) Header() http.Header { +func (c *HealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicy) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "firewallPolicy": c.firewallPolicy, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.healthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *HealthCheck.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67144,67 +83193,62 @@ func (c *FirewallPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Patches the specified policy with the data included in the request.", - // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}", - // "httpMethod": "PATCH", - // "id": "compute.firewallPolicies.patch", + // "description": "Returns the specified HealthCheck resource. 
Gets a list of available health checks by making a list() request.", + // "flatPath": "projects/{project}/global/healthChecks/{healthCheck}", + // "httpMethod": "GET", + // "id": "compute.healthChecks.get", // "parameterOrder": [ - // "firewallPolicy" + // "project", + // "healthCheck" // ], // "parameters": { - // "firewallPolicy": { - // "description": "Name of the firewall policy to update.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // } // }, - // "path": "locations/global/firewallPolicies/{firewallPolicy}", - // "request": { - // "$ref": "FirewallPolicy" - // }, + // "path": "projects/{project}/global/healthChecks/{healthCheck}", // "response": { - // "$ref": "Operation" + // "$ref": "HealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.firewallPolicies.patchRule": +// method id "compute.healthChecks.insert": -type FirewallPoliciesPatchRuleCall struct { - s *Service - firewallPolicy string - firewallpolicyrule *FirewallPolicyRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksInsertCall struct { + s *Service + project string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PatchRule: Patches a rule of the specified priority. +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. // -// - firewallPolicy: Name of the firewall policy to update. -func (r *FirewallPoliciesService) PatchRule(firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *FirewallPoliciesPatchRuleCall { - c := &FirewallPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.firewallPolicy = firewallPolicy - c.firewallpolicyrule = firewallpolicyrule - return c -} - -// Priority sets the optional parameter "priority": The priority of the -// rule to patch. -func (c *FirewallPoliciesPatchRuleCall) Priority(priority int64) *FirewallPoliciesPatchRuleCall { - c.urlParams_.Set("priority", fmt.Sprint(priority)) +// - project: Project ID for this request. 
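
// Illustrative sketch of the idempotent-retry pattern that the RequestId
// doc comments in this file describe; it is not part of the generated
// compute/v1 surface in this diff. The project ID, resource name, and the
// UUID literal are hypothetical; a real caller would mint a fresh UUID per
// logical request (for example with github.com/google/uuid) and reuse it
// only when retrying that same request.
func insertHealthCheckIdempotently(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	hc := &compute.HealthCheck{
		Name:             "example-tcp-check",
		Type:             "TCP",
		CheckIntervalSec: 10,
		TimeoutSec:       5,
		// A production request would also set the protocol-specific block
		// (e.g. the tcpHealthCheck settings); omitted in this sketch.
	}
	const requestID = "3c9a7f2e-0d64-4f0b-9f1a-2b5c8d6e4a10"
	return svc.HealthChecks.Insert("my-project", hc).
		RequestId(requestID). // resending with the same ID will not create a duplicate
		Context(ctx).
		Do()
}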
+func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { + c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.healthcheck = healthcheck return c } @@ -67219,7 +83263,7 @@ func (c *FirewallPoliciesPatchRuleCall) Priority(priority int64) *FirewallPolici // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallPoliciesPatchRuleCall) RequestId(requestId string) *FirewallPoliciesPatchRuleCall { +func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -67227,7 +83271,7 @@ func (c *FirewallPoliciesPatchRuleCall) RequestId(requestId string) *FirewallPol // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *FirewallPoliciesPatchRuleCall { +func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67235,21 +83279,21 @@ func (c *FirewallPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *FirewallPo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesPatchRuleCall) Context(ctx context.Context) *FirewallPoliciesPatchRuleCall { +func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallPoliciesPatchRuleCall) Header() http.Header { +func (c *HealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -67257,14 +83301,14 @@ func (c *FirewallPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/patchRule") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -67272,36 +83316,36 @@ func (c *FirewallPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, e } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "firewallPolicy": c.firewallPolicy, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.patchRule" call. +// Do executes the "compute.healthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -67315,36 +83359,30 @@ func (c *FirewallPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Patches a rule of the specified priority.", - // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/patchRule", + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/healthChecks", // "httpMethod": "POST", - // "id": "compute.firewallPolicies.patchRule", + // "id": "compute.healthChecks.insert", // "parameterOrder": [ - // "firewallPolicy" + // "project" // ], // "parameters": { - // "firewallPolicy": { - // "description": "Name of the firewall policy to update.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "priority": { - // "description": "The priority of the rule to patch.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "locations/global/firewallPolicies/{firewallPolicy}/patchRule", + // "path": "projects/{project}/global/healthChecks", // "request": { - // "$ref": "FirewallPolicyRule" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -67357,124 +83395,197 @@ func (c *FirewallPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.firewallPolicies.removeAssociation": +// method id "compute.healthChecks.list": -type FirewallPoliciesRemoveAssociationCall struct { - s *Service - firewallPolicy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RemoveAssociation: Removes an association for the specified firewall -// policy. +// List: Retrieves the list of HealthCheck resources available to the +// specified project. // -// - firewallPolicy: Name of the firewall policy to update. -func (r *FirewallPoliciesService) RemoveAssociation(firewallPolicy string) *FirewallPoliciesRemoveAssociationCall { - c := &FirewallPoliciesRemoveAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.firewallPolicy = firewallPolicy +// - project: Project ID for this request. +func (r *HealthChecksService) List(project string) *HealthChecksListCall { + c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project return c } -// Name sets the optional parameter "name": Name for the attachment that -// will be removed. -func (c *FirewallPoliciesRemoveAssociationCall) Name(name string) *FirewallPoliciesRemoveAssociationCall { - c.urlParams_.Set("name", name) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. 
However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { + c.urlParams_.Set("filter", filter) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallPoliciesRemoveAssociationCall) RequestId(requestId string) *FirewallPoliciesRemoveAssociationCall { - c.urlParams_.Set("requestId", requestId) +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
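
// Illustrative sketch of the two filter styles described in the Filter doc
// comment above (AIP-160 comparisons and legacy eq/ne regular expressions);
// not part of the generated compute/v1 surface in this diff. The project ID
// and the filter values are hypothetical.
func listWithFilters(ctx context.Context, svc *compute.Service) error {
	// AIP-160 style: field, operator, value; parenthesized expressions are
	// ANDed together by default.
	aip := svc.HealthChecks.List("my-project").
		Filter(`name != canary-check`)

	// Legacy style: eq/ne with an RE2 regular expression that must match the
	// entire field value.
	legacy := svc.HealthChecks.List("my-project").
		Filter(`name eq tcp-.*`)

	for _, call := range []*compute.HealthChecksListCall{aip, legacy} {
		page, err := call.Context(ctx).MaxResults(50).Do()
		if err != nil {
			return err
		}
		fmt.Printf("%d health checks matched\n", len(page.Items))
	}
	return nil
}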
+func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *HealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HealthChecksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallPoliciesRemoveAssociationCall) Fields(s ...googleapi.Field) *FirewallPoliciesRemoveAssociationCall { +func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesRemoveAssociationCall) Context(ctx context.Context) *FirewallPoliciesRemoveAssociationCall { +func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallPoliciesRemoveAssociationCall) Header() http.Header { +func (c *HealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesRemoveAssociationCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/removeAssociation") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "firewallPolicy": c.firewallPolicy, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.removeAssociation" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.healthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallPoliciesRemoveAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67486,67 +83597,107 @@ func (c *FirewallPoliciesRemoveAssociationCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Removes an association for the specified firewall policy.", - // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/removeAssociation", - // "httpMethod": "POST", - // "id": "compute.firewallPolicies.removeAssociation", + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", + // "flatPath": "projects/{project}/global/healthChecks", + // "httpMethod": "GET", + // "id": "compute.healthChecks.list", // "parameterOrder": [ - // "firewallPolicy" + // "project" // ], // "parameters": { - // "firewallPolicy": { - // "description": "Name of the firewall policy to update.", - // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", // "type": "string" // }, - // "name": { - // "description": "Name for the attachment that will be removed.", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "locations/global/firewallPolicies/{firewallPolicy}/removeAssociation", + // "path": "projects/{project}/global/healthChecks", // "response": { - // "$ref": "Operation" + // "$ref": "HealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.firewallPolicies.removeRule": - -type FirewallPoliciesRemoveRuleCall struct { - s *Service - firewallPolicy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// RemoveRule: Deletes a rule of the specified priority. -// -// - firewallPolicy: Name of the firewall policy to update. -func (r *FirewallPoliciesService) RemoveRule(firewallPolicy string) *FirewallPoliciesRemoveRuleCall { - c := &FirewallPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.firewallPolicy = firewallPolicy - return c +// method id "compute.healthChecks.patch": + +type HealthChecksPatchCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Priority sets the optional parameter "priority": The priority of the -// rule to remove from the firewall policy. -func (c *FirewallPoliciesRemoveRuleCall) Priority(priority int64) *FirewallPoliciesRemoveRuleCall { - c.urlParams_.Set("priority", fmt.Sprint(priority)) +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +// +// - healthCheck: Name of the HealthCheck resource to patch. +// - project: Project ID for this request. 
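
// Illustrative sketch of the Pages helper defined above; not part of the
// generated compute/v1 surface in this diff. Pages follows nextPageToken
// automatically, so the callback simply handles one HealthCheckList per
// page. The project ID is hypothetical and compute.NewService relies on
// Application Default Credentials.
func printAllHealthChecks(ctx context.Context) error {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return err
	}
	return svc.HealthChecks.List("my-project").
		MaxResults(100). // page size only; iteration continues until no token remains
		Pages(ctx, func(page *compute.HealthCheckList) error {
			for _, hc := range page.Items {
				fmt.Printf("%s (%s)\n", hc.Name, hc.Type)
			}
			return nil // returning a non-nil error here halts the iteration
		})
}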
+func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { + c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -67561,7 +83712,7 @@ func (c *FirewallPoliciesRemoveRuleCall) Priority(priority int64) *FirewallPolic // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallPoliciesRemoveRuleCall) RequestId(requestId string) *FirewallPoliciesRemoveRuleCall { +func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -67569,7 +83720,7 @@ func (c *FirewallPoliciesRemoveRuleCall) RequestId(requestId string) *FirewallPo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *FirewallPoliciesRemoveRuleCall { +func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67577,21 +83728,21 @@ func (c *FirewallPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *FirewallP // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesRemoveRuleCall) Context(ctx context.Context) *FirewallPoliciesRemoveRuleCall { +func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallPoliciesRemoveRuleCall) Header() http.Header { +func (c *HealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -67599,46 +83750,52 @@ func (c *FirewallPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{firewallPolicy}/removeRule") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "firewallPolicy": c.firewallPolicy, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.removeRule" call. 
+// Do executes the "compute.healthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -67652,180 +83809,41 @@ func (c *FirewallPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Deletes a rule of the specified priority.", - // "flatPath": "locations/global/firewallPolicies/{firewallPolicy}/removeRule", - // "httpMethod": "POST", - // "id": "compute.firewallPolicies.removeRule", - // "parameterOrder": [ - // "firewallPolicy" - // ], - // "parameters": { - // "firewallPolicy": { - // "description": "Name of the firewall policy to update.", - // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", - // "required": true, - // "type": "string" - // }, - // "priority": { - // "description": "The priority of the rule to remove from the firewall policy.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "locations/global/firewallPolicies/{firewallPolicy}/removeRule", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.firewallPolicies.setIamPolicy": - -type FirewallPoliciesSetIamPolicyCall struct { - s *Service - resource string - globalorganizationsetpolicyrequest *GlobalOrganizationSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -// -// - resource: Name or id of the resource for this request. 
-func (r *FirewallPoliciesService) SetIamPolicy(resource string, globalorganizationsetpolicyrequest *GlobalOrganizationSetPolicyRequest) *FirewallPoliciesSetIamPolicyCall { - c := &FirewallPoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.globalorganizationsetpolicyrequest = globalorganizationsetpolicyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FirewallPoliciesSetIamPolicyCall) Fields(s ...googleapi.Field) *FirewallPoliciesSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FirewallPoliciesSetIamPolicyCall) Context(ctx context.Context) *FirewallPoliciesSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *FirewallPoliciesSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FirewallPoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalorganizationsetpolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{resource}/setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewallPolicies.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *FirewallPoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "flatPath": "locations/global/firewallPolicies/{resource}/setIamPolicy", - // "httpMethod": "POST", - // "id": "compute.firewallPolicies.setIamPolicy", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/global/healthChecks/{healthCheck}", + // "httpMethod": "PATCH", + // "id": "compute.healthChecks.patch", // "parameterOrder": [ - // "resource" + // "project", + // "healthCheck" // ], // "parameters": { - // "resource": { - // "description": "Name or id of the resource for this request.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to patch.", // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "locations/global/firewallPolicies/{resource}/setIamPolicy", + // "path": "projects/{project}/global/healthChecks/{healthCheck}", // "request": { - // "$ref": "GlobalOrganizationSetPolicyRequest" + // "$ref": "HealthCheck" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -67835,32 +83853,51 @@ func (c *FirewallPoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po } -// method id "compute.firewallPolicies.testIamPermissions": +// method id "compute.healthChecks.update": -type FirewallPoliciesTestIamPermissionsCall struct { - s *Service - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksUpdateCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// Update: Updates a HealthCheck resource in the specified project using +// the data included in the request. // -// - resource: Name or id of the resource for this request. -func (r *FirewallPoliciesService) TestIamPermissions(resource string, testpermissionsrequest *TestPermissionsRequest) *FirewallPoliciesTestIamPermissionsCall { - c := &FirewallPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest +// - healthCheck: Name of the HealthCheck resource to update. +// - project: Project ID for this request. +func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { + c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.healthCheck = healthCheck + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *FirewallPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *FirewallPoliciesTestIamPermissionsCall { +func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67868,21 +83905,21 @@ func (c *FirewallPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *F // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallPoliciesTestIamPermissionsCall) Context(ctx context.Context) *FirewallPoliciesTestIamPermissionsCall { +func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallPoliciesTestIamPermissionsCall) Header() http.Header { +func (c *HealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -67890,53 +83927,54 @@ func (c *FirewallPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Re } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/firewallPolicies/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewallPolicies.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *FirewallPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.healthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67948,57 +83986,69 @@ func (c *FirewallPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "flatPath": "locations/global/firewallPolicies/{resource}/testIamPermissions", - // "httpMethod": "POST", - // "id": "compute.firewallPolicies.testIamPermissions", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/healthChecks/{healthCheck}", + // "httpMethod": "PUT", + // "id": "compute.healthChecks.update", // "parameterOrder": [ - // "resource" + // "project", + // "healthCheck" // ], // "parameters": { - // "resource": { - // "description": "Name or id of the resource for this request.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", // "location": "path", - // "pattern": "(firewallPolicies/)?[0-9]{0,20}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "locations/global/firewallPolicies/{resource}/testIamPermissions", + // "path": "projects/{project}/global/healthChecks/{healthCheck}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "HealthCheck" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.firewalls.delete": +// method id "compute.httpHealthChecks.delete": -type FirewallsDeleteCall struct { - s *Service - project string - firewall string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksDeleteCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified firewall. +// Delete: Deletes the specified HttpHealthCheck resource. // -// - firewall: Name of the firewall rule to delete. +// - httpHealthCheck: Name of the HttpHealthCheck resource to delete. // - project: Project ID for this request. -func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { - c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { + c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.httpHealthCheck = httpHealthCheck return c } @@ -68013,7 +84063,7 @@ func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDel // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { +func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68021,7 +84071,7 @@ func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { +func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68029,21 +84079,21 @@ func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { +func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
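
// Illustrative sketch contrasting the two write paths documented above for
// HealthCheck: Patch sends a JSON merge patch, so only the fields present in
// the body change, while Update is a PUT that replaces the resource with the
// body provided. Not part of the generated compute/v1 surface in this diff;
// the project ID, resource name, and values are hypothetical.
func tuneHealthCheckTimings(ctx context.Context, svc *compute.Service) error {
	// PATCH: only checkIntervalSec is sent; other fields keep their current
	// server-side values.
	patch := &compute.HealthCheck{CheckIntervalSec: 30}
	if _, err := svc.HealthChecks.Patch("my-project", "example-tcp-check", patch).
		Context(ctx).Do(); err != nil {
		return err
	}

	// PUT: read-modify-write so the full resource definition is sent back.
	hc, err := svc.HealthChecks.Get("my-project", "example-tcp-check").Context(ctx).Do()
	if err != nil {
		return err
	}
	hc.TimeoutSec = 10
	_, err = svc.HealthChecks.Update("my-project", "example-tcp-check", hc).
		Context(ctx).Do()
	return err
}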
-func (c *FirewallsDeleteCall) Header() http.Header { +func (c *HttpHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -68053,7 +84103,7 @@ func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -68061,37 +84111,37 @@ func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.delete" call. +// Do executes the "compute.httpHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -68105,17 +84155,17 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified firewall.", - // "flatPath": "projects/{project}/global/firewalls/{firewall}", + // "description": "Deletes the specified HttpHealthCheck resource.", + // "flatPath": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", // "httpMethod": "DELETE", - // "id": "compute.firewalls.delete", + // "id": "compute.httpHealthChecks.delete", // "parameterOrder": [ // "project", - // "firewall" + // "httpHealthCheck" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to delete.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -68134,7 +84184,7 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "projects/{project}/global/firewalls/{firewall}", + // "path": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { // "$ref": "Operation" // }, @@ -68146,33 +84196,34 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.firewalls.get": +// method id "compute.httpHealthChecks.get": -type FirewallsGetCall struct { - s *Service - project string - firewall string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpHealthChecksGetCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified firewall. +// Get: Returns the specified HttpHealthCheck resource. Gets a list of +// available HTTP health checks by making a list() request. // -// - firewall: Name of the firewall rule to return. +// - httpHealthCheck: Name of the HttpHealthCheck resource to return. // - project: Project ID for this request. -func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { - c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { + c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.httpHealthCheck = httpHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
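// Illustrative sketch (hypothetical, not part of the generated file or this
// patch): the change above routes errors returned by Do through
// gensupport.WrapError instead of returning a bare *googleapi.Error. Callers
// that inspect the error with errors.As against *googleapi.Error should keep
// working; project and resource names below are placeholders.
func exampleHandleDeleteError(ctx context.Context, svc *Service) error {
    _, err := svc.HttpHealthChecks.Delete("my-project", "missing-check").Context(ctx).Do()
    var gerr *googleapi.Error
    if errors.As(err, &gerr) && gerr.Code == http.StatusNotFound {
        return nil // the health check is already gone; treat the delete as a no-op
    }
    return err // nil on success, or any other transport/API failure
}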
-func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { +func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68182,7 +84233,7 @@ func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { +func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -68190,21 +84241,21 @@ func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { +func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsGetCall) Header() http.Header { +func (c *HttpHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -68217,7 +84268,7 @@ func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -68225,39 +84276,39 @@ func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.get" call. -// Exactly one of *Firewall or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Firewall.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { +// Do executes the "compute.httpHealthChecks.get" call. +// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HttpHealthCheck.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Firewall{ + ret := &HttpHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -68269,17 +84320,17 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { } return ret, nil // { - // "description": "Returns the specified firewall.", - // "flatPath": "projects/{project}/global/firewalls/{firewall}", + // "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.", + // "flatPath": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", // "httpMethod": "GET", - // "id": "compute.firewalls.get", + // "id": "compute.httpHealthChecks.get", // "parameterOrder": [ // "project", - // "firewall" + // "httpHealthCheck" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to return.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -68293,9 +84344,9 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { // "type": "string" // } // }, - // "path": "projects/{project}/global/firewalls/{firewall}", + // "path": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { - // "$ref": "Firewall" + // "$ref": "HttpHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -68306,25 +84357,25 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { } -// method id "compute.firewalls.insert": +// method id "compute.httpHealthChecks.insert": -type FirewallsInsertCall struct { - s *Service - project string - firewall *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksInsertCall struct { + s *Service + project string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a firewall rule in the specified project using the -// data included in the request. +// Insert: Creates a HttpHealthCheck resource in the specified project +// using the data included in the request. // // - project: Project ID for this request. 
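// Illustrative sketch (hypothetical, not part of the generated file or this
// patch): IfNoneMatch plus googleapi.IsNotModified, as described above, can be
// used for a simple conditional-read cache. This assumes the previous response
// carried an ETag header in its ServerResponse; the project and resource names
// are placeholders.
func exampleConditionalGet(ctx context.Context, svc *Service, prev *HttpHealthCheck) (*HttpHealthCheck, error) {
    call := svc.HttpHealthChecks.Get("my-project", "web-check").Context(ctx)
    if prev != nil {
        // ServerResponse is populated by a previous Do; reuse its ETag, if any.
        call.IfNoneMatch(prev.ServerResponse.Header.Get("Etag"))
    }
    hc, err := call.Do()
    if googleapi.IsNotModified(err) {
        return prev, nil // unchanged since the last fetch; keep the cached copy
    }
    return hc, err
}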
-func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { - c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { + c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.httphealthcheck = httphealthcheck return c } @@ -68339,7 +84390,7 @@ func (r *FirewallsService) Insert(project string, firewall *Firewall) *Firewalls // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { +func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68347,7 +84398,7 @@ func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { +func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68355,21 +84406,21 @@ func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { +func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsInsertCall) Header() http.Header { +func (c *HttpHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -68377,14 +84428,14 @@ func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewalls") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -68397,31 +84448,31 @@ func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.insert" call. 
+// Do executes the "compute.httpHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -68435,10 +84486,10 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates a firewall rule in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/global/firewalls", + // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/httpHealthChecks", // "httpMethod": "POST", - // "id": "compute.firewalls.insert", + // "id": "compute.httpHealthChecks.insert", // "parameterOrder": [ // "project" // ], @@ -68456,9 +84507,9 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "projects/{project}/global/firewalls", + // "path": "projects/{project}/global/httpHealthChecks", // "request": { - // "$ref": "Firewall" + // "$ref": "HttpHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -68471,9 +84522,9 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.firewalls.list": +// method id "compute.httpHealthChecks.list": -type FirewallsListCall struct { +type HttpHealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -68482,30 +84533,33 @@ type FirewallsListCall struct { header_ http.Header } -// List: Retrieves the list of firewall rules available to the specified -// project. +// List: Retrieves the list of HttpHealthCheck resources available to +// the specified project. // // - project: Project ID for this request. -func (r *FirewallsService) List(project string) *FirewallsListCall { - c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { + c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. 
For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -68513,8 +84567,18 @@ func (r *FirewallsService) List(project string) *FirewallsListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -68525,7 +84589,7 @@ func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { +func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -68539,7 +84603,7 @@ func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { +func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -68547,7 +84611,7 @@ func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { +func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -68556,7 +84620,7 @@ func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *FirewallsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *FirewallsListCall { +func (c *HttpHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HttpHealthChecksListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -68564,7 +84628,7 @@ func (c *FirewallsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *Fir // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { +func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68574,7 +84638,7 @@ func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { +func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -68582,21 +84646,21 @@ func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { +func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
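// Illustrative sketch (hypothetical, not part of the generated file or this
// patch): a single-page list request combining the Filter, OrderBy, and
// MaxResults parameters documented above. The filter expression follows the
// AIP-160 style described in the Filter doc comment; the project and resource
// names are placeholders.
func exampleFilteredList(ctx context.Context, svc *Service) (*HttpHealthCheckList, error) {
    return svc.HttpHealthChecks.List("my-project").
        Filter(`name != example-check`).
        OrderBy("creationTimestamp desc"). // newest first; only name or creationTimestamp desc are supported
        MaxResults(100).
        Context(ctx).
        Do()
}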
-func (c *FirewallsListCall) Header() http.Header { +func (c *HttpHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -68609,7 +84673,7 @@ func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewalls") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -68622,33 +84686,33 @@ func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.list" call. -// Exactly one of *FirewallList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *FirewallList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { +// Do executes the "compute.httpHealthChecks.list" call. +// Exactly one of *HttpHealthCheckList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpHealthCheckList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &FirewallList{ + ret := &HttpHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -68660,16 +84724,16 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err } return ret, nil // { - // "description": "Retrieves the list of firewall rules available to the specified project.", - // "flatPath": "projects/{project}/global/firewalls", + // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + // "flatPath": "projects/{project}/global/httpHealthChecks", // "httpMethod": "GET", - // "id": "compute.firewalls.list", + // "id": "compute.httpHealthChecks.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -68704,9 +84768,9 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // "type": "boolean" // } // }, - // "path": "projects/{project}/global/firewalls", + // "path": "projects/{project}/global/httpHealthChecks", // "response": { - // "$ref": "FirewallList" + // "$ref": "HttpHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -68720,7 +84784,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { +func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -68738,29 +84802,29 @@ func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) err } } -// method id "compute.firewalls.patch": +// method id "compute.httpHealthChecks.patch": -type FirewallsPatchCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksPatchCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified firewall rule with the data included in -// the request. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. +// Patch: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. 
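// Illustrative sketch (hypothetical, not part of the generated file or this
// patch): Pages, shown above, drives the pageToken/nextPageToken loop for the
// caller and invokes the callback once per page. The project name is a
// placeholder.
func exampleListAllPages(ctx context.Context, svc *Service) ([]string, error) {
    var names []string
    err := svc.HttpHealthChecks.List("my-project").Pages(ctx, func(page *HttpHealthCheckList) error {
        for _, hc := range page.Items {
            names = append(names, hc.Name)
        }
        return nil // returning a non-nil error here halts the iteration
    })
    return names, err
}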
// -// - firewall: Name of the firewall rule to patch. +// - httpHealthCheck: Name of the HttpHealthCheck resource to patch. // - project: Project ID for this request. -func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { - c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { + c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall - c.firewall2 = firewall2 + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck return c } @@ -68775,7 +84839,7 @@ func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Fir // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { +func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68783,7 +84847,7 @@ func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { +func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68791,21 +84855,21 @@ func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { +func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsPatchCall) Header() http.Header { +func (c *HttpHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -68813,14 +84877,14 @@ func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -68828,37 +84892,37 @@ func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.patch" call. +// Do executes the "compute.httpHealthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -68872,17 +84936,17 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/global/firewalls/{firewall}", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", // "httpMethod": "PATCH", - // "id": "compute.firewalls.patch", + // "id": "compute.httpHealthChecks.patch", // "parameterOrder": [ // "project", - // "firewall" + // "httpHealthCheck" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to patch.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -68901,9 +84965,9 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "projects/{project}/global/firewalls/{firewall}", + // "path": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", // "request": { - // "$ref": "Firewall" + // "$ref": "HttpHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -68916,30 +84980,28 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.firewalls.update": +// method id "compute.httpHealthChecks.update": -type FirewallsUpdateCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksUpdateCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified firewall rule with the data included in -// the request. Note that all fields will be updated if using PUT, even -// fields that are not specified. To update individual fields, please -// use PATCH instead. +// Update: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. // -// - firewall: Name of the firewall rule to update. +// - httpHealthCheck: Name of the HttpHealthCheck resource to update. // - project: Project ID for this request. -func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { - c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { + c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall - c.firewall2 = firewall2 + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck return c } @@ -68954,7 +85016,7 @@ func (r *FirewallsService) Update(project string, firewall string, firewall2 *Fi // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { +func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68962,7 +85024,7 @@ func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
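// Illustrative sketch (hypothetical, not part of the generated file or this
// patch): Patch uses JSON merge-patch semantics as documented above, so only
// the fields set in the request body are changed, whereas Update issues a PUT
// with the full resource. The project and resource names are placeholders.
func examplePatchCheckInterval(ctx context.Context, svc *Service) (*Operation, error) {
    // Only CheckIntervalSec is sent; the other settings of "web-check" are left untouched.
    return svc.HttpHealthChecks.Patch("my-project", "web-check", &HttpHealthCheck{
        CheckIntervalSec: 10,
    }).Context(ctx).Do()
}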
-func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { +func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68970,21 +85032,21 @@ func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { +func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsUpdateCall) Header() http.Header { +func (c *HttpHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -68992,14 +85054,14 @@ func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PUT", urls, body) if err != nil { @@ -69007,37 +85069,37 @@ func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.update" call. +// Do executes the "compute.httpHealthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -69051,17 +85113,17 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. Note that all fields will be updated if using PUT, even fields that are not specified. To update individual fields, please use PATCH instead.", - // "flatPath": "projects/{project}/global/firewalls/{firewall}", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", // "httpMethod": "PUT", - // "id": "compute.firewalls.update", + // "id": "compute.httpHealthChecks.update", // "parameterOrder": [ // "project", - // "firewall" + // "httpHealthCheck" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to update.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -69080,9 +85142,9 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "projects/{project}/global/firewalls/{firewall}", + // "path": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", // "request": { - // "$ref": "Firewall" + // "$ref": "HttpHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -69095,312 +85157,25 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.forwardingRules.aggregatedList": - -type ForwardingRulesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of forwarding rules. -// -// - project: Project ID for this request. -func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { - c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. 
For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *ForwardingRulesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. 
-func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *ForwardingRulesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ForwardingRulesAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/forwardingRules") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.forwardingRules.aggregatedList" call. -// Exactly one of *ForwardingRuleAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ForwardingRuleAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of forwarding rules.", - // "flatPath": "projects/{project}/aggregated/forwardingRules", - // "httpMethod": "GET", - // "id": "compute.forwardingRules.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "projects/{project}/aggregated/forwardingRules", - // "response": { - // "$ref": "ForwardingRuleAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.forwardingRules.delete": +// method id "compute.httpsHealthChecks.delete": -type ForwardingRulesDeleteCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksDeleteCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified ForwardingRule resource. +// Delete: Deletes the specified HttpsHealthCheck resource. // -// - forwardingRule: Name of the ForwardingRule resource to delete. +// - httpsHealthCheck: Name of the HttpsHealthCheck resource to delete. // - project: Project ID for this request. -// - region: Name of the region scoping this request. 
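// Editorial usage sketch: Delete, like the other mutating HttpsHealthChecks
// methods below, returns an *Operation rather than the final result. A minimal
// pattern assuming a *Service built elsewhere and this package's fmt import;
// the project and health-check names are placeholders, and production code
// would poll the returned operation until it reports DONE.
func exampleDeleteHttpsHealthCheck(ctx context.Context, svc *Service) (*Operation, error) {
	op, err := svc.HttpsHealthChecks.Delete("my-project", "legacy-check").
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	fmt.Printf("delete started: operation=%s status=%s\n", op.Name, op.Status)
	return op, nil
}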
-func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { - c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { + c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule + c.httpsHealthCheck = httpsHealthCheck return c } @@ -69415,7 +85190,7 @@ func (r *ForwardingRulesService) Delete(project string, region string, forwardin // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRulesDeleteCall { +func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -69423,7 +85198,7 @@ func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRules // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { +func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69431,21 +85206,21 @@ func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRule // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { +func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesDeleteCall) Header() http.Header { +func (c *HttpsHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -69455,7 +85230,7 @@ func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -69463,38 +85238,37 @@ func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.delete" call. +// Do executes the "compute.httpsHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -69508,18 +85282,17 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified ForwardingRule resource.", - // "flatPath": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "description": "Deletes the specified HttpsHealthCheck resource.", + // "flatPath": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "httpMethod": "DELETE", - // "id": "compute.forwardingRules.delete", + // "id": "compute.httpsHealthChecks.delete", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "httpsHealthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -69532,20 +85305,13 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "path": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "response": { // "$ref": "Operation" // }, @@ -69557,36 +85323,34 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.forwardingRules.get": +// method id "compute.httpsHealthChecks.get": -type ForwardingRulesGetCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksGetCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified ForwardingRule resource. +// Get: Returns the specified HttpsHealthCheck resource. Gets a list of +// available HTTPS health checks by making a list() request. // -// - forwardingRule: Name of the ForwardingRule resource to return. +// - httpsHealthCheck: Name of the HttpsHealthCheck resource to return. // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { - c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { + c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule + c.httpsHealthCheck = httpsHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { +func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69596,7 +85360,7 @@ func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { +func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -69604,21 +85368,21 @@ func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
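// Editorial usage sketch: a conditional read using IfNoneMatch as documented
// above ("In-None-Match" in the generated comment refers to the If-None-Match
// request header). Assumes an ETag captured from an earlier response and a
// *Service built elsewhere; the resource names are placeholders.
func exampleConditionalGetHttpsHealthCheck(ctx context.Context, svc *Service, etag string) (*HttpsHealthCheck, error) {
	hc, err := svc.HttpsHealthChecks.Get("my-project", "frontend-https").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// HTTP 304: the resource has not changed since etag was recorded.
		return nil, nil
	}
	return hc, err
}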
-func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { +func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesGetCall) Header() http.Header { +func (c *HttpsHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -69631,7 +85395,7 @@ func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -69639,40 +85403,39 @@ func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.get" call. -// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.httpsHealthChecks.get" call. +// Exactly one of *HttpsHealthCheck or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheck.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { +func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ForwardingRule{ + ret := &HttpsHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -69684,18 +85447,17 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu } return ret, nil // { - // "description": "Returns the specified ForwardingRule resource.", - // "flatPath": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request.", + // "flatPath": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "httpMethod": "GET", - // "id": "compute.forwardingRules.get", + // "id": "compute.httpsHealthChecks.get", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "httpsHealthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -69707,18 +85469,11 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "path": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "response": { - // "$ref": "ForwardingRule" + // "$ref": "HttpsHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -69729,28 +85484,25 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu } -// method id "compute.forwardingRules.insert": +// method id "compute.httpsHealthChecks.insert": -type ForwardingRulesInsertCall struct { - s *Service - project string - region string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksInsertCall struct { + s *Service + project string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a ForwardingRule resource in the specified project -// and region using the data included in the request. +// Insert: Creates a HttpsHealthCheck resource in the specified project +// using the data included in the request. // // - project: Project ID for this request. -// - region: Name of the region scoping this request. 
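// Editorial usage sketch: creating a legacy HTTPS health check with Insert, as
// described above. The HttpsHealthCheck field names follow this package's
// schema; the values and project ID are placeholders only.
func exampleInsertHttpsHealthCheck(ctx context.Context, svc *Service) (*Operation, error) {
	hc := &HttpsHealthCheck{
		Name:        "frontend-https",
		Port:        443,
		RequestPath: "/healthz",
	}
	return svc.HttpsHealthChecks.Insert("my-project", hc).Context(ctx).Do()
}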
-func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { - c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { + c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingrule = forwardingrule + c.httpshealthcheck = httpshealthcheck return c } @@ -69765,7 +85517,7 @@ func (r *ForwardingRulesService) Insert(project string, region string, forwardin // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRulesInsertCall { +func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -69773,7 +85525,7 @@ func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRules // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { +func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69781,21 +85533,21 @@ func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRule // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { +func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesInsertCall) Header() http.Header { +func (c *HttpsHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -69803,14 +85555,14 @@ func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpsHealthChecks") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -69819,36 +85571,35 @@ func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.insert" call. +// Do executes the "compute.httpsHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -69862,13 +85613,12 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/forwardingRules", + // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/httpsHealthChecks", // "httpMethod": "POST", - // "id": "compute.forwardingRules.insert", + // "id": "compute.httpsHealthChecks.insert", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "project": { @@ -69878,22 +85628,15 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/forwardingRules", + // "path": "projects/{project}/global/httpsHealthChecks", // "request": { - // "$ref": "ForwardingRule" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -69906,44 +85649,44 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.forwardingRules.list": +// method id "compute.httpsHealthChecks.list": -type ForwardingRulesListCall struct { +type HttpsHealthChecksListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of ForwardingRule resources available to the -// specified project and region. +// List: Retrieves the list of HttpsHealthCheck resources available to +// the specified project. // // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { - c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { + c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. 
+// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -69951,8 +85694,18 @@ func (r *ForwardingRulesService) List(project string, region string) *Forwarding // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -69963,7 +85716,7 @@ func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall { +func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -69977,7 +85730,7 @@ func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesL // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { +func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -69985,7 +85738,7 @@ func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall { +func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -69994,7 +85747,7 @@ func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesLi // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. 
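// Editorial usage sketch: the two filter styles described above applied to a
// List call. Only one filter string is sent per request; the AIP-160 form is
// shown in the comment and the RE2 `ne` form is the one used. Project and
// patterns are placeholders.
func exampleFilteredHttpsHealthCheckList(ctx context.Context, svc *Service) (*HttpsHealthCheckList, error) {
	// AIP-160 style alternative: filter := `(port = 443)`
	filter := "name ne .*-canary" // RE2; must match the entire name field
	return svc.HttpsHealthChecks.List("my-project").
		Filter(filter).
		MaxResults(50).
		OrderBy("creationTimestamp desc").
		Context(ctx).
		Do()
}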
-func (c *ForwardingRulesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ForwardingRulesListCall { +func (c *HttpsHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HttpsHealthChecksListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -70002,7 +85755,7 @@ func (c *ForwardingRulesListCall) ReturnPartialSuccess(returnPartialSuccess bool // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall { +func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70012,7 +85765,7 @@ func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { +func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -70020,21 +85773,21 @@ func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRules // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { +func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesListCall) Header() http.Header { +func (c *HttpsHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -70047,7 +85800,7 @@ func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -70056,38 +85809,37 @@ func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any +// Do executes the "compute.httpsHealthChecks.list" call. +// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any // non-2xx status code is an error. 
Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was +// *HttpsHealthCheckList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { +func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ForwardingRuleList{ + ret := &HttpsHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70099,17 +85851,16 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR } return ret, nil // { - // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", - // "flatPath": "projects/{project}/regions/{region}/forwardingRules", + // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + // "flatPath": "projects/{project}/global/httpsHealthChecks", // "httpMethod": "GET", - // "id": "compute.forwardingRules.list", + // "id": "compute.httpsHealthChecks.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -70138,22 +85889,15 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "returnPartialSuccess": { // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/forwardingRules", + // "path": "projects/{project}/global/httpsHealthChecks", // "response": { - // "$ref": "ForwardingRuleList" + // "$ref": "HttpsHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -70167,7 +85911,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
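// Editorial usage sketch: Pages, documented above, repeatedly calls Do and
// feeds NextPageToken back into PageToken until the last page is reached, then
// restores the original page token. Assumes a *Service built elsewhere; the
// project ID is a placeholder.
func exampleCollectHttpsHealthCheckNames(ctx context.Context, svc *Service) ([]string, error) {
	var names []string
	err := svc.HttpsHealthChecks.List("my-project").Pages(ctx, func(page *HttpsHealthCheckList) error {
		for _, hc := range page.Items {
			names = append(names, hc.Name)
		}
		return nil // returning a non-nil error here halts the iteration
	})
	return names, err
}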
-func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { +func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -70185,33 +85929,29 @@ func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingR } } -// method id "compute.forwardingRules.patch": +// method id "compute.httpsHealthChecks.patch": -type ForwardingRulesPatchCall struct { - s *Service - project string - region string - forwardingRule string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksPatchCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified forwarding rule with the data included -// in the request. This method supports PATCH semantics and uses the -// JSON merge patch format and processing rules. Currently, you can only -// patch the network_tier field. +// Patch: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. // -// - forwardingRule: Name of the ForwardingRule resource to patch. +// - httpsHealthCheck: Name of the HttpsHealthCheck resource to patch. // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *ForwardingRulesService) Patch(project string, region string, forwardingRule string, forwardingrule *ForwardingRule) *ForwardingRulesPatchCall { - c := &ForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { + c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule - c.forwardingrule = forwardingrule + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck return c } @@ -70226,7 +85966,7 @@ func (r *ForwardingRulesService) Patch(project string, region string, forwarding // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesPatchCall) RequestId(requestId string) *ForwardingRulesPatchCall { +func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -70234,7 +85974,7 @@ func (c *ForwardingRulesPatchCall) RequestId(requestId string) *ForwardingRulesP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
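// Editorial usage sketch: Patch uses JSON merge-patch semantics, as noted
// above, so only the fields populated on the request body are changed and the
// rest of the resource is left as is. Assumes a *Service built elsewhere;
// names and the new path are placeholders.
func examplePatchHttpsHealthCheckPath(ctx context.Context, svc *Service) (*Operation, error) {
	patch := &HttpsHealthCheck{RequestPath: "/livez"}
	return svc.HttpsHealthChecks.Patch("my-project", "frontend-https", patch).
		Context(ctx).
		Do()
}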
-func (c *ForwardingRulesPatchCall) Fields(s ...googleapi.Field) *ForwardingRulesPatchCall { +func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70242,21 +85982,21 @@ func (c *ForwardingRulesPatchCall) Fields(s ...googleapi.Field) *ForwardingRules // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesPatchCall) Context(ctx context.Context) *ForwardingRulesPatchCall { +func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesPatchCall) Header() http.Header { +func (c *HttpsHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -70264,14 +86004,14 @@ func (c *ForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -70279,38 +86019,37 @@ func (c *ForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.patch" call. +// Do executes the "compute.httpsHealthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -70324,18 +86063,17 @@ func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", - // "flatPath": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "httpMethod": "PATCH", - // "id": "compute.forwardingRules.patch", + // "id": "compute.httpsHealthChecks.patch", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "httpsHealthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to patch.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -70348,22 +86086,15 @@ func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "path": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "request": { - // "$ref": "ForwardingRule" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -70376,31 +86107,28 @@ func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.forwardingRules.setLabels": +// method id "compute.httpsHealthChecks.update": -type ForwardingRulesSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksUpdateCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on the specified resource. To learn more -// about labels, read the Labeling Resources documentation. +// Update: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. // +// - httpsHealthCheck: Name of the HttpsHealthCheck resource to update. // - project: Project ID for this request. -// - region: The region for this request. -// - resource: Name or id of the resource for this request. -func (r *ForwardingRulesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *ForwardingRulesSetLabelsCall { - c := &ForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { + c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck return c } @@ -70415,7 +86143,7 @@ func (r *ForwardingRulesService) SetLabels(project string, region string, resour // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesSetLabelsCall) RequestId(requestId string) *ForwardingRulesSetLabelsCall { +func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -70423,7 +86151,7 @@ func (c *ForwardingRulesSetLabelsCall) RequestId(requestId string) *ForwardingRu // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *ForwardingRulesSetLabelsCall { +func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70431,21 +86159,21 @@ func (c *ForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *ForwardingR // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesSetLabelsCall) Context(ctx context.Context) *ForwardingRulesSetLabelsCall { +func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesSetLabelsCall) Header() http.Header { +func (c *HttpsHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -70453,53 +86181,52 @@ func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.setLabels" call. +// Do executes the "compute.httpsHealthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -70513,27 +86240,26 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Sets the labels on the specified resource. 
To learn more about labels, read the Labeling Resources documentation.", - // "flatPath": "projects/{project}/regions/{region}/forwardingRules/{resource}/setLabels", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.setLabels", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "httpMethod": "PUT", + // "id": "compute.httpsHealthChecks.update", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "httpsHealthCheck" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to update.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -70541,18 +86267,11 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/forwardingRules/{resource}/setLabels", + // "path": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "request": { - // "$ref": "RegionSetLabelsRequest" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -70565,133 +86284,123 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.forwardingRules.setTarget": +// method id "compute.imageFamilyViews.get": -type ForwardingRulesSetTargetCall struct { - s *Service - project string - region string - forwardingRule string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImageFamilyViewsGetCall struct { + s *Service + project string + zone string + family string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetTarget: Changes target URL for forwarding rule. The new target -// should be of the same type as the old target. +// Get: Returns the latest image that is part of an image family, is not +// deprecated and is rolled out in the specified zone. // -// - forwardingRule: Name of the ForwardingRule resource in which target -// is to be set. +// - family: Name of the image family to search for. // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { - c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *ImageFamilyViewsService) Get(project string, zone string, family string) *ImageFamilyViewsGetCall { + c := &ImageFamilyViewsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule - c.targetreference = targetreference - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.family = family return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { +func (c *ImageFamilyViewsGetCall) Fields(s ...googleapi.Field) *ImageFamilyViewsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImageFamilyViewsGetCall) IfNoneMatch(entityTag string) *ImageFamilyViewsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { +func (c *ImageFamilyViewsGetCall) Context(ctx context.Context) *ImageFamilyViewsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesSetTargetCall) Header() http.Header { +func (c *ImageFamilyViewsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { +func (c *ImageFamilyViewsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/imageFamilyViews/{family}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "zone": c.zone, + "family": c.family, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.setTarget" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.imageFamilyViews.get" call. +// Exactly one of *ImageFamilyView or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
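// --- Illustrative usage sketch (not part of the generated client or this vendored diff) ---
// The ImageFamilyViewsGetCall above supports IfNoneMatch/ETag conditional reads: when the
// resource is unchanged the Do call fails with an error for which googleapi.IsNotModified
// reports true. A minimal sketch, assuming Application Default Credentials and example
// project/zone/family values; imports assumed: context, fmt, log,
// compute "google.golang.org/api/compute/v1", "google.golang.org/api/googleapi".
func getImageFamilyViewIfChanged(ctx context.Context, svc *compute.Service, lastETag string) {
	call := svc.ImageFamilyViews.Get("my-project", "us-central1-a", "debian-11") // example values
	if lastETag != "" {
		call = call.IfNoneMatch(lastETag) // ask the server to skip the body if nothing changed
	}
	view, err := call.Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("image family view unchanged since last fetch")
		return
	}
	if err != nil {
		log.Fatalf("ImageFamilyViews.Get: %v", err)
	}
	// If the server returned an ETag header, it can be saved for the next conditional call.
	fmt.Println("fetched view, etag:", view.ServerResponse.Header.Get("Etag"))
}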
-func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *ImageFamilyView.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ImageFamilyViewsGetCall) Do(opts ...googleapi.CallOption) (*ImageFamilyView, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &ImageFamilyView{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70703,18 +86412,18 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", - // "flatPath": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.setTarget", + // "description": "Returns the latest image that is part of an image family, is not deprecated and is rolled out in the specified zone.", + // "flatPath": "projects/{project}/zones/{zone}/imageFamilyViews/{family}", + // "httpMethod": "GET", + // "id": "compute.imageFamilyViews.get", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "zone", + // "family" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "family": { + // "description": "Name of the image family to search for.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -70727,53 +86436,46 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", - // "request": { - // "$ref": "TargetReference" - // }, + // "path": "projects/{project}/zones/{zone}/imageFamilyViews/{family}", // "response": { - // "$ref": "Operation" + // "$ref": "ImageFamilyView" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.globalAddresses.delete": +// method id "compute.images.delete": -type GlobalAddressesDeleteCall struct { +type ImagesDeleteCall struct { s *Service project string - address string + image string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified address resource. +// Delete: Deletes the specified image. // -// - address: Name of the address resource to delete. +// - image: Name of the image resource to delete. // - project: Project ID for this request. -func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { - c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { + c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.image = image return c } @@ -70788,7 +86490,7 @@ func (r *GlobalAddressesService) Delete(project string, address string) *GlobalA // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall { +func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -70796,7 +86498,7 @@ func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddresses // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { +func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70804,21 +86506,21 @@ func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddresse // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { +func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
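// --- Illustrative usage sketch (not part of the generated client or this vendored diff) ---
// The requestId parameter documented above makes mutating calls such as ImagesDeleteCall
// safe to retry: resending the same non-zero UUID lets the server ignore a duplicate
// request. A minimal sketch with assumed example values; imports assumed: context, fmt,
// log, compute "google.golang.org/api/compute/v1".
func deleteImageIdempotently(ctx context.Context, svc *compute.Service) {
	const requestID = "4d1f9f3e-7a2b-4c7e-9b1a-2f6d8c0e5a11" // any valid UUID; reuse it on retries
	// "my-project" and "my-old-image" are placeholder names.
	op, err := svc.Images.Delete("my-project", "my-old-image").
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("Images.Delete: %v", err)
	}
	// Do returns a long-running Operation; its status can be polled separately.
	fmt.Printf("started operation %s (status %s)\n", op.Name, op.Status)
}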
-func (c *GlobalAddressesDeleteCall) Header() http.Header { +func (c *ImagesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -70828,7 +86530,7 @@ func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -70837,36 +86539,36 @@ func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "address": c.address, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.delete" call. +// Do executes the "compute.images.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -70880,17 +86582,17 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified address resource.", - // "flatPath": "projects/{project}/global/addresses/{address}", + // "description": "Deletes the specified image.", + // "flatPath": "projects/{project}/global/images/{image}", // "httpMethod": "DELETE", - // "id": "compute.globalAddresses.delete", + // "id": "compute.images.delete", // "parameterOrder": [ // "project", - // "address" + // "image" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", + // "image": { + // "description": "Name of the image resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -70909,7 +86611,7 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "projects/{project}/global/addresses/{address}", + // "path": "projects/{project}/global/images/{image}", // "response": { // "$ref": "Operation" // }, @@ -70921,186 +86623,28 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.globalAddresses.get": - -type GlobalAddressesGetCall struct { - s *Service - project string - address string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified address resource. Gets a list of available -// addresses by making a list() request. -// -// - address: Name of the address resource to return. -// - project: Project ID for this request. -func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { - c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.address = address - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
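// --- Illustrative usage sketch (not part of the generated client or this vendored diff) ---
// As the Context doc comment above states, a pending HTTP request is aborted when the
// provided context is canceled, so a per-call timeout can be layered onto any call builder.
// A minimal sketch with assumed example values; imports assumed: context, fmt, log, time,
// compute "google.golang.org/api/compute/v1".
func getImageWithTimeout(svc *compute.Service) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel() // canceling (or the deadline expiring) aborts the in-flight request
	img, err := svc.Images.Get("my-project", "my-image").Context(ctx).Do()
	if err != nil {
		log.Fatalf("Images.Get: %v", err)
	}
	fmt.Println("image self link:", img.SelfLink)
}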
-func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalAddressesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "address": c.address, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalAddresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Address{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified address resource. 
Gets a list of available addresses by making a list() request.", - // "flatPath": "projects/{project}/global/addresses/{address}", - // "httpMethod": "GET", - // "id": "compute.globalAddresses.get", - // "parameterOrder": [ - // "project", - // "address" - // ], - // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/global/addresses/{address}", - // "response": { - // "$ref": "Address" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.globalAddresses.insert": +// method id "compute.images.deprecate": -type GlobalAddressesInsertCall struct { - s *Service - project string - address *Address - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesDeprecateCall struct { + s *Service + project string + image string + deprecationstatus *DeprecationStatus + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an address resource in the specified project by using -// the data included in the request. +// Deprecate: Sets the deprecation status of an image. If an empty +// request body is given, clears the deprecation status instead. // +// - image: Image name. // - project: Project ID for this request. -func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { - c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { + c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.image = image + c.deprecationstatus = deprecationstatus return c } @@ -71115,7 +86659,7 @@ func (r *GlobalAddressesService) Insert(project string, address *Address) *Globa // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall { +func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -71123,7 +86667,7 @@ func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddresses // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
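// --- Illustrative usage sketch (not part of the generated client or this vendored diff) ---
// ImagesDeprecateCall above sets an image's deprecation status; per its description, an
// empty request body clears the status again. A minimal sketch with assumed example values;
// imports assumed: context, fmt, log, compute "google.golang.org/api/compute/v1".
func deprecateImage(ctx context.Context, svc *compute.Service) {
	status := &compute.DeprecationStatus{
		State: "DEPRECATED", // mark as deprecated but still usable
		// Replacement is assumed here to be the URL of the image users should switch to.
		Replacement: "https://www.googleapis.com/compute/v1/projects/my-project/global/images/my-new-image",
	}
	op, err := svc.Images.Deprecate("my-project", "my-old-image", status).Context(ctx).Do()
	if err != nil {
		log.Fatalf("Images.Deprecate: %v", err)
	}
	fmt.Printf("deprecation operation %s (status %s)\n", op.Name, op.Status)
}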
-func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { +func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71131,21 +86675,21 @@ func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddresse // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { +func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesInsertCall) Header() http.Header { +func (c *ImagesDeprecateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -71153,14 +86697,14 @@ func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{image}/deprecate") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -71169,35 +86713,36 @@ func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.insert" call. +// Do executes the "compute.images.deprecate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -71211,14 +86756,22 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates an address resource in the specified project by using the data included in the request.", - // "flatPath": "projects/{project}/global/addresses", + // "description": "Sets the deprecation status of an image. If an empty request body is given, clears the deprecation status instead.", + // "flatPath": "projects/{project}/global/images/{image}/deprecate", // "httpMethod": "POST", - // "id": "compute.globalAddresses.insert", + // "id": "compute.images.deprecate", // "parameterOrder": [ - // "project" + // "project", + // "image" // ], // "parameters": { + // "image": { + // "description": "Image name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -71232,9 +86785,9 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "projects/{project}/global/addresses", + // "path": "projects/{project}/global/images/{image}/deprecate", // "request": { - // "$ref": "Address" + // "$ref": "DeprecationStatus" // }, // "response": { // "$ref": "Operation" @@ -71247,99 +86800,34 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.globalAddresses.list": +// method id "compute.images.get": -type GlobalAddressesListCall struct { +type ImagesGetCall struct { s *Service project string + image string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of global addresses. +// Get: Returns the specified image. Gets a list of available images by +// making a list() request. // +// - image: Name of the image resource to return. // - project: Project ID for this request. -func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { - c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ImagesService) Get(project string, image string) *ImagesGetCall { + c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. 
The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *GlobalAddressesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalAddressesListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + c.image = image return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
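// --- Illustrative usage sketch (not part of the generated client or this vendored diff) ---
// The Filter, OrderBy, MaxResults, and PageToken parameters documented above are shared by
// the generated List calls; the filter below follows the documented `field operator value`
// syntax. A minimal sketch with assumed example values; imports assumed: context, fmt, log,
// compute "google.golang.org/api/compute/v1".
func listSomeGlobalAddresses(ctx context.Context, svc *compute.Service) {
	page, err := svc.GlobalAddresses.List("my-project").
		Filter(`name != example-address`).  // documented filter syntax: field, operator, value
		OrderBy("creationTimestamp desc").  // newest resources first
		MaxResults(50).                     // at most 50 results in this page
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("GlobalAddresses.List: %v", err)
	}
	for _, addr := range page.Items {
		fmt.Println(addr.Name, addr.Address)
	}
	if page.NextPageToken != "" {
		fmt.Println("more results available; pass NextPageToken to PageToken or use Pages")
	}
}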
-func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall { +func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71349,7 +86837,7 @@ func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall { +func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -71357,21 +86845,21 @@ func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddresses // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall { +func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesListCall) Header() http.Header { +func (c *ImagesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -71384,7 +86872,7 @@ func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -71393,37 +86881,38 @@ func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { +// Do executes the "compute.images.get" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
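// --- Illustrative usage sketch (not part of the generated client or this vendored diff) ---
// The Do methods in this version wrap non-2xx responses via gensupport.WrapError; the
// assumption below is that the wrapped error still unwraps to *googleapi.Error so callers
// can read the HTTP status code and headers. Imports assumed: context, errors, fmt, log,
// net/http, compute "google.golang.org/api/compute/v1", "google.golang.org/api/googleapi".
func getImageOrReportMissing(ctx context.Context, svc *compute.Service) {
	img, err := svc.Images.Get("my-project", "maybe-missing-image").Context(ctx).Do()
	if err != nil {
		var gerr *googleapi.Error
		if errors.As(err, &gerr) && gerr.Code == http.StatusNotFound {
			fmt.Println("image does not exist")
			return
		}
		log.Fatalf("Images.Get: %v", err)
	}
	fmt.Println("found image:", img.Name)
}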
+func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &AddressList{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71435,35 +86924,20 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList } return ret, nil // { - // "description": "Retrieves a list of global addresses.", - // "flatPath": "projects/{project}/global/addresses", + // "description": "Returns the specified image. Gets a list of available images by making a list() request.", + // "flatPath": "projects/{project}/global/images/{image}", // "httpMethod": "GET", - // "id": "compute.globalAddresses.list", + // "id": "compute.images.get", // "parameterOrder": [ - // "project" + // "project", + // "image" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. 
You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "image": { + // "description": "Name of the image resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -71472,16 +86946,11 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/global/addresses", + // "path": "projects/{project}/global/images/{image}", // "response": { - // "$ref": "AddressList" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -71492,141 +86961,119 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalForwardingRules.delete": +// method id "compute.images.getFromFamily": -type GlobalForwardingRulesDeleteCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesGetFromFamilyCall struct { + s *Service + project string + family string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified GlobalForwardingRule resource. +// GetFromFamily: Returns the latest image that is part of an image +// family and is not deprecated. // -// - forwardingRule: Name of the ForwardingRule resource to delete. +// - family: Name of the image family to search for. // - project: Project ID for this request. 
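// --- Illustrative usage sketch (not part of the generated client or this vendored diff) ---
// The Pages helper defined above drives pageToken/nextPageToken iteration for the caller,
// invoking the callback once per page until the token is empty or the callback returns an
// error. A minimal sketch with assumed example values; imports assumed: context, fmt, log,
// compute "google.golang.org/api/compute/v1".
func listAllGlobalAddresses(ctx context.Context, svc *compute.Service) {
	err := svc.GlobalAddresses.List("my-project").Pages(ctx, func(page *compute.AddressList) error {
		for _, addr := range page.Items {
			fmt.Println(addr.Name, addr.Status)
		}
		return nil // returning a non-nil error stops the iteration
	})
	if err != nil {
		log.Fatalf("GlobalAddresses.List pages: %v", err)
	}
}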
-func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { - c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { + c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.family = family return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { +func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { +func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
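// --- Illustrative usage sketch (not part of the generated client or this vendored diff) ---
// ImagesGetFromFamilyCall above resolves the latest non-deprecated image in a family,
// which is the usual way to pin "latest" when creating instances. The public project and
// family names below are example values. Imports assumed: context, fmt, log,
// compute "google.golang.org/api/compute/v1".
func latestImageInFamily(ctx context.Context, svc *compute.Service) {
	img, err := svc.Images.GetFromFamily("debian-cloud", "debian-11").Context(ctx).Do()
	if err != nil {
		log.Fatalf("Images.GetFromFamily: %v", err)
	}
	fmt.Printf("family resolved to %s (%s)\n", img.Name, img.SelfLink)
}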
-func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { +func (c *ImagesGetFromFamilyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/family/{family}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "family": c.family, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.images.getFromFamily" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71638,17 +87085,17 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified GlobalForwardingRule resource.", - // "flatPath": "projects/{project}/global/forwardingRules/{forwardingRule}", - // "httpMethod": "DELETE", - // "id": "compute.globalForwardingRules.delete", + // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "flatPath": "projects/{project}/global/images/family/{family}", + // "httpMethod": "GET", + // "id": "compute.images.getFromFamily", // "parameterOrder": [ // "project", - // "forwardingRule" + // "family" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", + // "family": { + // "description": "Name of the image family to search for.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -71660,53 +87107,56 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/global/forwardingRules/{forwardingRule}", + // "path": "projects/{project}/global/images/family/{family}", // "response": { - // "$ref": "Operation" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.globalForwardingRules.get": +// method id "compute.images.getIamPolicy": -type GlobalForwardingRulesGetCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ImagesGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } - -// Get: Returns the specified GlobalForwardingRule resource. Gets a list -// of available forwarding rules by making a list() request. + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. // -// - forwardingRule: Name of the ForwardingRule resource to return. // - project: Project ID for this request. -func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { - c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { + c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule + c.resource = resource + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *ImagesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ImagesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall { +func (c *ImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71716,7 +87166,7 @@ func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwa // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall { +func (c *ImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *ImagesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -71724,21 +87174,21 @@ func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForw // Context sets the context to be used in this call's Do method. 
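For orientation, here is a minimal sketch (not part of the vendored diff) of how the compute.images.getFromFamily call introduced above is typically used from this package. It assumes Application Default Credentials are configured; the project and family names are illustrative placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// Resolve the latest non-deprecated image in a public image family.
	img, err := svc.Images.GetFromFamily("debian-cloud", "debian-11").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(img.Name, img.SelfLink)
}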
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { +func (c *ImagesGetIamPolicyCall) Context(ctx context.Context) *ImagesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesGetCall) Header() http.Header { +func (c *ImagesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -71751,7 +87201,7 @@ func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -71759,39 +87209,39 @@ func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.get" call. -// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { +// Do executes the "compute.images.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ForwardingRule{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71803,21 +87253,20 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar } return ret, nil // { - // "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request.", - // "flatPath": "projects/{project}/global/forwardingRules/{forwardingRule}", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/global/images/{resource}/getIamPolicy", // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.get", + // "id": "compute.images.getIamPolicy", // "parameterOrder": [ // "project", - // "forwardingRule" + // "resource" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" // }, // "project": { // "description": "Project ID for this request.", @@ -71825,11 +87274,18 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/forwardingRules/{forwardingRule}", + // "path": "projects/{project}/global/images/{resource}/getIamPolicy", // "response": { - // "$ref": "ForwardingRule" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -71840,25 +87296,32 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar } -// method id "compute.globalForwardingRules.insert": +// method id "compute.images.insert": -type GlobalForwardingRulesInsertCall struct { - s *Service - project string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesInsertCall struct { + s *Service + project string + image *Image + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a GlobalForwardingRule resource in the specified -// project using the data included in the request. +// Insert: Creates an image in the specified project using the data +// included in the request. // // - project: Project ID for this request. 
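A small usage sketch for the compute.images.getIamPolicy method shown above, assuming default credentials; the project and image names are placeholders, and policy version 3 is requested via the optional parameter added in this diff.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Read the IAM policy attached to a custom image.
	policy, err := svc.Images.GetIamPolicy("my-project", "my-image").
		OptionsRequestedPolicyVersion(3).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range policy.Bindings {
		fmt.Println(b.Role, b.Members)
	}
}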
-func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { - c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { + c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingrule = forwardingrule + c.image = image + return c +} + +// ForceCreate sets the optional parameter "forceCreate": Force image +// creation if true. +func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { + c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) return c } @@ -71873,7 +87336,7 @@ func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *Fo // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalForwardingRulesInsertCall { +func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -71881,7 +87344,7 @@ func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalFor // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { +func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71889,21 +87352,21 @@ func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalFo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { +func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesInsertCall) Header() http.Header { +func (c *ImagesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -71911,14 +87374,14 @@ func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -71931,31 +87394,31 @@ func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.insert" call. +// Do executes the "compute.images.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -71969,14 +87432,19 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/global/forwardingRules", + // "description": "Creates an image in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/images", // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.insert", + // "id": "compute.images.insert", // "parameterOrder": [ // "project" // ], // "parameters": { + // "forceCreate": { + // "description": "Force image creation if true.", + // "location": "query", + // "type": "boolean" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -71990,24 +87458,27 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "projects/{project}/global/forwardingRules", + // "path": "projects/{project}/global/images", // "request": { - // "$ref": "ForwardingRule" + // "$ref": "Image" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.globalForwardingRules.list": +// method id "compute.images.list": -type GlobalForwardingRulesListCall struct { +type ImagesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -72016,30 +87487,38 @@ type GlobalForwardingRulesListCall struct { header_ http.Header } -// List: Retrieves a list of GlobalForwardingRule resources available to -// the specified project. 
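For reference, a hedged sketch of the compute.images.insert call above. The project, disk path, and image names are placeholders, and the SourceDisk URL format is an assumption for illustration; the call returns a long-running Operation.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Create an image from an existing disk; all names are placeholders.
	img := &compute.Image{
		Name:       "example-image",
		Family:     "example-family",
		SourceDisk: "projects/my-project/zones/us-central1-a/disks/example-disk",
	}
	op, err := svc.Images.Insert("my-project", img).ForceCreate(true).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started operation:", op.Name)
}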
+// List: Retrieves the list of custom images available to the specified +// project. Custom images are images you create that belong to your +// project. This method does not get any images that belong to other +// projects, including publicly-available images, like Debian 8. If you +// want to get a list of publicly-available images, use this method to +// make a request to the respective image project, such as debian-cloud +// or windows-cloud. // // - project: Project ID for this request. -func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { - c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ImagesService) List(project string) *ImagesListCall { + c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -72047,8 +87526,18 @@ func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRul // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *ImagesListCall) Filter(filter string) *ImagesListCall { c.urlParams_.Set("filter", filter) return c } @@ -72059,7 +87548,7 @@ func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingR // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall { +func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -72073,7 +87562,7 @@ func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForw // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { +func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -72081,7 +87570,7 @@ func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardin // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall { +func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -72090,7 +87579,7 @@ func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwa // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *GlobalForwardingRulesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalForwardingRulesListCall { +func (c *ImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ImagesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -72098,7 +87587,7 @@ func (c *GlobalForwardingRulesListCall) ReturnPartialSuccess(returnPartialSucces // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall { +func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72108,7 +87597,7 @@ func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForw // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall { +func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { c.ifNoneMatch_ = entityTag return c } @@ -72116,21 +87605,21 @@ func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalFor // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall { +func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesListCall) Header() http.Header { +func (c *ImagesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -72143,7 +87632,7 @@ func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -72156,33 +87645,33 @@ func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { +// Do executes the "compute.images.list" call. +// Exactly one of *ImageList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ImageList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ForwardingRuleList{ + ret := &ImageList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72194,16 +87683,16 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa } return ret, nil // { - // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", - // "flatPath": "projects/{project}/global/forwardingRules", + // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "flatPath": "projects/{project}/global/images", // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.list", + // "id": "compute.images.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -72238,9 +87727,9 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // "type": "boolean" // } // }, - // "path": "projects/{project}/global/forwardingRules", + // "path": "projects/{project}/global/images", // "response": { - // "$ref": "ForwardingRuleList" + // "$ref": "ImageList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -72254,7 +87743,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { +func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -72272,30 +87761,29 @@ func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*Forwa } } -// method id "compute.globalForwardingRules.patch": +// method id "compute.images.patch": -type GlobalForwardingRulesPatchCall struct { - s *Service - project string - forwardingRule string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesPatchCall struct { + s *Service + project string + image string + image2 *Image + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified forwarding rule with the data included -// in the request. 
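A minimal sketch of the compute.images.list call and its Pages helper shown above, combining the filter and paging setters; the project and family names are placeholders, and the filter string follows the syntax described in the generated documentation.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// List custom images in the project, filtered by family, one page at a time.
	call := svc.Images.List("my-project").Filter(`family = "example-family"`).MaxResults(100)
	err = call.Pages(ctx, func(page *compute.ImageList) error {
		for _, img := range page.Items {
			fmt.Println(img.Name, img.Status)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}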
This method supports PATCH semantics and uses the -// JSON merge patch format and processing rules. Currently, you can only -// patch the network_tier field. +// Patch: Patches the specified image with the data included in the +// request. Only the following fields can be modified: family, +// description, deprecation status. // -// - forwardingRule: Name of the ForwardingRule resource to patch. +// - image: Name of the image resource to patch. // - project: Project ID for this request. -func (r *GlobalForwardingRulesService) Patch(project string, forwardingRule string, forwardingrule *ForwardingRule) *GlobalForwardingRulesPatchCall { - c := &GlobalForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ImagesService) Patch(project string, image string, image2 *Image) *ImagesPatchCall { + c := &ImagesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule - c.forwardingrule = forwardingrule + c.image = image + c.image2 = image2 return c } @@ -72310,7 +87798,7 @@ func (r *GlobalForwardingRulesService) Patch(project string, forwardingRule stri // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesPatchCall) RequestId(requestId string) *GlobalForwardingRulesPatchCall { +func (c *ImagesPatchCall) RequestId(requestId string) *ImagesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -72318,7 +87806,7 @@ func (c *GlobalForwardingRulesPatchCall) RequestId(requestId string) *GlobalForw // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesPatchCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesPatchCall { +func (c *ImagesPatchCall) Fields(s ...googleapi.Field) *ImagesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72326,21 +87814,21 @@ func (c *GlobalForwardingRulesPatchCall) Fields(s ...googleapi.Field) *GlobalFor // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesPatchCall) Context(ctx context.Context) *GlobalForwardingRulesPatchCall { +func (c *ImagesPatchCall) Context(ctx context.Context) *ImagesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *GlobalForwardingRulesPatchCall) Header() http.Header { +func (c *ImagesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -72348,14 +87836,14 @@ func (c *GlobalForwardingRulesPatchCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.image2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -72363,37 +87851,37 @@ func (c *GlobalForwardingRulesPatchCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.patch" call. +// Do executes the "compute.images.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -72407,17 +87895,17 @@ func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", - // "flatPath": "projects/{project}/global/forwardingRules/{forwardingRule}", + // "description": "Patches the specified image with the data included in the request. 
Only the following fields can be modified: family, description, deprecation status.", + // "flatPath": "projects/{project}/global/images/{image}", // "httpMethod": "PATCH", - // "id": "compute.globalForwardingRules.patch", + // "id": "compute.images.patch", // "parameterOrder": [ // "project", - // "forwardingRule" + // "image" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to patch.", + // "image": { + // "description": "Name of the image resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -72436,9 +87924,9 @@ func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "projects/{project}/global/forwardingRules/{forwardingRule}", + // "path": "projects/{project}/global/images/{image}", // "request": { - // "$ref": "ForwardingRule" + // "$ref": "Image" // }, // "response": { // "$ref": "Operation" @@ -72451,35 +87939,35 @@ func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.globalForwardingRules.setLabels": +// method id "compute.images.setIamPolicy": -type GlobalForwardingRulesSetLabelsCall struct { +type ImagesSetIamPolicyCall struct { s *Service project string resource string - globalsetlabelsrequest *GlobalSetLabelsRequest + globalsetpolicyrequest *GlobalSetPolicyRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetLabels: Sets the labels on the specified resource. To learn more -// about labels, read the Labeling resources documentation. +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. // // - project: Project ID for this request. // - resource: Name or id of the resource for this request. -func (r *GlobalForwardingRulesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalForwardingRulesSetLabelsCall { - c := &GlobalForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *ImagesSetIamPolicyCall { + c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetLabelsCall { +func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72487,21 +87975,21 @@ func (c *GlobalForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *Globa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesSetLabelsCall) Context(ctx context.Context) *GlobalForwardingRulesSetLabelsCall { +func (c *ImagesSetIamPolicyCall) Context(ctx context.Context) *ImagesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
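A usage sketch for the compute.images.patch call above. Per the generated description, only family, description, and deprecation status are patchable; the project and image names here are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Patch only the mutable image fields (family/description/deprecation).
	patch := &compute.Image{
		Family:      "example-family-v2",
		Description: "rotated to the v2 family",
	}
	op, err := svc.Images.Patch("my-project", "example-image", patch).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("patch operation:", op.Name)
}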
-func (c *GlobalForwardingRulesSetLabelsCall) Header() http.Header { +func (c *ImagesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -72509,14 +87997,14 @@ func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -72530,33 +88018,33 @@ func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.images.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72568,10 +88056,10 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets the labels on the specified resource. 
To learn more about labels, read the Labeling resources documentation.", - // "flatPath": "projects/{project}/global/forwardingRules/{resource}/setLabels", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/global/images/{resource}/setIamPolicy", // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.setLabels", + // "id": "compute.images.setIamPolicy", // "parameterOrder": [ // "project", // "resource" @@ -72592,12 +88080,12 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "projects/{project}/global/forwardingRules/{resource}/setLabels", + // "path": "projects/{project}/global/images/{resource}/setIamPolicy", // "request": { - // "$ref": "GlobalSetLabelsRequest" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -72607,52 +88095,35 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.globalForwardingRules.setTarget": +// method id "compute.images.setLabels": -type GlobalForwardingRulesSetTargetCall struct { - s *Service - project string - forwardingRule string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTarget: Changes target URL for the GlobalForwardingRule resource. -// The new target should be of the same type as the old target. +// SetLabels: Sets the labels on an image. To learn more about labels, +// read the Labeling Resources documentation. // -// - forwardingRule: Name of the ForwardingRule resource in which target -// is to be set. // - project: Project ID for this request. -func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { - c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { + c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule - c.targetreference = targetreference - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
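A hedged sketch of the compute.images.setIamPolicy call above; the project, image, role, and service account identity are placeholders. The request replaces any existing policy on the image, as the generated description notes.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Replace the image's IAM policy with a single binding.
	req := &compute.GlobalSetPolicyRequest{
		Policy: &compute.Policy{
			Bindings: []*compute.Binding{{
				Role:    "roles/compute.imageUser",
				Members: []string{"serviceAccount:builder@my-project.iam.gserviceaccount.com"},
			}},
		},
	}
	policy, err := svc.Images.SetIamPolicy("my-project", "example-image", req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("etag:", policy.Etag)
}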
-func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *GlobalForwardingRulesSetTargetCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { +func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72660,21 +88131,21 @@ func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *Globa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { +func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { +func (c *ImagesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -72682,14 +88153,14 @@ func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules/{forwardingRule}/setTarget") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -72697,37 +88168,37 @@ func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.setTarget" call. +// Do executes the "compute.images.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -72741,22 +88212,15 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", - // "flatPath": "projects/{project}/global/forwardingRules/{forwardingRule}/setTarget", + // "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/global/images/{resource}/setLabels", // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.setTarget", + // "id": "compute.images.setLabels", // "parameterOrder": [ // "project", - // "forwardingRule" + // "resource" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -72764,361 +88228,18 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{project}/global/forwardingRules/{forwardingRule}/setTarget", - // "request": { - // "$ref": "TargetReference" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.globalNetworkEndpointGroups.attachNetworkEndpoints": - -type GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall struct { - s *Service - project string - networkEndpointGroup string - globalnetworkendpointgroupsattachendpointsrequest *GlobalNetworkEndpointGroupsAttachEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AttachNetworkEndpoints: Attach a network endpoint to the specified -// network endpoint group. -// -// - networkEndpointGroup: The name of the network endpoint group where -// you are attaching network endpoints to. It should comply with -// RFC1035. -// - project: Project ID for this request. -func (r *GlobalNetworkEndpointGroupsService) AttachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsattachendpointsrequest *GlobalNetworkEndpointGroupsAttachEndpointsRequest) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { - c := &GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.networkEndpointGroup = networkEndpointGroup - c.globalnetworkendpointgroupsattachendpointsrequest = globalnetworkendpointgroupsattachendpointsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
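Finally, a sketch of the compute.images.setLabels call above. It reads the current label fingerprint first, since the API uses it for optimistic concurrency; the project, image, and label values are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Fetch the image to obtain its current label fingerprint.
	img, err := svc.Images.Get("my-project", "example-image").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	req := &compute.GlobalSetLabelsRequest{
		LabelFingerprint: img.LabelFingerprint,
		Labels:           map[string]string{"env": "prod", "team": "infra"},
	}
	op, err := svc.Images.SetLabels("my-project", "example-image", req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("setLabels operation:", op.Name)
}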
-func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalnetworkendpointgroupsattachendpointsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "networkEndpointGroup": c.networkEndpointGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalNetworkEndpointGroups.attachNetworkEndpoints" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Attach a network endpoint to the specified network endpoint group.", - // "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", - // "httpMethod": "POST", - // "id": "compute.globalNetworkEndpointGroups.attachNetworkEndpoints", - // "parameterOrder": [ - // "project", - // "networkEndpointGroup" - // ], - // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group where you are attaching network endpoints to. 
It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "path": "projects/{project}/global/images/{resource}/setLabels", // "request": { - // "$ref": "GlobalNetworkEndpointGroupsAttachEndpointsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.globalNetworkEndpointGroups.delete": - -type GlobalNetworkEndpointGroupsDeleteCall struct { - s *Service - project string - networkEndpointGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified network endpoint group.Note that the -// NEG cannot be deleted if there are backend services referencing it. -// -// - networkEndpointGroup: The name of the network endpoint group to -// delete. It should comply with RFC1035. -// - project: Project ID for this request. -func (r *GlobalNetworkEndpointGroupsService) Delete(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsDeleteCall { - c := &GlobalNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.networkEndpointGroup = networkEndpointGroup - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
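Note: the RequestId doc comments removed and re-added throughout this hunk all describe the same idempotent-retry contract. A hedged sketch of reusing one request ID across retries of the global NEG delete shown above; the retry loop and helper name are illustrative, and it assumes the same package and imports as the earlier sketch.

// deleteGlobalNEG reuses a caller-chosen request ID across retries so the
// server can de-duplicate the delete if an earlier attempt already succeeded.
// requestID must be a valid, non-zero UUID, as the doc comment above requires.
func deleteGlobalNEG(ctx context.Context, svc *compute.Service, project, neg, requestID string) error {
	var lastErr error
	for attempt := 0; attempt < 3; attempt++ {
		_, err := svc.GlobalNetworkEndpointGroups.Delete(project, neg).
			RequestId(requestID). // same ID on every attempt
			Context(ctx).
			Do()
		if err == nil {
			return nil
		}
		lastErr = err
	}
	return lastErr
}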
-func (c *GlobalNetworkEndpointGroupsDeleteCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsDeleteCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalNetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalNetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalNetworkEndpointGroupsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "networkEndpointGroup": c.networkEndpointGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalNetworkEndpointGroups.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified network endpoint group.Note that the NEG cannot be deleted if there are backend services referencing it.", - // "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}", - // "httpMethod": "DELETE", - // "id": "compute.globalNetworkEndpointGroups.delete", - // "parameterOrder": [ - // "project", - // "networkEndpointGroup" - // ], - // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } + // "$ref": "GlobalSetLabelsRequest" // }, - // "path": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}", // "response": { // "$ref": "Operation" // }, @@ -73130,52 +88251,35 @@ func (c *GlobalNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) } -// method id "compute.globalNetworkEndpointGroups.detachNetworkEndpoints": +// method id "compute.images.testIamPermissions": -type GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall struct { - s *Service - project string - networkEndpointGroup string - globalnetworkendpointgroupsdetachendpointsrequest *GlobalNetworkEndpointGroupsDetachEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DetachNetworkEndpoints: Detach the network endpoint from the -// specified network endpoint group. +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
// -// - networkEndpointGroup: The name of the network endpoint group where -// you are removing network endpoints. It should comply with RFC1035. // - project: Project ID for this request. -func (r *GlobalNetworkEndpointGroupsService) DetachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsdetachendpointsrequest *GlobalNetworkEndpointGroupsDetachEndpointsRequest) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { - c := &GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { + c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.networkEndpointGroup = networkEndpointGroup - c.globalnetworkendpointgroupsdetachendpointsrequest = globalnetworkendpointgroupsdetachendpointsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { +func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73183,21 +88287,21 @@ func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...goog // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { +func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
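Note: the replacement side of this hunk introduces compute.images.testIamPermissions. A minimal sketch of calling it through the generated surface, assuming the vendored compute/v1 client and the imports from the first sketch; the permission strings are examples only.

// checkImagePermissions probes which of the given permissions the caller holds
// on an image.
func checkImagePermissions(ctx context.Context, svc *compute.Service, project, image string) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.images.get", "compute.images.setLabels"}, // illustrative
	}
	resp, err := svc.Images.TestIamPermissions(project, image, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	// resp.Permissions is expected to hold the subset actually granted.
	return resp.Permissions, nil
}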
-func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { +func (c *ImagesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -73205,14 +88309,14 @@ func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt st } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalnetworkendpointgroupsdetachendpointsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -73220,207 +88324,39 @@ func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt st } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "networkEndpointGroup": c.networkEndpointGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalNetworkEndpointGroups.detachNetworkEndpoints" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Detach the network endpoint from the specified network endpoint group.", - // "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", - // "httpMethod": "POST", - // "id": "compute.globalNetworkEndpointGroups.detachNetworkEndpoints", - // "parameterOrder": [ - // "project", - // "networkEndpointGroup" - // ], - // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", - // "request": { - // "$ref": "GlobalNetworkEndpointGroupsDetachEndpointsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.globalNetworkEndpointGroups.get": - -type GlobalNetworkEndpointGroupsGetCall struct { - s *Service - project string - networkEndpointGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified network endpoint group. Gets a list of -// available network endpoint groups by making a list() request. -// -// - networkEndpointGroup: The name of the network endpoint group. It -// should comply with RFC1035. -// - project: Project ID for this request. 
-func (r *GlobalNetworkEndpointGroupsService) Get(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsGetCall { - c := &GlobalNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.networkEndpointGroup = networkEndpointGroup - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalNetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalNetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *GlobalNetworkEndpointGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalNetworkEndpointGroupsGetCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalNetworkEndpointGroupsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalNetworkEndpointGroups.get" call. -// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any +// Do executes the "compute.images.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *NetworkEndpointGroup.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
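Note: the removed Get call above documents the ETag-based conditional fetch that the generated Get calls support (IfNoneMatch plus googleapi.IsNotModified). A hedged sketch under the same assumptions as the earlier sketches, where the caller is assumed to have cached an ETag from an earlier response header.

// refreshNEG re-fetches a global NEG only if it changed since cachedETag.
func refreshNEG(ctx context.Context, svc *compute.Service, project, neg, cachedETag string, cached *compute.NetworkEndpointGroup) (*compute.NetworkEndpointGroup, error) {
	got, err := svc.GlobalNetworkEndpointGroups.Get(project, neg).
		IfNoneMatch(cachedETag). // conditional fetch on the stored ETag
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // unchanged since the cached copy, as the docs above describe
	}
	if err != nil {
		return nil, err
	}
	return got, nil
}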
-func (c *GlobalNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { +func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkEndpointGroup{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73432,32 +88368,36 @@ func (c *GlobalNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", - // "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}", - // "httpMethod": "GET", - // "id": "compute.globalNetworkEndpointGroups.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/global/images/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.images.testIamPermissions", // "parameterOrder": [ // "project", - // "networkEndpointGroup" + // "resource" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group. It should comply with RFC1035.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}", + // "path": "projects/{project}/global/images/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "NetworkEndpointGroup" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -73468,25 +88408,43 @@ func (c *GlobalNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.globalNetworkEndpointGroups.insert": +// method id "compute.instanceGroupManagers.abandonInstances": -type GlobalNetworkEndpointGroupsInsertCall struct { - s *Service - project string - networkendpointgroup *NetworkEndpointGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersabandoninstancesrequest 
*InstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a network endpoint group in the specified project -// using the parameters that are included in the request. +// AbandonInstances: Flags the specified instances to be removed from +// the managed instance group. Abandoning an instance does not delete +// the instance, but it does remove the instance from any target pools +// that are applied by the managed instance group. This method reduces +// the targetSize of the managed instance group by the number of +// instances that you abandon. This operation is marked as DONE when the +// action is scheduled even if the instances have not yet been removed +// from the group. You must separately verify the status of the +// abandoning action with the listmanagedinstances method. If the group +// is part of a backend service that has enabled connection draining, it +// can take up to 60 seconds after the connection draining duration has +// elapsed before the VM instance is removed or deleted. You can specify +// a maximum of 1000 instances with this method per request. // -// - project: Project ID for this request. -func (r *GlobalNetworkEndpointGroupsService) Insert(project string, networkendpointgroup *NetworkEndpointGroup) *GlobalNetworkEndpointGroupsInsertCall { - c := &GlobalNetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. +func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { + c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.networkendpointgroup = networkendpointgroup + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest return c } @@ -73501,7 +88459,7 @@ func (r *GlobalNetworkEndpointGroupsService) Insert(project string, networkendpo // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *GlobalNetworkEndpointGroupsInsertCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsInsertCall { +func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -73509,7 +88467,7 @@ func (c *GlobalNetworkEndpointGroupsInsertCall) RequestId(requestId string) *Glo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
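Note: the new compute.instanceGroupManagers.abandonInstances method added here accepts up to 1000 instance references and returns an Operation that is marked DONE once the action is merely scheduled. A minimal sketch of issuing the call, assuming the same client and imports as the sketches above; the instance URL is illustrative.

// abandonInstance removes one instance from a managed instance group without
// deleting it; instanceURL is a full or partial instance URL.
func abandonInstance(ctx context.Context, svc *compute.Service, project, zone, igm, instanceURL string) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersAbandonInstancesRequest{
		Instances: []string{instanceURL}, // field name per the generated request type; up to 1000 per request
	}
	return svc.InstanceGroupManagers.AbandonInstances(project, zone, igm, req).Context(ctx).Do()
}

As the description notes, the returned Operation only confirms that the abandon was scheduled; the outcome still has to be verified with listManagedInstances.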
-func (c *GlobalNetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsInsertCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73517,21 +88475,21 @@ func (c *GlobalNetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *Gl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalNetworkEndpointGroupsInsertCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsInsertCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalNetworkEndpointGroupsInsertCall) Header() http.Header { +func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -73539,14 +88497,14 @@ func (c *GlobalNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -73554,36 +88512,38 @@ func (c *GlobalNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalNetworkEndpointGroups.insert" call. +// Do executes the "compute.instanceGroupManagers.abandonInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -73597,14 +88557,22 @@ func (c *GlobalNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", - // "flatPath": "projects/{project}/global/networkEndpointGroups", + // "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", // "httpMethod": "POST", - // "id": "compute.globalNetworkEndpointGroups.insert", + // "id": "compute.instanceGroupManagers.abandonInstances", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -73616,11 +88584,17 @@ func (c *GlobalNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/networkEndpointGroups", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", // "request": { - // "$ref": "NetworkEndpointGroup" + // "$ref": "InstanceGroupManagersAbandonInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -73633,9 +88607,9 @@ func (c *GlobalNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) } -// method id "compute.globalNetworkEndpointGroups.list": +// method id "compute.instanceGroupManagers.aggregatedList": -type GlobalNetworkEndpointGroupsListCall struct { +type InstanceGroupManagersAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -73644,30 +88618,33 @@ type GlobalNetworkEndpointGroupsListCall struct { header_ http.Header } -// List: Retrieves the list of network endpoint groups that are located -// in the specified project. +// AggregatedList: Retrieves the list of managed instance groups and +// groups them by zone. // // - project: Project ID for this request. -func (r *GlobalNetworkEndpointGroupsService) List(project string) *GlobalNetworkEndpointGroupsListCall { - c := &GlobalNetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { + c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. 
The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -73675,19 +88652,42 @@ func (r *GlobalNetworkEndpointGroupsService) List(project string) *GlobalNetwork // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *GlobalNetworkEndpointGroupsListCall) Filter(filter string) *GlobalNetworkEndpointGroupsListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *InstanceGroupManagersAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than `maxResults`, Compute Engine returns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *GlobalNetworkEndpointGroupsListCall) MaxResults(maxResults int64) *GlobalNetworkEndpointGroupsListCall { +func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -73701,7 +88701,7 @@ func (c *GlobalNetworkEndpointGroupsListCall) MaxResults(maxResults int64) *Glob // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *GlobalNetworkEndpointGroupsListCall) OrderBy(orderBy string) *GlobalNetworkEndpointGroupsListCall { +func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -73709,7 +88709,7 @@ func (c *GlobalNetworkEndpointGroupsListCall) OrderBy(orderBy string) *GlobalNet // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *GlobalNetworkEndpointGroupsListCall) PageToken(pageToken string) *GlobalNetworkEndpointGroupsListCall { +func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -73718,7 +88718,7 @@ func (c *GlobalNetworkEndpointGroupsListCall) PageToken(pageToken string) *Globa // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *GlobalNetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalNetworkEndpointGroupsListCall { +func (c *InstanceGroupManagersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -73726,7 +88726,7 @@ func (c *GlobalNetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartial // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalNetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsListCall { +func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73736,7 +88736,7 @@ func (c *GlobalNetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *Glob // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalNetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *GlobalNetworkEndpointGroupsListCall { +func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -73744,21 +88744,21 @@ func (c *GlobalNetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *Glo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
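Note: the expanded filter documentation above now covers both AIP-160 expressions and RE2-style eq/ne filters, plus the new includeAllScopes flag. A brief sketch of wiring a few of these options onto the aggregated-list call, under the same assumptions as the sketches above; the filter value is only an example.

// listManagersByZone issues a single aggregated-list request with a few of the
// options documented above.
func listManagersByZone(ctx context.Context, svc *compute.Service, project string) (*compute.InstanceGroupManagerAggregatedList, error) {
	return svc.InstanceGroupManagers.AggregatedList(project).
		Filter(`name != example-instance`). // AIP-160 style expression from the docs above
		IncludeAllScopes(false).
		MaxResults(100).
		ReturnPartialSuccess(true).
		Context(ctx).
		Do()
}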
-func (c *GlobalNetworkEndpointGroupsListCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsListCall { +func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalNetworkEndpointGroupsListCall) Header() http.Header { +func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -73771,7 +88771,7 @@ func (c *GlobalNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Respo var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -73784,33 +88784,34 @@ func (c *GlobalNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalNetworkEndpointGroups.list" call. -// Exactly one of *NetworkEndpointGroupList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { +// Do executes the "compute.instanceGroupManagers.aggregatedList" call. +// Exactly one of *InstanceGroupManagerAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkEndpointGroupList{ + ret := &InstanceGroupManagerAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73822,19 +88823,24 @@ func (c *GlobalNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves the list of network endpoint groups that are located in the specified project.", - // "flatPath": "projects/{project}/global/networkEndpointGroups", + // "description": "Retrieves the list of managed instance groups and groups them by zone.", + // "flatPath": "projects/{project}/aggregated/instanceGroupManagers", // "httpMethod": "GET", - // "id": "compute.globalNetworkEndpointGroups.list", + // "id": "compute.instanceGroupManagers.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -73866,9 +88872,9 @@ func (c *GlobalNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( // "type": "boolean" // } // }, - // "path": "projects/{project}/global/networkEndpointGroups", + // "path": "projects/{project}/aggregated/instanceGroupManagers", // "response": { - // "$ref": "NetworkEndpointGroupList" + // "$ref": "InstanceGroupManagerAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -73882,7 +88888,7 @@ func (c *GlobalNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *GlobalNetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { +func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -73900,104 +88906,228 @@ func (c *GlobalNetworkEndpointGroupsListCall) Pages(ctx context.Context, f func( } } -// method id "compute.globalNetworkEndpointGroups.listNetworkEndpoints": +// method id "compute.instanceGroupManagers.applyUpdatesToInstances": -type GlobalNetworkEndpointGroupsListNetworkEndpointsCall struct { - s *Service - project string - networkEndpointGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersApplyUpdatesToInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListNetworkEndpoints: Lists the network endpoints in the specified -// network endpoint group. +// ApplyUpdatesToInstances: Applies changes to selected instances on the +// managed instance group. This method can be used to apply new +// overrides and/or new versions. // -// - networkEndpointGroup: The name of the network endpoint group from -// which you want to generate a list of included network endpoints. It -// should comply with RFC1035. -// - project: Project ID for this request. -func (r *GlobalNetworkEndpointGroupsService) ListNetworkEndpoints(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { - c := &GlobalNetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group, +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. Should conform to RFC1035. +func (r *InstanceGroupManagersService) ApplyUpdatesToInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest) *InstanceGroupManagersApplyUpdatesToInstancesCall { + c := &InstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.networkEndpointGroup = networkEndpointGroup + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersapplyupdatesrequest = instancegroupmanagersapplyupdatesrequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. 
For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("filter", filter) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersApplyUpdatesToInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *InstanceGroupManagersApplyUpdatesToInstancesCall { + c.ctx_ = ctx return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("orderBy", orderBy) - return c +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. 
-func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("pageToken", pageToken) +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersapplyupdatesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.applyUpdatesToInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Applies changes to selected instances on the managed instance group. 
This method can be used to apply new overrides and/or new versions.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.applyUpdatesToInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group, should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located. Should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + // "request": { + // "$ref": "InstanceGroupManagersApplyUpdatesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.createInstances": + +type InstanceGroupManagersCreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerscreateinstancesrequest *InstanceGroupManagersCreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// CreateInstances: Creates instances with per-instance configurations +// in this managed instance group. Instances are created using the +// current instance template. The create instances operation is marked +// DONE if the createInstances request is successful. The underlying +// actions take additional time. You must separately verify the status +// of the creating or actions with the listmanagedinstances method. +// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. +func (r *InstanceGroupManagersService) CreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagerscreateinstancesrequest *InstanceGroupManagersCreateInstancesRequest) *InstanceGroupManagersCreateInstancesCall { + c := &InstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerscreateinstancesrequest = instancegroupmanagerscreateinstancesrequest return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. 
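// Editor's aside (illustrative only, not part of the vendored patch): a
// hedged sketch of calling the applyUpdatesToInstances method documented
// above. svc is an assumed *compute.Service, ctx an assumed
// context.Context, and the project/zone/group/instance names are
// placeholders; the returned *Operation only records that the action was
// scheduled, as the method comment notes.
//
//	op, err := svc.InstanceGroupManagers.ApplyUpdatesToInstances(
//		"my-project", "us-central1-a", "my-mig",
//		&compute.InstanceGroupManagersApplyUpdatesRequest{
//			// Instances are referenced by URL relative to the project,
//			// e.g. zones/us-central1-a/instances/my-mig-abcd.
//			Instances:     []string{"zones/us-central1-a/instances/my-mig-abcd"},
//			MinimalAction: "REPLACE",
//		},
//	).Context(ctx).Do()
//	if err != nil {
//		return err
//	}
//	log.Printf("apply-updates operation %s is %s", op.Name, op.Status)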
-func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. The request ID +// must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersCreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersCreateInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { +func (c *InstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersCreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -74005,21 +89135,21 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...google // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { +func (c *InstanceGroupManagersCreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersCreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { +func (c *InstanceGroupManagersCreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -74027,9 +89157,14 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt stri } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerscreateinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -74038,40 +89173,39 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt stri req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "networkEndpointGroup": c.networkEndpointGroup, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalNetworkEndpointGroups.listNetworkEndpoints" call. -// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { +// Do executes the "compute.instanceGroupManagers.createInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkEndpointGroupsListNetworkEndpoints{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74083,42 +89217,204 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the network endpoints in the specified network endpoint group.", - // "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + // "description": "Creates instances with per-instance configurations in this managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances", // "httpMethod": "POST", - // "id": "compute.globalNetworkEndpointGroups.listNetworkEndpoints", + // "id": "compute.instanceGroupManagers.createInstances", // "parameterOrder": [ // "project", - // "networkEndpointGroup" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. 
It should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "zone": { + // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances", + // "request": { + // "$ref": "InstanceGroupManagersCreateInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.delete": + +type InstanceGroupManagersDeleteCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. 
Note that the instance group must not belong +// to a backend service. Read Deleting an instance group for more +// information. +// +// - instanceGroupManager: The name of the managed instance group to +// delete. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. +func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { + c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupManagersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.delete" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroupManagers.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group to delete.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -74128,236 +89424,340 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googlea // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalOperations.aggregatedList": +// method id "compute.instanceGroupManagers.deleteInstances": -type GlobalOperationsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of all operations. +// DeleteInstances: Flags the specified instances in the managed +// instance group for immediate deletion. The instances are also removed +// from any target pools of which they were a member. This method +// reduces the targetSize of the managed instance group by the number of +// instances that you delete. This operation is marked as DONE when the +// action is scheduled even if the instances are still being deleted. +// You must separately verify the status of the deleting action with the +// listmanagedinstances method. If the group is part of a backend +// service that has enabled connection draining, it can take up to 60 +// seconds after the connection draining duration has elapsed before the +// VM instance is removed or deleted. You can specify a maximum of 1000 +// instances with this method per request. // -// - project: Project ID for this request. -func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { - c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
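// Editor's aside (illustrative only): a sketch of the deleteInstances
// method described above, showing the request-ID idempotency pattern its
// comment recommends. svc, ctx and all resource names are placeholders,
// and "github.com/google/uuid" is merely one convenient way to mint a
// UUID, not something this package requires.
//
//	reqID := uuid.NewString() // reuse the same ID when retrying so the server deduplicates
//	op, err := svc.InstanceGroupManagers.DeleteInstances(
//		"my-project", "us-central1-a", "my-mig",
//		&compute.InstanceGroupManagersDeleteInstancesRequest{
//			Instances: []string{"zones/us-central1-a/instances/my-mig-abcd"},
//		},
//	).RequestId(reqID).Context(ctx).Do()
//	if err != nil {
//		return err
//	}
//	log.Printf("deleteInstances scheduled as operation %s", op.Name)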
+func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { + c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("filter", filter) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. 
For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *GlobalOperationsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { + c.ctx_ = ctx return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. 
-func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c +func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *GlobalOperationsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// Do executes the "compute.instanceGroupManagers.deleteInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method. 
If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.deleteInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "request": { + // "$ref": "InstanceGroupManagersDeleteInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.deletePerInstanceConfigs": + +type InstanceGroupManagersDeletePerInstanceConfigsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeletePerInstanceConfigs: Deletes selected per-instance +// configurations for the managed instance group. +// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. 
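// Editor's aside (illustrative only): a similar hedged sketch for the
// deletePerInstanceConfigs method described above. svc, ctx and the
// resource names are placeholders, and the Names field is assumed to take
// the short instance names whose per-instance configurations should be
// removed.
//
//	op, err := svc.InstanceGroupManagers.DeletePerInstanceConfigs(
//		"my-project", "us-central1-a", "my-mig",
//		&compute.InstanceGroupManagersDeletePerInstanceConfigsReq{
//			Names: []string{"my-mig-abcd", "my-mig-efgh"},
//		},
//	).Context(ctx).Do()
//	if err != nil {
//		return err
//	}
//	log.Printf("deletePerInstanceConfigs scheduled as operation %s", op.Name)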
+func (r *InstanceGroupManagersService) DeletePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq) *InstanceGroupManagersDeletePerInstanceConfigsCall { + c := &InstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersdeleteperinstanceconfigsreq = instancegroupmanagersdeleteperinstanceconfigsreq return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeletePerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersDeletePerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsAggregatedListCall) Header() http.Header { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteperinstanceconfigsreq) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.aggregatedList" call. -// Exactly one of *OperationAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *OperationAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { +// Do executes the "compute.instanceGroupManagers.deletePerInstanceConfigs" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &OperationAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74369,40 +89769,20 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of all operations.", - // "flatPath": "projects/{project}/aggregated/operations", - // "httpMethod": "GET", - // "id": "compute.globalOperations.aggregatedList", + // "description": "Deletes selected per-instance configurations for the managed instance group.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.deletePerInstanceConfigs", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -74412,143 +89792,171 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" + // "zone": { + // "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/aggregated/operations", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + // "request": { + // "$ref": "InstanceGroupManagersDeletePerInstanceConfigsReq" + // }, // "response": { - // "$ref": "OperationAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalOperations.delete": +// method id "compute.instanceGroupManagers.get": -type GlobalOperationsDeleteCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersGetCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Operations resource. +// Get: Returns all of the details about the specified managed instance +// group. Gets a list of available managed instance groups by making a +// list() request. // -// - operation: Name of the Operations resource to delete. -// - project: Project ID for this request. -func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { - c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. +func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { + c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { +func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { +func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.delete" call. -func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "compute.instanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return nil, gensupport.WrapError(err) } - return nil + ret := &InstanceGroupManager{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Deletes the specified Operations resource.", - // "flatPath": "projects/{project}/global/operations/{operation}", - // "httpMethod": "DELETE", - // "id": "compute.globalOperations.delete", + // "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.get", // "parameterOrder": [ // "project", - // "operation" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -74558,127 +89966,155 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/operations/{operation}", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "response": { + // "$ref": "InstanceGroupManager" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.globalOperations.get": +// method id "compute.instanceGroupManagers.insert": -type GlobalOperationsGetCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersInsertCall struct { + s *Service + project string + zone string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Retrieves the specified Operations resource. +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, instances in +// the group are created using the specified instance template. This +// operation is marked as DONE when the group is created even if the +// instances in the group have not yet been created. 
You must separately +// verify the status of the individual instances with the +// listmanagedinstances method. A managed instance group can have up to +// 1000 VM instances per group. Please contact Cloud Support if you need +// an increase in this limit. // -// - operation: Name of the Operations resource to return. -// - project: Project ID for this request. -func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { - c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the managed +// instance group. +func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { + c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.zone = zone + c.instancegroupmanager = instancegroupmanager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { +func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { +func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
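A minimal usage sketch of the Insert call and its RequestId option documented above; it is an illustration rather than part of the vendored code, and the import path (google.golang.org/api/compute/v1), Application Default Credentials, and the project, zone, template, and request-ID values are all assumptions.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService resolves credentials (Application Default Credentials assumed).
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	igm := &compute.InstanceGroupManager{
		Name:             "example-igm", // placeholder
		BaseInstanceName: "example",     // placeholder
		InstanceTemplate: "global/instanceTemplates/example-template", // placeholder
		TargetSize:       3,
	}
	// RequestId makes retries idempotent: resending the same UUID after a
	// timeout is ignored by the server if the original insert already completed.
	op, err := svc.InstanceGroupManagers.
		Insert("my-project", "us-central1-a", igm).
		RequestId("11111111-2222-3333-4444-555555555555").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("insert operation %s is %s", op.Name, op.Status)
}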
-func (c *GlobalOperationsGetCall) Header() http.Header { +func (c *InstanceGroupManagersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.get" call. +// Do executes the "compute.instanceGroupManagers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -74692,78 +90128,91 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified Operations resource.", - // "flatPath": "projects/{project}/global/operations/{operation}", - // "httpMethod": "GET", - // "id": "compute.globalOperations.get", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method. 
A managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.insert", // "parameterOrder": [ // "project", - // "operation" + // "zone" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/operations/{operation}", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers", + // "request": { + // "$ref": "InstanceGroupManager" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalOperations.list": +// method id "compute.instanceGroupManagers.list": -type GlobalOperationsListCall struct { +type InstanceGroupManagersListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of Operation resources contained within the -// specified project. +// List: Retrieves a list of managed instance groups that are contained +// within the specified project and zone. // -// - project: Project ID for this request. -func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { - c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
+func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { + c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -74771,8 +90220,18 @@ func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c } @@ -74783,7 +90242,7 @@ func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCa // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { +func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -74797,7 +90256,7 @@ func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperation // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { +func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -74805,7 +90264,7 @@ func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { +func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -74814,7 +90273,7 @@ func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperations // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *GlobalOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOperationsListCall { +func (c *InstanceGroupManagersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -74822,7 +90281,7 @@ func (c *GlobalOperationsListCall) ReturnPartialSuccess(returnPartialSuccess boo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { +func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -74832,7 +90291,7 @@ func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperation // getting updates only after the object has changed since the last // request. 
Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { +func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { c.ifNoneMatch_ = entityTag return c } @@ -74840,21 +90299,21 @@ func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperatio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { +func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsListCall) Header() http.Header { +func (c *InstanceGroupManagersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -74867,7 +90326,7 @@ func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -74876,37 +90335,38 @@ func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceGroupManagers.list" call. +// Exactly one of *InstanceGroupManagerList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupManagerList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &OperationList{ + ret := &InstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74918,16 +90378,17 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified project.", - // "flatPath": "projects/{project}/global/operations", + // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers", // "httpMethod": "GET", - // "id": "compute.globalOperations.list", + // "id": "compute.instanceGroupManagers.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -74960,11 +90421,17 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/operations", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers", // "response": { - // "$ref": "OperationList" + // "$ref": "InstanceGroupManagerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -74978,7 +90445,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
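A minimal sketch of the List call and its Pages helper documented above, continuing from the svc and ctx set up in the earlier Insert sketch; the project, zone, and filter expression are illustrative assumptions.

// Pages follows nextPageToken automatically and stops when f returns an error.
call := svc.InstanceGroupManagers.
	List("my-project", "us-central1-a").
	Filter(`name != example-instance`).
	MaxResults(100)
err = call.Pages(ctx, func(page *compute.InstanceGroupManagerList) error {
	for _, igm := range page.Items {
		log.Printf("%s (targetSize=%d)", igm.Name, igm.TargetSize)
	}
	return nil
})
if err != nil {
	log.Fatal(err)
}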
-func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { +func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -74996,115 +90463,211 @@ func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationL } } -// method id "compute.globalOperations.wait": +// method id "compute.instanceGroupManagers.listErrors": -type GlobalOperationsWaitCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersListErrorsCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Wait: Waits for the specified Operation resource to return as `DONE` -// or for the request to approach the 2 minute deadline, and retrieves -// the specified Operation resource. This method differs from the `GET` -// method in that it waits for no more than the default deadline (2 -// minutes) and then returns the current state of the operation, which -// might be `DONE` or still in progress. This method is called on a -// best-effort basis. Specifically: - In uncommon cases, when the server -// is overloaded, the request might return before the default deadline -// is reached, or might return after zero seconds. - If the default -// deadline is reached, there is no guarantee that the operation is -// actually done when the method returns. Be prepared to retry if the -// operation is not `DONE`. +// ListErrors: Lists all errors thrown by actions on instances for a +// given managed instance group. The filter and orderBy query parameters +// are not supported. // -// - operation: Name of the Operations resource to return. -// - project: Project ID for this request. -func (r *GlobalOperationsService) Wait(project string, operation string) *GlobalOperationsWaitCall { - c := &GlobalOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. It +// must be a string that meets the requirements in RFC1035, or an +// unsigned long integer: must match regexp pattern: (?:a-z +// (?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. +func (r *InstanceGroupManagersService) ListErrors(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListErrorsCall { + c := &InstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. 
For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstanceGroupManagersListErrorsCall) Filter(filter string) *InstanceGroupManagersListErrorsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstanceGroupManagersListErrorsCall) MaxResults(maxResults int64) *InstanceGroupManagersListErrorsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *InstanceGroupManagersListErrorsCall) OrderBy(orderBy string) *InstanceGroupManagersListErrorsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
+func (c *InstanceGroupManagersListErrorsCall) PageToken(pageToken string) *InstanceGroupManagersListErrorsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *InstanceGroupManagersListErrorsCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListErrorsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsWaitCall) Fields(s ...googleapi.Field) *GlobalOperationsWaitCall { +func (c *InstanceGroupManagersListErrorsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListErrorsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupManagersListErrorsCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListErrorsCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsWaitCall) Context(ctx context.Context) *GlobalOperationsWaitCall { +func (c *InstanceGroupManagersListErrorsCall) Context(ctx context.Context) *InstanceGroupManagersListErrorsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsWaitCall) Header() http.Header { +func (c *InstanceGroupManagersListErrorsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsWaitCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/operations/{operation}/wait") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listErrors") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.wait" call. 
-// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroupManagers.listErrors" call. +// Exactly one of *InstanceGroupManagersListErrorsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *InstanceGroupManagersListErrorsResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListErrorsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &InstanceGroupManagersListErrorsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75116,33 +90679,67 @@ func (c *GlobalOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress. This method is called on a best-effort basis. Specifically: - In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. - If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`. ", - // "flatPath": "projects/{project}/global/operations/{operation}/wait", - // "httpMethod": "POST", - // "id": "compute.globalOperations.wait", + // "description": "Lists all errors thrown by actions on instances for a given managed instance group. The filter and orderBy query parameters are not supported.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listErrors", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.listErrors", // "parameterOrder": [ // "project", - // "operation" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/operations/{operation}/wait", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listErrors", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupManagersListErrorsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -75153,239 +90750,222 @@ func (c *GlobalOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.globalOrganizationOperations.delete": - -type GlobalOrganizationOperationsDeleteCall struct { - s *Service - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersListErrorsCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListErrorsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Delete: Deletes the specified Operations resource. -// -// - operation: Name of the Operations resource to delete. -func (r *GlobalOrganizationOperationsService) Delete(operation string) *GlobalOrganizationOperationsDeleteCall { - c := &GlobalOrganizationOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.operation = operation - return c -} +// method id "compute.instanceGroupManagers.listManagedInstances": -// ParentId sets the optional parameter "parentId": Parent ID for this -// request. -func (c *GlobalOrganizationOperationsDeleteCall) ParentId(parentId string) *GlobalOrganizationOperationsDeleteCall { - c.urlParams_.Set("parentId", parentId) - return c +type InstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
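A minimal sketch of a conditional, partial Get that combines the Fields and IfNoneMatch options documented above, again reusing svc and ctx from the earlier sketch; previousETag stands in for an ETag captured from a prior response header, the resource names are placeholders, and the googleapi package is "google.golang.org/api/googleapi".

previousETag := "..." // assumed to have been saved from an earlier response
igm, err := svc.InstanceGroupManagers.
	Get("my-project", "us-central1-a", "example-igm").
	Fields("name", "targetSize"). // partial response: only these fields
	IfNoneMatch(previousETag).    // ask the server to skip an unchanged resource
	Context(ctx).
	Do()
if googleapi.IsNotModified(err) {
	log.Print("not modified since previousETag; keep the cached copy")
} else if err != nil {
	log.Fatal(err)
} else {
	log.Printf("fetched %s (targetSize=%d)", igm.Name, igm.TargetSize)
}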
-func (c *GlobalOrganizationOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// ListManagedInstances: Lists all of the instances in the managed +// instance group. Each instance in the list has a currentAction, which +// indicates the action that the managed instance group is performing on +// the instance. For example, if the group is still creating an +// instance, the currentAction is CREATING. If a previous action failed, +// the list displays the errors for that failed action. The orderBy +// query parameter is not supported. The `pageToken` query parameter is +// supported only in the alpha and beta API and only if the group's +// `listManagedInstancesResults` field is set to `PAGINATED`. +// +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. +func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { + c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalOrganizationOperationsDeleteCall) Context(ctx context.Context) *GlobalOrganizationOperationsDeleteCall { - c.ctx_ = ctx +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. 
For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("filter", filter) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalOrganizationOperationsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalOrganizationOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations/{operation}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalOrganizationOperations.delete" call. -func (c *GlobalOrganizationOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Deletes the specified Operations resource.", - // "flatPath": "locations/global/operations/{operation}", - // "httpMethod": "DELETE", - // "id": "compute.globalOrganizationOperations.delete", - // "parameterOrder": [ - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "parentId": { - // "description": "Parent ID for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "locations/global/operations/{operation}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c } -// method id "compute.globalOrganizationOperations.get": - -type GlobalOrganizationOperationsGetCall struct { - s *Service - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c } -// Get: Retrieves the specified Operations resource. Gets a list of -// operations by making a `list()` request. -// -// - operation: Name of the Operations resource to return. -func (r *GlobalOrganizationOperationsService) Get(operation string) *GlobalOrganizationOperationsGetCall { - c := &GlobalOrganizationOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.operation = operation +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } -// ParentId sets the optional parameter "parentId": Parent ID for this -// request. -func (c *GlobalOrganizationOperationsGetCall) ParentId(parentId string) *GlobalOrganizationOperationsGetCall { - c.urlParams_.Set("parentId", parentId) +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *InstanceGroupManagersListManagedInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *GlobalOrganizationOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsGetCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOrganizationOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOrganizationOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOrganizationOperationsGetCall) Context(ctx context.Context) *GlobalOrganizationOperationsGetCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOrganizationOperationsGetCall) Header() http.Header { +func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOrganizationOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "operation": c.operation, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOrganizationOperations.get" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalOrganizationOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. +// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or +// error will be non-nil. 
Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head +// er or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &InstanceGroupManagersListManagedInstancesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75397,30 +90977,67 @@ func (c *GlobalOrganizationOperationsGetCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves the specified Operations resource. Gets a list of operations by making a `list()` request.", - // "flatPath": "locations/global/operations/{operation}", - // "httpMethod": "GET", - // "id": "compute.globalOrganizationOperations.get", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.listManagedInstances", // "parameterOrder": [ - // "operation" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "parentId": { - // "description": "Parent ID for this request.", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", + // "type": "boolean" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "locations/global/operations/{operation}", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupManagersListManagedInstancesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -75431,37 +91048,73 @@ func (c *GlobalOrganizationOperationsGetCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.globalOrganizationOperations.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListManagedInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type GlobalOrganizationOperationsListCall struct { - s *Service - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroupManagers.listPerInstanceConfigs": + +type InstanceGroupManagersListPerInstanceConfigsCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of Operation resources contained within the -// specified organization. -func (r *GlobalOrganizationOperationsService) List() *GlobalOrganizationOperationsListCall { - c := &GlobalOrganizationOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListPerInstanceConfigs: Lists all of the per-instance configurations +// defined for the managed instance group. The orderBy query parameter +// is not supported. +// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. +func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListPerInstanceConfigsCall { + c := &InstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. 
The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -75469,8 +91122,18 @@ func (r *GlobalOrganizationOperationsService) List() *GlobalOrganizationOperatio // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *GlobalOrganizationOperationsListCall) Filter(filter string) *GlobalOrganizationOperationsListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("filter", filter) return c } @@ -75481,7 +91144,7 @@ func (c *GlobalOrganizationOperationsListCall) Filter(filter string) *GlobalOrga // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *GlobalOrganizationOperationsListCall) MaxResults(maxResults int64) *GlobalOrganizationOperationsListCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -75495,7 +91158,7 @@ func (c *GlobalOrganizationOperationsListCall) MaxResults(maxResults int64) *Glo // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *GlobalOrganizationOperationsListCall) OrderBy(orderBy string) *GlobalOrganizationOperationsListCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -75503,23 +91166,16 @@ func (c *GlobalOrganizationOperationsListCall) OrderBy(orderBy string) *GlobalOr // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *GlobalOrganizationOperationsListCall) PageToken(pageToken string) *GlobalOrganizationOperationsListCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("pageToken", pageToken) return c } -// ParentId sets the optional parameter "parentId": Parent ID for this -// request. -func (c *GlobalOrganizationOperationsListCall) ParentId(parentId string) *GlobalOrganizationOperationsListCall { - c.urlParams_.Set("parentId", parentId) - return c -} - // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *GlobalOrganizationOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOrganizationOperationsListCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -75527,88 +91183,84 @@ func (c *GlobalOrganizationOperationsListCall) ReturnPartialSuccess(returnPartia // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOrganizationOperationsListCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsListCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOrganizationOperationsListCall) IfNoneMatch(entityTag string) *GlobalOrganizationOperationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOrganizationOperationsListCall) Context(ctx context.Context) *GlobalOrganizationOperationsListCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersListPerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOrganizationOperationsListCall) Header() http.Header { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOrganizationOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOrganizationOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +// Do executes the "compute.instanceGroupManagers.listPerInstanceConfigs" call. +// Exactly one of *InstanceGroupManagersListPerInstanceConfigsResp or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListPerInstanceConfigsResp.ServerResponse.Header +// +// or (if a response was returned at all) in +// +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListPerInstanceConfigsResp, error) { gensupport.SetOptions(c.urlParams_, opts...) 
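	// gensupport.SetOptions above copies any per-call options into the URL
	// parameters; doRequest below then builds and sends the POST request.
	// A 304 Not Modified response is converted into a *googleapi.Error
	// (wrapped by gensupport.WrapError in this release) rather than a result.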
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &OperationList{ + ret := &InstanceGroupManagersListPerInstanceConfigsResp{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75620,16 +91272,27 @@ func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified organization.", - // "flatPath": "locations/global/operations", - // "httpMethod": "GET", - // "id": "compute.globalOrganizationOperations.list", + // "description": "Lists all of the per-instance configurations defined for the managed instance group. The orderBy query parameter is not supported.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.listPerInstanceConfigs", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -75648,20 +91311,28 @@ func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) // "location": "query", // "type": "string" // }, - // "parentId": { - // "description": "Parent ID for this request.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, // "returnPartialSuccess": { // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "locations/global/operations", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", // "response": { - // "$ref": "OperationList" + // "$ref": "InstanceGroupManagersListPerInstanceConfigsResp" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -75675,7 +91346,7 @@ func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *GlobalOrganizationOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListPerInstanceConfigsResp) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -75693,26 +91364,41 @@ func (c *GlobalOrganizationOperationsListCall) Pages(ctx context.Context, f func } } -// method id "compute.globalPublicDelegatedPrefixes.delete": +// method id "compute.instanceGroupManagers.patch": -type GlobalPublicDelegatedPrefixesDeleteCall struct { - s *Service - project string - publicDelegatedPrefix string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersPatchCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified global PublicDelegatedPrefix. +// Patch: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is patched even if the instances in the group are still in the +// process of being patched. You must separately verify the status of +// the individual instances with the listManagedInstances method. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. If you update your group to specify a new +// template or instance configuration, it's possible that your intended +// specification for each VM in the group is different from the current +// state of that VM. To learn how to apply an updated configuration to +// the VMs in a MIG, see Updating instances in a MIG. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to delete. -func (r *GlobalPublicDelegatedPrefixesService) Delete(project string, publicDelegatedPrefix string) *GlobalPublicDelegatedPrefixesDeleteCall { - c := &GlobalPublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the instance group manager. +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the managed +// instance group. 
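//
// A minimal usage sketch, assuming Application Default Credentials and
// illustrative project, zone and group names (imports: context, log, and
// google.golang.org/api/compute/v1):
//
//	ctx := context.Background()
//	svc, err := compute.NewService(ctx)
//	if err != nil {
//		log.Fatal(err)
//	}
//	mig := &compute.InstanceGroupManager{Description: "updated via PATCH"}
//	op, err := svc.InstanceGroupManagers.Patch("my-project", "us-central1-a", "example-mig", mig).Context(ctx).Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	// Patch is asynchronous: poll op (a zonal Operation), then verify the
//	// individual instances with ListManagedInstances.
//	_ = op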
+func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { + c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.publicDelegatedPrefix = publicDelegatedPrefix + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager return c } @@ -75727,7 +91413,7 @@ func (r *GlobalPublicDelegatedPrefixesService) Delete(project string, publicDele // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *GlobalPublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *GlobalPublicDelegatedPrefixesDeleteCall { +func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -75735,7 +91421,7 @@ func (c *GlobalPublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *G // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalPublicDelegatedPrefixesDeleteCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesDeleteCall { +func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -75743,21 +91429,21 @@ func (c *GlobalPublicDelegatedPrefixesDeleteCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalPublicDelegatedPrefixesDeleteCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesDeleteCall { +func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalPublicDelegatedPrefixesDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalPublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -75765,47 +91451,53 @@ func (c *GlobalPublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.R } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "publicDelegatedPrefix": c.publicDelegatedPrefix, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalPublicDelegatedPrefixes.delete" call. +// Do executes the "compute.instanceGroupManagers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalPublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -75819,26 +91511,26 @@ func (c *GlobalPublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Deletes the specified global PublicDelegatedPrefix.", - // "flatPath": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", - // "httpMethod": "DELETE", - // "id": "compute.globalPublicDelegatedPrefixes.delete", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. 
To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "httpMethod": "PATCH", + // "id": "compute.instanceGroupManagers.patch", // "parameterOrder": [ // "project", - // "publicDelegatedPrefix" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "publicDelegatedPrefix": { - // "description": "Name of the PublicDelegatedPrefix resource to delete.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -75846,9 +91538,18 @@ func (c *GlobalPublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOptio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "request": { + // "$ref": "InstanceGroupManager" + // }, // "response": { // "$ref": "Operation" // }, @@ -75860,119 +91561,136 @@ func (c *GlobalPublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOptio } -// method id "compute.globalPublicDelegatedPrefixes.get": +// method id "compute.instanceGroupManagers.patchPerInstanceConfigs": -type GlobalPublicDelegatedPrefixesGetCall struct { - s *Service - project string - publicDelegatedPrefix string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersPatchPerInstanceConfigsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerspatchperinstanceconfigsreq *InstanceGroupManagersPatchPerInstanceConfigsReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified global PublicDelegatedPrefix resource. +// PatchPerInstanceConfigs: Inserts or patches per-instance +// configurations for the managed instance group. 
perInstanceConfig.name +// serves as a key used to distinguish whether to perform insert or +// patch. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to return. -func (r *GlobalPublicDelegatedPrefixesService) Get(project string, publicDelegatedPrefix string) *GlobalPublicDelegatedPrefixesGetCall { - c := &GlobalPublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. +func (r *InstanceGroupManagersService) PatchPerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagerspatchperinstanceconfigsreq *InstanceGroupManagersPatchPerInstanceConfigsReq) *InstanceGroupManagersPatchPerInstanceConfigsCall { + c := &InstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.publicDelegatedPrefix = publicDelegatedPrefix + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerspatchperinstanceconfigsreq = instancegroupmanagerspatchperinstanceconfigsreq + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(requestId string) *InstanceGroupManagersPatchPerInstanceConfigsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalPublicDelegatedPrefixesGetCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesGetCall { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchPerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalPublicDelegatedPrefixesGetCall) IfNoneMatch(entityTag string) *GlobalPublicDelegatedPrefixesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *GlobalPublicDelegatedPrefixesGetCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesGetCall { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersPatchPerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalPublicDelegatedPrefixesGetCall) Header() http.Header { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalPublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerspatchperinstanceconfigsreq) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "publicDelegatedPrefix": c.publicDelegatedPrefix, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalPublicDelegatedPrefixes.get" call. -// Exactly one of *PublicDelegatedPrefix or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *PublicDelegatedPrefix.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalPublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefix, error) { +// Do executes the "compute.instanceGroupManagers.patchPerInstanceConfigs" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &PublicDelegatedPrefix{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75984,15 +91702,22 @@ func (c *GlobalPublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns the specified global PublicDelegatedPrefix resource.", - // "flatPath": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", - // "httpMethod": "GET", - // "id": "compute.globalPublicDelegatedPrefixes.get", + // "description": "Inserts or patches per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.patchPerInstanceConfigs", // "parameterOrder": [ // "project", - // "publicDelegatedPrefix" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -76000,46 +91725,68 @@ func (c *GlobalPublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "publicDelegatedPrefix": { - // "description": "Name of the PublicDelegatedPrefix resource to return.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", + // "request": { + // "$ref": "InstanceGroupManagersPatchPerInstanceConfigsReq" + // }, // "response": { - // "$ref": "PublicDelegatedPrefix" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalPublicDelegatedPrefixes.insert": +// method id "compute.instanceGroupManagers.recreateInstances": -type GlobalPublicDelegatedPrefixesInsertCall struct { - s *Service - project string - publicdelegatedprefix *PublicDelegatedPrefix - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a global PublicDelegatedPrefix in the specified -// project using the parameters that are included in the request. +// RecreateInstances: Flags the specified VM instances in the managed +// instance group to be immediately recreated. Each instance is +// recreated using the group's current configuration. This operation is +// marked as DONE when the flag is set even if the instances have not +// yet been recreated. You must separately verify the status of each +// instance by checking its currentAction field; for more information, +// see Checking the status of managed instances. If the group is part of +// a backend service that has enabled connection draining, it can take +// up to 60 seconds after the connection draining duration has elapsed +// before the VM instance is removed or deleted. You can specify a +// maximum of 1000 instances with this method per request. // -// - project: Project ID for this request. -func (r *GlobalPublicDelegatedPrefixesService) Insert(project string, publicdelegatedprefix *PublicDelegatedPrefix) *GlobalPublicDelegatedPrefixesInsertCall { - c := &GlobalPublicDelegatedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
+func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { + c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.publicdelegatedprefix = publicdelegatedprefix + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest return c } @@ -76054,7 +91801,7 @@ func (r *GlobalPublicDelegatedPrefixesService) Insert(project string, publicdele // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *GlobalPublicDelegatedPrefixesInsertCall) RequestId(requestId string) *GlobalPublicDelegatedPrefixesInsertCall { +func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -76062,7 +91809,7 @@ func (c *GlobalPublicDelegatedPrefixesInsertCall) RequestId(requestId string) *G // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalPublicDelegatedPrefixesInsertCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesInsertCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76070,21 +91817,21 @@ func (c *GlobalPublicDelegatedPrefixesInsertCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalPublicDelegatedPrefixesInsertCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesInsertCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
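For the recreateInstances call introduced above, a hedged sketch of how a caller might flag specific VMs for recreation. It assumes a *compute.Service constructed as in the earlier sketch, that InstanceGroupManagersRecreateInstancesRequest carries instance URLs in an Instances field (the struct is not shown in this hunk), and placeholder resource names. Per the doc comment, the returned Operation is DONE as soon as the flag is set, so actual recreation has to be confirmed separately via each managed instance's currentAction.

```go
package migops // hypothetical helper package for these sketches

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// recreate flags the named instances for recreation with the group's
// current configuration (at most 1000 instances per request).
func recreate(ctx context.Context, svc *compute.Service, project, zone, igm string) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersRecreateInstancesRequest{
		// Partial or full instance URLs; placeholders here.
		Instances: []string{"zones/us-central1-a/instances/my-mig-abcd"},
	}
	op, err := svc.InstanceGroupManagers.
		RecreateInstances(project, zone, igm, req).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	// op reaches DONE once the flag is set; verify recreation separately by
	// checking each instance's currentAction.
	return op, nil
}
```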
-func (c *GlobalPublicDelegatedPrefixesInsertCall) Header() http.Header { +func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalPublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -76092,14 +91839,14 @@ func (c *GlobalPublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.R } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -76107,36 +91854,38 @@ func (c *GlobalPublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.R } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalPublicDelegatedPrefixes.insert" call. +// Do executes the "compute.instanceGroupManagers.recreateInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalPublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -76150,14 +91899,22 @@ func (c *GlobalPublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Creates a global PublicDelegatedPrefix in the specified project using the parameters that are included in the request.", - // "flatPath": "projects/{project}/global/publicDelegatedPrefixes", + // "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. 
Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", // "httpMethod": "POST", - // "id": "compute.globalPublicDelegatedPrefixes.insert", + // "id": "compute.instanceGroupManagers.recreateInstances", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -76169,11 +91926,17 @@ func (c *GlobalPublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOptio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/publicDelegatedPrefixes", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", // "request": { - // "$ref": "PublicDelegatedPrefix" + // "$ref": "InstanceGroupManagersRecreateInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -76186,183 +91949,145 @@ func (c *GlobalPublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOptio } -// method id "compute.globalPublicDelegatedPrefixes.list": +// method id "compute.instanceGroupManagers.resize": -type GlobalPublicDelegatedPrefixesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersResizeCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Lists the global PublicDelegatedPrefixes for a project. +// Resize: Resizes the managed instance group. If you increase the size, +// the group creates new instances using the current instance template. +// If you decrease the size, the group deletes instances. 
The resize +// operation is marked DONE when the resize actions are scheduled even +// if the group has not yet added or deleted any instances. You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. When resizing down, the instance +// group arbitrarily chooses the order in which VMs are deleted. The +// group takes into account some VM attributes when making the selection +// including: + The status of the VM instance. + The health of the VM +// instance. + The instance template version the VM is based on. + For +// regional managed instance groups, the location of the VM instance. +// This list is subject to change. If the group is part of a backend +// service that has enabled connection draining, it can take up to 60 +// seconds after the connection draining duration has elapsed before the +// VM instance is removed or deleted. // -// - project: Project ID for this request. -func (r *GlobalPublicDelegatedPrefixesService) List(project string) *GlobalPublicDelegatedPrefixesListCall { - c := &GlobalPublicDelegatedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - size: The number of running instances that the managed instance +// group should maintain at any given time. The group automatically +// adds or removes instances to maintain the number of instances +// specified by this parameter. +// - zone: The name of the zone where the managed instance group is +// located. +func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { + c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. 
For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *GlobalPublicDelegatedPrefixesListCall) Filter(filter string) *GlobalPublicDelegatedPrefixesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *GlobalPublicDelegatedPrefixesListCall) MaxResults(maxResults int64) *GlobalPublicDelegatedPrefixesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *GlobalPublicDelegatedPrefixesListCall) OrderBy(orderBy string) *GlobalPublicDelegatedPrefixesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *GlobalPublicDelegatedPrefixesListCall) PageToken(pageToken string) *GlobalPublicDelegatedPrefixesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *GlobalPublicDelegatedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalPublicDelegatedPrefixesListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
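The resize method above takes the target size as a required parameter and returns an Operation that is DONE once the resize actions are scheduled, not once instances have been added or deleted. A small sketch, assuming the same vendored compute client and placeholder identifiers; the requestId is a placeholder UUID that a caller would reuse only when retrying the identical request.

```go
package migops

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// resize asks the group to converge on targetSize. Progress is checked
// separately (e.g. with listManagedInstances), since the returned Operation
// only reflects that the resize was scheduled.
func resize(ctx context.Context, svc *compute.Service, project, zone, igm string, targetSize int64) (*compute.Operation, error) {
	op, err := svc.InstanceGroupManagers.
		Resize(project, zone, igm, targetSize).
		RequestId("9b1de1a0-0d3c-4b9f-8a7e-6c5d4e3f2a1b"). // placeholder UUID for idempotent retries
		Context(ctx).
		Do()
	if err != nil {
		return nil, fmt.Errorf("resize %s/%s/%s to %d: %w", project, zone, igm, targetSize, err)
	}
	return op, nil
}
```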
-func (c *GlobalPublicDelegatedPrefixesListCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesListCall { +func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalPublicDelegatedPrefixesListCall) IfNoneMatch(entityTag string) *GlobalPublicDelegatedPrefixesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalPublicDelegatedPrefixesListCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesListCall { +func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalPublicDelegatedPrefixesListCall) Header() http.Header { +func (c *InstanceGroupManagersResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalPublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalPublicDelegatedPrefixes.list" call. -// Exactly one of *PublicDelegatedPrefixList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *PublicDelegatedPrefixList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalPublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixList, error) { +// Do executes the "compute.instanceGroupManagers.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &PublicDelegatedPrefixList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -76374,108 +92099,90 @@ func (c *GlobalPublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the global PublicDelegatedPrefixes for a project.", - // "flatPath": "projects/{project}/global/publicDelegatedPrefixes", - // "httpMethod": "GET", - // "id": "compute.globalPublicDelegatedPrefixes.list", + // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method. When resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including: + The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance. This list is subject to change. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.resize", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager", + // "size" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "size": { + // "description": "The number of running instances that the managed instance group should maintain at any given time. 
The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + // "format": "int32", // "location": "query", - // "type": "string" + // "required": true, + // "type": "integer" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/global/publicDelegatedPrefixes", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { - // "$ref": "PublicDelegatedPrefixList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalPublicDelegatedPrefixesListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalPublicDelegatedPrefixes.patch": +// method id "compute.instanceGroupManagers.setInstanceTemplate": -type GlobalPublicDelegatedPrefixesPatchCall struct { - s *Service - project string - publicDelegatedPrefix string - publicdelegatedprefix *PublicDelegatedPrefix - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified global PublicDelegatedPrefix resource -// with the data included in the request. This method supports PATCH -// semantics and uses JSON merge patch format and processing rules. +// SetInstanceTemplate: Specifies the instance template to use when +// creating new instances in this group. The templates for existing +// instances in the group do not change unless you run +// recreateInstances, run applyUpdatesToInstances, or set the group's +// updatePolicy.type to PROACTIVE. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to patch. 
-func (r *GlobalPublicDelegatedPrefixesService) Patch(project string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *GlobalPublicDelegatedPrefixesPatchCall { - c := &GlobalPublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. +func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { + c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.publicDelegatedPrefix = publicDelegatedPrefix - c.publicdelegatedprefix = publicdelegatedprefix + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest return c } @@ -76490,7 +92197,7 @@ func (r *GlobalPublicDelegatedPrefixesService) Patch(project string, publicDeleg // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *GlobalPublicDelegatedPrefixesPatchCall) RequestId(requestId string) *GlobalPublicDelegatedPrefixesPatchCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -76498,7 +92205,7 @@ func (c *GlobalPublicDelegatedPrefixesPatchCall) RequestId(requestId string) *Gl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalPublicDelegatedPrefixesPatchCall) Fields(s ...googleapi.Field) *GlobalPublicDelegatedPrefixesPatchCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76506,21 +92213,21 @@ func (c *GlobalPublicDelegatedPrefixesPatchCall) Fields(s ...googleapi.Field) *G // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalPublicDelegatedPrefixesPatchCall) Context(ctx context.Context) *GlobalPublicDelegatedPrefixesPatchCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
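To illustrate the setInstanceTemplate behavior described above (existing instances keep their old template until they are recreated, applyUpdatesToInstances is called, or updatePolicy.type is PROACTIVE), here is a minimal sketch. The InstanceTemplate request field and the template URL are assumptions inferred from the request type named in the metadata, not values shown in this hunk.

```go
package migops

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// setTemplate points the group at a new instance template for future creates.
func setTemplate(ctx context.Context, svc *compute.Service, project, zone, igm, templateURL string) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersSetInstanceTemplateRequest{
		InstanceTemplate: templateURL, // e.g. "global/instanceTemplates/web-v2" (placeholder)
	}
	return svc.InstanceGroupManagers.
		SetInstanceTemplate(project, zone, igm, req).
		Context(ctx).
		Do()
}
```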
-func (c *GlobalPublicDelegatedPrefixesPatchCall) Header() http.Header { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalPublicDelegatedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -76528,52 +92235,53 @@ func (c *GlobalPublicDelegatedPrefixesPatchCall) doRequest(alt string) (*http.Re } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "publicDelegatedPrefix": c.publicDelegatedPrefix, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalPublicDelegatedPrefixes.patch" call. +// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalPublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -76587,26 +92295,26 @@ func (c *GlobalPublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Patches the specified global PublicDelegatedPrefix resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", - // "httpMethod": "PATCH", - // "id": "compute.globalPublicDelegatedPrefixes.patch", + // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ // "project", - // "publicDelegatedPrefix" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "publicDelegatedPrefix": { - // "description": "Name of the PublicDelegatedPrefix resource to patch.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -76614,327 +92322,60 @@ func (c *GlobalPublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // } - // }, - // "path": "projects/{project}/global/publicDelegatedPrefixes/{publicDelegatedPrefix}", - // "request": { - // "$ref": "PublicDelegatedPrefix" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.healthChecks.aggregatedList": - -type HealthChecksAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves the list of all HealthCheck resources, -// regional and global, available to the specified project. -// -// - project: Name of the project scoping this request. 
-func (r *HealthChecksService) AggregatedList(project string) *HealthChecksAggregatedListCall { - c := &HealthChecksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *HealthChecksAggregatedListCall) Filter(filter string) *HealthChecksAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *HealthChecksAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *HealthChecksAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *HealthChecksAggregatedListCall) MaxResults(maxResults int64) *HealthChecksAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". 
This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *HealthChecksAggregatedListCall) OrderBy(orderBy string) *HealthChecksAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *HealthChecksAggregatedListCall) PageToken(pageToken string) *HealthChecksAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *HealthChecksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HealthChecksAggregatedListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HealthChecksAggregatedListCall) Fields(s ...googleapi.Field) *HealthChecksAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HealthChecksAggregatedListCall) IfNoneMatch(entityTag string) *HealthChecksAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HealthChecksAggregatedListCall) Context(ctx context.Context) *HealthChecksAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HealthChecksAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/healthChecks") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.healthChecks.aggregatedList" call. -// Exactly one of *HealthChecksAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *HealthChecksAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*HealthChecksAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &HealthChecksAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project.", - // "flatPath": "projects/{project}/aggregated/healthChecks", - // "httpMethod": "GET", - // "id": "compute.healthChecks.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" // }, - // "project": { - // "description": "Name of the project scoping this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/healthChecks", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { + // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + // }, // "response": { - // "$ref": "HealthChecksAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *HealthChecksAggregatedListCall) Pages(ctx context.Context, f func(*HealthChecksAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.healthChecks.delete": +// method id "compute.instanceGroupManagers.setTargetPools": -type HealthChecksDeleteCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HealthCheck resource. -// -// - healthCheck: Name of the HealthCheck resource to delete. -// - project: Project ID for this request. -func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { - c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTargetPools: Modifies the target pools to which all instances in +// this managed instance group are assigned. The target pools +// automatically apply to all of the instances in the managed instance +// group. This operation is marked DONE when you make the request even +// if the instances have not yet been added to their target pools. The +// change might take some time to apply to all of the instances in the +// group depending on the size of the group. +// +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. +func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { + c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest return c } @@ -76949,7 +92390,7 @@ func (r *HealthChecksService) Delete(project string, healthCheck string) *Health // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -76957,7 +92398,7 @@ func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDelete // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
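// Illustrative sketch (editor's addition, not part of the vendored diff): one way a
// caller might drive the InstanceGroupManagers.SetTargetPools builder introduced
// above. The project, zone, group name, target-pool URL and request ID below are
// placeholder assumptions; supply your own unique UUID so a retried request is
// ignored by the server rather than applied twice, as the RequestId docs explain.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	req := &compute.InstanceGroupManagersSetTargetPoolsRequest{
		// Assumed pool URL; every instance in the group is (re)assigned to these pools.
		TargetPools: []string{"projects/my-project/regions/us-central1/targetPools/my-pool"},
	}
	op, err := svc.InstanceGroupManagers.
		SetTargetPools("my-project", "us-central1-a", "my-igm", req).
		RequestId("3f1a2b4c-5d6e-47f8-9a0b-1c2d3e4f5a6b"). // placeholder UUID for idempotent retries
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	// The call returns a long-running Operation; DONE here does not mean the
	// instances have already been moved to the pools (see the method docs above).
	log.Printf("operation %s: %s", op.Name, op.Status)
}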
-func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76965,21 +92406,21 @@ func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDelet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -76987,47 +92428,53 @@ func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.delete" call. +// Do executes the "compute.instanceGroupManagers.setTargetPools" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -77041,19 +92488,19 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Deletes the specified HealthCheck resource.", - // "flatPath": "projects/{project}/global/healthChecks/{healthCheck}", - // "httpMethod": "DELETE", - // "id": "compute.healthChecks.delete", + // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setTargetPools", // "parameterOrder": [ // "project", - // "healthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to delete.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -77068,9 +92515,18 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/healthChecks/{healthCheck}", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "request": { + // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -77082,119 +92538,136 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.healthChecks.get": +// method id "compute.instanceGroupManagers.updatePerInstanceConfigs": -type HealthChecksGetCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersUpdatePerInstanceConfigsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. +// UpdatePerInstanceConfigs: Inserts or updates per-instance +// configurations for the managed instance group. perInstanceConfig.name +// serves as a key used to distinguish whether to perform insert or +// patch. // -// - healthCheck: Name of the HealthCheck resource to return. -// - project: Project ID for this request. -func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { - c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. +func (r *InstanceGroupManagersService) UpdatePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq) *InstanceGroupManagersUpdatePerInstanceConfigsCall { + c := &InstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersupdateperinstanceconfigsreq = instancegroupmanagersupdateperinstanceconfigsreq + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. 
The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *InstanceGroupManagersUpdatePerInstanceConfigsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdatePerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersUpdatePerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksGetCall) Header() http.Header { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersupdateperinstanceconfigsreq) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.get" call. 
-// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx +// Do executes the "compute.instanceGroupManagers.updatePerInstanceConfigs" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *HealthCheck.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &HealthCheck{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77206,19 +92679,19 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", - // "flatPath": "projects/{project}/global/healthChecks/{healthCheck}", - // "httpMethod": "GET", - // "id": "compute.healthChecks.get", + // "description": "Inserts or updates per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.updatePerInstanceConfigs", // "parameterOrder": [ // "project", - // "healthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to return.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -77228,40 +92701,61 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/healthChecks/{healthCheck}", + // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "request": { + // "$ref": "InstanceGroupManagersUpdatePerInstanceConfigsReq" + // }, // "response": { - // "$ref": "HealthCheck" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.healthChecks.insert": +// method id "compute.instanceGroups.addInstances": -type HealthChecksInsertCall struct { - s *Service - project string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsAddInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. +// AddInstances: Adds a list of instances to the specified instance +// group. All of the instances in the instance group must be in the same +// network/subnetwork. Read Adding instances for more information. // -// - project: Project ID for this request. -func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { - c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroup: The name of the instance group where you are adding +// instances. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. +func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { + c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthcheck = healthcheck + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest return c } @@ -77276,7 +92770,7 @@ func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) * // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { +func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -77284,7 +92778,7 @@ func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsert // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { +func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77292,21 +92786,21 @@ func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInser // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { +func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksInsertCall) Header() http.Header { +func (c *InstanceGroupsAddInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -77314,14 +92808,14 @@ func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -77329,36 +92823,38 @@ func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.insert" call. +// Do executes the "compute.instanceGroups.addInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -77372,14 +92868,22 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/global/healthChecks", + // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", // "httpMethod": "POST", - // "id": "compute.healthChecks.insert", + // "id": "compute.instanceGroups.addInstances", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroup" // ], // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where you are adding instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -77391,11 +92895,17 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/healthChecks", + // "path": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", // "request": { - // "$ref": "HealthCheck" + // "$ref": "InstanceGroupsAddInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -77408,9 +92918,9 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.healthChecks.list": +// method id "compute.instanceGroups.aggregatedList": -type HealthChecksListCall struct { +type InstanceGroupsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -77419,30 +92929,33 @@ type HealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HealthCheck resources available to the -// specified project. +// AggregatedList: Retrieves the list of instance groups and sorts them +// by zone. 
// // - project: Project ID for this request. -func (r *HealthChecksService) List(project string) *HealthChecksListCall { - c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { + c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -77450,19 +92963,42 @@ func (r *HealthChecksService) List(project string) *HealthChecksListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *InstanceGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than `maxResults`, Compute Engine returns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { +func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -77476,7 +93012,7 @@ func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCal // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { +func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -77484,7 +93020,7 @@ func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { +func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -77493,7 +93029,7 @@ func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. 
-func (c *HealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HealthChecksListCall { +func (c *InstanceGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -77501,7 +93037,7 @@ func (c *HealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) * // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { +func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77511,7 +93047,7 @@ func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { +func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -77519,21 +93055,21 @@ func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { +func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksListCall) Header() http.Header { +func (c *InstanceGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -77546,7 +93082,7 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/instanceGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -77559,33 +93095,33 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.list" call. -// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheckList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceGroups.aggregatedList" call. 
+// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { +func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &HealthCheckList{ + ret := &InstanceGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77597,19 +93133,24 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis } return ret, nil // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project.", - // "flatPath": "projects/{project}/global/healthChecks", + // "description": "Retrieves the list of instance groups and sorts them by zone.", + // "flatPath": "projects/{project}/aggregated/instanceGroups", // "httpMethod": "GET", - // "id": "compute.healthChecks.list", + // "id": "compute.instanceGroups.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", @@ -77641,9 +93182,9 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // "type": "boolean" // } // }, - // "path": "projects/{project}/global/healthChecks", + // "path": "projects/{project}/aggregated/instanceGroups", // "response": { - // "$ref": "HealthCheckList" + // "$ref": "InstanceGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -77657,7 +93198,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { +func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -77675,29 +93216,31 @@ func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckLis } } -// method id "compute.healthChecks.patch": +// method id "compute.instanceGroups.delete": -type HealthChecksPatchCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsDeleteCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HealthCheck resource in the specified project using -// the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. +// Delete: Deletes the specified instance group. The instances in the +// group are not deleted. Note that instance group must not belong to a +// backend service. Read Deleting an instance group for more +// information. // -// - healthCheck: Name of the HealthCheck resource to patch. +// - instanceGroup: The name of the instance group to delete. // - project: Project ID for this request. -func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { - c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone where the instance group is located. +func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { + c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck + c.zone = zone + c.instanceGroup = instanceGroup return c } @@ -77712,7 +93255,7 @@ func (r *HealthChecksService) Patch(project string, healthCheck string, healthch // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { +func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -77720,7 +93263,7 @@ func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCa // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { +func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77728,21 +93271,21 @@ func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { +func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksPatchCall) Header() http.Header { +func (c *InstanceGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -77750,52 +93293,48 @@ func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.patch" call. +// Do executes the "compute.instanceGroups.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -77809,19 +93348,19 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/global/healthChecks/{healthCheck}", - // "httpMethod": "PATCH", - // "id": "compute.healthChecks.patch", + // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroups.delete", // "parameterOrder": [ // "project", - // "healthCheck" + // "zone", + // "instanceGroup" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to patch.", + // "instanceGroup": { + // "description": "The name of the instance group to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -77836,12 +93375,15 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/healthChecks/{healthCheck}", - // "request": { - // "$ref": "HealthCheck" - // }, + // "path": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}", // "response": { // "$ref": "Operation" // }, @@ -77853,128 +93395,125 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.healthChecks.update": +// method id "compute.instanceGroups.get": -type HealthChecksUpdateCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsGetCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates a HealthCheck resource in the specified project using -// the data included in the request. +// Get: Returns the specified zonal instance group. Get a list of +// available zonal instance groups by making a list() request. For +// managed instance groups, use the instanceGroupManagers or +// regionInstanceGroupManagers methods instead. // -// - healthCheck: Name of the HealthCheck resource to update. +// - instanceGroup: The name of the instance group. // - project: Project ID for this request. -func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { - c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone where the instance group is located. +func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { + c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.instanceGroup = instanceGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { +func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { +func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksUpdateCall) Header() http.Header { +func (c *InstanceGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.instanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77986,19 +93525,19 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/global/healthChecks/{healthCheck}", - // "httpMethod": "PUT", - // "id": "compute.healthChecks.update", + // "description": "Returns the specified zonal instance group. Get a list of available zonal instance groups by making a list() request. For managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.get", // "parameterOrder": [ // "project", - // "healthCheck" + // "zone", + // "instanceGroup" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to update.", + // "instanceGroup": { + // "description": "The name of the instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -78009,46 +93548,49 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/healthChecks/{healthCheck}", - // "request": { - // "$ref": "HealthCheck" - // }, + // "path": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.httpHealthChecks.delete": +// method id "compute.instanceGroups.insert": -type HttpHealthChecksDeleteCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsInsertCall struct { + s *Service + project string + zone string + instancegroup *InstanceGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpHealthCheck resource. +// Insert: Creates an instance group in the specified project using the +// parameters that are included in the request. // -// - httpHealthCheck: Name of the HttpHealthCheck resource to delete. -// - project: Project ID for this request. -func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { - c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the instance +// group. +func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { + c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck + c.zone = zone + c.instancegroup = instancegroup return c } @@ -78063,7 +93605,7 @@ func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { +func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -78071,7 +93613,7 @@ func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { +func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78079,21 +93621,21 @@ func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { +func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksDeleteCall) Header() http.Header { +func (c *InstanceGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -78101,47 +93643,52 @@ func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.delete" call. +// Do executes the "compute.instanceGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -78155,22 +93702,15 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified HttpHealthCheck resource.", - // "flatPath": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", - // "httpMethod": "DELETE", - // "id": "compute.httpHealthChecks.delete", + // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroups", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.insert", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "zone" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -78182,9 +93722,18 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "projects/{project}/zones/{zone}/instanceGroups", + // "request": { + // "$ref": "InstanceGroup" + // }, // "response": { // "$ref": "Operation" // }, @@ -78196,34 +93745,117 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpHealthChecks.get": +// method id "compute.instanceGroups.list": -type HttpHealthChecksGetCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HttpHealthCheck resource. Gets a list of -// available HTTP health checks by making a list() request. 
+// List: Retrieves the list of zonal instance group resources contained +// within the specified zone. For managed instance groups, use the +// instanceGroupManagers or regionInstanceGroupManagers methods instead. // -// - httpHealthCheck: Name of the HttpHealthCheck resource to return. // - project: Project ID for this request. -func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { - c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone where the instance group is located. +func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { + c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *InstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { +func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78233,7 +93865,7 @@ func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecks // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { +func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -78241,21 +93873,21 @@ func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthCheck // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { +func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpHealthChecksGetCall) Header() http.Header { +func (c *InstanceGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -78268,7 +93900,7 @@ func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -78276,39 +93908,39 @@ func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.get" call. -// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HttpHealthCheck.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceGroups.list" call. +// Exactly one of *InstanceGroupList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { +func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &HttpHealthCheck{ + ret := &InstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78320,20 +93952,36 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } return ret, nil // { - // "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.", - // "flatPath": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", + // "description": "Retrieves the list of zonal instance group resources contained within the specified zone. 
For managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroups", // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.get", + // "id": "compute.instanceGroups.list", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "zone" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -78342,11 +93990,22 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "projects/{project}/zones/{zone}/instanceGroups", // "response": { - // "$ref": "HttpHealthCheck" + // "$ref": "InstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -78357,206 +94016,75 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } -// method id "compute.httpHealthChecks.insert": - -type HttpHealthChecksInsertCall struct { - s *Service - project string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a HttpHealthCheck resource in the specified project -// using the data included in the request. -// -// - project: Project ID for this request. -func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { - c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httphealthcheck = httphealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpHealthChecksInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpHealthChecks") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, + if err := f(x); err != nil { + return err } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/global/httpHealthChecks", - // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{project}/global/httpHealthChecks", - // "request": { - // "$ref": "HttpHealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - } -// method id "compute.httpHealthChecks.list": +// method id "compute.instanceGroups.listInstances": -type HttpHealthChecksListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupsListInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of HttpHealthCheck resources available to -// the specified project. -// -// - project: Project ID for this request. -func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { - c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListInstances: Lists the instances in the specified instance group. 
+// The orderBy query parameter is not supported. The filter query +// parameter is supported, but only for expressions that use `eq` +// (equal) or `ne` (not equal) operators. +// +// - instanceGroup: The name of the instance group from which you want +// to generate a list of included instances. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. +func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { + c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -78564,8 +94092,18 @@ func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c } @@ -78576,7 +94114,7 @@ func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCa // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { +func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -78590,7 +94128,7 @@ func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthCheck // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { +func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -78598,7 +94136,7 @@ func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { +func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -78607,7 +94145,7 @@ func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecks // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *HttpHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HttpHealthChecksListCall { +func (c *InstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsListInstancesCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -78615,91 +94153,85 @@ func (c *HttpHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess boo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { +func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { +func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksListCall) Header() http.Header { +func (c *InstanceGroupsListInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.list" call. -// Exactly one of *HttpHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpHealthCheckList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceGroups.listInstances" call. +// Exactly one of *InstanceGroupsListInstances or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupsListInstances.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. 
Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { +func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &HttpHealthCheckList{ + ret := &InstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78711,19 +94243,27 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth } return ret, nil // { - // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", - // "flatPath": "projects/{project}/global/httpHealthChecks", - // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.list", + // "description": "Lists the instances in the specified instance group. The orderBy query parameter is not supported. The filter query parameter is supported, but only for expressions that use `eq` (equal) or `ne` (not equal) operators.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.listInstances", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroup" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, + // "instanceGroup": { + // "description": "The name of the instance group from which you want to generate a list of included instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -78753,11 +94293,20 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/httpHealthChecks", + // "path": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "InstanceGroupsListInstancesRequest" + // }, // "response": { - // "$ref": "HttpHealthCheckList" + // "$ref": "InstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -78771,7 +94320,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { +func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -78789,29 +94338,35 @@ func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealth } } -// method id "compute.httpHealthChecks.patch": +// method id "compute.instanceGroups.removeInstances": -type HttpHealthChecksPatchCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsRemoveInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. +// RemoveInstances: Removes one or more instances from the specified +// instance group, but does not delete those instances. If the group is +// part of a backend service that has enabled connection draining, it +// can take up to 60 seconds after the connection draining duration +// before the VM instance is removed or deleted. // -// - httpHealthCheck: Name of the HttpHealthCheck resource to patch. -// - project: Project ID for this request. -func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { - c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroup: The name of the instance group where the specified +// instances will be removed. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. 
+func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { + c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest return c } @@ -78826,7 +94381,7 @@ func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { +func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -78834,7 +94389,7 @@ func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthCheck // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { +func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78842,21 +94397,21 @@ func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { +func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksPatchCall) Header() http.Header { +func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -78864,52 +94419,53 @@ func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.patch" call. +// Do executes the "compute.instanceGroups.removeInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -78923,19 +94479,19 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", - // "httpMethod": "PATCH", - // "id": "compute.httpHealthChecks.patch", + // "description": "Removes one or more instances from the specified instance group, but does not delete those instances. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.removeInstances", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "zone", + // "instanceGroup" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to patch.", + // "instanceGroup": { + // "description": "The name of the instance group where the specified instances will be removed.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -78950,11 +94506,17 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "InstanceGroupsRemoveInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -78967,28 +94529,31 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.httpHealthChecks.update": +// method id "compute.instanceGroups.setNamedPorts": -type HttpHealthChecksUpdateCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsSetNamedPortsCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. +// SetNamedPorts: Sets the named ports for the specified instance group. // -// - httpHealthCheck: Name of the HttpHealthCheck resource to update. -// - project: Project ID for this request. -func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { - c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroup: The name of the instance group where the named ports +// are updated. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. +func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { + c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest return c } @@ -79003,7 +94568,7 @@ func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { +func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79011,7 +94576,7 @@ func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { +func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79019,21 +94584,21 @@ func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { +func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksUpdateCall) Header() http.Header { +func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -79041,52 +94606,53 @@ func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.update" call. +// Do executes the "compute.instanceGroups.setNamedPorts" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -79100,19 +94666,19 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", - // "httpMethod": "PUT", - // "id": "compute.httpHealthChecks.update", + // "description": "Sets the named ports for the specified instance group.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.setNamedPorts", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "zone", + // "instanceGroup" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to update.", + // "instanceGroup": { + // "description": "The name of the instance group where the named ports are updated.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -79127,11 +94693,17 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "InstanceGroupsSetNamedPortsRequest" // }, // "response": { // "$ref": "Operation" @@ -79144,25 +94716,27 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpsHealthChecks.delete": +// method id "compute.instanceTemplates.delete": -type HttpsHealthChecksDeleteCall struct { +type InstanceTemplatesDeleteCall struct { s *Service project string - httpsHealthCheck string + instanceTemplate string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified HttpsHealthCheck resource. +// Delete: Deletes the specified instance template. Deleting an instance +// template is permanent and cannot be undone. It is not possible to +// delete templates that are already in use by a managed instance group. // -// - httpsHealthCheck: Name of the HttpsHealthCheck resource to delete. +// - instanceTemplate: The name of the instance template to delete. // - project: Project ID for this request. -func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { - c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { + c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck + c.instanceTemplate = instanceTemplate return c } @@ -79177,7 +94751,7 @@ func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck strin // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { +func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79185,7 +94759,7 @@ func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthCh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { +func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79193,21 +94767,21 @@ func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { +func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksDeleteCall) Header() http.Header { +func (c *InstanceTemplatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -79217,7 +94791,7 @@ func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -79226,36 +94800,36 @@ func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, err req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "instanceTemplate": c.instanceTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.delete" call. +// Do executes the "compute.instanceTemplates.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -79269,17 +94843,17 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified HttpsHealthCheck resource.", - // "flatPath": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. 
It is not possible to delete templates that are already in use by a managed instance group.", + // "flatPath": "projects/{project}/global/instanceTemplates/{instanceTemplate}", // "httpMethod": "DELETE", - // "id": "compute.httpsHealthChecks.delete", + // "id": "compute.instanceTemplates.delete", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "instanceTemplate" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to delete.", + // "instanceTemplate": { + // "description": "The name of the instance template to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -79298,7 +94872,7 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "projects/{project}/global/instanceTemplates/{instanceTemplate}", // "response": { // "$ref": "Operation" // }, @@ -79310,34 +94884,34 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.httpsHealthChecks.get": +// method id "compute.instanceTemplates.get": -type HttpsHealthChecksGetCall struct { +type InstanceTemplatesGetCall struct { s *Service project string - httpsHealthCheck string + instanceTemplate string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified HttpsHealthCheck resource. Gets a list of -// available HTTPS health checks by making a list() request. +// Get: Returns the specified instance template. Gets a list of +// available instance templates by making a list() request. // -// - httpsHealthCheck: Name of the HttpsHealthCheck resource to return. +// - instanceTemplate: The name of the instance template. // - project: Project ID for this request. -func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { - c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { + c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck + c.instanceTemplate = instanceTemplate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { +func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79347,7 +94921,7 @@ func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChec // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { +func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -79355,21 +94929,21 @@ func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChe // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { +func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksGetCall) Header() http.Header { +func (c *InstanceTemplatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -79382,7 +94956,7 @@ func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -79391,38 +94965,38 @@ func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "instanceTemplate": c.instanceTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.get" call. -// Exactly one of *HttpsHealthCheck or error will be non-nil. Any +// Do executes the "compute.instanceTemplates.get" call. +// Exactly one of *InstanceTemplate or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheck.ServerResponse.Header or (if a response was +// *InstanceTemplate.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { +func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &HttpsHealthCheck{ + ret := &InstanceTemplate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -79434,17 +95008,17 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } return ret, nil // { - // "description": "Returns the specified HttpsHealthCheck resource. 
Gets a list of available HTTPS health checks by making a list() request.", - // "flatPath": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request.", + // "flatPath": "projects/{project}/global/instanceTemplates/{instanceTemplate}", // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.get", + // "id": "compute.instanceTemplates.get", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "instanceTemplate" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to return.", + // "instanceTemplate": { + // "description": "The name of the instance template.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -79458,9 +95032,9 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt // "type": "string" // } // }, - // "path": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "projects/{project}/global/instanceTemplates/{instanceTemplate}", // "response": { - // "$ref": "HttpsHealthCheck" + // "$ref": "InstanceTemplate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -79471,25 +95045,202 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } -// method id "compute.httpsHealthChecks.insert": +// method id "compute.instanceTemplates.getIamPolicy": -type HttpsHealthChecksInsertCall struct { +type InstanceTemplatesGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *InstanceTemplatesService) GetIamPolicy(project string, resource string) *InstanceTemplatesGetIamPolicyCall { + c := &InstanceTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *InstanceTemplatesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *InstanceTemplatesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *InstanceTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceTemplatesGetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceTemplates.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/global/instanceTemplates/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.getIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/instanceTemplates/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceTemplates.insert": + +type InstanceTemplatesInsertCall struct { s *Service project string - httpshealthcheck *HttpsHealthCheck + instancetemplate *InstanceTemplate urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a HttpsHealthCheck resource in the specified project -// using the data included in the request. +// Insert: Creates an instance template in the specified project using +// the data that is included in the request. If you are creating a new +// template to update an existing instance group, your new instance +// template must use the same network or, if applicable, the same +// subnetwork as the original template. // // - project: Project ID for this request. -func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { - c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { + c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpshealthcheck = httpshealthcheck + c.instancetemplate = instancetemplate return c } @@ -79504,7 +95255,7 @@ func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *Http // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { +func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79512,7 +95263,7 @@ func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthCh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { +func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79520,21 +95271,21 @@ func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { +func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksInsertCall) Header() http.Header { +func (c *InstanceTemplatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -79542,14 +95293,14 @@ func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -79562,31 +95313,31 @@ func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.insert" call. +// Do executes the "compute.instanceTemplates.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -79600,10 +95351,10 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/global/httpsHealthChecks", + // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + // "flatPath": "projects/{project}/global/instanceTemplates", // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.insert", + // "id": "compute.instanceTemplates.insert", // "parameterOrder": [ // "project" // ], @@ -79621,9 +95372,9 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "projects/{project}/global/httpsHealthChecks", + // "path": "projects/{project}/global/instanceTemplates", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "InstanceTemplate" // }, // "response": { // "$ref": "Operation" @@ -79636,9 +95387,9 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.httpsHealthChecks.list": +// method id "compute.instanceTemplates.list": -type HttpsHealthChecksListCall struct { +type InstanceTemplatesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -79647,30 +95398,33 @@ type HttpsHealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HttpsHealthCheck resources available to -// the specified project. +// List: Retrieves a list of instance templates that are contained +// within the specified project. // // - project: Project ID for this request. -func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { - c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { + c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. 
For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -79678,8 +95432,18 @@ func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCa // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { c.urlParams_.Set("filter", filter) return c } @@ -79690,7 +95454,7 @@ func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksList // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { +func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -79704,7 +95468,7 @@ func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChe // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { +func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -79712,7 +95476,7 @@ func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksLi // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { +func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -79721,7 +95485,7 @@ func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChec // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *HttpsHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HttpsHealthChecksListCall { +func (c *InstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceTemplatesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -79729,7 +95493,7 @@ func (c *HttpsHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { +func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79739,7 +95503,7 @@ func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { +func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -79747,21 +95511,21 @@ func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { +func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpsHealthChecksListCall) Header() http.Header { +func (c *InstanceTemplatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -79774,7 +95538,7 @@ func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -79787,33 +95551,33 @@ func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.list" call. -// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any +// Do executes the "compute.instanceTemplates.list" call. +// Exactly one of *InstanceTemplateList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// *InstanceTemplateList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { +func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &HttpsHealthCheckList{ + ret := &InstanceTemplateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -79825,16 +95589,16 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal } return ret, nil // { - // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", - // "flatPath": "projects/{project}/global/httpsHealthChecks", + // "description": "Retrieves a list of instance templates that are contained within the specified project.", + // "flatPath": "projects/{project}/global/instanceTemplates", // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.list", + // "id": "compute.instanceTemplates.list", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -79869,9 +95633,9 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // "type": "boolean" // } // }, - // "path": "projects/{project}/global/httpsHealthChecks", + // "path": "projects/{project}/global/instanceTemplates", // "response": { - // "$ref": "HttpsHealthCheckList" + // "$ref": "InstanceTemplateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -79885,7 +95649,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { +func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -79903,52 +95667,35 @@ func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHeal } } -// method id "compute.httpsHealthChecks.patch": +// method id "compute.instanceTemplates.setIamPolicy": -type HttpsHealthChecksPatchCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. // -// - httpsHealthCheck: Name of the HttpsHealthCheck resource to patch. // - project: Project ID for this request. -func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { - c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *InstanceTemplatesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InstanceTemplatesSetIamPolicyCall { + c := &InstanceTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. 
This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { +func (c *InstanceTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79956,21 +95703,21 @@ func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { +func (c *InstanceTemplatesSetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksPatchCall) Header() http.Header { +func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -79978,54 +95725,54 @@ func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceTemplates.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80037,22 +95784,15 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", - // "httpMethod": "PATCH", - // "id": "compute.httpsHealthChecks.patch", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/global/instanceTemplates/{resource}/setIamPolicy", + // "httpMethod": "POST", + // "id": "compute.instanceTemplates.setIamPolicy", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "resource" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -80060,195 +95800,20 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", - // "request": { - // "$ref": "HttpsHealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.httpsHealthChecks.update": - -type HttpsHealthChecksUpdateCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. -// -// - httpsHealthCheck: Name of the HttpsHealthCheck resource to update. -// - project: Project ID for this request. -func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { - c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
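
// Illustrative sketch (not part of the vendored diff): a read-modify-write of an
// instance template's IAM policy using the SetIamPolicy call shown above. It
// assumes the companion GetIamPolicy method generated for the same service and a
// *compute.Service built as in the earlier sketch; the project, template name,
// role and member are placeholders.
func grantTemplateUser(ctx context.Context, svc *compute.Service) error {
	const project, template = "my-project", "my-template"
	policy, err := svc.InstanceTemplates.GetIamPolicy(project, template).Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &compute.Binding{
		Role:    "roles/compute.instanceAdmin.v1",
		Members: []string{"user:alice@example.com"},
	})
	// Sending back the fetched policy (with its Etag) lets the server reject
	// concurrent modifications instead of silently overwriting them.
	_, err = svc.InstanceTemplates.SetIamPolicy(project, template, &compute.GlobalSetPolicyRequest{
		Policy: policy,
	}).Context(ctx).Do()
	return err
}
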
-func (c *HttpsHealthChecksUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpsHealthChecks.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", - // "httpMethod": "PUT", - // "id": "compute.httpsHealthChecks.update", - // "parameterOrder": [ - // "project", - // "httpsHealthCheck" - // ], - // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "projects/{project}/global/instanceTemplates/{resource}/setIamPolicy", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -80258,123 +95823,112 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.imageFamilyViews.get": +// method id "compute.instanceTemplates.testIamPermissions": -type ImageFamilyViewsGetCall struct { - s *Service - project string - zone string - family string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceTemplatesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the latest image that is part of an image family, is not -// deprecated and is rolled out in the specified zone. +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. // -// - family: Name of the image family to search for. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *ImageFamilyViewsService) Get(project string, zone string, family string) *ImageFamilyViewsGetCall { - c := &ImageFamilyViewsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { + c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.family = family + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImageFamilyViewsGetCall) Fields(s ...googleapi.Field) *ImageFamilyViewsGetCall { +func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
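
// Illustrative sketch (not part of the vendored diff): checking which of a set
// of permissions the caller holds on an instance template via the
// TestIamPermissions call introduced above. svc is assumed to be a
// *compute.Service as in the first sketch; the project, resource and
// permission names are placeholders.
func checkTemplatePermissions(ctx context.Context, svc *compute.Service) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.instanceTemplates.get",
			"compute.instanceTemplates.useReadOnly",
		},
	}
	resp, err := svc.InstanceTemplates.TestIamPermissions("my-project", "my-template", req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	// resp.Permissions contains only the subset of the requested permissions
	// that the caller actually holds.
	return resp.Permissions, nil
}
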
-func (c *ImageFamilyViewsGetCall) IfNoneMatch(entityTag string) *ImageFamilyViewsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImageFamilyViewsGetCall) Context(ctx context.Context) *ImageFamilyViewsGetCall { +func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImageFamilyViewsGetCall) Header() http.Header { +func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImageFamilyViewsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/imageFamilyViews/{family}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "family": c.family, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.imageFamilyViews.get" call. -// Exactly one of *ImageFamilyView or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ImageFamilyView.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceTemplates.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ImageFamilyViewsGetCall) Do(opts ...googleapi.CallOption) (*ImageFamilyView, error) { +func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ImageFamilyView{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80386,23 +95940,15 @@ func (c *ImageFamilyViewsGetCall) Do(opts ...googleapi.CallOption) (*ImageFamily } return ret, nil // { - // "description": "Returns the latest image that is part of an image family, is not deprecated and is rolled out in the specified zone.", - // "flatPath": "projects/{project}/zones/{zone}/imageFamilyViews/{family}", - // "httpMethod": "GET", - // "id": "compute.imageFamilyViews.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/global/instanceTemplates/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.instanceTemplates.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "family" + // "resource" // ], // "parameters": { - // "family": { - // "description": "Name of the image family to search for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -80410,17 +95956,20 @@ func (c *ImageFamilyViewsGetCall) Do(opts ...googleapi.CallOption) (*ImageFamily // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/imageFamilyViews/{family}", + // "path": "projects/{project}/global/instanceTemplates/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "ImageFamilyView" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -80431,25 +95980,34 @@ func (c *ImageFamilyViewsGetCall) Do(opts ...googleapi.CallOption) (*ImageFamily } -// method id "compute.images.delete": +// method id "compute.instances.addAccessConfig": -type ImagesDeleteCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAddAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified image. +// AddAccessConfig: Adds an access config to an instance's network +// interface. // -// - image: Name of the image resource to delete. -// - project: Project ID for this request. 
-func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { - c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instance: The instance name for this request. +// - networkInterface: The name of the network interface to add to this +// instance. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { + c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig return c } @@ -80464,7 +96022,7 @@ func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { +func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -80472,7 +96030,7 @@ func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { +func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80480,21 +96038,21 @@ func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { +func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesDeleteCall) Header() http.Header { +func (c *InstancesAddAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -80502,47 +96060,53 @@ func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/addAccessConfig") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.delete" call. +// Do executes the "compute.instances.addAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -80556,22 +96120,30 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified image.", - // "flatPath": "projects/{project}/global/images/{image}", - // "httpMethod": "DELETE", - // "id": "compute.images.delete", + // "description": "Adds an access config to an instance's network interface.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/addAccessConfig", + // "httpMethod": "POST", + // "id": "compute.instances.addAccessConfig", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instance", + // "networkInterface" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to delete.", + // "instance": { + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "networkInterface": { + // "description": "The name of the network interface to add to this instance.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -80583,9 +96155,19 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/images/{image}", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/addAccessConfig", + // "request": { + // "$ref": "AccessConfig" + // }, // "response": { // "$ref": "Operation" // }, @@ -80597,28 +96179,32 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.images.deprecate": +// method id "compute.instances.addResourcePolicies": -type ImagesDeprecateCall struct { - s *Service - project string - image string - deprecationstatus *DeprecationStatus - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAddResourcePoliciesCall struct { + s *Service + project string + zone string + instance string + instancesaddresourcepoliciesrequest *InstancesAddResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Deprecate: Sets the deprecation status of an image. If an empty -// request body is given, clears the deprecation status instead. +// AddResourcePolicies: Adds existing resource policies to an instance. +// You can only add one policy right now which will be applied to this +// instance for scheduling live migrations. // -// - image: Image name. +// - instance: The instance name for this request. // - project: Project ID for this request. -func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { - c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) AddResourcePolicies(project string, zone string, instance string, instancesaddresourcepoliciesrequest *InstancesAddResourcePoliciesRequest) *InstancesAddResourcePoliciesCall { + c := &InstancesAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - c.deprecationstatus = deprecationstatus + c.zone = zone + c.instance = instance + c.instancesaddresourcepoliciesrequest = instancesaddresourcepoliciesrequest return c } @@ -80633,7 +96219,7 @@ func (r *ImagesService) Deprecate(project string, image string, deprecationstatu // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { +func (c *InstancesAddResourcePoliciesCall) RequestId(requestId string) *InstancesAddResourcePoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -80641,7 +96227,7 @@ func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
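
// Illustrative sketch (not part of the vendored diff): attaching an external IP
// to an instance's primary network interface with AddAccessConfig, passing the
// requestId parameter described above so a retried call is not applied twice.
// svc is assumed to be a *compute.Service; the project, zone, instance name,
// interface name and UUID are placeholders.
func addExternalIP(ctx context.Context, svc *compute.Service, requestID string) (*compute.Operation, error) {
	cfg := &compute.AccessConfig{
		Name: "External NAT",
		Type: "ONE_TO_ONE_NAT", // ephemeral external IP; set NatIP to use a reserved address
	}
	return svc.Instances.
		AddAccessConfig("my-project", "us-central1-a", "my-instance", "nic0", cfg).
		RequestId(requestID). // must be a valid, non-zero UUID
		Context(ctx).
		Do()
}
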
-func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { +func (c *InstancesAddResourcePoliciesCall) Fields(s ...googleapi.Field) *InstancesAddResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80649,21 +96235,21 @@ func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { +func (c *InstancesAddResourcePoliciesCall) Context(ctx context.Context) *InstancesAddResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesDeprecateCall) Header() http.Header { +func (c *InstancesAddResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -80671,14 +96257,14 @@ func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesaddresourcepoliciesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{image}/deprecate") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/addResourcePolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -80686,37 +96272,38 @@ func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.deprecate" call. +// Do executes the "compute.instances.addResourcePolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -80730,17 +96317,18 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the deprecation status of an image. If an empty request body is given, clears the deprecation status instead.", - // "flatPath": "projects/{project}/global/images/{image}/deprecate", + // "description": "Adds existing resource policies to an instance. You can only add one policy right now which will be applied to this instance for scheduling live migrations.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/addResourcePolicies", // "httpMethod": "POST", - // "id": "compute.images.deprecate", + // "id": "compute.instances.addResourcePolicies", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instance" // ], // "parameters": { - // "image": { - // "description": "Image name.", + // "instance": { + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -80757,11 +96345,18 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/images/{image}/deprecate", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/addResourcePolicies", // "request": { - // "$ref": "DeprecationStatus" + // "$ref": "InstancesAddResourcePoliciesRequest" // }, // "response": { // "$ref": "Operation" @@ -80774,34 +96369,128 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.images.get": +// method id "compute.instances.aggregatedList": -type ImagesGetCall struct { +type InstancesAggregatedListCall struct { s *Service project string - image string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified image. Gets a list of available images by -// making a list() request. 
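
// Illustrative sketch (not part of the vendored diff): attaching an existing
// resource policy to a running instance via the AddResourcePolicies call
// documented above (per the doc comment, only one policy may be added at a
// time). svc is assumed to be a *compute.Service; the project, zone, instance
// and policy URL are placeholders.
func attachResourcePolicy(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	req := &compute.InstancesAddResourcePoliciesRequest{
		ResourcePolicies: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/resourcePolicies/my-policy",
		},
	}
	return svc.Instances.
		AddResourcePolicies("my-project", "us-central1-a", "my-instance", req).
		Context(ctx).
		Do()
}
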
+// AggregatedList: Retrieves an aggregated list of all of the instances +// in your project across all regions and zones. The performance of this +// method degrades when a filter is specified on a project that has a +// very large number of instances. // -// - image: Name of the image resource to return. // - project: Project ID for this request. -func (r *ImagesService) Get(project string, image string) *ImagesGetCall { - c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { + c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. 
For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *InstancesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InstancesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *InstancesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { +func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80811,7 +96500,7 @@ func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
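
// Illustrative sketch (not part of the vendored diff): walking every zone's
// instances with AggregatedList, combining the filter and paging options
// documented above. The Items field of InstanceAggregatedList is keyed by
// scope (for example "zones/us-central1-a"). svc is assumed to be a
// *compute.Service and fmt to be imported; the project ID and filter are
// placeholders.
func listRunningInstances(ctx context.Context, svc *compute.Service) error {
	call := svc.Instances.AggregatedList("my-project").
		Filter(`status = "RUNNING"`). // AIP-160 form, as described above
		MaxResults(500)
	return call.Pages(ctx, func(page *compute.InstanceAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, inst := range scoped.Instances {
				fmt.Printf("%s: %s\n", scope, inst.Name)
			}
		}
		return nil
	})
}
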
-func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { +func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -80819,21 +96508,21 @@ func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { +func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesGetCall) Header() http.Header { +func (c *InstancesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -80846,7 +96535,7 @@ func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/instances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -80855,38 +96544,37 @@ func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.get" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { +// Do executes the "compute.instances.aggregatedList" call. +// Exactly one of *InstanceAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Image{ + ret := &InstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80898,20 +96586,40 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { } return ret, nil // { - // "description": "Returns the specified image. Gets a list of available images by making a list() request.", - // "flatPath": "projects/{project}/global/images/{image}", + // "description": "Retrieves an aggregated list of all of the instances in your project across all regions and zones. The performance of this method degrades when a filter is specified on a project that has a very large number of instances.", + // "flatPath": "projects/{project}/aggregated/instances", // "httpMethod": "GET", - // "id": "compute.images.get", + // "id": "compute.instances.aggregatedList", // "parameterOrder": [ - // "project", - // "image" + // "project" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -80920,11 +96628,16 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/global/images/{image}", + // "path": "projects/{project}/aggregated/instances", // "response": { - // "$ref": "Image" + // "$ref": "InstanceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -80935,119 +96648,164 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { } -// method id "compute.images.getFromFamily": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ImagesGetFromFamilyCall struct { +// method id "compute.instances.attachDisk": + +type InstancesAttachDiskCall struct { s *Service project string - family string + zone string + instance string + attacheddisk *AttachedDisk urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetFromFamily: Returns the latest image that is part of an image -// family and is not deprecated. +// AttachDisk: Attaches an existing Disk resource to an instance. You +// must first create the disk before you can attach it. It is not +// possible to create and attach a disk at the same time. For more +// information, read Adding a persistent disk to your instance. // -// - family: Name of the image family to search for. +// - instance: The instance name for this request. // - project: Project ID for this request. -func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { - c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { + c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.family = family + c.zone = zone + c.instance = instance + c.attacheddisk = attacheddisk + return c +} + +// ForceAttach sets the optional parameter "forceAttach": Whether to +// force attach the regional disk even if it's currently attached to +// another instance. If you try to force attach a zonal disk to an +// instance, you will receive an error. +func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { + c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
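Taken together, the AggregatedList call builder, its optional setters (Filter, MaxResults, ReturnPartialSuccess and friends) and the Pages helper added above compose in the usual generated-client style. A minimal usage sketch follows; it assumes the vendored package is imported as google.golang.org/api/compute/v1, that compute.NewService picks up Application Default Credentials, and that the project ID and filter string are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this vendored client
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// Build the aggregated list call, narrow it with a filter, and walk every page.
	call := svc.Instances.AggregatedList("my-project").
		Filter(`status = "RUNNING"`).
		MaxResults(100)
	err = call.Pages(ctx, func(page *compute.InstanceAggregatedList) error {
		// Items maps a scope such as "zones/us-central1-a" to the instances
		// found there (field names assumed from the schema documented above).
		for scope, scoped := range page.Items {
			fmt.Println(scope, len(scoped.Instances))
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
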
-func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { +func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { +func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesGetFromFamilyCall) Header() http.Header { +func (c *InstancesAttachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/family/{family}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/attachDisk") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "family": c.family, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.getFromFamily" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { +// Do executes the "compute.instances.attachDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Image{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81059,17 +96817,23 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro } return ret, nil // { - // "description": "Returns the latest image that is part of an image family and is not deprecated.", - // "flatPath": "projects/{project}/global/images/family/{family}", - // "httpMethod": "GET", - // "id": "compute.images.getFromFamily", + // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/attachDisk", + // "httpMethod": "POST", + // "id": "compute.instances.attachDisk", // "parameterOrder": [ // "project", - // "family" + // "zone", + // "instance" // ], // "parameters": { - // "family": { - // "description": "Name of the image family to search for.", + // "forceAttach": { + // "description": "Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error.", + // "location": "query", + // "type": "boolean" + // }, + // "instance": { + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -81081,141 +96845,158 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/images/family/{family}", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/attachDisk", + // "request": { + // "$ref": "AttachedDisk" + // }, // "response": { - // "$ref": "Image" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.getIamPolicy": +// method id "compute.instances.bulkInsert": -type ImagesGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesBulkInsertCall struct { + s *Service + project string + zone string + bulkinsertinstanceresource *BulkInsertInstanceResource + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// BulkInsert: Creates multiple instances. Count specifies the number of +// instances to create. For more information, see About bulk creation of +// VMs. // // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { - c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) BulkInsert(project string, zone string, bulkinsertinstanceresource *BulkInsertInstanceResource) *InstancesBulkInsertCall { + c := &InstancesBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.zone = zone + c.bulkinsertinstanceresource = bulkinsertinstanceresource return c } -// OptionsRequestedPolicyVersion sets the optional parameter -// "optionsRequestedPolicyVersion": Requested IAM Policy version. -func (c *ImagesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ImagesGetIamPolicyCall { - c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). 
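For the attachDisk call documented above, a minimal sketch under the same assumptions as the earlier aggregatedList example (same package, imports, and *compute.Service construction); the disk source URL, device name, and resource names are placeholders.

func attachDiskExample(ctx context.Context, svc *compute.Service) error {
	disk := &compute.AttachedDisk{
		// Full or partial URL of an existing persistent disk in the same zone (placeholder).
		Source:     "projects/my-project/zones/us-central1-a/disks/example-disk",
		DeviceName: "example-disk",
	}
	op, err := svc.Instances.
		AttachDisk("my-project", "us-central1-a", "example-instance", disk).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// Do returns a zonal Operation; callers typically poll it until Status is DONE.
	fmt.Println("operation:", op.Name, op.Status)
	return nil
}
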
+func (c *InstancesBulkInsertCall) RequestId(requestId string) *InstancesBulkInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesGetIamPolicyCall { +func (c *InstancesBulkInsertCall) Fields(s ...googleapi.Field) *InstancesBulkInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *ImagesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetIamPolicyCall) Context(ctx context.Context) *ImagesGetIamPolicyCall { +func (c *InstancesBulkInsertCall) Context(ctx context.Context) *InstancesBulkInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesGetIamPolicyCall) Header() http.Header { +func (c *InstancesBulkInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesBulkInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertinstanceresource) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/bulkInsert") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
-func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.bulkInsert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81227,21 +97008,15 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "flatPath": "projects/{project}/global/images/{resource}/getIamPolicy", - // "httpMethod": "GET", - // "id": "compute.images.getIamPolicy", + // "description": "Creates multiple instances. Count specifies the number of instances to create. For more information, see About bulk creation of VMs.", + // "flatPath": "projects/{project}/zones/{zone}/instances/bulkInsert", + // "httpMethod": "POST", + // "id": "compute.instances.bulkInsert", // "parameterOrder": [ // "project", - // "resource" + // "zone" // ], // "parameters": { - // "optionsRequestedPolicyVersion": { - // "description": "Requested IAM Policy version.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -81249,53 +97024,57 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/images/{resource}/getIamPolicy", + // "path": "projects/{project}/zones/{zone}/instances/bulkInsert", + // "request": { + // "$ref": "BulkInsertInstanceResource" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.insert": +// method id "compute.instances.delete": -type ImagesInsertCall struct { +type InstancesDeleteCall struct { s *Service project string - image *Image + zone string + instance string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates an image in the specified project using the data -// included in the request. +// Delete: Deletes the specified Instance resource. For more +// information, see Deleting an instance. // +// - instance: Name of the instance resource to delete. // - project: Project ID for this request. -func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { - c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { + c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - return c -} - -// ForceCreate sets the optional parameter "forceCreate": Force image -// creation if true. -func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { - c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) + c.zone = zone + c.instance = instance return c } @@ -81310,7 +97089,7 @@ func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { +func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -81318,7 +97097,7 @@ func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { +func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -81326,21 +97105,21 @@ func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
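For bulkInsert, a minimal sketch under the same assumptions; only Count and NamePattern are set here, and a real request would also need InstanceProperties (machine type, disks, network interfaces), which is omitted for brevity. The RequestId value is a placeholder UUID used as the idempotency key described above.

func bulkInsertExample(ctx context.Context, svc *compute.Service) error {
	req := &compute.BulkInsertInstanceResource{
		Count:       3,            // number of instances to create
		NamePattern: "batch-####", // server expands #### into a per-instance suffix
		// InstanceProperties is required in practice but omitted from this sketch.
	}
	op, err := svc.Instances.
		BulkInsert("my-project", "us-central1-a", req).
		RequestId("3d7f5bb0-0000-0000-0000-000000000001"). // placeholder UUID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("operation:", op.Name, op.Status)
	return nil
}
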
-func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { +func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesInsertCall) Header() http.Header { +func (c *InstancesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -81348,51 +97127,48 @@ func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.insert" call. +// Do executes the "compute.instances.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -81406,18 +97182,22 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates an image in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/global/images", - // "httpMethod": "POST", - // "id": "compute.images.insert", + // "description": "Deletes the specified Instance resource. 
For more information, see Deleting an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}", + // "httpMethod": "DELETE", + // "id": "compute.instances.delete", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "forceCreate": { - // "description": "Force image creation if true.", - // "location": "query", - // "type": "boolean" + // "instance": { + // "description": "Name of the instance resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, // "project": { // "description": "Project ID for this request.", @@ -81430,209 +97210,346 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/images", - // "request": { - // "$ref": "Image" - // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.list": +// method id "compute.instances.deleteAccessConfig": -type ImagesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesDeleteAccessConfigCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of custom images available to the specified -// project. Custom images are images you create that belong to your -// project. This method does not get any images that belong to other -// projects, including publicly-available images, like Debian 8. If you -// want to get a list of publicly-available images, use this method to -// make a request to the respective image project, such as debian-cloud -// or windows-cloud. +// DeleteAccessConfig: Deletes an access config from an instance's +// network interface. // +// - accessConfig: The name of the access config to delete. +// - instance: The instance name for this request. +// - networkInterface: The name of the network interface. // - project: Project ID for this request. 
-func (r *ImagesService) List(project string) *ImagesListCall { - c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { + c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("accessConfig", accessConfig) + c.urlParams_.Set("networkInterface", networkInterface) return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *ImagesListCall) Filter(filter string) *ImagesListCall { - c.urlParams_.Set("filter", filter) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { + c.urlParams_.Set("requestId", requestId) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. 
Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { + c.ctx_ = ctx return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesDeleteAccessConfigCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.deleteAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
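For the instances.delete call documented above, a minimal sketch under the same assumptions (resource names are placeholders); RequestId makes a retried request idempotent, as the description above explains.

func deleteInstanceExample(ctx context.Context, svc *compute.Service) error {
	op, err := svc.Instances.
		Delete("my-project", "us-central1-a", "example-instance").
		RequestId("3d7f5bb0-0000-0000-0000-000000000002"). // placeholder UUID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("delete started:", op.Name, op.Status)
	return nil
}
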
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an access config from an instance's network interface.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + // "httpMethod": "POST", + // "id": "compute.instances.deleteAccessConfig", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "accessConfig", + // "networkInterface" + // ], + // "parameters": { + // "accessConfig": { + // "description": "The name of the access config to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.detachDisk": + +type InstancesDetachDiskCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. 
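For deleteAccessConfig, a minimal sketch under the same assumptions; "External NAT" and "nic0" are the conventional default names but are placeholders here, and both are passed as query parameters exactly as the builder above requires.

func deleteAccessConfigExample(ctx context.Context, svc *compute.Service) error {
	op, err := svc.Instances.
		DeleteAccessConfig("my-project", "us-central1-a", "example-instance",
			"External NAT", // access config name (placeholder)
			"nic0").        // network interface name (placeholder)
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("operation:", op.Name, op.Status)
	return nil
}
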
Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { - c.urlParams_.Set("pageToken", pageToken) +// DetachDisk: Detaches a disk from an instance. +// +// - deviceName: The device name of the disk to detach. Make a get() +// request on the instance to view currently attached disks and device +// names. +// - instance: Instance name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { + c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("deviceName", deviceName) return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *ImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ImagesListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { +func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { +func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesListCall) Header() http.Header { +func (c *InstancesDetachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/detachDisk") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.list" call. -// Exactly one of *ImageList or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.detachDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *ImageList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { +func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ImageList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81644,35 +97561,28 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { } return ret, nil // { - // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", - // "flatPath": "projects/{project}/global/images", - // "httpMethod": "GET", - // "id": "compute.images.list", + // "description": "Detaches a disk from an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/detachDisk", + // "httpMethod": "POST", + // "id": "compute.instances.detachDisk", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance", + // "deviceName" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "deviceName": { + // "description": "The device name of the disk to detach. Make a get() request on the instance to view currently attached disks and device names.", // "location": "query", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "instance": { + // "description": "Instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -81682,169 +97592,148 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/images", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/detachDisk", // "response": { - // "$ref": "ImageList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.images.patch": +// method id "compute.instances.get": -type ImagesPatchCall struct { - s *Service - project string - image string - image2 *Image - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified image with the data included in the -// request. Only the following fields can be modified: family, -// description, deprecation status. +// Get: Returns the specified Instance resource. Gets a list of +// available instances by making a list() request. // -// - image: Name of the image resource to patch. +// - instance: Name of the instance resource to return. 
// - project: Project ID for this request. -func (r *ImagesService) Patch(project string, image string, image2 *Image) *ImagesPatchCall { - c := &ImagesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { + c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - c.image2 = image2 - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *ImagesPatchCall) RequestId(requestId string) *ImagesPatchCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesPatchCall) Fields(s ...googleapi.Field) *ImagesPatchCall { +func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesPatchCall) Context(ctx context.Context) *ImagesPatchCall { +func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
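A minimal sketch of how the InstancesGetCall builder shown here is typically driven, assuming this vendored surface is google.golang.org/api/compute/v1, that Application Default Credentials are available, and that the project/zone/instance names are placeholders. Fields requests a partial response, and the IfNoneMatch/googleapi.IsNotModified pattern documented above can be layered onto the same builder.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this vendored surface
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Request only the fields we need (partial response via Fields).
	inst, err := svc.Instances.Get("my-project", "us-central1-a", "my-vm").
		Fields("name", "status").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("instance %s is %s", inst.Name, inst.Status)
}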
-func (c *ImagesPatchCall) Header() http.Header { +func (c *InstancesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.image2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.instances.get" call. +// Exactly one of *Instance or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Instance.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Instance{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81856,17 +97745,18 @@ func (c *ImagesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Patches the specified image with the data included in the request. Only the following fields can be modified: family, description, deprecation status.", - // "flatPath": "projects/{project}/global/images/{image}", - // "httpMethod": "PATCH", - // "id": "compute.images.patch", + // "description": "Returns the specified Instance resource. 
Gets a list of available instances by making a list() request.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}", + // "httpMethod": "GET", + // "id": "compute.instances.get", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instance" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to patch.", + // "instance": { + // "description": "Name of the instance resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -81879,133 +97769,148 @@ func (c *ImagesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/images/{image}", - // "request": { - // "$ref": "Image" - // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}", // "response": { - // "$ref": "Operation" + // "$ref": "Instance" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.images.setIamPolicy": +// method id "compute.instances.getEffectiveFirewalls": -type ImagesSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetEffectiveFirewallsCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// GetEffectiveFirewalls: Returns effective firewalls applied to an +// interface of the instance. // -// - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -func (r *ImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *ImagesSetIamPolicyCall { - c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instance: Name of the instance scoping this request. +// - networkInterface: The name of the network interface to get the +// effective firewalls. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
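The DetachDisk call added in this hunk returns an Operation, and its doc comment says to make a get() request on the instance to discover device names. A hedged sketch of that flow under the same assumptions as above (compute/v1, ADC, placeholder names); the polling loop uses ZoneOperations.Get, which is one common way to wait for a zonal operation.

package main

import (
	"context"
	"log"
	"time"

	compute "google.golang.org/api/compute/v1" // assumed import path for this vendored surface
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	project, zone, instance := "my-project", "us-central1-a", "my-vm" // placeholders

	// Per the DetachDisk doc comment: get() the instance to view attached disks
	// and their device names.
	inst, err := svc.Instances.Get(project, zone, instance).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	var deviceName string
	for _, d := range inst.Disks {
		if !d.Boot {
			deviceName = d.DeviceName // detach the first non-boot disk
			break
		}
	}
	if deviceName == "" {
		log.Fatal("no non-boot disk to detach")
	}

	op, err := svc.Instances.DetachDisk(project, zone, instance, deviceName).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	// Poll the zonal operation until DONE (a simple alternative to ZoneOperations.Wait).
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.ZoneOperations.Get(project, zone, op.Name).Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
	}
	if op.Error != nil {
		log.Fatalf("detach failed: %+v", op.Error.Errors)
	}
	log.Printf("detached %s from %s", deviceName, instance)
}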
+func (r *InstancesService) GetEffectiveFirewalls(project string, zone string, instance string, networkInterface string) *InstancesGetEffectiveFirewallsCall { + c := &InstancesGetEffectiveFirewallsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolicyCall { +func (c *InstancesGetEffectiveFirewallsCall) Fields(s ...googleapi.Field) *InstancesGetEffectiveFirewallsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetEffectiveFirewallsCall) IfNoneMatch(entityTag string) *InstancesGetEffectiveFirewallsCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesSetIamPolicyCall) Context(ctx context.Context) *ImagesSetIamPolicyCall { +func (c *InstancesGetEffectiveFirewallsCall) Context(ctx context.Context) *InstancesGetEffectiveFirewallsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesSetIamPolicyCall) Header() http.Header { +func (c *InstancesGetEffectiveFirewallsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetEffectiveFirewallsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.setIamPolicy" call. 
-// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.getEffectiveFirewalls" call. +// Exactly one of *InstancesGetEffectiveFirewallsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *InstancesGetEffectiveFirewallsResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*InstancesGetEffectiveFirewallsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &InstancesGetEffectiveFirewallsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82017,15 +97922,30 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", - // "flatPath": "projects/{project}/global/images/{resource}/setIamPolicy", - // "httpMethod": "POST", - // "id": "compute.images.setIamPolicy", + // "description": "Returns effective firewalls applied to an interface of the instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls", + // "httpMethod": "GET", + // "id": "compute.instances.getEffectiveFirewalls", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance", + // "networkInterface" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to get the effective firewalls.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -82033,135 +97953,157 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/images/{resource}/setIamPolicy", - // "request": { - // "$ref": "GlobalSetPolicyRequest" - // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls", // "response": { - // "$ref": "Policy" + // "$ref": "InstancesGetEffectiveFirewallsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.images.setLabels": +// method id "compute.instances.getGuestAttributes": -type ImagesSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetGuestAttributesCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an image. To learn more about labels, -// read the Labeling Resources documentation. +// GetGuestAttributes: Returns the specified guest attributes entry. // +// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { - c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. 
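GetEffectiveFirewalls, added above, reports the firewall rules that actually apply to one network interface of a VM. A minimal sketch assuming the compute/v1 surface, that the first interface is named nic0, and that the response carries a Firewalls slice as in the published API.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// "nic0" is the conventional name of the first network interface (assumption).
	resp, err := svc.Instances.
		GetEffectiveFirewalls("my-project", "us-central1-a", "my-vm", "nic0").
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, fw := range resp.Firewalls {
		log.Printf("effective firewall: %s (%s, priority %d)", fw.Name, fw.Direction, fw.Priority)
	}
}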
+func (r *InstancesService) GetGuestAttributes(project string, zone string, instance string) *InstancesGetGuestAttributesCall { + c := &InstancesGetGuestAttributesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.zone = zone + c.instance = instance + return c +} + +// QueryPath sets the optional parameter "queryPath": Specifies the +// guest attributes path to be queried. +func (c *InstancesGetGuestAttributesCall) QueryPath(queryPath string) *InstancesGetGuestAttributesCall { + c.urlParams_.Set("queryPath", queryPath) + return c +} + +// VariableKey sets the optional parameter "variableKey": Specifies the +// key for the guest attributes entry. +func (c *InstancesGetGuestAttributesCall) VariableKey(variableKey string) *InstancesGetGuestAttributesCall { + c.urlParams_.Set("variableKey", variableKey) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { +func (c *InstancesGetGuestAttributesCall) Fields(s ...googleapi.Field) *InstancesGetGuestAttributesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetGuestAttributesCall) IfNoneMatch(entityTag string) *InstancesGetGuestAttributesCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { +func (c *InstancesGetGuestAttributesCall) Context(ctx context.Context) *InstancesGetGuestAttributesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesSetLabelsCall) Header() http.Header { +func (c *InstancesGetGuestAttributesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/getGuestAttributes") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.getGuestAttributes" call. +// Exactly one of *GuestAttributes or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *GuestAttributes.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*GuestAttributes, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &GuestAttributes{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82173,15 +98115,23 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the labels on an image. 
To learn more about labels, read the Labeling Resources documentation.", - // "flatPath": "projects/{project}/global/images/{resource}/setLabels", - // "httpMethod": "POST", - // "id": "compute.images.setLabels", + // "description": "Returns the specified guest attributes entry.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/getGuestAttributes", + // "httpMethod": "GET", + // "id": "compute.instances.getGuestAttributes", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -82189,135 +98139,161 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "queryPath": { + // "description": "Specifies the guest attributes path to be queried.", + // "location": "query", + // "type": "string" + // }, + // "variableKey": { + // "description": "Specifies the key for the guest attributes entry.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/images/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/getGuestAttributes", // "response": { - // "$ref": "Operation" + // "$ref": "GuestAttributes" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.images.testIamPermissions": +// method id "compute.instances.getIamPolicy": -type ImagesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. // // - project: Project ID for this request. // - resource: Name or id of the resource for this request. -func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { - c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. 
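GetGuestAttributes queries values that the guest OS itself published to the metadata server, filtered by the QueryPath and VariableKey setters shown above. A hedged sketch assuming compute/v1 and a placeholder query path; the response is dumped as JSON rather than assuming specific fields.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// "hostinfo/" is a placeholder namespace; guest attributes only exist if the
	// guest wrote them and the instance has the feature enabled.
	ga, err := svc.Instances.
		GetGuestAttributes("my-project", "us-central1-a", "my-vm").
		QueryPath("hostinfo/").
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	out, _ := json.MarshalIndent(ga, "", "  ")
	fmt.Println(string(out))
}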
+func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { + c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *InstancesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *InstancesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { +func (c *InstancesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstancesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { +func (c *InstancesGetIamPolicyCall) Context(ctx context.Context) *InstancesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesTestIamPermissionsCall) Header() http.Header { +func (c *InstancesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/images/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{resource}/getIamPolicy") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instances.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TestPermissionsResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82329,15 +98305,22 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "flatPath": "projects/{project}/global/images/{resource}/testIamPermissions", - // "httpMethod": "POST", - // "id": "compute.images.testIamPermissions", + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.instances.getIamPolicy", // "parameterOrder": [ // "project", + // "zone", // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -82348,17 +98331,21 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/images/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "projects/{project}/zones/{zone}/instances/{resource}/getIamPolicy", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -82369,144 +98356,122 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } -// method id "compute.instanceGroupManagers.abandonInstances": +// method id "compute.instances.getScreenshot": -type InstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetScreenshotCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Flags the specified instances to be removed from -// the managed instance group. Abandoning an instance does not delete -// the instance, but it does remove the instance from any target pools -// that are applied by the managed instance group. This method reduces -// the targetSize of the managed instance group by the number of -// instances that you abandon. This operation is marked as DONE when the -// action is scheduled even if the instances have not yet been removed -// from the group. You must separately verify the status of the -// abandoning action with the listmanagedinstances method. If the group -// is part of a backend service that has enabled connection draining, it -// can take up to 60 seconds after the connection draining duration has -// elapsed before the VM instance is removed or deleted. You can specify -// a maximum of 1000 instances with this method per request. +// GetScreenshot: Returns the screenshot from the specified instance. // -// - instanceGroupManager: The name of the managed instance group. +// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. 
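GetIamPolicy returns the instance-level IAM policy; OptionsRequestedPolicyVersion asks for the newer policy schema so conditional role bindings are not dropped. A minimal sketch under the same assumptions as the earlier examples (compute/v1, ADC, placeholder names).

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Policy version 3 is the schema that can express conditional bindings.
	policy, err := svc.Instances.
		GetIamPolicy("my-project", "us-central1-a", "my-vm").
		OptionsRequestedPolicyVersion(3).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range policy.Bindings {
		log.Printf("%s -> %v", b.Role, b.Members)
	}
}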
-func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { - c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) GetScreenshot(project string, zone string, instance string) *InstancesGetScreenshotCall { + c := &InstancesGetScreenshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { - c.urlParams_.Set("requestId", requestId) + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstancesGetScreenshotCall) Fields(s ...googleapi.Field) *InstancesGetScreenshotCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetScreenshotCall) IfNoneMatch(entityTag string) *InstancesGetScreenshotCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstancesGetScreenshotCall) Context(ctx context.Context) *InstancesGetScreenshotCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *InstancesGetScreenshotCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetScreenshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/screenshot") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.abandonInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.getScreenshot" call. +// Exactly one of *Screenshot or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *Screenshot.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesGetScreenshotCall) Do(opts ...googleapi.CallOption) (*Screenshot, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Screenshot{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82518,19 +98483,20 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Flags the specified instances to be removed from the managed instance group. 
Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.abandonInstances", + // "description": "Returns the screenshot from the specified instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/screenshot", + // "httpMethod": "GET", + // "id": "compute.instances.getScreenshot", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -82541,140 +98507,83 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", - // "request": { - // "$ref": "InstanceGroupManagersAbandonInstancesRequest" - // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/screenshot", // "response": { - // "$ref": "Operation" + // "$ref": "Screenshot" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.aggregatedList": +// method id "compute.instances.getSerialPortOutput": -type InstanceGroupManagersAggregatedListCall struct { +type InstancesGetSerialPortOutputCall struct { s *Service project string + zone string + instance string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves the list of managed instance groups and -// groups them by zone. +// GetSerialPortOutput: Returns the last 1 MB of serial port output from +// the specified instance. // +// - instance: Name of the instance for this request. // - project: Project ID for this request. -func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { - c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { + c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instance = instance return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. 
For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *InstanceGroupManagersAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// Port sets the optional parameter "port": Specifies which COM or +// serial port to retrieve data from. +func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("port", fmt.Sprint(port)) return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. 
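GetScreenshot, added a little above, returns the console screenshot of a running VM. A hedged sketch assuming compute/v1 and that the Screenshot's Contents field carries base64-encoded image data; if the backend uses URL-safe base64, swap the decoder accordingly.

package main

import (
	"context"
	"encoding/base64"
	"log"
	"os"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	shot, err := svc.Instances.
		GetScreenshot("my-project", "us-central1-a", "my-vm").
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	// Contents is assumed to be base64-encoded PNG data.
	img, err := base64.StdEncoding.DecodeString(shot.Contents)
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("screenshot.png", img, 0o644); err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d bytes to screenshot.png", len(img))
}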
-func (c *InstanceGroupManagersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// Start sets the optional parameter "start": Specifies the starting +// byte position of the output to return. To start with the first byte +// of output to the specified port, omit this field or set it to `0`. If +// the output for that byte position is available, this field matches +// the `start` parameter sent with the request. If the amount of serial +// console output exceeds the size of the buffer (1 MB), the oldest +// output is discarded and is no longer available. If the requested +// start position refers to discarded output, the start position is +// adjusted to the oldest output still available, and the adjusted start +// position is returned as the `start` property value. You can also +// provide a negative start position, which translates to the most +// recent number of bytes written to the serial port. For example, -3 is +// interpreted as the most recent 3 bytes written to the serial console. +func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("start", fmt.Sprint(start)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { +func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82684,7 +98593,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) * // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { +func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { c.ifNoneMatch_ = entityTag return c } @@ -82692,21 +98601,21 @@ func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { +func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { +func (c *InstancesGetSerialPortOutputCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -82719,7 +98628,7 @@ func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.R var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/serialPort") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -82727,39 +98636,40 @@ func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.R } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.aggregatedList" call. -// Exactly one of *InstanceGroupManagerAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { +// Do executes the "compute.instances.getSerialPortOutput" call. +// Exactly one of *SerialPortOutput or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SerialPortOutput.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceGroupManagerAggregatedList{ + ret := &SerialPortOutput{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82771,58 +98681,56 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of managed instance groups and groups them by zone.", - // "flatPath": "projects/{project}/aggregated/instanceGroupManagers", + // "description": "Returns the last 1 MB of serial port output from the specified instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/serialPort", // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.aggregatedList", + // "id": "compute.instances.getSerialPortOutput", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", + // "instance": { + // "description": "Name of the instance for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", + // "port": { + // "default": "1", + // "description": "Specifies which COM or serial port to retrieve data from.", + // "format": "int32", // "location": "query", - // "minimum": "0", + // "maximum": "4", + // "minimum": "1", // "type": "integer" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "start": { + // "description": "Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`. If the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value. You can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. For example, -3 is interpreted as the most recent 3 bytes written to the serial console.", + // "format": "int64", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/instanceGroupManagers", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/serialPort", // "response": { - // "$ref": "InstanceGroupManagerAggregatedList" + // "$ref": "SerialPortOutput" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -82833,140 +98741,123 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.applyUpdatesToInstances": +// method id "compute.instances.getShieldedInstanceIdentity": -type InstanceGroupManagersApplyUpdatesToInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetShieldedInstanceIdentityCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ApplyUpdatesToInstances: Applies changes to selected instances on the -// managed instance group. This method can be used to apply new -// overrides and/or new versions. +// GetShieldedInstanceIdentity: Returns the Shielded Instance Identity +// of an instance // -// - instanceGroupManager: The name of the managed instance group, -// should conform to RFC1035. +// - instance: Name or id of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. Should conform to RFC1035. -func (r *InstanceGroupManagersService) ApplyUpdatesToInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest) *InstanceGroupManagersApplyUpdatesToInstancesCall { - c := &InstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) GetShieldedInstanceIdentity(project string, zone string, instance string) *InstancesGetShieldedInstanceIdentityCall { + c := &InstancesGetShieldedInstanceIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersapplyupdatesrequest = instancegroupmanagersapplyupdatesrequest + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *InstancesGetShieldedInstanceIdentityCall) Fields(s ...googleapi.Field) *InstancesGetShieldedInstanceIdentityCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetShieldedInstanceIdentityCall) IfNoneMatch(entityTag string) *InstancesGetShieldedInstanceIdentityCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *InstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *InstancesGetShieldedInstanceIdentityCall) Context(ctx context.Context) *InstancesGetShieldedInstanceIdentityCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { +func (c *InstancesGetShieldedInstanceIdentityCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetShieldedInstanceIdentityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersapplyupdatesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.applyUpdatesToInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.getShieldedInstanceIdentity" call. +// Exactly one of *ShieldedInstanceIdentity or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ShieldedInstanceIdentity.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOption) (*ShieldedInstanceIdentity, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &ShieldedInstanceIdentity{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82978,19 +98869,20 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Applies changes to selected instances on the managed instance group. This method can be used to apply new overrides and/or new versions.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.applyUpdatesToInstances", + // "description": "Returns the Shielded Instance Identity of an instance", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity", + // "httpMethod": "GET", + // "id": "compute.instances.getShieldedInstanceIdentity", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group, should conform to RFC1035.", + // "instance": { + // "description": "Name or id of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -83002,58 +98894,48 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi. // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located. 
Should conform to RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", - // "request": { - // "$ref": "InstanceGroupManagersApplyUpdatesRequest" - // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity", // "response": { - // "$ref": "Operation" + // "$ref": "ShieldedInstanceIdentity" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.createInstances": +// method id "compute.instances.insert": -type InstanceGroupManagersCreateInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerscreateinstancesrequest *InstanceGroupManagersCreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesInsertCall struct { + s *Service + project string + zone string + instance *Instance + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// CreateInstances: Creates instances with per-instance configs in this -// managed instance group. Instances are created using the current -// instance template. The create instances operation is marked DONE if -// the createInstances request is successful. The underlying actions -// take additional time. You must separately verify the status of the -// creating or actions with the listmanagedinstances method. +// Insert: Creates an instance resource in the specified project using +// the data included in the request. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. -func (r *InstanceGroupManagersService) CreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagerscreateinstancesrequest *InstanceGroupManagersCreateInstancesRequest) *InstanceGroupManagersCreateInstancesCall { - c := &InstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { + c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerscreateinstancesrequest = instancegroupmanagerscreateinstancesrequest + c.instance = instance return c } @@ -83064,18 +98946,45 @@ func (r *InstanceGroupManagersService) CreateInstances(project string, zone stri // situation where you make an initial request and the request times // out. If you make the request again with the same request ID, the // server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. The request ID -// must be a valid UUID with the exception that zero UUID is not +// received, and if so, will ignore the second request. 
This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersCreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersCreateInstancesCall { +func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// SourceInstanceTemplate sets the optional parameter +// "sourceInstanceTemplate": Specifies instance template to create the +// instance. This field is optional. It can be a full or partial URL. +// For example, the following are all valid URLs to an instance +// template: - https://www.googleapis.com/compute/v1/projects/project +// /global/instanceTemplates/instanceTemplate - +// projects/project/global/instanceTemplates/instanceTemplate - +// global/instanceTemplates/instanceTemplate +func (c *InstancesInsertCall) SourceInstanceTemplate(sourceInstanceTemplate string) *InstancesInsertCall { + c.urlParams_.Set("sourceInstanceTemplate", sourceInstanceTemplate) + return c +} + +// SourceMachineImage sets the optional parameter "sourceMachineImage": +// Specifies the machine image to use to create the instance. This field +// is optional. It can be a full or partial URL. For example, the +// following are all valid URLs to a machine image: - +// https://www.googleapis.com/compute/v1/projects/project/global/global +// /machineImages/machineImage - +// projects/project/global/global/machineImages/machineImage - +// global/machineImages/machineImage +func (c *InstancesInsertCall) SourceMachineImage(sourceMachineImage string) *InstancesInsertCall { + c.urlParams_.Set("sourceMachineImage", sourceMachineImage) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersCreateInstancesCall { +func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83083,21 +98992,21 @@ func (c *InstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersCreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersCreateInstancesCall { +func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersCreateInstancesCall) Header() http.Header { +func (c *InstancesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -83105,14 +99014,14 @@ func (c *InstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http. 
} reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerscreateinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -83120,38 +99029,37 @@ func (c *InstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http. } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.createInstances" call. +// Do executes the "compute.instances.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -83165,22 +99073,15 @@ func (c *InstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Creates instances with per-instance configs in this managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances", + // "description": "Creates an instance resource in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/zones/{zone}/instances", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.createInstances", + // "id": "compute.instances.insert", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "zone" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. 
It should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -83189,20 +99090,31 @@ func (c *InstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOpti // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sourceInstanceTemplate": { + // "description": "Specifies instance template to create the instance. This field is optional. It can be a full or partial URL. For example, the following are all valid URLs to an instance template: - https://www.googleapis.com/compute/v1/projects/project /global/instanceTemplates/instanceTemplate - projects/project/global/instanceTemplates/instanceTemplate - global/instanceTemplates/instanceTemplate ", + // "location": "query", + // "type": "string" + // }, + // "sourceMachineImage": { + // "description": "Specifies the machine image to use to create the instance. This field is optional. It can be a full or partial URL. For example, the following are all valid URLs to a machine image: - https://www.googleapis.com/compute/v1/projects/project/global/global /machineImages/machineImage - projects/project/global/global/machineImages/machineImage - global/machineImages/machineImage ", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances", + // "path": "projects/{project}/zones/{zone}/instances", // "request": { - // "$ref": "InstanceGroupManagersCreateInstancesRequest" + // "$ref": "Instance" // }, // "response": { // "$ref": "Operation" @@ -83215,129 +99127,201 @@ func (c *InstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOpti } -// method id "compute.instanceGroupManagers.delete": +// method id "compute.instances.list": -type InstanceGroupManagersDeleteCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. Note that the instance group must not belong -// to a backend service. Read Deleting an instance group for more -// information. +// List: Retrieves the list of instances contained within the specified +// zone. // -// - instanceGroupManager: The name of the managed instance group to -// delete. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. -func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { - c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) List(project string, zone string) *InstancesListCall { + c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. 
The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstancesListCall) Filter(filter string) *InstancesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *InstancesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { +func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { +func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteCall) Header() http.Header { +func (c *InstancesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.delete" call. -// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// Do executes the "compute.instances.list" call. +// Exactly one of *InstanceList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *InstanceList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &InstanceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83349,20 +99333,36 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroupManagers.delete", + // "description": "Retrieves the list of instances contained within the specified zone.", + // "flatPath": "projects/{project}/zones/{zone}/instances", + // "httpMethod": "GET", + // "id": "compute.instances.list", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "zone" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group to delete.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -83372,167 +99372,256 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "projects/{project}/zones/{zone}/instances", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.deleteInstances": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instances.listReferrers": + +type InstancesListReferrersCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DeleteInstances: Flags the specified instances in the managed -// instance group for immediate deletion. The instances are also removed -// from any target pools of which they were a member. This method -// reduces the targetSize of the managed instance group by the number of -// instances that you delete. This operation is marked as DONE when the -// action is scheduled even if the instances are still being deleted. -// You must separately verify the status of the deleting action with the -// listmanagedinstances method. If the group is part of a backend -// service that has enabled connection draining, it can take up to 60 -// seconds after the connection draining duration has elapsed before the -// VM instance is removed or deleted. You can specify a maximum of 1000 -// instances with this method per request. +// ListReferrers: Retrieves a list of resources that refer to the VM +// instance specified in the request. For example, if the VM instance is +// part of a managed or unmanaged instance group, the referrers list +// includes the instance group. For more information, read Viewing +// referrers to VM instances. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. 
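// Editor's note: another illustrative sketch (not part of the vendored diff)
// showing the compute.instances.list call and the Pages helper above. It assumes
// an initialized *compute.Service named svc, as in the earlier serial-port
// sketch; the filter, project and zone values are placeholders.
func printRunningInstances(ctx context.Context, svc *compute.Service) error {
	call := svc.Instances.List("my-project", "us-central1-a").
		Filter(`status = "RUNNING"`).
		MaxResults(100)
	// Pages follows nextPageToken until the listing is exhausted; returning a
	// non-nil error from the callback stops the iteration early.
	return call.Pages(ctx, func(page *compute.InstanceList) error {
		for _, inst := range page.Items {
			fmt.Println(inst.Name, inst.Status)
		}
		return nil
	})
}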
-func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { - c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instance: Name of the target instance scoping this request, or '-' +// if the request should span over all instances in the container. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { + c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest + c.instance = instance return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. 
For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *InstancesListReferrersCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesListReferrersCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { +func (c *InstancesListReferrersCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/referrers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.deleteInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.listReferrers" call. +// Exactly one of *InstanceListReferrers or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceListReferrers.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &InstanceListReferrers{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83544,22 +99633,46 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.deleteInstances", + // "description": "Retrieves a list of resources that refer to the VM instance specified in the request. For example, if the VM instance is part of a managed or unmanaged instance group, the referrers list includes the instance group. For more information, read Viewing referrers to VM instances.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/referrers", + // "httpMethod": "GET", + // "id": "compute.instances.listReferrers", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "instance": { + // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", // "location": "path", + // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -83567,67 +99680,100 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - // "request": { - // "$ref": "InstanceGroupManagersDeleteInstancesRequest" - // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/referrers", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceListReferrers" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.deletePerInstanceConfigs": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupManagersDeletePerInstanceConfigsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instances.removeResourcePolicies": + +type InstancesRemoveResourcePoliciesCall struct { + s *Service + project string + zone string + instance string + instancesremoveresourcepoliciesrequest *InstancesRemoveResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeletePerInstanceConfigs: Deletes selected per-instance configs for -// the managed instance group. +// RemoveResourcePolicies: Removes resource policies from an instance. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. +// - instance: The instance name for this request. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. -func (r *InstanceGroupManagersService) DeletePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq) *InstanceGroupManagersDeletePerInstanceConfigsCall { - c := &InstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) RemoveResourcePolicies(project string, zone string, instance string, instancesremoveresourcepoliciesrequest *InstancesRemoveResourcePoliciesRequest) *InstancesRemoveResourcePoliciesCall { + c := &InstancesRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersdeleteperinstanceconfigsreq = instancegroupmanagersdeleteperinstanceconfigsreq + c.instance = instance + c.instancesremoveresourcepoliciesrequest = instancesremoveresourcepoliciesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). 
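// Editor's example (not part of the generated client or of this patch): a
// minimal sketch of paging through compute.instances.listReferrers with the
// Pages helper added above. It assumes this is the vendored
// google.golang.org/api/compute/v1 package, that Application Default
// Credentials are available, and that the placeholder project, zone and
// instance names are replaced with real values; InstanceListReferrers.Items
// and Reference.Referrer are fields defined elsewhere in this package.
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"log"
//
//		compute "google.golang.org/api/compute/v1"
//	)
//
//	func main() {
//		ctx := context.Background()
//		svc, err := compute.NewService(ctx) // uses Application Default Credentials
//		if err != nil {
//			log.Fatal(err)
//		}
//		// MaxResults sets the page size; Pages follows nextPageToken automatically.
//		call := svc.Instances.ListReferrers("my-project", "us-central1-a", "my-instance").
//			MaxResults(100)
//		err = call.Pages(ctx, func(page *compute.InstanceListReferrers) error {
//			for _, ref := range page.Items {
//				fmt.Println(ref.Referrer)
//			}
//			return nil // a non-nil error here halts the iteration
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//	}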
+func (c *InstancesRemoveResourcePoliciesCall) RequestId(requestId string) *InstancesRemoveResourcePoliciesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeletePerInstanceConfigsCall { +func (c *InstancesRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *InstancesRemoveResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83635,21 +99781,21 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleap // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersDeletePerInstanceConfigsCall { +func (c *InstancesRemoveResourcePoliciesCall) Context(ctx context.Context) *InstancesRemoveResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header { +func (c *InstancesRemoveResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -83657,14 +99803,14 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteperinstanceconfigsreq) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesremoveresourcepoliciesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/removeResourcePolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -83672,38 +99818,38 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.deletePerInstanceConfigs" call. +// Do executes the "compute.instances.removeResourcePolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -83717,19 +99863,20 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Deletes selected per-instance configs for the managed instance group.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + // "description": "Removes resource policies from an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/removeResourcePolicies", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.deletePerInstanceConfigs", + // "id": "compute.instances.removeResourcePolicies", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "instance": { + // "description": "The instance name for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -83740,16 +99887,22 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/removeResourcePolicies", // "request": { - // "$ref": "InstanceGroupManagersDeletePerInstanceConfigsReq" + // "$ref": "InstancesRemoveResourcePoliciesRequest" // }, // "response": { // "$ref": "Operation" @@ -83762,125 +99915,126 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi } -// method id "compute.instanceGroupManagers.get": +// method id "compute.instances.reset": -type InstanceGroupManagersGetCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesResetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. Gets a list of available managed instance groups by making a -// list() request. +// Reset: Performs a reset on the instance. This is a hard reset. The VM +// does not do a graceful shutdown. For more information, see Resetting +// an instance. // -// - instanceGroupManager: The name of the managed instance group. +// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. -func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { - c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { + c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager + c.instance = instance + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { +func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { +func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersGetCall) Header() http.Header { +func (c *InstancesResetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/reset") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +// Do executes the "compute.instances.reset" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceGroupManager{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83892,19 +100046,20 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.get", + // "description": "Performs a reset on the instance. This is a hard reset. The VM does not do a graceful shutdown. For more information, see Resetting an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/reset", + // "httpMethod": "POST", + // "id": "compute.instances.reset", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -83915,56 +100070,54 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/reset", // "response": { - // "$ref": "InstanceGroupManager" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.insert": +// method id "compute.instances.resume": -type InstanceGroupManagersInsertCall struct { - s *Service - project string - zone string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesResumeCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, instances in -// the group are created using the specified instance template. This -// operation is marked as DONE when the group is created even if the -// instances in the group have not yet been created. You must separately -// verify the status of the individual instances with the -// listmanagedinstances method. A managed instance group can have up to -// 1000 VM instances per group. Please contact Cloud Support if you need -// an increase in this limit. +// Resume: Resumes an instance that was suspended using the +// instances().suspend method. // +// - instance: Name of the instance resource to resume. // - project: Project ID for this request. -// - zone: The name of the zone where you want to create the managed -// instance group. -func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { - c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) Resume(project string, zone string, instance string) *InstancesResumeCall { + c := &InstancesResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instancegroupmanager = instancegroupmanager + c.instance = instance return c } @@ -83979,7 +100132,7 @@ func (r *InstanceGroupManagersService) Insert(project string, zone string, insta // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
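// Editor's example (not part of this patch): a sketch of the optional
// requestId parameter described above, so that a retried reset is treated
// as the same operation. It continues from the listReferrers sketch
// earlier (reusing svc and ctx), assumes the github.com/google/uuid
// package for generating the UUID, and uses placeholder resource names.
//
//	reqID := uuid.New().String() // any valid, non-zero UUID
//	op, err := svc.Instances.Reset("my-project", "us-central1-a", "my-instance").
//		RequestId(reqID).
//		Context(ctx).
//		Do()
//	if err != nil {
//		// Retrying with the same reqID is safe: if the original request
//		// already completed, the server ignores the duplicate.
//		op, err = svc.Instances.Reset("my-project", "us-central1-a", "my-instance").
//			RequestId(reqID).
//			Context(ctx).
//			Do()
//	}
//	_ = op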
-func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { +func (c *InstancesResumeCall) RequestId(requestId string) *InstancesResumeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -83987,7 +100140,7 @@ func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { +func (c *InstancesResumeCall) Fields(s ...googleapi.Field) *InstancesResumeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83995,21 +100148,21 @@ func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { +func (c *InstancesResumeCall) Context(ctx context.Context) *InstancesResumeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersInsertCall) Header() http.Header { +func (c *InstancesResumeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesResumeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -84017,14 +100170,9 @@ func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/resume") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -84032,37 +100180,38 @@ func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.insert" call. +// Do executes the "compute.instances.resume" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
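// Editor's example (not part of this patch): a sketch of resuming a
// suspended instance and polling the returned zonal Operation until it is
// DONE. It continues from the earlier sketches (reusing svc and ctx) and
// additionally needs the "time" import; the fixed two-second sleep and
// the placeholder resource names are illustrative only.
//
//	op, err := svc.Instances.Resume("my-project", "us-central1-a", "my-instance").
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for op.Status != "DONE" {
//		time.Sleep(2 * time.Second)
//		op, err = svc.ZoneOperations.Get("my-project", "us-central1-a", op.Name).
//			Context(ctx).
//			Do()
//		if err != nil {
//			log.Fatal(err)
//		}
//	}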
-func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -84076,15 +100225,23 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method. A managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers", + // "description": "Resumes an instance that was suspended using the instances().suspend method.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/resume", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.insert", + // "id": "compute.instances.resume", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance resource to resume.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -84098,16 +100255,14 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/resume", // "response": { // "$ref": "Operation" // }, @@ -84119,230 +100274,109 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.list": +// method id "compute.instances.sendDiagnosticInterrupt": -type InstanceGroupManagersListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSendDiagnosticInterruptCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a 
list of managed instance groups that are contained -// within the specified project and zone. +// SendDiagnosticInterrupt: Sends diagnostic interrupt to the instance. // +// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. -func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { - c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) SendDiagnosticInterrupt(project string, zone string, instance string) *InstancesSendDiagnosticInterruptCall { + c := &InstancesSendDiagnosticInterruptCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". 
This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *InstanceGroupManagersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { +func (c *InstancesSendDiagnosticInterruptCall) Fields(s ...googleapi.Field) *InstancesSendDiagnosticInterruptCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { +func (c *InstancesSendDiagnosticInterruptCall) Context(ctx context.Context) *InstancesSendDiagnosticInterruptCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
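// Editor's example (not part of this patch): a sketch of sending a
// diagnostic interrupt (an NMI) to a VM. It continues from the earlier
// sketches (reusing svc and ctx); note that, as generated below, this
// call's Do returns only an error rather than an *Operation.
//
//	if err := svc.Instances.SendDiagnosticInterrupt("my-project", "us-central1-a", "my-instance").
//		Context(ctx).
//		Do(); err != nil {
//		log.Fatal(err)
//	}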
-func (c *InstanceGroupManagersListCall) Header() http.Header { +func (c *InstancesSendDiagnosticInterruptCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSendDiagnosticInterruptCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/sendDiagnosticInterrupt") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.list" call. -// Exactly one of *InstanceGroupManagerList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupManagerList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { +// Do executes the "compute.instances.sendDiagnosticInterrupt" call. +func (c *InstancesSendDiagnosticInterruptCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroupManagerList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.list", + // "description": "Sends diagnostic interrupt to the instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/sendDiagnosticInterrupt", + // "httpMethod": "POST", + // "id": "compute.instances.sendDiagnosticInterrupt", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. 
You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -84352,244 +100386,148 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" - // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers", - // "response": { - // "$ref": "InstanceGroupManagerList" - // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/sendDiagnosticInterrupt", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.listErrors": +// method id "compute.instances.setDeletionProtection": -type InstanceGroupManagersListErrorsCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetDeletionProtectionCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListErrors: Lists all errors thrown by actions on instances for a -// given managed instance group. The filter and orderBy query parameters -// are not supported. +// SetDeletionProtection: Sets deletion protection on the instance. // -// - instanceGroupManager: The name of the managed instance group. 
It -// must be a string that meets the requirements in RFC1035, or an -// unsigned long integer: must match regexp pattern: (?:a-z -// (?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. -func (r *InstanceGroupManagersService) ListErrors(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListErrorsCall { - c := &InstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. +func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { + c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InstanceGroupManagersListErrorsCall) Filter(filter string) *InstanceGroupManagersListErrorsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *InstanceGroupManagersListErrorsCall) MaxResults(maxResults int64) *InstanceGroupManagersListErrorsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. 
You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *InstanceGroupManagersListErrorsCall) OrderBy(orderBy string) *InstanceGroupManagersListErrorsCall { - c.urlParams_.Set("orderBy", orderBy) + c.resource = resource return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersListErrorsCall) PageToken(pageToken string) *InstanceGroupManagersListErrorsCall { - c.urlParams_.Set("pageToken", pageToken) +// DeletionProtection sets the optional parameter "deletionProtection": +// Whether the resource should be protected against deletion. +func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *InstanceGroupManagersListErrorsCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListErrorsCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListErrorsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListErrorsCall { +func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *InstanceGroupManagersListErrorsCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListErrorsCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListErrorsCall) Context(ctx context.Context) *InstanceGroupManagersListErrorsCall { +func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersListErrorsCall) Header() http.Header { +func (c *InstancesSetDeletionProtectionCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listErrors") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{resource}/setDeletionProtection") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.listErrors" call. -// Exactly one of *InstanceGroupManagersListErrorsResponse or error will -// be non-nil. Any non-2xx status code is an error. Response headers are -// in either -// *InstanceGroupManagersListErrorsResponse.ServerResponse.Header or (if -// a response was returned at all) in error.(*googleapi.Error).Header. -// Use googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListErrorsResponse, error) { +// Do executes the "compute.instances.setDeletionProtection" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceGroupManagersListErrorsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -84601,203 +100539,114 @@ func (c *InstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Lists all errors thrown by actions on instances for a given managed instance group. The filter and orderBy query parameters are not supported.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listErrors", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.listErrors", + // "description": "Sets deletion protection on the instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{resource}/setDeletionProtection", + // "httpMethod": "POST", + // "id": "compute.instances.setDeletionProtection", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "deletionProtection": { + // "default": "true", + // "description": "Whether the resource should be protected against deletion.", // "location": "query", - // "type": "string" + // "type": "boolean" // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group. 
It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" - // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listErrors", + // "path": "projects/{project}/zones/{zone}/instances/{resource}/setDeletionProtection", // "response": { - // "$ref": "InstanceGroupManagersListErrorsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListErrorsCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListErrorsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.listManagedInstances": +// method id "compute.instances.setDiskAutoDelete": -type InstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetDiskAutoDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListManagedInstances: Lists all of the instances in the managed -// instance group. Each instance in the list has a currentAction, which -// indicates the action that the managed instance group is performing on -// the instance. For example, if the group is still creating an -// instance, the currentAction is CREATING. If a previous action failed, -// the list displays the errors for that failed action. The orderBy -// query parameter is not supported. +// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to +// an instance. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. -func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { - c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - autoDelete: Whether to auto-delete the disk when the instance is +// deleted. +// - deviceName: The device name of the disk to modify. Make a get() +// request on the instance to view currently attached disks and device +// names. +// - instance: The instance name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
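// [editorial example — not part of the vendored patch]
// A minimal sketch of how the two calls diffed above are typically driven,
// assuming the generated v1 surface (google.golang.org/api/compute/v1),
// Application Default Credentials, and placeholder project/zone/instance
// names. Per the signatures in this hunk, SendDiagnosticInterrupt's Do
// returns only an error, while SetDeletionProtection's Do returns a zonal
// *compute.Operation; the SendDiagnosticInterrupt constructor is assumed to
// mirror the other Instances call builders.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Send a diagnostic interrupt (NMI) to a running VM.
	if err := svc.Instances.
		SendDiagnosticInterrupt("my-project", "us-central1-a", "my-vm").
		Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}

	// Turn on deletion protection for the same VM.
	op, err := svc.Instances.
		SetDeletionProtection("my-project", "us-central1-a", "my-vm").
		DeletionProtection(true).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("setDeletionProtection operation %s is %s", op.Name, op.Status)
}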
+func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { + c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. 
Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("pageToken", pageToken) + c.instance = instance + c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) + c.urlParams_.Set("deviceName", deviceName) return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *InstanceGroupManagersListManagedInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { +func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -84805,21 +100654,21 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { +func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { +func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -84829,7 +100678,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (* var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -84837,42 +100686,40 @@ func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (* } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. -// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head -// er or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { +// Do executes the "compute.instances.setDiskAutoDelete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceGroupManagersListManagedInstancesResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -84884,43 +100731,36 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "description": "Sets the auto-delete flag for a disk attached to an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.listManagedInstances", + // "id": "compute.instances.setDiskAutoDelete", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance", + // "autoDelete", + // "deviceName" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "autoDelete": { + // "description": "Whether to auto-delete the disk when the instance is deleted.", // "location": "query", - // "type": "string" - // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" + // "type": "boolean" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "deviceName": { + // "description": "The device name of the disk to modify. Make a get() request on the instance to view currently attached disks and device names.", // "location": "query", + // "pattern": "\\w[\\w.-]{0,254}", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -84930,154 +100770,63 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", // "response": { - // "$ref": "InstanceGroupManagersListManagedInstancesResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListManagedInstancesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.listPerInstanceConfigs": +// method id "compute.instances.setIamPolicy": -type InstanceGroupManagersListPerInstanceConfigsCall struct { +type InstancesSetIamPolicyCall struct { s *Service project string zone string - instanceGroupManager string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// ListPerInstanceConfigs: Lists all of the per-instance configs defined -// for the managed instance group. The orderBy query parameter is not -// supported. +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. -func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListPerInstanceConfigsCall { - c := &InstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. +func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstancesSetIamPolicyCall { + c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. 
The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *InstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *InstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *InstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *InstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. 
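// [editorial example — not part of the vendored patch]
// A minimal sketch for the compute.instances.setDiskAutoDelete call whose
// diff appears earlier in this hunk, assuming the generated v1 client, an
// already-authenticated *compute.Service, and the context/log/compute imports
// from the previous sketch. As the doc comment notes, deviceName must match a
// device reported by a get() on the instance; "persistent-disk-1" is only a
// placeholder.
func enableDiskAutoDelete(ctx context.Context, svc *compute.Service) error {
	op, err := svc.Instances.
		SetDiskAutoDelete("my-project", "us-central1-a", "my-vm",
			true,                // autoDelete: delete the disk with the instance
			"persistent-disk-1", // deviceName as shown by a get() on the instance
		).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("setDiskAutoDelete operation %s is %s", op.Name, op.Status)
	return nil
}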
-func (c *InstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *InstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListPerInstanceConfigsCall { +func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -85085,21 +100834,21 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersListPerInstanceConfigsCall { +func (c *InstancesSetIamPolicyCall) Context(ctx context.Context) *InstancesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { +func (c *InstancesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -85107,9 +100856,14 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{resource}/setIamPolicy") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -85117,42 +100871,40 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.listPerInstanceConfigs" call. -// Exactly one of *InstanceGroupManagersListPerInstanceConfigsResp or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *InstanceGroupManagersListPerInstanceConfigsResp.ServerResponse.Header -// or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListPerInstanceConfigsResp, error) { +// Do executes the "compute.instances.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceGroupManagersListPerInstanceConfigsResp{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -85164,45 +100916,16 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{resource}/setIamPolicy", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.listPerInstanceConfigs", + // "id": "compute.instances.setIamPolicy", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -85210,87 +100933,61 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + // "path": "projects/{project}/zones/{zone}/instances/{resource}/setIamPolicy", + // "request": { + // "$ref": "ZoneSetPolicyRequest" + // }, // "response": { - // "$ref": "InstanceGroupManagersListPerInstanceConfigsResp" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListPerInstanceConfigsResp) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.patch": +// method id "compute.instances.setLabels": -type InstanceGroupManagersPatchCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetLabelsCall struct { + s *Service + project string + zone string + instance string + instancessetlabelsrequest *InstancesSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listManagedInstances method. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. If you update your group to specify a new -// template or instance configuration, it's possible that your intended -// specification for each VM in the group is different from the current -// state of that VM. To learn how to apply an updated configuration to -// the VMs in a MIG, see Updating instances in a MIG. +// SetLabels: Sets labels on an instance. To learn more about labels, +// read the Labeling Resources documentation. // -// - instanceGroupManager: The name of the instance group manager. +// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone where you want to create the managed -// instance group. -func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { - c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. 
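// [editorial example — not part of the vendored patch]
// A minimal sketch for compute.instances.setIamPolicy, whose diff ends just
// above. It assumes the generated v1 client, an authenticated
// *compute.Service, and the imports from the first sketch; the role and
// member are placeholders. Because setIamPolicy replaces the existing policy
// wholesale, real code would normally read the current policy first and edit
// its bindings rather than write a fresh one as shown here.
func grantInstanceRole(ctx context.Context, svc *compute.Service) error {
	req := &compute.ZoneSetPolicyRequest{
		Policy: &compute.Policy{
			Bindings: []*compute.Binding{{
				Role:    "roles/compute.osLogin",
				Members: []string{"user:alice@example.com"},
			}},
		},
	}
	policy, err := svc.Instances.
		SetIamPolicy("my-project", "us-central1-a", "my-vm", req).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("instance policy etag is now %s", policy.Etag)
	return nil
}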
+func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { + c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.instance = instance + c.instancessetlabelsrequest = instancessetlabelsrequest return c } @@ -85305,7 +101002,7 @@ func (r *InstanceGroupManagersService) Patch(project string, zone string, instan // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { +func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -85313,7 +101010,7 @@ func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { +func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -85321,21 +101018,21 @@ func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { +func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersPatchCall) Header() http.Header { +func (c *InstancesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -85343,53 +101040,53 @@ func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setLabels") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.patch" call. +// Do executes the "compute.instances.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -85403,19 +101100,20 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - // "httpMethod": "PATCH", - // "id": "compute.instanceGroupManagers.patch", + // "description": "Sets labels on an instance. 
To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setLabels", + // "httpMethod": "POST", + // "id": "compute.instances.setLabels", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -85432,15 +101130,16 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setLabels", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "InstancesSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -85453,34 +101152,31 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instanceGroupManagers.patchPerInstanceConfigs": +// method id "compute.instances.setMachineResources": -type InstanceGroupManagersPatchPerInstanceConfigsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerspatchperinstanceconfigsreq *InstanceGroupManagersPatchPerInstanceConfigsReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMachineResourcesCall struct { + s *Service + project string + zone string + instance string + instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PatchPerInstanceConfigs: Inserts or patches per-instance configs for -// the managed instance group. perInstanceConfig.name serves as a key -// used to distinguish whether to perform insert or patch. +// SetMachineResources: Changes the number and/or type of accelerator +// for a stopped instance to the values specified in the request. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. +// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. -func (r *InstanceGroupManagersService) PatchPerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagerspatchperinstanceconfigsreq *InstanceGroupManagersPatchPerInstanceConfigsReq) *InstanceGroupManagersPatchPerInstanceConfigsCall { - c := &InstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. 
+func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { + c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerspatchperinstanceconfigsreq = instancegroupmanagerspatchperinstanceconfigsreq + c.instance = instance + c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest return c } @@ -85495,7 +101191,7 @@ func (r *InstanceGroupManagersService) PatchPerInstanceConfigs(project string, z // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(requestId string) *InstanceGroupManagersPatchPerInstanceConfigsCall { +func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -85503,7 +101199,7 @@ func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(requestId s // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchPerInstanceConfigsCall { +func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -85511,21 +101207,21 @@ func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...googleapi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersPatchPerInstanceConfigsCall { +func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header { +func (c *InstancesSetMachineResourcesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -85533,14 +101229,14 @@ func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerspatchperinstanceconfigsreq) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setMachineResources") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -85548,38 +101244,38 @@ func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.patchPerInstanceConfigs" call. +// Do executes the "compute.instances.setMachineResources" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -85593,19 +101289,20 @@ func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Inserts or patches per-instance configs for the managed instance group. 
perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", + // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setMachineResources", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.patchPerInstanceConfigs", + // "id": "compute.instances.setMachineResources", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -85622,15 +101319,16 @@ func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi. // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setMachineResources", // "request": { - // "$ref": "InstanceGroupManagersPatchPerInstanceConfigsReq" + // "$ref": "InstancesSetMachineResourcesRequest" // }, // "response": { // "$ref": "Operation" @@ -85643,41 +101341,31 @@ func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi. } -// method id "compute.instanceGroupManagers.recreateInstances": +// method id "compute.instances.setMachineType": -type InstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMachineTypeCall struct { + s *Service + project string + zone string + instance string + instancessetmachinetyperequest *InstancesSetMachineTypeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RecreateInstances: Flags the specified VM instances in the managed -// instance group to be immediately recreated. Each instance is -// recreated using the group's current configuration. This operation is -// marked as DONE when the flag is set even if the instances have not -// yet been recreated. You must separately verify the status of each -// instance by checking its currentAction field; for more information, -// see Checking the status of managed instances. If the group is part of -// a backend service that has enabled connection draining, it can take -// up to 60 seconds after the connection draining duration has elapsed -// before the VM instance is removed or deleted. You can specify a -// maximum of 1000 instances with this method per request. 
+// SetMachineType: Changes the machine type for a stopped instance to +// the machine type specified in the request. // -// - instanceGroupManager: The name of the managed instance group. +// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. -func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { - c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { + c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest + c.instance = instance + c.instancessetmachinetyperequest = instancessetmachinetyperequest return c } @@ -85692,7 +101380,7 @@ func (r *InstanceGroupManagersService) RecreateInstances(project string, zone st // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -85700,7 +101388,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -85708,21 +101396,21 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *InstancesSetMachineTypeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -85730,14 +101418,14 @@ func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*htt } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setMachineType") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -85745,38 +101433,38 @@ func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*htt } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Do executes the "compute.instances.setMachineType" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -85790,19 +101478,20 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. 
You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setMachineType", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.recreateInstances", + // "id": "compute.instances.setMachineType", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -85819,15 +101508,16 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setMachineType", // "request": { - // "$ref": "InstanceGroupManagersRecreateInstancesRequest" + // "$ref": "InstancesSetMachineTypeRequest" // }, // "response": { // "$ref": "Operation" @@ -85840,49 +101530,31 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } -// method id "compute.instanceGroupManagers.resize": +// method id "compute.instances.setMetadata": -type InstanceGroupManagersResizeCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMetadataCall struct { + s *Service + project string + zone string + instance string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the managed instance group. If you increase the size, -// the group creates new instances using the current instance template. -// If you decrease the size, the group deletes instances. The resize -// operation is marked DONE when the resize actions are scheduled even -// if the group has not yet added or deleted any instances. You must -// separately verify the status of the creating or deleting actions with -// the listmanagedinstances method. When resizing down, the instance -// group arbitrarily chooses the order in which VMs are deleted. The -// group takes into account some VM attributes when making the selection -// including: + The status of the VM instance. + The health of the VM -// instance. + The instance template version the VM is based on. 
+ For -// regional managed instance groups, the location of the VM instance. -// This list is subject to change. If the group is part of a backend -// service that has enabled connection draining, it can take up to 60 -// seconds after the connection draining duration has elapsed before the -// VM instance is removed or deleted. +// SetMetadata: Sets metadata for the specified instance to the data +// included in the request. // -// - instanceGroupManager: The name of the managed instance group. +// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - size: The number of running instances that the managed instance -// group should maintain at any given time. The group automatically -// adds or removes instances to maintain the number of instances -// specified by this parameter. -// - zone: The name of the zone where the managed instance group is -// located. -func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { - c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { + c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) + c.instance = instance + c.metadata = metadata return c } @@ -85897,7 +101569,7 @@ func (r *InstanceGroupManagersService) Resize(project string, zone string, insta // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { +func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { c.urlParams_.Set("requestId", requestId) return c } @@ -85905,7 +101577,7 @@ func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { +func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -85913,21 +101585,21 @@ func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { +func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersResizeCall) Header() http.Header { +func (c *InstancesSetMetadataCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -85935,9 +101607,14 @@ func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setMetadata") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -85945,38 +101622,38 @@ func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.resize" call. +// Do executes the "compute.instances.setMetadata" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -85990,20 +101667,20 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method. 
When resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including: + The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance. This list is subject to change. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "description": "Sets metadata for the specified instance to the data included in the request.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setMetadata", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resize", + // "id": "compute.instances.setMetadata", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager", - // "size" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -86019,21 +101696,18 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope // "location": "query", // "type": "string" // }, - // "size": { - // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - // "format": "int32", - // "location": "query", - // "required": true, - // "type": "integer" - // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setMetadata", + // "request": { + // "$ref": "Metadata" + // }, // "response": { // "$ref": "Operation" // }, @@ -86045,35 +101719,33 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.setInstanceTemplate": +// method id "compute.instances.setMinCpuPlatform": -type InstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMinCpuPlatformCall struct { + s *Service + project string + zone string + instance string + instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Specifies the instance template to use when -// creating new instances in this group. 
The templates for existing -// instances in the group do not change unless you run -// recreateInstances, run applyUpdatesToInstances, or set the group's -// updatePolicy.type to PROACTIVE. +// SetMinCpuPlatform: Changes the minimum CPU platform that this +// instance should use. This method can only be called on a stopped +// instance. For more information, read Specifying a Minimum CPU +// Platform. // -// - instanceGroupManager: The name of the managed instance group. +// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. -func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { - c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { + c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest + c.instance = instance + c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest return c } @@ -86088,7 +101760,7 @@ func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { c.urlParams_.Set("requestId", requestId) return c } @@ -86096,7 +101768,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId strin // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86104,21 +101776,21 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Fie // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -86126,14 +101798,14 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*h } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -86141,38 +101813,38 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*h } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. +// Do executes the "compute.instances.setMinCpuPlatform" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -86186,19 +101858,20 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Specifies the instance template to use when creating new instances in this group. 
The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setInstanceTemplate", + // "id": "compute.instances.setMinCpuPlatform", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -86215,15 +101888,16 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", // "request": { - // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + // "$ref": "InstancesSetMinCpuPlatformRequest" // }, // "response": { // "$ref": "Operation" @@ -86236,37 +101910,35 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } -// method id "compute.instanceGroupManagers.setTargetPools": +// method id "compute.instances.setScheduling": -type InstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetSchedulingCall struct { + s *Service + project string + zone string + instance string + scheduling *Scheduling + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all instances in -// this managed instance group are assigned. The target pools -// automatically apply to all of the instances in the managed instance -// group. This operation is marked DONE when you make the request even -// if the instances have not yet been added to their target pools. The -// change might take some time to apply to all of the instances in the -// group depending on the size of the group. +// SetScheduling: Sets an instance's scheduling options. You can only +// call this method on a stopped instance, that is, a VM instance that +// is in a `TERMINATED` state. See Instance Life Cycle for more +// information on the possible instance states. For more information +// about setting scheduling options for a VM, see Set VM host +// maintenance policy. 
// -// - instanceGroupManager: The name of the managed instance group. +// - instance: Instance name for this request. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. -func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { - c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { + c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + c.instance = instance + c.scheduling = scheduling return c } @@ -86281,7 +101953,7 @@ func (r *InstanceGroupManagersService) SetTargetPools(project string, zone strin // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { c.urlParams_.Set("requestId", requestId) return c } @@ -86289,7 +101961,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *I // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86297,21 +101969,21 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *InstancesSetSchedulingCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -86319,14 +101991,14 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.R } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setScheduling") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -86334,38 +102006,38 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.R } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Do executes the "compute.instances.setScheduling" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -86379,19 +102051,20 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. 
The change might take some time to apply to all of the instances in the group depending on the size of the group.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "description": "Sets an instance's scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. For more information about setting scheduling options for a VM, see Set VM host maintenance policy.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setTargetPools", + // "id": "compute.instances.setScheduling", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Instance name for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -86408,15 +102081,16 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", // "request": { - // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + // "$ref": "Scheduling" // }, // "response": { // "$ref": "Operation" @@ -86429,34 +102103,32 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } -// method id "compute.instanceGroupManagers.updatePerInstanceConfigs": +// method id "compute.instances.setServiceAccount": -type InstanceGroupManagersUpdatePerInstanceConfigsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetServiceAccountCall struct { + s *Service + project string + zone string + instance string + instancessetserviceaccountrequest *InstancesSetServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdatePerInstanceConfigs: Inserts or updates per-instance configs for -// the managed instance group. perInstanceConfig.name serves as a key -// used to distinguish whether to perform insert or patch. +// SetServiceAccount: Sets the service account on the instance. For more +// information, read Changing the service account and access scopes for +// an instance. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. +// - instance: Name of the instance resource to start. // - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. 
-func (r *InstanceGroupManagersService) UpdatePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq) *InstanceGroupManagersUpdatePerInstanceConfigsCall { - c := &InstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { + c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersupdateperinstanceconfigsreq = instancegroupmanagersupdateperinstanceconfigsreq + c.instance = instance + c.instancessetserviceaccountrequest = instancessetserviceaccountrequest return c } @@ -86471,7 +102143,7 @@ func (r *InstanceGroupManagersService) UpdatePerInstanceConfigs(project string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *InstanceGroupManagersUpdatePerInstanceConfigsCall { +func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { c.urlParams_.Set("requestId", requestId) return c } @@ -86479,7 +102151,7 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdatePerInstanceConfigsCall { +func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86487,21 +102159,21 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleap // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersUpdatePerInstanceConfigsCall { +func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { +func (c *InstancesSetServiceAccountCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -86509,14 +102181,14 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersupdateperinstanceconfigsreq) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -86524,38 +102196,38 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.updatePerInstanceConfigs" call. +// Do executes the "compute.instances.setServiceAccount" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -86569,19 +102241,20 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Inserts or updates per-instance configs for the managed instance group. 
perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.updatePerInstanceConfigs", + // "id": "compute.instances.setServiceAccount", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "instance": { + // "description": "Name of the instance resource to start.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -86598,15 +102271,16 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", // "request": { - // "$ref": "InstanceGroupManagersUpdatePerInstanceConfigsReq" + // "$ref": "InstancesSetServiceAccountRequest" // }, // "response": { // "$ref": "Operation" @@ -86619,33 +102293,33 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi } -// method id "compute.instanceGroups.addInstances": +// method id "compute.instances.setShieldedInstanceIntegrityPolicy": -type InstanceGroupsAddInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetShieldedInstanceIntegrityPolicyCall struct { + s *Service + project string + zone string + instance string + shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddInstances: Adds a list of instances to the specified instance -// group. All of the instances in the instance group must be in the same -// network/subnetwork. Read Adding instances for more information. +// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance +// integrity policy for an instance. You can only use this method on a +// running instance. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. // -// - instanceGroup: The name of the instance group where you are adding -// instances. +// - instance: Name or id of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. 
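// Hedged usage sketch, not from the vendored file: the compute.instances.setServiceAccount
// wrapper whose metadata ends above takes an *InstancesSetServiceAccountRequest and returns
// an *Operation. The Email/Scopes field names are assumed from the public API, and the
// target instance must be stopped before the call succeeds.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// swapServiceAccount is an illustrative helper, not part of the generated client.
func swapServiceAccount(ctx context.Context, svc *compute.Service, project, zone, instance string) (*compute.Operation, error) {
	req := &compute.InstancesSetServiceAccountRequest{
		Email:  "runtime-sa@my-project.iam.gserviceaccount.com", // placeholder service account
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	}
	return svc.Instances.SetServiceAccount(project, zone, instance, req).Context(ctx).Do()
}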
-func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { - c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest + c.instance = instance + c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy return c } @@ -86660,7 +102334,7 @@ func (r *InstanceGroupsService) AddInstances(project string, zone string, instan // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -86668,7 +102342,7 @@ func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86676,21 +102350,21 @@ func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsAddInstancesCall) Header() http.Header { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -86698,53 +102372,53 @@ func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.addInstances" call. +// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -86758,19 +102432,20 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. 
Read Adding instances for more information.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.addInstances", + // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + // "httpMethod": "PATCH", + // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where you are adding instances.", + // "instance": { + // "description": "Name or id of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -86787,15 +102462,16 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", // "request": { - // "$ref": "InstanceGroupsAddInstancesRequest" + // "$ref": "ShieldedInstanceIntegrityPolicy" // }, // "response": { // "$ref": "Operation" @@ -86808,197 +102484,132 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instanceGroups.aggregatedList": +// method id "compute.instances.setTags": -type InstanceGroupsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetTagsCall struct { + s *Service + project string + zone string + instance string + tags *Tags + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves the list of instance groups and sorts them -// by zone. +// SetTags: Sets network tags for the specified instance to the data +// included in the request. // +// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { - c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { + c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instance = instance + c.tags = tags return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. 
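// Hedged sketch, not from the vendored file: compute.instances.setShieldedInstanceIntegrityPolicy,
// added above, is sent as an HTTP PATCH with JSON merge-patch semantics and only works on a
// running instance. The UpdateAutoLearnPolicy field name is assumed from the public API resource.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// relearnIntegrityBaseline is an illustrative helper, not part of the generated client.
func relearnIntegrityBaseline(ctx context.Context, svc *compute.Service, project, zone, instance string) (*compute.Operation, error) {
	policy := &compute.ShieldedInstanceIntegrityPolicy{
		UpdateAutoLearnPolicy: true, // assumed field: asks the VM to re-learn its integrity baseline
	}
	return svc.Instances.
		SetShieldedInstanceIntegrityPolicy(project, zone, instance, policy).
		Context(ctx).
		Do()
}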
The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *InstanceGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. 
-func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *InstanceGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { +func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { +func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsAggregatedListCall) Header() http.Header { +func (c *InstancesSetTagsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setTags") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.aggregatedList" call. -// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { +// Do executes the "compute.instances.setTags" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceGroupAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -87010,40 +102621,21 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In } return ret, nil // { - // "description": "Retrieves the list of instance groups and sorts them by zone.", - // "flatPath": "projects/{project}/aggregated/instanceGroups", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.aggregatedList", + // "description": "Sets network tags for the specified instance to the data included in the request.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setTags", + // "httpMethod": "POST", + // "id": "compute.instances.setTags", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -87053,94 +102645,64 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/aggregated/instanceGroups", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setTags", + // "request": { + // "$ref": "Tags" + // }, // "response": { - // "$ref": "InstanceGroupAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.delete": +// method id "compute.instances.simulateMaintenanceEvent": -type InstanceGroupsDeleteCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSimulateMaintenanceEventCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance group. The instances in the -// group are not deleted. Note that instance group must not belong to a -// backend service. Read Deleting an instance group for more -// information. +// SimulateMaintenanceEvent: Simulates a host maintenance event on a VM. +// For more information, see Simulate a host maintenance event. // -// - instanceGroup: The name of the instance group to delete. +// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. -func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { - c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { + c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
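// Hedged usage sketch, not part of the vendored diff: the compute.instances.setTags wrapper
// documented above replaces the instance's network tags wholesale. The API rejects the call
// unless the Tags fingerprint matches the instance's current value, so this sketch reads it
// first via Instances.Get; project, zone, and instance names are placeholders.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	const project, zone, instance = "my-project", "us-central1-a", "my-instance"

	inst, err := svc.Instances.Get(project, zone, instance).Context(ctx).Do()
	if err != nil {
		log.Fatalf("get instance: %v", err)
	}
	fingerprint := ""
	if inst.Tags != nil {
		fingerprint = inst.Tags.Fingerprint // must match the current fingerprint
	}

	tags := &compute.Tags{
		Items:       []string{"http-server", "https-server"},
		Fingerprint: fingerprint,
	}

	op, err := svc.Instances.SetTags(project, zone, instance, tags).Context(ctx).Do()
	if err != nil {
		log.Fatalf("setTags: %v", err)
	}
	log.Println("operation:", op.Name)
}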
-func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { +func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -87148,21 +102710,21 @@ func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { +func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsDeleteCall) Header() http.Header { +func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -87172,46 +102734,46 @@ func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.delete" call. +// Do executes the "compute.instances.simulateMaintenanceEvent" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -87225,19 +102787,20 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroups.delete", + // "description": "Simulates a host maintenance event on a VM. For more information, see Simulate a host maintenance event.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "httpMethod": "POST", + // "id": "compute.instances.simulateMaintenanceEvent", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group to delete.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -87248,19 +102811,15 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", // "response": { // "$ref": "Operation" // }, @@ -87272,125 +102831,125 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instanceGroups.get": +// method id "compute.instances.start": -type InstanceGroupsGetCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesStartCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified zonal instance group. Get a list of -// available zonal instance groups by making a list() request. For -// managed instance groups, use the instanceGroupManagers or -// regionInstanceGroupManagers methods instead. +// Start: Starts an instance that was stopped using the instances().stop +// method. For more information, see Restart an instance. // -// - instanceGroup: The name of the instance group. +// - instance: Name of the instance resource to start. // - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. -func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { - c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { + c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup + c.instance = instance + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
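// Hedged usage sketch, not part of the vendored diff: compute.instances.start, declared above,
// returns an *Operation and accepts an optional requestId so retries stay idempotent. In this
// client version Do wraps HTTP errors with gensupport.WrapError (visible in the hunks above);
// the wrapped error is still expected to unwrap to *googleapi.Error via errors.As.
// github.com/google/uuid and ZoneOperations.Wait are assumed, commonly used companions.
package main

import (
	"context"
	"errors"
	"log"

	"github.com/google/uuid"
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	op, err := svc.Instances.Start("my-project", "us-central1-a", "my-instance").
		RequestId(uuid.NewString()). // safe to retry with the same ID
		Context(ctx).
		Do()
	if err != nil {
		var apiErr *googleapi.Error
		if errors.As(err, &apiErr) {
			log.Fatalf("start failed with HTTP %d: %v", apiErr.Code, apiErr)
		}
		log.Fatalf("start failed: %v", err)
	}

	// Optionally block until the zonal operation finishes.
	if _, err := svc.ZoneOperations.Wait("my-project", "us-central1-a", op.Name).Context(ctx).Do(); err != nil {
		log.Fatalf("wait: %v", err)
	}
}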
-func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { +func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { +func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsGetCall) Header() http.Header { +func (c *InstancesStartCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/start") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.start" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceGroup{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -87402,19 +102961,20 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup } return ret, nil // { - // "description": "Returns the specified zonal instance group. Get a list of available zonal instance groups by making a list() request. For managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.get", + // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/start", + // "httpMethod": "POST", + // "id": "compute.instances.start", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group.", + // "instance": { + // "description": "Name of the instance resource to start.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -87425,49 +102985,57 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/start", // "response": { - // "$ref": "InstanceGroup" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroups.insert": +// method id "compute.instances.startWithEncryptionKey": -type InstanceGroupsInsertCall struct { - s *Service - project string - zone string - instancegroup *InstanceGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesStartWithEncryptionKeyCall struct { + s *Service + project string + zone string + instance string + instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance group in the specified project using the -// parameters that are included in the request. +// StartWithEncryptionKey: Starts an instance that was stopped using the +// instances().stop method. For more information, see Restart an +// instance. // +// - instance: Name of the instance resource to start. // - project: Project ID for this request. -// - zone: The name of the zone where you want to create the instance -// group. -func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { - c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { + c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instancegroup = instancegroup + c.instance = instance + c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest return c } @@ -87482,7 +103050,7 @@ func (r *InstanceGroupsService) Insert(project string, zone string, instancegrou // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { +func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -87490,7 +103058,7 @@ func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsIn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
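// Hedged sketch, not from the vendored file: compute.instances.startWithEncryptionKey,
// introduced above, restarts a stopped instance whose disks are protected with
// customer-supplied encryption keys. The request/field names (Disks, Source,
// DiskEncryptionKey, RawKey) are assumed from the public API surface of this client.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// startWithCSEK is an illustrative helper, not part of the generated client.
func startWithCSEK(ctx context.Context, svc *compute.Service, project, zone, instance string) (*compute.Operation, error) {
	req := &compute.InstancesStartWithEncryptionKeyRequest{
		Disks: []*compute.CustomerEncryptionKeyProtectedDisk{{
			Source: "projects/my-project/zones/us-central1-a/disks/boot-disk", // placeholder disk URL
			DiskEncryptionKey: &compute.CustomerEncryptionKey{
				// Base64-encoded 256-bit key; this is the well-known documentation example value.
				RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
			},
		}},
	}
	return svc.Instances.StartWithEncryptionKey(project, zone, instance, req).Context(ctx).Do()
}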
-func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { +func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -87498,21 +103066,21 @@ func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { +func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsInsertCall) Header() http.Header { +func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -87520,14 +103088,14 @@ func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -87535,37 +103103,38 @@ func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.insert" call. +// Do executes the "compute.instances.startWithEncryptionKey" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -87579,15 +103148,23 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroups", + // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", // "httpMethod": "POST", - // "id": "compute.instanceGroups.insert", + // "id": "compute.instances.startWithEncryptionKey", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -87601,15 +103178,16 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the instance group.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroups", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", // "request": { - // "$ref": "InstanceGroup" + // "$ref": "InstancesStartWithEncryptionKeyRequest" // }, // "response": { // "$ref": "Operation" @@ -87622,189 +103200,137 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instanceGroups.list": +// method id "compute.instances.stop": -type InstanceGroupsListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesStopCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of zonal instance group resources contained -// within the specified zone. For managed instance groups, use the -// instanceGroupManagers or regionInstanceGroupManagers methods instead. +// Stop: Stops a running instance, shutting it down cleanly, and allows +// you to restart the instance at a later time. Stopped instances do not +// incur VM usage charges while they are stopped. However, resources +// that the VM is using, such as persistent disks and static IP +// addresses, will continue to be charged until they are deleted. For +// more information, see Stopping an instance. 
// +// - instance: Name of the instance resource to stop. // - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. -func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { - c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { + c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone + c.instance = instance return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. 
-func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If +// true, discard the contents of any attached localSSD partitions. +// Default value is false. +func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { + c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *InstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { +func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { +func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsListCall) Header() http.Header { +func (c *InstancesStopCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/stop") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.list" call. -// Exactly one of *InstanceGroupList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { +// Do executes the "compute.instances.stop" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceGroupList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -87816,36 +103342,26 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou } return ret, nil // { - // "description": "Retrieves the list of zonal instance group resources contained within the specified zone. 
For managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroups", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.list", + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/stop", + // "httpMethod": "POST", + // "id": "compute.instances.stop", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "discardLocalSsd": { + // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "instance": { + // "description": "Name of the instance resource to stop.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -87855,154 +103371,91 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroups", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/stop", // "response": { - // "$ref": "InstanceGroupList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
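A hedged sketch of the regenerated compute.instances.stop call follows, again assuming an authenticated *compute.Service and placeholder resource names; discardLocalSsd is the optional query parameter introduced in this regeneration.

```go
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Stop the instance and discard the contents of any attached local SSDs.
	op, err := svc.Instances.Stop("my-project", "us-central1-a", "my-instance").
		DiscardLocalSsd(true).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stop requested, operation %s, status %s", op.Name, op.Status)
}
```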
-func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.listInstances": +// method id "compute.instances.suspend": -type InstanceGroupsListInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSuspendCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group. -// The orderBy query parameter is not supported. +// Suspend: This method suspends a running instance, saving its state to +// persistent storage, and allows you to resume the instance at a later +// time. Suspended instances have no compute costs (cores or RAM), and +// incur only storage charges for the saved VM memory and localSSD data. +// Any charged resources the virtual machine was using, such as +// persistent disks and static IP addresses, will continue to be charged +// while the instance is suspended. For more information, see Suspending +// and resuming an instance. // -// - instanceGroup: The name of the instance group from which you want -// to generate a list of included instances. +// - instance: Name of the instance resource to suspend. // - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. -func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { - c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) Suspend(project string, zone string, instance string) *InstancesSuspendCall { + c := &InstancesSuspendCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. 
For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("orderBy", orderBy) + c.instance = instance return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("pageToken", pageToken) +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If +// true, discard the contents of any attached localSSD partitions. +// Default value is false. +func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { + c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *InstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { +func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88010,21 +103463,21 @@ func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { +func (c *InstancesSuspendCall) Context(ctx context.Context) *InstancesSuspendCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsListInstancesCall) Header() http.Header { +func (c *InstancesSuspendCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -88032,14 +103485,9 @@ func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/suspend") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -88047,40 +103495,40 @@ func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.listInstances" call. -// Exactly one of *InstanceGroupsListInstances or error will be non-nil. -// Any non-2xx status code is an error. 
Response headers are in either -// *InstanceGroupsListInstances.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { +// Do executes the "compute.instances.suspend" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceGroupsListInstances{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -88092,45 +103540,28 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins } return ret, nil // { - // "description": "Lists the instances in the specified instance group. The orderBy query parameter is not supported.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + // "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. Suspended instances have no compute costs (cores or RAM), and incur only storage charges for the saved VM memory and localSSD data. Any charged resources the virtual machine was using, such as persistent disks and static IP addresses, will continue to be charged while the instance is suspended. For more information, see Suspending and resuming an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/suspend", // "httpMethod": "POST", - // "id": "compute.instanceGroups.listInstances", + // "id": "compute.instances.suspend", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "discardLocalSsd": { + // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // }, - // "instanceGroup": { - // "description": "The name of the instance group from which you want to generate a list of included instances.", + // "instance": { + // "description": "Name of the instance resource to suspend.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -88138,107 +103569,63 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", - // "request": { - // "$ref": "InstanceGroupsListInstancesRequest" - // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/suspend", // "response": { - // "$ref": "InstanceGroupsListInstances" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.removeInstances": +// method id "compute.instances.testIamPermissions": -type InstanceGroupsRemoveInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveInstances: Removes one or more instances from the specified -// instance group, but does not delete those instances. If the group is -// part of a backend service that has enabled connection draining, it -// can take up to 60 seconds after the connection draining duration -// before the VM instance is removed or deleted. +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. // -// - instanceGroup: The name of the instance group where the specified -// instances will be removed. // - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. -func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { - c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. 
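For the regenerated compute.instances.testIamPermissions method, a minimal sketch is shown below; the project, zone, instance, and permission strings are placeholders, and the response simply echoes back the subset of permissions the caller actually holds.

```go
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Ask which of the listed permissions the caller holds on this instance.
	resp, err := svc.Instances.TestIamPermissions(
		"my-project", "us-central1-a", "my-instance",
		&compute.TestPermissionsRequest{
			Permissions: []string{"compute.instances.stop", "compute.instances.suspend"},
		},
	).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("granted permissions: %v", resp.Permissions)
}
```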
+func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { + c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { +func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88246,21 +103633,21 @@ func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { +func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { +func (c *InstancesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -88268,14 +103655,14 @@ func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -88283,40 +103670,40 @@ func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.removeInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -88328,22 +103715,16 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Removes one or more instances from the specified instance group, but does not delete those instances. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{resource}/testIamPermissions", // "httpMethod": "POST", - // "id": "compute.instanceGroups.removeInstances", + // "id": "compute.instances.testIamPermissions", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "resource" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the specified instances will be removed.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -88351,58 +103732,98 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "path": "projects/{project}/zones/{zone}/instances/{resource}/testIamPermissions", // "request": { - // "$ref": "InstanceGroupsRemoveInstancesRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.setNamedPorts": +// method id "compute.instances.update": -type InstanceGroupsSetNamedPortsCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateCall struct { + s *Service + project string + zone string + instance string + instance2 *Instance + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetNamedPorts: Sets the named ports for the specified instance group. +// Update: Updates an instance only if the necessary resources are +// available. This method can update only a specific set of instance +// properties. See Updating a running instance for a list of updatable +// instance properties. // -// - instanceGroup: The name of the instance group where the named ports -// are updated. +// - instance: Name of the instance resource to update. // - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. -func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { - c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) Update(project string, zone string, instance string, instance2 *Instance) *InstancesUpdateCall { + c := &InstancesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest + c.instance = instance + c.instance2 = instance2 + return c +} + +// MinimalAction sets the optional parameter "minimalAction": Specifies +// the action to take when updating an instance even if the updated +// properties do not require it. If not specified, then Compute Engine +// acts based on the minimum action that the updated properties require. 
+// +// Possible values: +// +// "INVALID" +// "NO_EFFECT" - No changes can be made to the instance. +// "REFRESH" - The instance will not restart. +// "RESTART" - The instance will restart. +func (c *InstancesUpdateCall) MinimalAction(minimalAction string) *InstancesUpdateCall { + c.urlParams_.Set("minimalAction", minimalAction) + return c +} + +// MostDisruptiveAllowedAction sets the optional parameter +// "mostDisruptiveAllowedAction": Specifies the most disruptive action +// that can be taken on the instance as part of the update. Compute +// Engine returns an error if the instance properties require a more +// disruptive action as part of the instance update. Valid options from +// lowest to highest are NO_EFFECT, REFRESH, and RESTART. +// +// Possible values: +// +// "INVALID" +// "NO_EFFECT" - No changes can be made to the instance. +// "REFRESH" - The instance will not restart. +// "RESTART" - The instance will restart. +func (c *InstancesUpdateCall) MostDisruptiveAllowedAction(mostDisruptiveAllowedAction string) *InstancesUpdateCall { + c.urlParams_.Set("mostDisruptiveAllowedAction", mostDisruptiveAllowedAction) return c } @@ -88417,7 +103838,7 @@ func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, insta // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesUpdateCall) RequestId(requestId string) *InstancesUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -88425,7 +103846,7 @@ func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesUpdateCall) Fields(s ...googleapi.Field) *InstancesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88433,21 +103854,21 @@ func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesUpdateCall) Context(ctx context.Context) *InstancesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
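The minimalAction and mostDisruptiveAllowedAction setters above gate how disruptive a compute.instances.update is allowed to be. A speculative read-modify-write sketch, with placeholder names and a Description edit chosen purely for illustration:

```go
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	project, zone, name := "my-project", "us-central1-a", "my-instance"

	// Read the current instance, tweak a field, and write the full resource back (PUT).
	inst, err := svc.Instances.Get(project, zone, name).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	inst.Description = "updated via compute.instances.update"

	// Fail the request rather than take anything more disruptive than a restart.
	op, err := svc.Instances.Update(project, zone, name, inst).
		MostDisruptiveAllowedAction("RESTART").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("update requested, operation: %s", op.Name)
}
```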
-func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { +func (c *InstancesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -88455,53 +103876,53 @@ func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.setNamedPorts" call. +// Do executes the "compute.instances.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -88515,22 +103936,57 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Sets the named ports for the specified instance group.", - // "flatPath": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.setNamedPorts", + // "description": "Updates an instance only if the necessary resources are available. This method can update only a specific set of instance properties. 
See Updating a running instance for a list of updatable instance properties.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}", + // "httpMethod": "PUT", + // "id": "compute.instances.update", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the named ports are updated.", + // "instance": { + // "description": "Name of the instance resource to update.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "minimalAction": { + // "description": "Specifies the action to take when updating an instance even if the updated properties do not require it. If not specified, then Compute Engine acts based on the minimum action that the updated properties require.", + // "enum": [ + // "INVALID", + // "NO_EFFECT", + // "REFRESH", + // "RESTART" + // ], + // "enumDescriptions": [ + // "", + // "No changes can be made to the instance.", + // "The instance will not restart.", + // "The instance will restart." + // ], + // "location": "query", + // "type": "string" + // }, + // "mostDisruptiveAllowedAction": { + // "description": "Specifies the most disruptive action that can be taken on the instance as part of the update. Compute Engine returns an error if the instance properties require a more disruptive action as part of the instance update. Valid options from lowest to highest are NO_EFFECT, REFRESH, and RESTART.", + // "enum": [ + // "INVALID", + // "NO_EFFECT", + // "REFRESH", + // "RESTART" + // ], + // "enumDescriptions": [ + // "", + // "No changes can be made to the instance.", + // "The instance will not restart.", + // "The instance will restart." + // ], + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -88544,15 +104000,16 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "path": "projects/{project}/zones/{zone}/instances/{instance}", // "request": { - // "$ref": "InstanceGroupsSetNamedPortsRequest" + // "$ref": "Instance" // }, // "response": { // "$ref": "Operation" @@ -88565,27 +104022,36 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceTemplates.delete": +// method id "compute.instances.updateAccessConfig": -type InstanceTemplatesDeleteCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance template. Deleting an instance -// template is permanent and cannot be undone. It is not possible to -// delete templates that are already in use by a managed instance group. 
+// UpdateAccessConfig: Updates the specified access config from an +// instance's network interface with the data included in the request. +// This method supports PATCH semantics and uses the JSON merge patch +// format and processing rules. // -// - instanceTemplate: The name of the instance template to delete. -// - project: Project ID for this request. -func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { - c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instance: The instance name for this request. +// - networkInterface: The name of the network interface where the +// access config is attached. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { + c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig return c } @@ -88600,7 +104066,7 @@ func (r *InstanceTemplatesService) Delete(project string, instanceTemplate strin // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { +func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *InstancesUpdateAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -88608,7 +104074,7 @@ func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTempl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { +func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88616,21 +104082,21 @@ func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { +func (c *InstancesUpdateAccessConfigCall) Context(ctx context.Context) *InstancesUpdateAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
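A similar sketch for compute.instances.updateAccessConfig, reusing svc, ctx and the imports from the sketch above; the interface name nic0 and the NAT address are placeholders:

// updateNATAddress patches the external access config on one network
// interface (JSON merge-patch semantics, as described above).
func updateNATAddress(ctx context.Context, svc *compute.Service) error {
	op, err := svc.Instances.UpdateAccessConfig(
		"my-project", "us-central1-a", "my-instance", "nic0", // networkInterface is sent as a query parameter
		&compute.AccessConfig{
			Name:  "External NAT",
			Type:  "ONE_TO_ONE_NAT",
			NatIP: "203.0.113.10", // placeholder static address
		},
	).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("updateAccessConfig operation: %s", op.Name)
	return nil
}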
-func (c *InstanceTemplatesDeleteCall) Header() http.Header { +func (c *InstancesUpdateAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -88638,47 +104104,53 @@ func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/updateAccessConfig") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.delete" call. +// Do executes the "compute.instances.updateAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -88692,22 +104164,30 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group.", - // "flatPath": "projects/{project}/global/instanceTemplates/{instanceTemplate}", - // "httpMethod": "DELETE", - // "id": "compute.instanceTemplates.delete", + // "description": "Updates the specified access config from an instance's network interface with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/updateAccessConfig", + // "httpMethod": "POST", + // "id": "compute.instances.updateAccessConfig", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "zone", + // "instance", + // "networkInterface" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template to delete.", + // "instance": { + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "networkInterface": { + // "description": "The name of the network interface where the access config is attached.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -88719,9 +104199,19 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/updateAccessConfig", + // "request": { + // "$ref": "AccessConfig" + // }, // "response": { // "$ref": "Operation" // }, @@ -88733,119 +104223,134 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.get": +// method id "compute.instances.updateDisplayDevice": -type InstanceTemplatesGetCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesUpdateDisplayDeviceCall struct { + s *Service + project string + zone string + instance string + displaydevice *DisplayDevice + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance template. Gets a list of -// available instance templates by making a list() request. +// UpdateDisplayDevice: Updates the Display config for a VM instance. +// You can only use this method on a stopped VM instance. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. // -// - instanceTemplate: The name of the instance template. +// - instance: Name of the instance scoping this request. // - project: Project ID for this request. 
-func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { - c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) UpdateDisplayDevice(project string, zone string, instance string, displaydevice *DisplayDevice) *InstancesUpdateDisplayDeviceCall { + c := &InstancesUpdateDisplayDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.zone = zone + c.instance = instance + c.displaydevice = displaydevice + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesUpdateDisplayDeviceCall) RequestId(requestId string) *InstancesUpdateDisplayDeviceCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { +func (c *InstancesUpdateDisplayDeviceCall) Fields(s ...googleapi.Field) *InstancesUpdateDisplayDeviceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { +func (c *InstancesUpdateDisplayDeviceCall) Context(ctx context.Context) *InstancesUpdateDisplayDeviceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
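The same pattern for compute.instances.updateDisplayDevice; per the description above, this only works against a stopped instance (same placeholder names and client as before):

// enableDisplay turns on the virtual display device of a stopped instance.
func enableDisplay(ctx context.Context, svc *compute.Service) error {
	op, err := svc.Instances.UpdateDisplayDevice(
		"my-project", "us-central1-a", "my-instance",
		&compute.DisplayDevice{EnableDisplay: true},
	).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("updateDisplayDevice operation: %s", op.Name)
	return nil
}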
-func (c *InstanceTemplatesGetCall) Header() http.Header { +func (c *InstancesUpdateDisplayDeviceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.displaydevice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/updateDisplayDevice") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.get" call. -// Exactly one of *InstanceTemplate or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplate.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { +// Do executes the "compute.instances.updateDisplayDevice" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceTemplate{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -88857,163 +104362,190 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe } return ret, nil // { - // "description": "Returns the specified instance template. 
Gets a list of available instance templates by making a list() request.", - // "flatPath": "projects/{project}/global/instanceTemplates/{instanceTemplate}", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.get", + // "description": "Updates the Display config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/updateDisplayDevice", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateDisplayDevice", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "zone", + // "instance" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/updateDisplayDevice", + // "request": { + // "$ref": "DisplayDevice" + // }, // "response": { - // "$ref": "InstanceTemplate" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceTemplates.getIamPolicy": +// method id "compute.instances.updateNetworkInterface": -type InstanceTemplatesGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesUpdateNetworkInterfaceCall struct { + s *Service + project string + zone string + instance string + networkinterface *NetworkInterface + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// UpdateNetworkInterface: Updates an instance's network interface. This +// method can only update an interface's alias IP range and attached +// network. See Modifying alias IP ranges for an existing instance for +// instructions on changing alias IP ranges. See Migrating a VM between +// networks for instructions on migrating an interface. This method +// follows PATCH semantics. // +// - instance: The instance name for this request. +// - networkInterface: The name of the network interface to update. // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -func (r *InstanceTemplatesService) GetIamPolicy(project string, resource string) *InstanceTemplatesGetIamPolicyCall { - c := &InstanceTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { + c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.networkinterface = networkinterface return c } -// OptionsRequestedPolicyVersion sets the optional parameter -// "optionsRequestedPolicyVersion": Requested IAM Policy version. 
-func (c *InstanceTemplatesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *InstanceTemplatesGetIamPolicyCall { - c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *InstancesUpdateNetworkInterfaceCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetIamPolicyCall { +func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesUpdateNetworkInterfaceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesGetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesGetIamPolicyCall { +func (c *InstancesUpdateNetworkInterfaceCall) Context(ctx context.Context) *InstancesUpdateNetworkInterfaceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
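A sketch for compute.instances.updateNetworkInterface, which here only changes alias IP ranges; it assumes the current interface is read first so its fingerprint can be echoed back, and all names and ranges are placeholders:

// setAliasRange replaces the alias IP ranges on the instance's first interface.
func setAliasRange(ctx context.Context, svc *compute.Service) error {
	inst, err := svc.Instances.Get("my-project", "us-central1-a", "my-instance").Context(ctx).Do()
	if err != nil {
		return err
	}
	nic := inst.NetworkInterfaces[0] // assumes at least one interface (nic0)

	op, err := svc.Instances.UpdateNetworkInterface(
		"my-project", "us-central1-a", "my-instance", nic.Name,
		&compute.NetworkInterface{
			Fingerprint: nic.Fingerprint, // echo back the current fingerprint for the PATCH
			AliasIpRanges: []*compute.AliasIpRange{
				{IpCidrRange: "10.128.1.0/24"}, // placeholder range
			},
		},
	).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("updateNetworkInterface operation: %s", op.Name)
	return nil
}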
-func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { +func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkinterface) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.updateNetworkInterface" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -89025,20 +104557,29 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - // "flatPath": "projects/{project}/global/instanceTemplates/{resource}/getIamPolicy", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.getIamPolicy", + // "description": "Updates an instance's network interface. This method can only update an interface's alias IP range and attached network. See Modifying alias IP ranges for an existing instance for instructions on changing alias IP ranges. See Migrating a VM between networks for instructions on migrating an interface. This method follows PATCH semantics.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateNetworkInterface", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance", + // "networkInterface" // ], // "parameters": { - // "optionsRequestedPolicyVersion": { - // "description": "Requested IAM Policy version.", - // "format": "int32", + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to update.", // "location": "query", - // "type": "integer" + // "required": true, + // "type": "string" // }, // "project": { // "description": "Project ID for this request.", @@ -89047,49 +104588,61 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/instanceTemplates/{resource}/getIamPolicy", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", + // "request": { + // "$ref": "NetworkInterface" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceTemplates.insert": +// method id "compute.instances.updateShieldedInstanceConfig": -type InstanceTemplatesInsertCall struct { - s *Service - project string - instancetemplate *InstanceTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateShieldedInstanceConfigCall struct { + s *Service + project string + zone string + instance string + shieldedinstanceconfig *ShieldedInstanceConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance template in the specified project using -// the data that is included in the request. If you are creating a new -// template to update an existing instance group, your new instance -// template must use the same network or, if applicable, the same -// subnetwork as the original template. +// UpdateShieldedInstanceConfig: Updates the Shielded Instance config +// for an instance. You can only use this method on a stopped instance. +// This method supports PATCH semantics and uses the JSON merge patch +// format and processing rules. // +// - instance: Name or id of the instance scoping this request. // - project: Project ID for this request. -func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { - c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *InstancesService) UpdateShieldedInstanceConfig(project string, zone string, instance string, shieldedinstanceconfig *ShieldedInstanceConfig) *InstancesUpdateShieldedInstanceConfigCall { + c := &InstancesUpdateShieldedInstanceConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instancetemplate = instancetemplate + c.zone = zone + c.instance = instance + c.shieldedinstanceconfig = shieldedinstanceconfig return c } @@ -89104,7 +104657,7 @@ func (r *InstanceTemplatesService) Insert(project string, instancetemplate *Inst // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
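And for compute.instances.updateShieldedInstanceConfig, again against a stopped instance, with the same placeholder names:

// hardenInstance enables the Shielded VM options on a stopped instance.
func hardenInstance(ctx context.Context, svc *compute.Service) error {
	op, err := svc.Instances.UpdateShieldedInstanceConfig(
		"my-project", "us-central1-a", "my-instance",
		&compute.ShieldedInstanceConfig{
			EnableSecureBoot:          true,
			EnableVtpm:                true,
			EnableIntegrityMonitoring: true,
		},
	).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("updateShieldedInstanceConfig operation: %s", op.Name)
	return nil
}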
-func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) RequestId(requestId string) *InstancesUpdateShieldedInstanceConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -89112,7 +104665,7 @@ func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTempl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateShieldedInstanceConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -89120,21 +104673,21 @@ func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) Context(ctx context.Context) *InstancesUpdateShieldedInstanceConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesInsertCall) Header() http.Header { +func (c *InstancesUpdateShieldedInstanceConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -89142,51 +104695,53 @@ func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceconfig) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.insert" call. +// Do executes the "compute.instances.updateShieldedInstanceConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -89200,14 +104755,23 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", - // "flatPath": "projects/{project}/global/instanceTemplates", - // "httpMethod": "POST", - // "id": "compute.instanceTemplates.insert", + // "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateShieldedInstanceConfig", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name or id of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -89219,11 +104783,18 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/instanceTemplates", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig", // "request": { - // "$ref": "InstanceTemplate" + // "$ref": "ShieldedInstanceConfig" // }, // "response": { // "$ref": "Operation" @@ -89236,9 +104807,9 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.list": +// method id "compute.interconnectAttachments.aggregatedList": -type InstanceTemplatesListCall struct { +type InterconnectAttachmentsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -89247,30 +104818,33 @@ type InstanceTemplatesListCall struct { header_ http.Header } -// List: Retrieves a list of instance templates that are contained -// within the specified project. +// AggregatedList: Retrieves an aggregated list of interconnect +// attachments. // // - project: Project ID for this request. -func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { - c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { + c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. 
For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -89278,19 +104852,42 @@ func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCa // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *InterconnectAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than `maxResults`, Compute Engine returns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -89304,7 +104901,7 @@ func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTempla // result first). 
Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -89312,7 +104909,7 @@ func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesLi // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -89321,7 +104918,7 @@ func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplat // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *InstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceTemplatesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -89329,7 +104926,7 @@ func (c *InstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -89339,7 +104936,7 @@ func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTempla // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -89347,21 +104944,21 @@ func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTempl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceTemplatesListCall) Header() http.Header { +func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -89374,7 +104971,7 @@ func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -89387,33 +104984,34 @@ func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.list" call. -// Exactly one of *InstanceTemplateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { +// Do executes the "compute.interconnectAttachments.aggregatedList" call. +// Exactly one of *InterconnectAttachmentAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceTemplateList{ + ret := &InterconnectAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -89425,19 +105023,24 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT } return ret, nil // { - // "description": "Retrieves a list of instance templates that are contained within the specified project.", - // "flatPath": "projects/{project}/global/instanceTemplates", + // "description": "Retrieves an aggregated list of interconnect attachments.", + // "flatPath": "projects/{project}/aggregated/interconnectAttachments", // "httpMethod": "GET", - // "id": "compute.instanceTemplates.list", + // "id": "compute.interconnectAttachments.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -89469,9 +105072,9 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // "type": "boolean" // } // }, - // "path": "projects/{project}/global/instanceTemplates", + // "path": "projects/{project}/aggregated/interconnectAttachments", // "response": { - // "$ref": "InstanceTemplateList" + // "$ref": "InterconnectAttachmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -89485,7 +105088,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
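
Pages, defined next, drives the pageToken/nextPageToken bookkeeping automatically and stops when the callback returns a non-nil error or the last page is reached. A hedged sketch of iterating the aggregated list page by page; the project ID is a placeholder and Items is assumed to be keyed by scope, per the aggregated-list convention:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	err = svc.InterconnectAttachments.AggregatedList("my-project").
		MaxResults(50).
		Pages(ctx, func(page *compute.InterconnectAttachmentAggregatedList) error {
			// Each key is a scope name (for example "regions/us-central1").
			for scope := range page.Items {
				fmt.Println("scope:", scope)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}
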
-func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { +func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -89503,35 +105106,52 @@ func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceT } } -// method id "compute.instanceTemplates.setIamPolicy": +// method id "compute.interconnectAttachments.delete": -type InstanceTemplatesSetIamPolicyCall struct { +type InterconnectAttachmentsDeleteCall struct { s *Service project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest + region string + interconnectAttachment string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// Delete: Deletes the specified interconnect attachment. // -// - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -func (r *InstanceTemplatesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InstanceTemplatesSetIamPolicyCall { - c := &InstanceTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - interconnectAttachment: Name of the interconnect attachment to +// delete. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { + c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.region = region + c.interconnectAttachment = interconnectAttachment + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesSetIamPolicyCall { +func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -89539,21 +105159,21 @@ func (c *InstanceTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesSetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesSetIamPolicyCall { +func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { +func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -89561,54 +105181,50 @@ func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.interconnectAttachments.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
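
Putting the delete call together, a caller might supply a client-generated request ID so that a retried delete is recognized as the same request. Illustrative only; the project, region, attachment name and UUID below are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	op, err := svc.InterconnectAttachments.
		Delete("my-project", "us-central1", "my-attachment").
		// Any client-chosen UUID other than the zero UUID.
		RequestId("3f2c1d9e-8b7a-4c5d-9e0f-123456789abc").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	// Delete returns a long-running Operation; poll or wait on it as needed.
	fmt.Println("operation:", op.Name, "status:", op.Status)
}
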
+func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -89620,15 +105236,23 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "flatPath": "projects/{project}/global/instanceTemplates/{resource}/setIamPolicy", - // "httpMethod": "POST", - // "id": "compute.instanceTemplates.setIamPolicy", + // "description": "Deletes the specified interconnect attachment.", + // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "httpMethod": "DELETE", + // "id": "compute.interconnectAttachments.delete", // "parameterOrder": [ // "project", - // "resource" + // "region", + // "interconnectAttachment" // ], // "parameters": { + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -89636,20 +105260,22 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/global/instanceTemplates/{resource}/setIamPolicy", - // "request": { - // "$ref": "GlobalSetPolicyRequest" - // }, + // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -89659,292 +105285,123 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } -// method id "compute.instanceTemplates.testIamPermissions": +// method id "compute.interconnectAttachments.get": -type InstanceTemplatesTestIamPermissionsCall struct { +type InterconnectAttachmentsGetCall struct { s *Service project string - resource string - testpermissionsrequest *TestPermissionsRequest + region string + interconnectAttachment string urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// Get: Returns the specified interconnect attachment. // -// - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { - c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - interconnectAttachment: Name of the interconnect attachment to +// return. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { + c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.region = region + c.interconnectAttachment = interconnectAttachment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { +func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
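
IfNoneMatch allows a conditional re-read of an attachment. A sketch under the assumption that the first response carried an ETag header (the server may not return one for every resource); googleapi.IsNotModified reports the 304 case:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	att, err := svc.InterconnectAttachments.
		Get("my-project", "us-central1", "my-attachment").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	// Re-issue the request conditionally, using the ETag from the first
	// response (if one was returned).
	etag := att.ServerResponse.Header.Get("Etag")
	_, err = svc.InterconnectAttachments.
		Get("my-project", "us-central1", "my-attachment").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("attachment unchanged since last fetch")
	} else if err != nil {
		log.Fatal(err)
	}
}
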
-func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { +func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { +func (c *InterconnectAttachmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/instanceTemplates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.interconnectAttachments.get" call. +// Exactly one of *InterconnectAttachment or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *InterconnectAttachment.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "flatPath": "projects/{project}/global/instanceTemplates/{resource}/testIamPermissions", - // "httpMethod": "POST", - // "id": "compute.instanceTemplates.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/global/instanceTemplates/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.instances.addAccessConfig": - -type InstancesAddAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AddAccessConfig: Adds an access config to an instance's network -// interface. -// -// - instance: The instance name for this request. -// - networkInterface: The name of the network interface to add to this -// instance. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { - c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. 
This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesAddAccessConfigCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/addAccessConfig") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.addAccessConfig" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &InterconnectAttachment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -89956,30 +105413,23 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Adds an access config to an instance's network interface.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/addAccessConfig", - // "httpMethod": "POST", - // "id": "compute.instances.addAccessConfig", + // "description": "Returns the specified interconnect attachment.", + // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.get", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "networkInterface" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface to add to this instance.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -89987,60 +105437,49 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/addAccessConfig", - // "request": { - // "$ref": "AccessConfig" - // }, + // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectAttachment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.addResourcePolicies": +// method id "compute.interconnectAttachments.insert": -type InstancesAddResourcePoliciesCall struct { - s *Service - project string - zone string - instance string - instancesaddresourcepoliciesrequest *InstancesAddResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsInsertCall struct { + s *Service + project string + region string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddResourcePolicies: Adds existing resource policies to an instance. -// You can only add one policy right now which will be applied to this -// instance for scheduling live migrations. +// Insert: Creates an InterconnectAttachment in the specified project +// using the data included in the request. // -// - instance: The instance name for this request. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) AddResourcePolicies(project string, zone string, instance string, instancesaddresourcepoliciesrequest *InstancesAddResourcePoliciesRequest) *InstancesAddResourcePoliciesCall { - c := &InstancesAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { + c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancesaddresourcepoliciesrequest = instancesaddresourcepoliciesrequest + c.region = region + c.interconnectattachment = interconnectattachment return c } @@ -90055,15 +105494,22 @@ func (r *InstancesService) AddResourcePolicies(project string, zone string, inst // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
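
A corresponding sketch for the insert call, again with a client-chosen request ID for idempotent retries. Only Name is populated on the request body here; the remaining InterconnectAttachment fields are defined elsewhere in this file and omitted from this placeholder example:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// A real attachment needs more than a name; this only shows the call shape.
	att := &compute.InterconnectAttachment{Name: "my-attachment"}
	op, err := svc.InterconnectAttachments.
		Insert("my-project", "us-central1", att).
		RequestId("9d1b7c2a-5e4f-4a3b-8c6d-0fedcba98765").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("insert operation:", op.Name)
}
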
-func (c *InstancesAddResourcePoliciesCall) RequestId(requestId string) *InstancesAddResourcePoliciesCall { +func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *InterconnectAttachmentsInsertCall) ValidateOnly(validateOnly bool) *InterconnectAttachmentsInsertCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAddResourcePoliciesCall) Fields(s ...googleapi.Field) *InstancesAddResourcePoliciesCall { +func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -90071,21 +105517,21 @@ func (c *InstancesAddResourcePoliciesCall) Fields(s ...googleapi.Field) *Instanc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAddResourcePoliciesCall) Context(ctx context.Context) *InstancesAddResourcePoliciesCall { +func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAddResourcePoliciesCall) Header() http.Header { +func (c *InterconnectAttachmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -90093,14 +105539,14 @@ func (c *InstancesAddResourcePoliciesCall) doRequest(alt string) (*http.Response } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesaddresourcepoliciesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/addResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -90108,38 +105554,37 @@ func (c *InstancesAddResourcePoliciesCall) doRequest(alt string) (*http.Response } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.addResourcePolicies" call. +// Do executes the "compute.interconnectAttachments.insert" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -90153,27 +105598,26 @@ func (c *InstancesAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Adds existing resource policies to an instance. You can only add one policy right now which will be applied to this instance for scheduling live migrations.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/addResourcePolicies", + // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", // "httpMethod": "POST", - // "id": "compute.instances.addResourcePolicies", + // "id": "compute.interconnectAttachments.insert", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -90182,17 +105626,15 @@ func (c *InstancesAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Op // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/addResourcePolicies", + // "path": "projects/{project}/regions/{region}/interconnectAttachments", // "request": { - // "$ref": "InstancesAddResourcePoliciesRequest" + // "$ref": 
"InterconnectAttachment" // }, // "response": { // "$ref": "Operation" @@ -90205,43 +105647,47 @@ func (c *InstancesAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.instances.aggregatedList": +// method id "compute.interconnectAttachments.list": -type InstancesAggregatedListCall struct { +type InterconnectAttachmentsListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of all of the instances -// in your project across all regions and zones. The performance of this -// method degrades when a filter is specified on a project that has a -// very large number of instances. +// List: Retrieves the list of interconnect attachments contained within +// the specified region. // // - project: Project ID for this request. -func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { - c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { + c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. 
You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -90249,32 +105695,29 @@ func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedLi // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { c.urlParams_.Set("filter", filter) return c } -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *InstancesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InstancesAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than `maxResults`, Compute Engine returns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { +func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -90288,7 +105731,7 @@ func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAgg // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. 
-func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { +func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -90296,7 +105739,7 @@ func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregat // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { +func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -90305,7 +105748,7 @@ func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggr // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *InstancesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesAggregatedListCall { +func (c *InterconnectAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -90313,7 +105756,7 @@ func (c *InstancesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { +func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -90323,7 +105766,7 @@ func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { +func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { c.ifNoneMatch_ = entityTag return c } @@ -90331,21 +105774,21 @@ func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { +func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesAggregatedListCall) Header() http.Header { +func (c *InterconnectAttachmentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -90358,7 +105801,7 @@ func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -90367,37 +105810,38 @@ func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, err req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.aggregatedList" call. -// Exactly one of *InstanceAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.interconnectAttachments.list" call. +// Exactly one of *InterconnectAttachmentList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectAttachmentList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { +func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceAggregatedList{ + ret := &InterconnectAttachmentList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -90409,24 +105853,20 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Retrieves an aggregated list of all of the instances in your project across all regions and zones. 
The performance of this method degrades when a filter is specified on a project that has a very large number of instances.", - // "flatPath": "projects/{project}/aggregated/instances", + // "description": "Retrieves the list of interconnect attachments contained within the specified region.", + // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", // "httpMethod": "GET", - // "id": "compute.instances.aggregatedList", + // "id": "compute.interconnectAttachments.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -90452,15 +105892,22 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "returnPartialSuccess": { // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/instances", + // "path": "projects/{project}/regions/{region}/interconnectAttachments", // "response": { - // "$ref": "InstanceAggregatedList" + // "$ref": "InterconnectAttachmentList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -90474,7 +105921,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { +func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -90492,42 +105939,33 @@ func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*Instanc } } -// method id "compute.instances.attachDisk": +// method id "compute.interconnectAttachments.patch": -type InstancesAttachDiskCall struct { - s *Service - project string - zone string - instance string - attacheddisk *AttachedDisk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsPatchCall struct { + s *Service + project string + region string + interconnectAttachment string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AttachDisk: Attaches an existing Disk resource to an instance. You -// must first create the disk before you can attach it. It is not -// possible to create and attach a disk at the same time. For more -// information, read Adding a persistent disk to your instance. +// Patch: Updates the specified interconnect attachment with the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. // -// - instance: The instance name for this request. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { - c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - interconnectAttachment: Name of the interconnect attachment to +// patch. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { + c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.attacheddisk = attacheddisk - return c -} - -// ForceAttach sets the optional parameter "forceAttach": Whether to -// force attach the regional disk even if it's currently attached to -// another instance. If you try to force attach a zonal disk to an -// instance, you will receive an error. -func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { - c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) + c.region = region + c.interconnectAttachment = interconnectAttachment + c.interconnectattachment = interconnectattachment return c } @@ -90542,7 +105980,7 @@ func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttach // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { +func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -90550,7 +105988,7 @@ func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { +func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -90558,21 +105996,21 @@ func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { +func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAttachDiskCall) Header() http.Header { +func (c *InterconnectAttachmentsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -90580,53 +106018,53 @@ func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/attachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.attachDisk" call. +// Do executes the "compute.interconnectAttachments.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -90640,23 +106078,18 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/attachDisk", - // "httpMethod": "POST", - // "id": "compute.instances.attachDisk", + // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "httpMethod": "PATCH", + // "id": "compute.interconnectAttachments.patch", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "forceAttach": { - // "description": "Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error.", - // "location": "query", - // "type": "boolean" - // }, - // "instance": { - // "description": "The instance name for this request.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -90669,22 +106102,22 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/attachDisk", + // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "request": { - // "$ref": "AttachedDisk" + // "$ref": "InterconnectAttachment" // }, // "response": { // "$ref": "Operation" @@ -90697,28 +106130,31 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instances.bulkInsert": +// method id "compute.interconnectAttachments.setLabels": -type InstancesBulkInsertCall struct { - s *Service - project string - zone string - bulkinsertinstanceresource *BulkInsertInstanceResource - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// BulkInsert: Creates multiple instances. Count specifies the number of -// instances to create. +// SetLabels: Sets the labels on an InterconnectAttachment. To learn +// more about labels, read the Labeling Resources documentation. // // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) BulkInsert(project string, zone string, bulkinsertinstanceresource *BulkInsertInstanceResource) *InstancesBulkInsertCall { - c := &InstancesBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The region for this request. +// - resource: Name or id of the resource for this request. +func (r *InterconnectAttachmentsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *InterconnectAttachmentsSetLabelsCall { + c := &InterconnectAttachmentsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.bulkinsertinstanceresource = bulkinsertinstanceresource + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest return c } @@ -90733,7 +106169,7 @@ func (r *InstancesService) BulkInsert(project string, zone string, bulkinsertins // clients from accidentally creating duplicate commitments. 
The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesBulkInsertCall) RequestId(requestId string) *InstancesBulkInsertCall { +func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *InterconnectAttachmentsSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -90741,7 +106177,7 @@ func (c *InstancesBulkInsertCall) RequestId(requestId string) *InstancesBulkInse // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesBulkInsertCall) Fields(s ...googleapi.Field) *InstancesBulkInsertCall { +func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -90749,21 +106185,21 @@ func (c *InstancesBulkInsertCall) Fields(s ...googleapi.Field) *InstancesBulkIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesBulkInsertCall) Context(ctx context.Context) *InstancesBulkInsertCall { +func (c *InterconnectAttachmentsSetLabelsCall) Context(ctx context.Context) *InterconnectAttachmentsSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesBulkInsertCall) Header() http.Header { +func (c *InterconnectAttachmentsSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesBulkInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -90771,14 +106207,14 @@ func (c *InstancesBulkInsertCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertinstanceresource) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/bulkInsert") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -90786,37 +106222,38 @@ func (c *InstancesBulkInsertCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.bulkInsert" call. +// Do executes the "compute.interconnectAttachments.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -90830,13 +106267,14 @@ func (c *InstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates multiple instances. Count specifies the number of instances to create.", - // "flatPath": "projects/{project}/zones/{zone}/instances/bulkInsert", + // "description": "Sets the labels on an InterconnectAttachment. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", // "httpMethod": "POST", - // "id": "compute.instances.bulkInsert", + // "id": "compute.interconnectAttachments.setLabels", // "parameterOrder": [ // "project", - // "zone" + // "region", + // "resource" // ], // "parameters": { // "project": { @@ -90846,22 +106284,29 @@ func (c *InstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/bulkInsert", + // "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", // "request": { - // "$ref": "BulkInsertInstanceResource" + // "$ref": "RegionSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -90874,125 +106319,120 @@ func (c *InstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instances.delete": +// method id "compute.interconnectLocations.get": -type InstancesDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectLocationsGetCall struct { + s *Service + project string + interconnectLocation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Instance resource. For more -// information, see Deleting an instance. +// Get: Returns the details for the specified interconnect location. +// Gets a list of available interconnect locations by making a list() +// request. // -// - instance: Name of the instance resource to delete. +// - interconnectLocation: Name of the interconnect location to return. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { - c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { + c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.interconnectLocation = interconnectLocation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { +func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { +func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDeleteCall) Header() http.Header { +func (c *InterconnectLocationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectLocations/{interconnectLocation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "interconnectLocation": c.interconnectLocation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectLocations.get" call. +// Exactly one of *InterconnectLocation or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectLocation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &InterconnectLocation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -91004,18 +106444,17 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified Instance resource. For more information, see Deleting an instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}", - // "httpMethod": "DELETE", - // "id": "compute.instances.delete", + // "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", + // "flatPath": "projects/{project}/global/interconnectLocations/{interconnectLocation}", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "interconnectLocation" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to delete.", + // "interconnectLocation": { + // "description": "Name of the interconnect location to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -91027,155 +106466,212 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}", + // "path": "projects/{project}/global/interconnectLocations/{interconnectLocation}", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectLocation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.deleteAccessConfig": +// method id "compute.interconnectLocations.list": -type InstancesDeleteAccessConfigCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectLocationsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DeleteAccessConfig: Deletes an access config from an instance's -// network interface. +// List: Retrieves the list of interconnect locations available to the +// specified project. // -// - accessConfig: The name of the access config to delete. -// - instance: The instance name for this request. -// - networkInterface: The name of the network interface. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { - c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { + c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("accessConfig", accessConfig) - c.urlParams_.Set("networkInterface", networkInterface) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. 
Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *InterconnectLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectLocationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { +func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { +func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDeleteAccessConfigCall) Header() http.Header { +func (c *InterconnectLocationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectLocations") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.deleteAccessConfig" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectLocations.list" call. +// Exactly one of *InterconnectLocationList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectLocationList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &InterconnectLocationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -91187,35 +106683,35 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes an access config from an instance's network interface.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", - // "httpMethod": "POST", - // "id": "compute.instances.deleteAccessConfig", + // "description": "Retrieves the list of interconnect locations available to the specified project.", + // "flatPath": "projects/{project}/global/interconnectLocations", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.list", // "parameterOrder": [ - // "project", - // "zone", - // "instance", - // "accessConfig", - // "networkInterface" + // "project" // ], // "parameters": { - // "accessConfig": { - // "description": "The name of the access config to delete.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", - // "required": true, // "type": "string" // }, - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface.", + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", - // "required": true, // "type": "string" // }, // "project": { @@ -91225,57 +106721,65 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + // "path": "projects/{project}/global/interconnectLocations", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectLocationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.detachDisk": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesDetachDiskCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.interconnects.delete": + +type InterconnectsDeleteCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DetachDisk: Detaches a disk from an instance. +// Delete: Deletes the specified Interconnect. // -// - deviceName: The device name of the disk to detach. Make a get() -// request on the instance to view currently attached disks and device -// names. -// - instance: Instance name for this request. +// - interconnect: Name of the interconnect to delete. // - project: Project ID for this request. -// - zone: The name of the zone for this request. 
-func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { - c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { + c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("deviceName", deviceName) + c.interconnect = interconnect return c } @@ -91290,7 +106794,7 @@ func (r *InstancesService) DetachDisk(project string, zone string, instance stri // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { +func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -91298,7 +106802,7 @@ func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { +func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -91306,21 +106810,21 @@ func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { +func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDetachDiskCall) Header() http.Header { +func (c *InterconnectsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -91330,46 +106834,45 @@ func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/detachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.detachDisk" call. +// Do executes the "compute.interconnects.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -91383,25 +106886,17 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Detaches a disk from an instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/detachDisk", - // "httpMethod": "POST", - // "id": "compute.instances.detachDisk", + // "description": "Deletes the specified Interconnect.", + // "flatPath": "projects/{project}/global/interconnects/{interconnect}", + // "httpMethod": "DELETE", + // "id": "compute.interconnects.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "deviceName" + // "interconnect" // ], // "parameters": { - // "deviceName": { - // "description": "The device name of the disk to detach. Make a get() request on the instance to view currently attached disks and device names.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "Instance name for this request.", + // "interconnect": { + // "description": "Name of the interconnect to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -91418,16 +106913,9 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/detachDisk", + // "path": "projects/{project}/global/interconnects/{interconnect}", // "response": { // "$ref": "Operation" // }, @@ -91439,37 +106927,34 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instances.get": +// method id "compute.interconnects.get": -type InstancesGetCall struct { +type InterconnectsGetCall struct { s *Service project string - zone string - instance string + interconnect string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified Instance resource. Gets a list of -// available instances by making a list() request. +// Get: Returns the specified Interconnect. Get a list of available +// Interconnects by making a list() request. // -// - instance: Name of the instance resource to return. +// - interconnect: Name of the interconnect to return. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { - c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { + c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.interconnect = interconnect return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { +func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -91479,7 +106964,7 @@ func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { +func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -91487,21 +106972,21 @@ func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { +func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
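// Usage sketch (illustrative, not part of the generated file): deleting an
// Interconnect with an explicit request ID so a retried call is deduplicated
// by the server, as described in the RequestId documentation above. The
// interconnect name and UUID are placeholders; in practice generate the UUID
// (for example with github.com/google/uuid) and reuse the same value on every
// retry of the same logical request.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func deleteInterconnect(ctx context.Context, svc *compute.Service, project, name string) (*compute.Operation, error) {
	const requestID = "3d7c9f1a-2b4e-4c6d-9e8f-0123456789ab" // placeholder UUID
	return svc.Interconnects.Delete(project, name).
		RequestId(requestID).
		Context(ctx).
		Do()
}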
-func (c *InstancesGetCall) Header() http.Header { +func (c *InterconnectsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -91514,7 +106999,7 @@ func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -91522,40 +107007,39 @@ func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.get" call. -// Exactly one of *Instance or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Instance.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.interconnects.get" call. +// Exactly one of *Interconnect or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Interconnect.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { +func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Instance{ + ret := &Interconnect{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -91567,18 +107051,17 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } return ret, nil // { - // "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}", + // "description": "Returns the specified Interconnect. 
Get a list of available Interconnects by making a list() request.", + // "flatPath": "projects/{project}/global/interconnects/{interconnect}", // "httpMethod": "GET", - // "id": "compute.instances.get", + // "id": "compute.interconnects.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "interconnect" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to return.", + // "interconnect": { + // "description": "Name of the interconnect to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -91590,18 +107073,11 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}", + // "path": "projects/{project}/global/interconnects/{interconnect}", // "response": { - // "$ref": "Instance" + // "$ref": "Interconnect" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -91612,40 +107088,34 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } -// method id "compute.instances.getEffectiveFirewalls": +// method id "compute.interconnects.getDiagnostics": -type InstancesGetEffectiveFirewallsCall struct { +type InterconnectsGetDiagnosticsCall struct { s *Service project string - zone string - instance string + interconnect string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetEffectiveFirewalls: Returns effective firewalls applied to an -// interface of the instance. +// GetDiagnostics: Returns the interconnectDiagnostics for the specified +// Interconnect. // -// - instance: Name of the instance scoping this request. -// - networkInterface: The name of the network interface to get the -// effective firewalls. +// - interconnect: Name of the interconnect resource to query. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) GetEffectiveFirewalls(project string, zone string, instance string, networkInterface string) *InstancesGetEffectiveFirewallsCall { - c := &InstancesGetEffectiveFirewallsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { + c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) + c.interconnect = interconnect return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
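// Usage sketch (illustrative): a conditional re-fetch of an Interconnect using
// IfNoneMatch and googleapi.IsNotModified, per the method comments above.
// Reading the ETag from ServerResponse.Header is an assumption made for
// illustration; the server may not return one for every resource.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func refreshInterconnect(ctx context.Context, svc *compute.Service, project, name string) error {
	first, err := svc.Interconnects.Get(project, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	etag := first.ServerResponse.Header.Get("Etag")
	// Ask the server to return the resource only if it has changed.
	_, err = svc.Interconnects.Get(project, name).IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("interconnect unchanged since last fetch")
		return nil
	}
	return err
}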
-func (c *InstancesGetEffectiveFirewallsCall) Fields(s ...googleapi.Field) *InstancesGetEffectiveFirewallsCall { +func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -91655,7 +107125,7 @@ func (c *InstancesGetEffectiveFirewallsCall) Fields(s ...googleapi.Field) *Insta // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesGetEffectiveFirewallsCall) IfNoneMatch(entityTag string) *InstancesGetEffectiveFirewallsCall { +func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { c.ifNoneMatch_ = entityTag return c } @@ -91663,21 +107133,21 @@ func (c *InstancesGetEffectiveFirewallsCall) IfNoneMatch(entityTag string) *Inst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetEffectiveFirewallsCall) Context(ctx context.Context) *InstancesGetEffectiveFirewallsCall { +func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetEffectiveFirewallsCall) Header() http.Header { +func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetEffectiveFirewallsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -91690,7 +107160,7 @@ func (c *InstancesGetEffectiveFirewallsCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}/getDiagnostics") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -91698,41 +107168,40 @@ func (c *InstancesGetEffectiveFirewallsCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getEffectiveFirewalls" call. -// Exactly one of *InstancesGetEffectiveFirewallsResponse or error will -// be non-nil. Any non-2xx status code is an error. Response headers are -// in either -// *InstancesGetEffectiveFirewallsResponse.ServerResponse.Header or (if -// a response was returned at all) in error.(*googleapi.Error).Header. -// Use googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
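// Usage sketch (illustrative): fetching diagnostics for an Interconnect and
// dumping the response as JSON, which avoids depending on the exact field
// layout of InterconnectsGetDiagnosticsResponse.
package example

import (
	"context"
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func printDiagnostics(ctx context.Context, svc *compute.Service, project, name string) error {
	resp, err := svc.Interconnects.GetDiagnostics(project, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	out, err := json.MarshalIndent(resp, "", "  ")
	if err != nil {
		return err
	}
	fmt.Println(string(out))
	return nil
}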
-func (c *InstancesGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*InstancesGetEffectiveFirewallsResponse, error) { +// Do executes the "compute.interconnects.getDiagnostics" call. +// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstancesGetEffectiveFirewallsResponse{ + ret := &InterconnectsGetDiagnosticsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -91744,30 +107213,180 @@ func (c *InstancesGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns effective firewalls applied to an interface of the instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls", + // "description": "Returns the interconnectDiagnostics for the specified Interconnect.", + // "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", // "httpMethod": "GET", - // "id": "compute.instances.getEffectiveFirewalls", + // "id": "compute.interconnects.getDiagnostics", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "networkInterface" + // "interconnect" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "interconnect": { + // "description": "Name of the interconnect resource to query.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface to get the effective firewalls.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, + // } + // }, + // "path": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + // "response": { + // "$ref": "InterconnectsGetDiagnosticsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.interconnects.insert": + +type InterconnectsInsertCall struct { + s *Service + project string + interconnect *Interconnect + urlParams_ gensupport.URLParams + ctx_ 
context.Context + header_ http.Header +} + +// Insert: Creates an Interconnect in the specified project using the +// data included in the request. +// +// - project: Project ID for this request. +func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { + c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnect = interconnect + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
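// Usage sketch (illustrative): creating an Interconnect and polling the
// returned global Operation until it completes. The Interconnect fields shown
// are placeholders and a real insert requires additional fields; Name,
// Description, and the GlobalOperations polling pattern are assumptions about
// the standard generated client shape, not part of this file.
package example

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func createInterconnect(ctx context.Context, svc *compute.Service, project string) error {
	op, err := svc.Interconnects.Insert(project, &compute.Interconnect{
		Name:        "example-interconnect", // placeholder
		Description: "created from an illustrative example",
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Insert is asynchronous: poll the global operation until it is DONE.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.GlobalOperations.Get(project, op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("insert failed: %+v", op.Error.Errors)
	}
	return nil
}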
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an Interconnect in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/interconnects", + // "httpMethod": "POST", + // "id": "compute.interconnects.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -91775,71 +107394,134 @@ func (c *InstancesGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls", + // "path": "projects/{project}/global/interconnects", + // "request": { + // "$ref": "Interconnect" + // }, // "response": { - // "$ref": "InstancesGetEffectiveFirewallsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getGuestAttributes": +// method id "compute.interconnects.list": -type InstancesGetGuestAttributesCall struct { +type InterconnectsListCall struct { s *Service project string - zone string - instance string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetGuestAttributes: Returns the specified guest attributes entry. +// List: Retrieves the list of Interconnects available to the specified +// project. // -// - instance: Name of the instance scoping this request. 
// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) GetGuestAttributes(project string, zone string, instance string) *InstancesGetGuestAttributesCall { - c := &InstancesGetGuestAttributesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InterconnectsService) List(project string) *InterconnectsListCall { + c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance return c } -// QueryPath sets the optional parameter "queryPath": Specifies the -// guest attributes path to be queried. -func (c *InstancesGetGuestAttributesCall) QueryPath(queryPath string) *InstancesGetGuestAttributesCall { - c.urlParams_.Set("queryPath", queryPath) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { + c.urlParams_.Set("filter", filter) return c } -// VariableKey sets the optional parameter "variableKey": Specifies the -// key for the guest attributes entry. 
-func (c *InstancesGetGuestAttributesCall) VariableKey(variableKey string) *InstancesGetGuestAttributesCall { - c.urlParams_.Set("variableKey", variableKey) +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *InterconnectsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetGuestAttributesCall) Fields(s ...googleapi.Field) *InstancesGetGuestAttributesCall { +func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -91849,7 +107531,7 @@ func (c *InstancesGetGuestAttributesCall) Fields(s ...googleapi.Field) *Instance // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesGetGuestAttributesCall) IfNoneMatch(entityTag string) *InstancesGetGuestAttributesCall { +func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { c.ifNoneMatch_ = entityTag return c } @@ -91857,21 +107539,21 @@ func (c *InstancesGetGuestAttributesCall) IfNoneMatch(entityTag string) *Instanc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
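// Usage sketch (illustrative): listing Interconnects with a filter, a page
// size cap, and ordering, using the option setters documented above, then
// paging through the results. The filter expression and project ID are
// placeholders.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listInterconnects(ctx context.Context, svc *compute.Service, project string) error {
	return svc.Interconnects.List(project).
		Filter("name != example-interconnect"). // AIP-160 style expression
		MaxResults(100).
		OrderBy("creationTimestamp desc").
		Pages(ctx, func(page *compute.InterconnectList) error {
			for _, ic := range page.Items {
				fmt.Println(ic.Name)
			}
			return nil
		})
}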
-func (c *InstancesGetGuestAttributesCall) Context(ctx context.Context) *InstancesGetGuestAttributesCall { +func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetGuestAttributesCall) Header() http.Header { +func (c *InterconnectsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -91884,7 +107566,7 @@ func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/getGuestAttributes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -91892,40 +107574,38 @@ func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getGuestAttributes" call. -// Exactly one of *GuestAttributes or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *GuestAttributes.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.interconnects.list" call. +// Exactly one of *InterconnectList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*GuestAttributes, error) { +func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &GuestAttributes{ + ret := &InterconnectList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -91937,51 +107617,53 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue } return ret, nil // { - // "description": "Returns the specified guest attributes entry.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/getGuestAttributes", + // "description": "Retrieves the list of Interconnects available to the specified project.", + // "flatPath": "projects/{project}/global/interconnects", // "httpMethod": "GET", - // "id": "compute.instances.getGuestAttributes", + // "id": "compute.interconnects.list", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" // }, - // "queryPath": { - // "description": "Specifies the guest attributes path to be queried.", + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, - // "variableKey": { - // "description": "Specifies the key for the guest attributes entry.", + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/getGuestAttributes", + // "path": "projects/{project}/global/interconnects", // "response": { - // "$ref": "GuestAttributes" + // "$ref": "InterconnectList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -91992,130 +107674,150 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue } -// method id "compute.instances.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. 
+// The provided context supersedes any context provided to the Context method. +func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.interconnects.patch": + +type InterconnectsPatchCall struct { + s *Service + project string + interconnect string + interconnect2 *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// Patch: Updates the specified Interconnect with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. // +// - interconnect: Name of the interconnect to update. // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { - c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { + c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource + c.interconnect = interconnect + c.interconnect2 = interconnect2 return c } -// OptionsRequestedPolicyVersion sets the optional parameter -// "optionsRequestedPolicyVersion": Requested IAM Policy version. -func (c *InstancesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *InstancesGetIamPolicyCall { - c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
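// Usage sketch (illustrative): updating only the description of an existing
// Interconnect via Patch, which uses JSON merge-patch semantics, so fields
// left at their zero value are not sent. ForceSendFields is assumed from the
// standard generated struct shape and is only needed when an intentionally
// empty value must still be serialized.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func updateDescription(ctx context.Context, svc *compute.Service, project, name, desc string) (*compute.Operation, error) {
	patch := &compute.Interconnect{
		Description: desc,
		// To clear the description instead, leave it empty and list it in
		// ForceSendFields so the zero value is still sent:
		// ForceSendFields: []string{"Description"},
	}
	return svc.Interconnects.Patch(project, name, patch).Context(ctx).Do()
}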
-func (c *InstancesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesGetIamPolicyCall { +func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstancesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetIamPolicyCall) Context(ctx context.Context) *InstancesGetIamPolicyCall { +func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetIamPolicyCall) Header() http.Header { +func (c *InterconnectsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.interconnects.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -92127,173 +107829,156 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{resource}/getIamPolicy", - // "httpMethod": "GET", - // "id": "compute.instances.getIamPolicy", + // "description": "Updates the specified Interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/global/interconnects/{interconnect}", + // "httpMethod": "PATCH", + // "id": "compute.interconnects.patch", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "interconnect" // ], // "parameters": { - // "optionsRequestedPolicyVersion": { - // "description": "Requested IAM Policy version.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "project": { - // "description": "Project ID for this request.", + // "interconnect": { + // "description": "Name of the interconnect to update.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{resource}/getIamPolicy", + // "path": "projects/{project}/global/interconnects/{interconnect}", + // "request": { + // "$ref": "Interconnect" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getScreenshot": +// method id "compute.interconnects.setLabels": -type InstancesGetScreenshotCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InterconnectsSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetScreenshot: Returns the screenshot from the specified instance. +// SetLabels: Sets the labels on an Interconnect. To learn more about +// labels, read the Labeling Resources documentation. // -// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) GetScreenshot(project string, zone string, instance string) *InstancesGetScreenshotCall { - c := &InstancesGetScreenshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *InterconnectsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *InterconnectsSetLabelsCall { + c := &InterconnectsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetScreenshotCall) Fields(s ...googleapi.Field) *InstancesGetScreenshotCall { +func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectsSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetScreenshotCall) IfNoneMatch(entityTag string) *InstancesGetScreenshotCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
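// Usage sketch (illustrative): replacing the labels on an Interconnect via
// SetLabels. Label updates require the current label fingerprint, so the
// resource is read first; the LabelFingerprint and Labels fields are
// assumptions about the generated Interconnect and GlobalSetLabelsRequest
// structs, and the label values are placeholders.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func setEnvLabel(ctx context.Context, svc *compute.Service, project, name string) (*compute.Operation, error) {
	ic, err := svc.Interconnects.Get(project, name).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	req := &compute.GlobalSetLabelsRequest{
		LabelFingerprint: ic.LabelFingerprint,
		Labels: map[string]string{
			"env": "prod", // placeholder label
		},
	}
	return svc.Interconnects.SetLabels(project, name, req).Context(ctx).Do()
}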
-func (c *InstancesGetScreenshotCall) Context(ctx context.Context) *InstancesGetScreenshotCall { +func (c *InterconnectsSetLabelsCall) Context(ctx context.Context) *InterconnectsSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetScreenshotCall) Header() http.Header { +func (c *InterconnectsSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetScreenshotCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/screenshot") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getScreenshot" call. -// Exactly one of *Screenshot or error will be non-nil. Any non-2xx +// Do executes the "compute.interconnects.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Screenshot.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesGetScreenshotCall) Do(opts ...googleapi.CallOption) (*Screenshot, error) { +func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Screenshot{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -92305,23 +107990,15 @@ func (c *InstancesGetScreenshotCall) Do(opts ...googleapi.CallOption) (*Screensh } return ret, nil // { - // "description": "Returns the screenshot from the specified instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/screenshot", - // "httpMethod": "GET", - // "id": "compute.instances.getScreenshot", + // "description": "Sets the labels on an Interconnect. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.interconnects.setLabels", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -92329,83 +108006,60 @@ func (c *InstancesGetScreenshotCall) Do(opts ...googleapi.CallOption) (*Screensh // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/screenshot", + // "path": "projects/{project}/global/interconnects/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, // "response": { - // "$ref": "Screenshot" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getSerialPortOutput": +// method id "compute.licenseCodes.get": -type InstancesGetSerialPortOutputCall struct { +type LicenseCodesGetCall struct { s *Service project string - zone string - instance string + licenseCode string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetSerialPortOutput: Returns the last 1 MB of serial port output from -// the specified instance. +// Get: Return a specified license code. License codes are mirrored +// across all projects that have permissions to read the License Code. +// *Caution* This resource is intended for use only by third-party +// partners who are creating Cloud Marketplace images. // -// - instance: Name of the instance for this request. -// - project: Project ID for this request. 
-// - zone: The name of the zone for this request. -func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { - c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - licenseCode: Number corresponding to the License code resource to +// return. +// - project: Project ID for this request. +func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { + c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// Port sets the optional parameter "port": Specifies which COM or -// serial port to retrieve data from. -func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("port", fmt.Sprint(port)) - return c -} - -// Start sets the optional parameter "start": Specifies the starting -// byte position of the output to return. To start with the first byte -// of output to the specified port, omit this field or set it to `0`. If -// the output for that byte position is available, this field matches -// the `start` parameter sent with the request. If the amount of serial -// console output exceeds the size of the buffer (1 MB), the oldest -// output is discarded and is no longer available. If the requested -// start position refers to discarded output, the start position is -// adjusted to the oldest output still available, and the adjusted start -// position is returned as the `start` property value. You can also -// provide a negative start position, which translates to the most -// recent number of bytes written to the serial port. For example, -3 is -// interpreted as the most recent 3 bytes written to the serial console. -func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("start", fmt.Sprint(start)) + c.licenseCode = licenseCode return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { +func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -92415,7 +108069,7 @@ func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *Instanc // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { +func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -92423,21 +108077,21 @@ func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { +func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesGetSerialPortOutputCall) Header() http.Header { +func (c *LicenseCodesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { +func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -92450,7 +108104,7 @@ func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/serialPort") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenseCodes/{licenseCode}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -92458,40 +108112,39 @@ func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "licenseCode": c.licenseCode, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getSerialPortOutput" call. -// Exactly one of *SerialPortOutput or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SerialPortOutput.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { +// Do executes the "compute.licenseCodes.get" call. +// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LicenseCode.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &SerialPortOutput{ + ret := &LicenseCode{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -92503,56 +108156,33 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se } return ret, nil // { - // "description": "Returns the last 1 MB of serial port output from the specified instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/serialPort", + // "description": "Return a specified license code. 
License codes are mirrored across all projects that have permissions to read the License Code. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", + // "flatPath": "projects/{project}/global/licenseCodes/{licenseCode}", // "httpMethod": "GET", - // "id": "compute.instances.getSerialPortOutput", + // "id": "compute.licenseCodes.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "licenseCode" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance for this request.", + // "licenseCode": { + // "description": "Number corresponding to the License code resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[0-9]{0,61}?", // "required": true, // "type": "string" // }, - // "port": { - // "default": "1", - // "description": "Specifies which COM or serial port to retrieve data from.", - // "format": "int32", - // "location": "query", - // "maximum": "4", - // "minimum": "1", - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "start": { - // "description": "Specifies the starting byte position of the output to return. To start with the first byte of output to the specified port, omit this field or set it to `0`. If the output for that byte position is available, this field matches the `start` parameter sent with the request. If the amount of serial console output exceeds the size of the buffer (1 MB), the oldest output is discarded and is no longer available. If the requested start position refers to discarded output, the start position is adjusted to the oldest output still available, and the adjusted start position is returned as the `start` property value. You can also provide a negative start position, which translates to the most recent number of bytes written to the serial port. 
For example, -3 is interpreted as the most recent 3 bytes written to the serial console.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/serialPort", + // "path": "projects/{project}/global/licenseCodes/{licenseCode}", // "response": { - // "$ref": "SerialPortOutput" + // "$ref": "LicenseCode" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -92563,123 +108193,113 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se } -// method id "compute.instances.getShieldedInstanceIdentity": +// method id "compute.licenseCodes.testIamPermissions": -type InstancesGetShieldedInstanceIdentityCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type LicenseCodesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetShieldedInstanceIdentity: Returns the Shielded Instance Identity -// of an instance +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. *Caution* This resource is intended for use only +// by third-party partners who are creating Cloud Marketplace images. // -// - instance: Name or id of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) GetShieldedInstanceIdentity(project string, zone string, instance string) *InstancesGetShieldedInstanceIdentityCall { - c := &InstancesGetShieldedInstanceIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { + c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetShieldedInstanceIdentityCall) Fields(s ...googleapi.Field) *InstancesGetShieldedInstanceIdentityCall { +func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicenseCodesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *InstancesGetShieldedInstanceIdentityCall) IfNoneMatch(entityTag string) *InstancesGetShieldedInstanceIdentityCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetShieldedInstanceIdentityCall) Context(ctx context.Context) *InstancesGetShieldedInstanceIdentityCall { +func (c *LicenseCodesTestIamPermissionsCall) Context(ctx context.Context) *LicenseCodesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetShieldedInstanceIdentityCall) Header() http.Header { +func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetShieldedInstanceIdentityCall) doRequest(alt string) (*http.Response, error) { +func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenseCodes/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getShieldedInstanceIdentity" call. -// Exactly one of *ShieldedInstanceIdentity or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *ShieldedInstanceIdentity.ServerResponse.Header or (if a response was +// Do executes the "compute.licenseCodes.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOption) (*ShieldedInstanceIdentity, error) { +func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ShieldedInstanceIdentity{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -92691,23 +108311,15 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Returns the Shielded Instance Identity of an instance", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity", - // "httpMethod": "GET", - // "id": "compute.instances.getShieldedInstanceIdentity", + // "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", + // "flatPath": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.licenseCodes.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name or id of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -92715,17 +108327,20 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOpti // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity", + // "path": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "ShieldedInstanceIdentity" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -92736,28 +108351,27 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOpti } -// method id "compute.instances.insert": +// method id "compute.licenses.delete": -type InstancesInsertCall struct { +type LicensesDeleteCall struct { s *Service project string - zone string - instance *Instance + license string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates an instance resource in the specified project using -// the data included in the request. +// Delete: Deletes the specified license. *Caution* This resource is +// intended for use only by third-party partners who are creating Cloud +// Marketplace images. // +// - license: Name of the license resource to delete. // - project: Project ID for this request. 
-// - zone: The name of the zone for this request. -func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { - c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { + c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.license = license return c } @@ -92772,41 +108386,15 @@ func (r *InstancesService) Insert(project string, zone string, instance *Instanc // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { +func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } -// SourceInstanceTemplate sets the optional parameter -// "sourceInstanceTemplate": Specifies instance template to create the -// instance. This field is optional. It can be a full or partial URL. -// For example, the following are all valid URLs to an instance -// template: - https://www.googleapis.com/compute/v1/projects/project -// /global/instanceTemplates/instanceTemplate - -// projects/project/global/instanceTemplates/instanceTemplate - -// global/instanceTemplates/instanceTemplate -func (c *InstancesInsertCall) SourceInstanceTemplate(sourceInstanceTemplate string) *InstancesInsertCall { - c.urlParams_.Set("sourceInstanceTemplate", sourceInstanceTemplate) - return c -} - -// SourceMachineImage sets the optional parameter "sourceMachineImage": -// Specifies the machine image to use to create the instance. This field -// is optional. It can be a full or partial URL. For example, the -// following are all valid URLs to a machine image: - -// https://www.googleapis.com/compute/v1/projects/project/global/global -// /machineImages/machineImage - -// projects/project/global/global/machineImages/machineImage - -// global/machineImages/machineImage -func (c *InstancesInsertCall) SourceMachineImage(sourceMachineImage string) *InstancesInsertCall { - c.urlParams_.Set("sourceMachineImage", sourceMachineImage) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { +func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -92814,21 +108402,21 @@ func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { +func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesInsertCall) Header() http.Header { +func (c *LicensesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -92836,52 +108424,47 @@ func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.insert" call. +// Do executes the "compute.licenses.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -92895,15 +108478,22 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an instance resource in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/zones/{zone}/instances", - // "httpMethod": "POST", - // "id": "compute.instances.insert", + // "description": "Deletes the specified license. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", + // "flatPath": "projects/{project}/global/licenses/{license}", + // "httpMethod": "DELETE", + // "id": "compute.licenses.delete", // "parameterOrder": [ // "project", - // "zone" + // "license" // ], // "parameters": { + // "license": { + // "description": "Name of the license resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -92915,29 +108505,9 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "sourceInstanceTemplate": { - // "description": "Specifies instance template to create the instance. This field is optional. It can be a full or partial URL. For example, the following are all valid URLs to an instance template: - https://www.googleapis.com/compute/v1/projects/project /global/instanceTemplates/instanceTemplate - projects/project/global/instanceTemplates/instanceTemplate - global/instanceTemplates/instanceTemplate ", - // "location": "query", - // "type": "string" - // }, - // "sourceMachineImage": { - // "description": "Specifies the machine image to use to create the instance. This field is optional. It can be a full or partial URL. For example, the following are all valid URLs to a machine image: - https://www.googleapis.com/compute/v1/projects/project/global/global /machineImages/machineImage - projects/project/global/global/machineImages/machineImage - global/machineImages/machineImage ", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances", - // "request": { - // "$ref": "Instance" - // }, + // "path": "projects/{project}/global/licenses/{license}", // "response": { // "$ref": "Operation" // }, @@ -92949,103 +108519,35 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.list": +// method id "compute.licenses.get": -type InstancesListCall struct { +type LicensesGetCall struct { s *Service project string - zone string + license string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of instances contained within the specified -// zone. +// Get: Returns the specified License resource. *Caution* This resource +// is intended for use only by third-party partners who are creating +// Cloud Marketplace images. // +// - license: Name of the License resource to return. 
// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) List(project string, zone string) *InstancesListCall { - c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *LicensesService) Get(project string, license string) *LicensesGetCall { + c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InstancesListCall) Filter(filter string) *InstancesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. 
Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *InstancesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + c.license = license return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { +func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -93055,7 +108557,7 @@ func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { +func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -93063,21 +108565,21 @@ func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { +func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesListCall) Header() http.Header { +func (c *LicensesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -93090,7 +108592,7 @@ func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -93099,38 +108601,38 @@ func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.list" call. -// Exactly one of *InstanceList or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *InstanceList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { +// Do executes the "compute.licenses.get" call. +// Exactly one of *License or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *License.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceList{ + ret := &License{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -93142,36 +108644,20 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err } return ret, nil // { - // "description": "Retrieves the list of instances contained within the specified zone.", - // "flatPath": "projects/{project}/zones/{zone}/instances", + // "description": "Returns the specified License resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", + // "flatPath": "projects/{project}/global/licenses/{license}", // "httpMethod": "GET", - // "id": "compute.instances.list", + // "id": "compute.licenses.get", // "parameterOrder": [ // "project", - // "zone" + // "license" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "license": { + // "description": "Name of the License resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -93180,23 +108666,11 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances", + // "path": "projects/{project}/global/licenses/{license}", // "response": { - // "$ref": "InstanceList" + // "$ref": "License" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -93207,131 +108681,43 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.listReferrers": +// method id "compute.licenses.getIamPolicy": -type InstancesListReferrersCall struct { +type LicensesGetIamPolicyCall struct { s *Service project string - zone string - instance string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// ListReferrers: Retrieves a list of resources that refer to the VM -// instance specified in the request. For example, if the VM instance is -// part of a managed or unmanaged instance group, the referrers list -// includes the instance group. For more information, read Viewing -// referrers to VM instances. +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. *Caution* This resource +// is intended for use only by third-party partners who are creating +// Cloud Marketplace images. // -// - instance: Name of the target instance scoping this request, or '-' -// if the request should span over all instances in the container. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { - c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { + c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. 
For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *InstancesListReferrersCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesListReferrersCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *LicensesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *LicensesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { +func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -93341,7 +108727,7 @@ func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesList // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { +func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -93349,21 +108735,21 @@ func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesLis // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { +func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesListReferrersCall) Header() http.Header { +func (c *LicensesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -93376,7 +108762,7 @@ func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/referrers") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -93385,39 +108771,38 @@ func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, erro req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.listReferrers" call. -// Exactly one of *InstanceListReferrers or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceListReferrers.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { +// Do executes the "compute.licenses.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceListReferrers{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -93429,46 +108814,21 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance } return ret, nil // { - // "description": "Retrieves a list of resources that refer to the VM instance specified in the request. For example, if the VM instance is part of a managed or unmanaged instance group, the referrers list includes the instance group. For more information, read Viewing referrers to VM instances.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/referrers", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", + // "flatPath": "projects/{project}/global/licenses/{resource}/getIamPolicy", // "httpMethod": "GET", - // "id": "compute.instances.listReferrers", + // "id": "compute.licenses.getIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "instance": { - // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", - // "location": "path", - // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", // "location": "query", - // "minimum": "0", // "type": "integer" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -93476,22 +108836,17 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/referrers", + // "path": "projects/{project}/global/licenses/{resource}/getIamPolicy", // "response": { - // "$ref": "InstanceListReferrers" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -93502,51 +108857,26 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
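A minimal usage sketch of the compute.licenses.getIamPolicy call whose hunk ends above. Only OptionsRequestedPolicyVersion, Fields, IfNoneMatch, Context, and Do appear in this diff; the compute.NewService constructor, the Service.Licenses field, the LicensesService.GetIamPolicy builder, and the project/license names are assumptions drawn from the package's usual conventions, not from this patch.

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated package
)

func main() {
	ctx := context.Background()
	// Assumed constructor; authenticates with Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// "my-project" and "my-license" are placeholders.
	policy, err := svc.Licenses.GetIamPolicy("my-project", "my-license").
		OptionsRequestedPolicyVersion(3). // request the v3 policy schema, per the setter shown above
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("policy: %+v\n", policy)
}
```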
-func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.removeResourcePolicies": +// method id "compute.licenses.insert": -type InstancesRemoveResourcePoliciesCall struct { - s *Service - project string - zone string - instance string - instancesremoveresourcepoliciesrequest *InstancesRemoveResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesInsertCall struct { + s *Service + project string + license *License + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveResourcePolicies: Removes resource policies from an instance. +// Insert: Create a License resource in the specified project. *Caution* +// This resource is intended for use only by third-party partners who +// are creating Cloud Marketplace images. // -// - instance: The instance name for this request. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) RemoveResourcePolicies(project string, zone string, instance string, instancesremoveresourcepoliciesrequest *InstancesRemoveResourcePoliciesRequest) *InstancesRemoveResourcePoliciesCall { - c := &InstancesRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { + c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancesremoveresourcepoliciesrequest = instancesremoveresourcepoliciesrequest + c.license = license return c } @@ -93561,7 +108891,7 @@ func (r *InstancesService) RemoveResourcePolicies(project string, zone string, i // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesRemoveResourcePoliciesCall) RequestId(requestId string) *InstancesRemoveResourcePoliciesCall { +func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -93569,7 +108899,7 @@ func (c *InstancesRemoveResourcePoliciesCall) RequestId(requestId string) *Insta // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *InstancesRemoveResourcePoliciesCall { +func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -93577,21 +108907,21 @@ func (c *InstancesRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *Inst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesRemoveResourcePoliciesCall) Context(ctx context.Context) *InstancesRemoveResourcePoliciesCall { +func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesRemoveResourcePoliciesCall) Header() http.Header { +func (c *LicensesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -93599,14 +108929,14 @@ func (c *InstancesRemoveResourcePoliciesCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesremoveresourcepoliciesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/removeResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -93614,38 +108944,36 @@ func (c *InstancesRemoveResourcePoliciesCall) doRequest(alt string) (*http.Respo } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.removeResourcePolicies" call. +// Do executes the "compute.licenses.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -93659,23 +108987,14 @@ func (c *InstancesRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Removes resource policies from an instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/removeResourcePolicies", + // "description": "Create a License resource in the specified project. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", + // "flatPath": "projects/{project}/global/licenses", // "httpMethod": "POST", - // "id": "compute.instances.removeResourcePolicies", + // "id": "compute.licenses.insert", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -93687,150 +109006,223 @@ func (c *InstancesRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) ( // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/removeResourcePolicies", + // "path": "projects/{project}/global/licenses", // "request": { - // "$ref": "InstancesRemoveResourcePoliciesRequest" + // "$ref": "License" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.instances.reset": +// method id "compute.licenses.list": -type InstancesResetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Reset: Performs a reset on the instance. This is a hard reset the VM -// does not do a graceful shutdown. For more information, see Resetting -// an instance. +// List: Retrieves the list of licenses available in the specified +// project. This method does not get any licenses that belong to other +// projects, including licenses attached to publicly-available images, +// like Debian 9. If you want to get a list of publicly-available +// licenses, use this method to make a request to the respective image +// project, such as debian-cloud or windows-cloud. *Caution* This +// resource is intended for use only by third-party partners who are +// creating Cloud Marketplace images. // -// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { - c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *LicensesService) List(project string) *LicensesListCall { + c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
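The compute.licenses.insert hunk above documents the requestId idempotency parameter; the sketch below shows how the call builder chains together, reusing the imports from the earlier sketch. The Service.Licenses field is an assumption outside this diff, and lic and requestID (a caller-supplied UUID string) are placeholders.

```go
// insertLicense creates a License and passes requestID so the server ignores a
// retried request that has already completed, per the RequestId documentation above.
func insertLicense(ctx context.Context, svc *compute.Service, project string, lic *compute.License, requestID string) (*compute.Operation, error) {
	return svc.Licenses.Insert(project, lic).
		RequestId(requestID).
		Context(ctx).
		Do()
}
```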
-func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *LicensesListCall) Filter(filter string) *LicensesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". 
This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *LicensesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *LicensesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { +func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { +func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesResetCall) Header() http.Header { +func (c *LicensesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/reset") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.reset" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.licenses.list" call. +// Exactly one of *LicensesListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *LicensesListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &LicensesListResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -93842,101 +109234,115 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/reset", - // "httpMethod": "POST", - // "id": "compute.instances.reset", + // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", + // "flatPath": "projects/{project}/global/licenses", + // "httpMethod": "GET", + // "id": "compute.licenses.list", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/reset", + // "path": "projects/{project}/global/licenses", // "response": { - // "$ref": "Operation" + // "$ref": "LicensesListResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.resume": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesResumeCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.licenses.setIamPolicy": + +type LicensesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resume: Resumes an instance that was suspended using the -// instances().suspend method. +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. *Caution* This resource is +// intended for use only by third-party partners who are creating Cloud +// Marketplace images. 
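The Pages helper added in this hunk drives the pageToken/nextPageToken loop described in the parameter documentation above; here is a sketch of iterating every page of compute.licenses.list results, again reusing the earlier imports plus fmt. The Service.Licenses field and the Items field of LicensesListResponse are assumed from the package's conventions; the filter expression and page size are placeholders.

```go
// listLicenses walks every page of compute.licenses.list results for a project.
func listLicenses(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.Licenses.List(project).
		Filter("name != example-license"). // optional filter, in the syntax documented above
		MaxResults(100)                    // per-page size; the documented server default is 500
	return call.Pages(ctx, func(page *compute.LicensesListResponse) error {
		// Items is assumed to hold the licenses returned for this page.
		fmt.Printf("got a page with %d licenses\n", len(page.Items))
		return nil // a non-nil error here would halt the iteration, as documented
	})
}
```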
// -// - instance: Name of the instance resource to resume. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) Resume(project string, zone string, instance string) *InstancesResumeCall { - c := &InstancesResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { + c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesResumeCall) RequestId(requestId string) *InstancesResumeCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesResumeCall) Fields(s ...googleapi.Field) *InstancesResumeCall { +func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -93944,21 +109350,21 @@ func (c *InstancesResumeCall) Fields(s ...googleapi.Field) *InstancesResumeCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesResumeCall) Context(ctx context.Context) *InstancesResumeCall { +func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesResumeCall) Header() http.Header { +func (c *LicensesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesResumeCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -93966,9 +109372,14 @@ func (c *InstancesResumeCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/resume") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -93977,39 +109388,38 @@ func (c *InstancesResumeCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.resume" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.licenses.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -94021,23 +109431,15 @@ func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Resumes an instance that was suspended using the instances().suspend method.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/resume", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", + // "flatPath": "projects/{project}/global/licenses/{resource}/setIamPolicy", // "httpMethod": "POST", - // "id": "compute.instances.resume", + // "id": "compute.licenses.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to resume.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -94045,22 +109447,20 @@ func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/resume", + // "path": "projects/{project}/global/licenses/{resource}/setIamPolicy", + // "request": { + // "$ref": "GlobalSetPolicyRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -94070,35 +109470,36 @@ func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.sendDiagnosticInterrupt": +// method id "compute.licenses.testIamPermissions": -type InstancesSendDiagnosticInterruptCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SendDiagnosticInterrupt: Sends diagnostic interrupt to the instance. +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. *Caution* This resource is intended for use only +// by third-party partners who are creating Cloud Marketplace images. // -// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) SendDiagnosticInterrupt(project string, zone string, instance string) *InstancesSendDiagnosticInterruptCall { - c := &InstancesSendDiagnosticInterruptCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { + c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSendDiagnosticInterruptCall) Fields(s ...googleapi.Field) *InstancesSendDiagnosticInterruptCall { +func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -94106,21 +109507,21 @@ func (c *InstancesSendDiagnosticInterruptCall) Fields(s ...googleapi.Field) *Ins // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesSendDiagnosticInterruptCall) Context(ctx context.Context) *InstancesSendDiagnosticInterruptCall { +func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSendDiagnosticInterruptCall) Header() http.Header { +func (c *LicensesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSendDiagnosticInterruptCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -94128,9 +109529,14 @@ func (c *InstancesSendDiagnosticInterruptCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/sendDiagnosticInterrupt") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -94139,42 +109545,58 @@ func (c *InstancesSendDiagnosticInterruptCall) doRequest(alt string) (*http.Resp req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.sendDiagnosticInterrupt" call. -func (c *InstancesSendDiagnosticInterruptCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "compute.licenses.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return nil, gensupport.WrapError(err) } - return nil + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Sends diagnostic interrupt to the instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/sendDiagnosticInterrupt", + // "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", + // "flatPath": "projects/{project}/global/licenses/{resource}/testIamPermissions", // "httpMethod": "POST", - // "id": "compute.instances.sendDiagnosticInterrupt", + // "id": "compute.licenses.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -94182,52 +109604,50 @@ func (c *InstancesSendDiagnosticInterruptCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/sendDiagnosticInterrupt", + // "path": "projects/{project}/global/licenses/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setDeletionProtection": +// method id "compute.machineImages.delete": -type InstancesSetDeletionProtectionCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineImagesDeleteCall struct { + s *Service + project string + machineImage string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetDeletionProtection: Sets deletion protection on the instance. +// Delete: Deletes the specified machine image. Deleting a machine image +// is permanent and cannot be undone. // +// - machineImage: The name of the machine image to delete. // - project: Project ID for this request. 
-// - resource: Name or id of the resource for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { - c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *MachineImagesService) Delete(project string, machineImage string) *MachineImagesDeleteCall { + c := &MachineImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - return c -} - -// DeletionProtection sets the optional parameter "deletionProtection": -// Whether the resource should be protected against deletion. -func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { - c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) + c.machineImage = machineImage return c } @@ -94242,7 +109662,7 @@ func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtecti // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { +func (c *MachineImagesDeleteCall) RequestId(requestId string) *MachineImagesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -94250,7 +109670,7 @@ func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *Instan // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { +func (c *MachineImagesDeleteCall) Fields(s ...googleapi.Field) *MachineImagesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -94258,21 +109678,21 @@ func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *Insta // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { +func (c *MachineImagesDeleteCall) Context(ctx context.Context) *MachineImagesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetDeletionProtectionCall) Header() http.Header { +func (c *MachineImagesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -94282,46 +109702,45 @@ func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{resource}/setDeletionProtection") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{machineImage}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "machineImage": c.machineImage, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setDeletionProtection" call. +// Do executes the "compute.machineImages.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -94335,21 +109754,21 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets deletion protection on the instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{resource}/setDeletionProtection", - // "httpMethod": "POST", - // "id": "compute.instances.setDeletionProtection", + // "description": "Deletes the specified machine image. 
Deleting a machine image is permanent and cannot be undone.", + // "flatPath": "projects/{project}/global/machineImages/{machineImage}", + // "httpMethod": "DELETE", + // "id": "compute.machineImages.delete", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "machineImage" // ], // "parameters": { - // "deletionProtection": { - // "default": "true", - // "description": "Whether the resource should be protected against deletion.", - // "location": "query", - // "type": "boolean" + // "machineImage": { + // "description": "The name of the machine image to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, // "project": { // "description": "Project ID for this request.", @@ -94362,23 +109781,9 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{resource}/setDeletionProtection", + // "path": "projects/{project}/global/machineImages/{machineImage}", // "response": { // "$ref": "Operation" // }, @@ -94390,132 +109795,119 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.instances.setDiskAutoDelete": +// method id "compute.machineImages.get": -type InstancesSetDiskAutoDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineImagesGetCall struct { + s *Service + project string + machineImage string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to -// an instance. +// Get: Returns the specified machine image. Gets a list of available +// machine images by making a list() request. // -// - autoDelete: Whether to auto-delete the disk when the instance is -// deleted. -// - deviceName: The device name of the disk to modify. Make a get() -// request on the instance to view currently attached disks and device -// names. -// - instance: The instance name for this request. +// - machineImage: The name of the machine image. // - project: Project ID for this request. -// - zone: The name of the zone for this request. 
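The hunk above replaces the instances.setDeletionProtection plumbing with the new compute.machineImages.delete call. A minimal usage sketch against the vendored google.golang.org/api/compute/v1 package, assuming the generated Service exposes the MachineImages sub-service per the generator's usual layout; the project and image names are placeholders, not values from this patch:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials by default.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder project and image names. Do returns a compute.Operation
	// that can be polled until the delete finishes.
	op, err := svc.MachineImages.Delete("my-project", "my-machine-image").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete started: operation %s (%s)", op.Name, op.Status)
}

The optional RequestId setter shown in the diff can be chained before Do when a retry-safe idempotency token is needed.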
-func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { - c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *MachineImagesService) Get(project string, machineImage string) *MachineImagesGetCall { + c := &MachineImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) - c.urlParams_.Set("deviceName", deviceName) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.machineImage = machineImage return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { +func (c *MachineImagesGetCall) Fields(s ...googleapi.Field) *MachineImagesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineImagesGetCall) IfNoneMatch(entityTag string) *MachineImagesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { +func (c *MachineImagesGetCall) Context(ctx context.Context) *MachineImagesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { +func (c *MachineImagesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{machineImage}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "machineImage": c.machineImage, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setDiskAutoDelete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.machineImages.get" call. +// Exactly one of *MachineImage or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *MachineImage.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &MachineImage{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -94527,33 +109919,17 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the auto-delete flag for a disk attached to an instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", - // "httpMethod": "POST", - // "id": "compute.instances.setDiskAutoDelete", + // "description": "Returns the specified machine image. 
Gets a list of available machine images by making a list() request.", + // "flatPath": "projects/{project}/global/machineImages/{machineImage}", + // "httpMethod": "GET", + // "id": "compute.machineImages.get", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "autoDelete", - // "deviceName" + // "machineImage" // ], // "parameters": { - // "autoDelete": { - // "description": "Whether to auto-delete the disk when the instance is deleted.", - // "location": "query", - // "required": true, - // "type": "boolean" - // }, - // "deviceName": { - // "description": "The device name of the disk to modify. Make a get() request on the instance to view currently attached disks and device names.", - // "location": "query", - // "pattern": "\\w[\\w.-]{0,254}", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "The instance name for this request.", + // "machineImage": { + // "description": "The name of the machine image.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -94565,140 +109941,139 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + // "path": "projects/{project}/global/machineImages/{machineImage}", // "response": { - // "$ref": "Operation" + // "$ref": "MachineImage" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setIamPolicy": +// method id "compute.machineImages.getIamPolicy": -type InstancesSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineImagesGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// GetIamPolicy: Gets the access control policy for a resource. 
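The compute.machineImages.get call added here supports conditional reads via IfNoneMatch, with googleapi.IsNotModified (from google.golang.org/api/googleapi) distinguishing a 304 from a real error, as the doc comments above note. A hedged sketch reusing a *compute.Service built as in the delete example; lastEtag stands in for an ETag the caller saved from an earlier response, which is an assumption of this sketch:

// getMachineImage re-fetches an image only when it has changed since lastEtag.
func getMachineImage(ctx context.Context, svc *compute.Service, lastEtag string) (*compute.MachineImage, error) {
	img, err := svc.MachineImages.Get("my-project", "my-machine-image").
		IfNoneMatch(lastEtag). // server answers 304 Not Modified if unchanged
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// Cached copy is still current; callers can keep using it.
		return nil, nil
	}
	return img, err
}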
May be +// empty if no such policy or resource exists. // // - project: Project ID for this request. // - resource: Name or id of the resource for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstancesSetIamPolicyCall { - c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *MachineImagesService) GetIamPolicy(project string, resource string) *MachineImagesGetIamPolicyCall { + c := &MachineImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *MachineImagesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *MachineImagesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIamPolicyCall { +func (c *MachineImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *MachineImagesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetIamPolicyCall) Context(ctx context.Context) *InstancesSetIamPolicyCall { +func (c *MachineImagesGetIamPolicyCall) Context(ctx context.Context) *MachineImagesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetIamPolicyCall) Header() http.Header { +func (c *MachineImagesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setIamPolicy" call. +// Do executes the "compute.machineImages.getIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -94712,16 +110087,21 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{resource}/setIamPolicy", - // "httpMethod": "POST", - // "id": "compute.instances.setIamPolicy", + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/global/machineImages/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.machineImages.getIamPolicy", // "parameterOrder": [ // "project", - // "zone", // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -94732,58 +110112,46 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{resource}/setIamPolicy", - // "request": { - // "$ref": "ZoneSetPolicyRequest" - // }, + // "path": "projects/{project}/global/machineImages/{resource}/getIamPolicy", // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setLabels": +// method id "compute.machineImages.insert": -type InstancesSetLabelsCall struct { - s *Service - project string - zone string - instance string - instancessetlabelsrequest *InstancesSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineImagesInsertCall struct { + s *Service + project string + machineimage *MachineImage + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets labels on an instance. To learn more about labels, -// read the Labeling Resources documentation. +// Insert: Creates a machine image in the specified project using the +// data that is included in the request. If you are creating a new +// machine image to update an existing instance, your new machine image +// should use the same network or, if applicable, the same subnetwork as +// the original instance. // -// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { - c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *MachineImagesService) Insert(project string, machineimage *MachineImage) *MachineImagesInsertCall { + c := &MachineImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetlabelsrequest = instancessetlabelsrequest + c.machineimage = machineimage return c } @@ -94798,15 +110166,23 @@ func (r *InstancesService) SetLabels(project string, zone string, instance strin // clients from accidentally creating duplicate commitments. 
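compute.machineImages.getIamPolicy reads the image's IAM policy; requesting policy version 3 via OptionsRequestedPolicyVersion is what surfaces conditional role bindings. A sketch under the same assumptions as the earlier examples, with placeholder resource names:

// listImageBindings prints who holds which role on a machine image.
func listImageBindings(ctx context.Context, svc *compute.Service) error {
	policy, err := svc.MachineImages.GetIamPolicy("my-project", "my-machine-image").
		OptionsRequestedPolicyVersion(3). // include conditional bindings, if any
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, b := range policy.Bindings {
		log.Printf("%s -> %v", b.Role, b.Members)
	}
	return nil
}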
The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { +func (c *MachineImagesInsertCall) RequestId(requestId string) *MachineImagesInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// SourceInstance sets the optional parameter "sourceInstance": +// Required. Source instance that is used to create the machine image +// from. +func (c *MachineImagesInsertCall) SourceInstance(sourceInstance string) *MachineImagesInsertCall { + c.urlParams_.Set("sourceInstance", sourceInstance) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { +func (c *MachineImagesInsertCall) Fields(s ...googleapi.Field) *MachineImagesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -94814,21 +110190,21 @@ func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { +func (c *MachineImagesInsertCall) Context(ctx context.Context) *MachineImagesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetLabelsCall) Header() http.Header { +func (c *MachineImagesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -94836,14 +110212,14 @@ func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.machineimage) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -94851,38 +110227,36 @@ func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setLabels" call. +// Do executes the "compute.machineImages.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -94896,23 +110270,14 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setLabels", + // "description": "Creates a machine image in the specified project using the data that is included in the request. If you are creating a new machine image to update an existing instance, your new machine image should use the same network or, if applicable, the same subnetwork as the original instance.", + // "flatPath": "projects/{project}/global/machineImages", // "httpMethod": "POST", - // "id": "compute.instances.setLabels", + // "id": "compute.machineImages.insert", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -94925,17 +110290,15 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "sourceInstance": { + // "description": "Required. 
Source instance that is used to create the machine image from.", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setLabels", + // "path": "projects/{project}/global/machineImages", // "request": { - // "$ref": "InstancesSetLabelsRequest" + // "$ref": "MachineImage" // }, // "response": { // "$ref": "Operation" @@ -94948,132 +110311,197 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.instances.setMachineResources": +// method id "compute.machineImages.list": -type InstancesSetMachineResourcesCall struct { - s *Service - project string - zone string - instance string - instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineImagesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of machine images that are contained within +// the specified project. +// +// - project: Project ID for this request. +func (r *MachineImagesService) List(project string) *MachineImagesListCall { + c := &MachineImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
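compute.machineImages.insert creates the image from an existing instance, with the required source instance supplied as the sourceInstance query parameter on the call. A sketch with placeholder project, zone, instance, and image names; the Operation it returns is long-running and must be polled or waited on separately:

// createMachineImage captures my-instance into a new machine image and
// returns the long-running Operation describing the insert.
func createMachineImage(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	return svc.MachineImages.Insert("my-project", &compute.MachineImage{
		Name: "my-machine-image",
	}).
		SourceInstance("projects/my-project/zones/us-central1-a/instances/my-instance").
		Context(ctx).
		Do()
}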
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *MachineImagesListCall) Filter(filter string) *MachineImagesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *MachineImagesListCall) MaxResults(maxResults int64) *MachineImagesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c } -// SetMachineResources: Changes the number and/or type of accelerator -// for a stopped instance to the values specified in the request. -// -// - instance: Name of the instance scoping this request. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { - c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *MachineImagesListCall) OrderBy(orderBy string) *MachineImagesListCall { + c.urlParams_.Set("orderBy", orderBy) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { - c.urlParams_.Set("requestId", requestId) +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *MachineImagesListCall) PageToken(pageToken string) *MachineImagesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *MachineImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineImagesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { +func (c *MachineImagesListCall) Fields(s ...googleapi.Field) *MachineImagesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineImagesListCall) IfNoneMatch(entityTag string) *MachineImagesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { +func (c *MachineImagesListCall) Context(ctx context.Context) *MachineImagesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMachineResourcesCall) Header() http.Header { +func (c *MachineImagesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setMachineResources") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineResources" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.machineImages.list" call. +// Exactly one of *MachineImageList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *MachineImageList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &MachineImageList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -95085,106 +110513,113 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setMachineResources", - // "httpMethod": "POST", - // "id": "compute.instances.setMachineResources", + // "description": "Retrieves a list of machine images that are contained within the specified project.", + // "flatPath": "projects/{project}/global/machineImages", + // "httpMethod": "GET", + // "id": "compute.machineImages.list", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setMachineResources", - // "request": { - // "$ref": "InstancesSetMachineResourcesRequest" - // }, + // "path": "projects/{project}/global/machineImages", // "response": { - // "$ref": "Operation" + // "$ref": "MachineImageList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setMachineType": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MachineImagesListCall) Pages(ctx context.Context, f func(*MachineImageList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesSetMachineTypeCall struct { - s *Service - project string - zone string - instance string - instancessetmachinetyperequest *InstancesSetMachineTypeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.machineImages.setIamPolicy": + +type MachineImagesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMachineType: Changes the machine type for a stopped instance to -// the machine type specified in the request. +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. // -// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone for this request. 
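The list call supports server-side filtering and pagination, and the generated Pages helper shown above drives the nextPageToken loop so callers never touch the token directly. A sketch that walks every machine image in a placeholder project; the filter string is only an illustration of the syntax documented above:

// printMachineImages lists all machine images in the project, skipping
// one name purely to demonstrate the filter expression syntax.
func printMachineImages(ctx context.Context, svc *compute.Service) error {
	return svc.MachineImages.List("my-project").
		Filter(`name != example-image`).
		MaxResults(100). // page size; Pages keeps requesting until exhausted
		Pages(ctx, func(page *compute.MachineImageList) error {
			for _, mi := range page.Items {
				log.Println(mi.Name)
			}
			return nil
		})
}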
-func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { - c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *MachineImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *MachineImagesSetIamPolicyCall { + c := &MachineImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetmachinetyperequest = instancessetmachinetyperequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { +func (c *MachineImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -95192,21 +110627,21 @@ func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { +func (c *MachineImagesSetIamPolicyCall) Context(ctx context.Context) *MachineImagesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetMachineTypeCall) Header() http.Header { +func (c *MachineImagesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -95214,14 +110649,14 @@ func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setMachineType") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -95230,39 +110665,38 @@ func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, err req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineType" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.machineImages.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -95274,23 +110708,15 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setMachineType", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/global/machineImages/{resource}/setIamPolicy", // "httpMethod": "POST", - // "id": "compute.instances.setMachineType", + // "id": "compute.machineImages.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -95298,25 +110724,20 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setMachineType", + // "path": "projects/{project}/global/machineImages/{resource}/setIamPolicy", // "request": { - // "$ref": "InstancesSetMachineTypeRequest" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -95326,54 +110747,35 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instances.setMetadata": +// method id "compute.machineImages.testIamPermissions": -type InstancesSetMetadataCall struct { - s *Service - project string - zone string - instance string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineImagesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMetadata: Sets metadata for the specified instance to the data -// included in the request. +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. // -// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { - c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *MachineImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *MachineImagesTestIamPermissionsCall { + c := &MachineImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.metadata = metadata - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { +func (c *MachineImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *MachineImagesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -95381,21 +110783,21 @@ func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { +func (c *MachineImagesTestIamPermissionsCall) Context(ctx context.Context) *MachineImagesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMetadataCall) Header() http.Header { +func (c *MachineImagesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -95403,14 +110805,14 @@ func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -95419,39 +110821,38 @@ func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMetadata" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.machineImages.testIamPermissions" call. 
+// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -95463,23 +110864,15 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets metadata for the specified instance to the data included in the request.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setMetadata", + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/global/machineImages/{resource}/testIamPermissions", // "httpMethod": "POST", - // "id": "compute.instances.setMetadata", + // "id": "compute.machineImages.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -95487,162 +110880,233 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setMetadata", + // "path": "projects/{project}/global/machineImages/{resource}/testIamPermissions", // "request": { - // "$ref": "Metadata" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setMinCpuPlatform": +// method id "compute.machineTypes.aggregatedList": -type InstancesSetMinCpuPlatformCall struct { - s *Service - project string - zone string - instance string - instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetMinCpuPlatform: Changes the minimum CPU platform that this -// instance should use. This method can only be called on a stopped -// instance. For more information, read Specifying a Minimum CPU -// Platform. +// AggregatedList: Retrieves an aggregated list of machine types. // -// - instance: Name of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { - c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { + c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. 
+func (c *MachineTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *MachineTypesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *MachineTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { +func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { +func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { +func (c *MachineTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/machineTypes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMinCpuPlatform" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.machineTypes.aggregatedList" call. +// Exactly one of *MachineTypeAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *MachineTypeAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &MachineTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -95654,188 +111118,206 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", - // "httpMethod": "POST", - // "id": "compute.instances.setMinCpuPlatform", + // "description": "Retrieves an aggregated list of machine types.", + // "flatPath": "projects/{project}/aggregated/machineTypes", + // "httpMethod": "GET", + // "id": "compute.machineTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", - // "request": { - // "$ref": "InstancesSetMinCpuPlatformRequest" - // }, + // "path": "projects/{project}/aggregated/machineTypes", // "response": { - // "$ref": "Operation" + // "$ref": "MachineTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setScheduling": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesSetSchedulingCall struct { - s *Service - project string - zone string - instance string - scheduling *Scheduling - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.machineTypes.get": + +type MachineTypesGetCall struct { + s *Service + project string + zone string + machineType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetScheduling: Sets an instance's scheduling options. You can only -// call this method on a stopped instance, that is, a VM instance that -// is in a `TERMINATED` state. See Instance Life Cycle for more -// information on the possible instance states. For more information -// about setting scheduling options for a VM, see Set VM availability -// policies. +// Get: Returns the specified machine type. Gets a list of available +// machine types by making a list() request. // -// - instance: Instance name for this request. +// - machineType: Name of the machine type to return. // - project: Project ID for this request. // - zone: The name of the zone for this request. 
-func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { - c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { + c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.scheduling = scheduling - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { - c.urlParams_.Set("requestId", requestId) + c.machineType = machineType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { +func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { +func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetSchedulingCall) Header() http.Header { +func (c *MachineTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setScheduling") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/machineTypes/{machineType}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "machineType": c.machineType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setScheduling" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.machineTypes.get" call. +// Exactly one of *MachineType or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *MachineType.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &MachineType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -95847,18 +111329,18 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Sets an instance's scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. 
For more information about setting scheduling options for a VM, see Set VM availability policies.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", - // "httpMethod": "POST", - // "id": "compute.instances.setScheduling", + // "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request.", + // "flatPath": "projects/{project}/zones/{zone}/machineTypes/{machineType}", + // "httpMethod": "GET", + // "id": "compute.machineTypes.get", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "machineType" // ], // "parameters": { - // "instance": { - // "description": "Instance name for this request.", + // "machineType": { + // "description": "Name of the machine type to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -95871,11 +111353,6 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -95884,148 +111361,214 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", - // "request": { - // "$ref": "Scheduling" - // }, + // "path": "projects/{project}/zones/{zone}/machineTypes/{machineType}", // "response": { - // "$ref": "Operation" + // "$ref": "MachineType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setServiceAccount": +// method id "compute.machineTypes.list": -type InstancesSetServiceAccountCall struct { - s *Service - project string - zone string - instance string - instancessetserviceaccountrequest *InstancesSetServiceAccountRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetServiceAccount: Sets the service account on the instance. For more -// information, read Changing the service account and access scopes for -// an instance. +// List: Retrieves a list of machine types available to the specified +// project. // -// - instance: Name of the instance resource to start. // - project: Project ID for this request. // - zone: The name of the zone for this request. 
-func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { - c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { + c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.instancessetserviceaccountrequest = instancessetserviceaccountrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *MachineTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { +func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { +func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetServiceAccountCall) Header() http.Header { +func (c *MachineTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/machineTypes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setServiceAccount" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.machineTypes.list" call. +// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *MachineTypeList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &MachineTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -96037,21 +111580,36 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", - // "httpMethod": "POST", - // "id": "compute.instances.setServiceAccount", + // "description": "Retrieves a list of machine types available to the specified project.", + // "flatPath": "projects/{project}/zones/{zone}/machineTypes", + // "httpMethod": "GET", + // "id": "compute.machineTypes.list", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "zone" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -96061,10 +111619,10 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // }, // "zone": { // "description": "The name of the zone for this request.", @@ -96074,149 +111632,244 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", - // "request": { - // "$ref": "InstancesSetServiceAccountRequest" - // }, + // "path": "projects/{project}/zones/{zone}/machineTypes", // "response": { - // "$ref": "Operation" + // "$ref": "MachineTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setShieldedInstanceIntegrityPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.networkAttachments.aggregatedList": -type InstancesSetShieldedInstanceIntegrityPolicyCall struct { - s *Service - project string - zone string - instance string - shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkAttachmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance -// integrity policy for an instance. You can only use this method on a -// running instance. This method supports PATCH semantics and uses the -// JSON merge patch format and processing rules. +// AggregatedList: Retrieves the list of all NetworkAttachment +// resources, regional and global, available to the specified project. // -// - instance: Name or id of the instance scoping this request. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { - c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkAttachmentsService) AggregatedList(project string) *NetworkAttachmentsAggregatedListCall { + c := &NetworkAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NetworkAttachmentsAggregatedListCall) Filter(filter string) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. 
For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *NetworkAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworkAttachmentsAggregatedListCall) MaxResults(maxResults int64) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NetworkAttachmentsAggregatedListCall) OrderBy(orderBy string) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NetworkAttachmentsAggregatedListCall) PageToken(pageToken string) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *NetworkAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { +func (c *NetworkAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *NetworkAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { +func (c *NetworkAttachmentsAggregatedListCall) Context(ctx context.Context) *NetworkAttachmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { +func (c *NetworkAttachmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkAttachments") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkAttachments.aggregatedList" call. +// Exactly one of *NetworkAttachmentAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *NetworkAttachmentAggregatedList.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &NetworkAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -96228,83 +111881,112 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", - // "httpMethod": "PATCH", - // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", + // "description": "Retrieves the list of all NetworkAttachment resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/networkAttachments", + // "httpMethod": "GET", + // "id": "compute.networkAttachments.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name or id of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", - // "request": { - // "$ref": "ShieldedInstanceIntegrityPolicy" - // }, + // "path": "projects/{project}/aggregated/networkAttachments", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkAttachmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setTags": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworkAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkAttachmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesSetTagsCall struct { - s *Service - project string - zone string - instance string - tags *Tags - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.networkAttachments.delete": + +type NetworkAttachmentsDeleteCall struct { + s *Service + project string + region string + networkAttachment string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTags: Sets network tags for the specified instance to the data -// included in the request. +// Delete: Deletes the specified NetworkAttachment in the given scope // -// - instance: Name of the instance scoping this request. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { - c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkAttachment: Name of the NetworkAttachment resource to +// delete. +// - project: Project ID for this request. +// - region: Name of the region of this request. 
+func (r *NetworkAttachmentsService) Delete(project string, region string, networkAttachment string) *NetworkAttachmentsDeleteCall { + c := &NetworkAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.tags = tags + c.region = region + c.networkAttachment = networkAttachment return c } @@ -96318,8 +112000,9 @@ func (r *InstancesService) SetTags(project string, zone string, instance string, // received, and if so, will ignore the second request. This prevents // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { +// supported ( 00000000-0000-0000-0000-000000000000). end_interface: +// MixerMutationRequestBuilder +func (c *NetworkAttachmentsDeleteCall) RequestId(requestId string) *NetworkAttachmentsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -96327,7 +112010,7 @@ func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { +func (c *NetworkAttachmentsDeleteCall) Fields(s ...googleapi.Field) *NetworkAttachmentsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -96335,21 +112018,21 @@ func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { +func (c *NetworkAttachmentsDeleteCall) Context(ctx context.Context) *NetworkAttachmentsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetTagsCall) Header() http.Header { +func (c *NetworkAttachmentsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -96357,53 +112040,48 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setTags") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "networkAttachment": c.networkAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setTags" call. +// Do executes the "compute.networkAttachments.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -96417,18 +112095,18 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Sets network tags for the specified instance to the data included in the request.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setTags", - // "httpMethod": "POST", - // "id": "compute.instances.setTags", + // "description": "Deletes the specified NetworkAttachment in the given scope", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "httpMethod": "DELETE", + // "id": "compute.networkAttachments.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "networkAttachment" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "networkAttachment": { + // "description": "Name of the NetworkAttachment resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -96441,23 +112119,20 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region of this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setTags", - // "request": { - // "$ref": "Tags" - // }, + // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", // "response": { // "$ref": "Operation" // }, @@ -96469,109 +112144,124 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.instances.simulateMaintenanceEvent": +// method id "compute.networkAttachments.get": -type InstancesSimulateMaintenanceEventCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkAttachmentsGetCall struct { + s *Service + project string + region string + networkAttachment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SimulateMaintenanceEvent: Simulates a host maintenance event on a VM. -// For more information, see Simulate a host maintenance event. +// Get: Returns the specified NetworkAttachment resource in the given +// scope. // -// - instance: Name of the instance scoping this request. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { - c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkAttachment: Name of the NetworkAttachment resource to +// return. +// - project: Project ID for this request. +// - region: Name of the region of this request. +func (r *NetworkAttachmentsService) Get(project string, region string, networkAttachment string) *NetworkAttachmentsGetCall { + c := &NetworkAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + c.networkAttachment = networkAttachment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { +func (c *NetworkAttachmentsGetCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkAttachmentsGetCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { +func (c *NetworkAttachmentsGetCall) Context(ctx context.Context) *NetworkAttachmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { +func (c *NetworkAttachmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "networkAttachment": c.networkAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.simulateMaintenanceEvent" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkAttachments.get" call. +// Exactly one of *NetworkAttachment or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *NetworkAttachment.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAttachment, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &NetworkAttachment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -96583,18 +112273,18 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Simulates a host maintenance event on a VM. For more information, see Simulate a host maintenance event.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", - // "httpMethod": "POST", - // "id": "compute.instances.simulateMaintenanceEvent", + // "description": "Returns the specified NetworkAttachment resource in the given scope.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "httpMethod": "GET", + // "id": "compute.networkAttachments.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "networkAttachment" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "networkAttachment": { + // "description": "Name of the NetworkAttachment resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -96607,145 +112297,151 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region of this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkAttachment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.start": +// method id "compute.networkAttachments.getIamPolicy": -type InstancesStartCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkAttachmentsGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ 
http.Header } -// Start: Starts an instance that was stopped using the instances().stop -// method. For more information, see Restart an instance. +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. // -// - instance: Name of the instance resource to start. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { - c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *NetworkAttachmentsService) GetIamPolicy(project string, region string, resource string) *NetworkAttachmentsGetIamPolicyCall { + c := &NetworkAttachmentsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + c.resource = resource return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { - c.urlParams_.Set("requestId", requestId) +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *NetworkAttachmentsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NetworkAttachmentsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { +func (c *NetworkAttachmentsGetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkAttachmentsGetIamPolicyCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { +func (c *NetworkAttachmentsGetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStartCall) Header() http.Header { +func (c *NetworkAttachmentsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/start") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.start" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkAttachments.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworkAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -96757,22 +112453,21 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/start", - // "httpMethod": "POST", - // "id": "compute.instances.start", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.networkAttachments.getIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" // }, // "project": { // "description": "Project ID for this request.", @@ -96781,57 +112476,56 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/start", + // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.startWithEncryptionKey": +// method id "compute.networkAttachments.insert": -type InstancesStartWithEncryptionKeyCall struct { - s *Service - project string - zone string - instance string - instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkAttachmentsInsertCall struct { + s *Service + project string + region string + networkattachment *NetworkAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// StartWithEncryptionKey: Starts an instance that was stopped using the -// instances().stop method. For more information, see Restart an -// instance. +// Insert: Creates a NetworkAttachment in the specified project in the +// given scope using the parameters that are included in the request. // -// - instance: Name of the instance resource to start. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { - c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region of this request. +func (r *NetworkAttachmentsService) Insert(project string, region string, networkattachment *NetworkAttachment) *NetworkAttachmentsInsertCall { + c := &NetworkAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest + c.region = region + c.networkattachment = networkattachment return c } @@ -96845,8 +112539,9 @@ func (r *InstancesService) StartWithEncryptionKey(project string, zone string, i // received, and if so, will ignore the second request. This prevents // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { +// supported ( 00000000-0000-0000-0000-000000000000). end_interface: +// MixerMutationRequestBuilder +func (c *NetworkAttachmentsInsertCall) RequestId(requestId string) *NetworkAttachmentsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -96854,7 +112549,7 @@ func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *Insta // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { +func (c *NetworkAttachmentsInsertCall) Fields(s ...googleapi.Field) *NetworkAttachmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -96862,21 +112557,21 @@ func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *Inst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { +func (c *NetworkAttachmentsInsertCall) Context(ctx context.Context) *NetworkAttachmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { +func (c *NetworkAttachmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -96884,14 +112579,14 @@ func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkattachment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -96899,38 +112594,37 @@ func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Respo } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.startWithEncryptionKey" call. +// Do executes the "compute.networkAttachments.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -96944,23 +112638,15 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + // "description": "Creates a NetworkAttachment in the specified project in the given scope using the parameters that are included in the request.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments", // "httpMethod": "POST", - // "id": "compute.instances.startWithEncryptionKey", + // "id": "compute.networkAttachments.insert", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -96968,22 +112654,22 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region of this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + // "path": "projects/{project}/regions/{region}/networkAttachments", // "request": { - // "$ref": "InstancesStartWithEncryptionKeyRequest" + // "$ref": "NetworkAttachment" // }, // "response": { // "$ref": "Operation" @@ -96996,129 +112682,200 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.instances.stop": +// method id "compute.networkAttachments.list": -type InstancesStopCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkAttachmentsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Stop: Stops a running instance, shutting it down cleanly, and allows -// you to restart the instance at a later time. Stopped instances do not -// incur VM usage charges while they are stopped. However, resources -// that the VM is using, such as persistent disks and static IP -// addresses, will continue to be charged until they are deleted. For -// more information, see Stopping an instance. +// List: Lists the NetworkAttachments for a project in the given scope. // -// - instance: Name of the instance resource to stop. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { - c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region of this request. +func (r *NetworkAttachmentsService) List(project string, region string) *NetworkAttachmentsListCall { + c := &NetworkAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
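As an illustration of the compute.networkAttachments.insert call added above, the sketch below creates a NetworkAttachment and supplies a requestId so that a retried request is deduplicated, as described in the generated requestId documentation. This is a minimal, hypothetical usage sketch only: the google.golang.org/api/compute/v1 import path, the project/region values, and the NetworkAttachment field values shown are assumptions, not part of this change.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	attachment := &compute.NetworkAttachment{
		Name:                 "example-attachment",      // hypothetical name
		ConnectionPreference: "ACCEPT_AUTOMATIC",        // assumed enum value
		Network:              "global/networks/default", // assumed network URL
	}

	// The requestId must be a valid, non-zero UUID; reusing the same value on
	// a retry lets the server ignore a duplicate of an already-completed request.
	op, err := svc.NetworkAttachments.
		Insert("my-project", "us-central1", attachment).
		RequestId("3c2f7c3e-8a43-4f55-9d6e-0c1f2a4b5d6e").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("started operation %s (status %s)", op.Name, op.Status)
}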
-func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NetworkAttachmentsListCall) Filter(filter string) *NetworkAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworkAttachmentsListCall) MaxResults(maxResults int64) *NetworkAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". 
This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NetworkAttachmentsListCall) OrderBy(orderBy string) *NetworkAttachmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NetworkAttachmentsListCall) PageToken(pageToken string) *NetworkAttachmentsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *NetworkAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { +func (c *NetworkAttachmentsListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkAttachmentsListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { +func (c *NetworkAttachmentsListCall) Context(ctx context.Context) *NetworkAttachmentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStopCall) Header() http.Header { +func (c *NetworkAttachmentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/stop") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.stop" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkAttachments.list" call. +// Exactly one of *NetworkAttachmentList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkAttachmentList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &NetworkAttachmentList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -97130,21 +112887,36 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/stop", - // "httpMethod": "POST", - // "id": "compute.instances.stop", + // "description": "Lists the NetworkAttachments for a project in the given scope.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments", + // "httpMethod": "GET", + // "id": "compute.networkAttachments.list", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to stop.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -97154,83 +112926,85 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region of this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/stop", + // "path": "projects/{project}/regions/{region}/networkAttachments", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkAttachmentList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.suspend": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworkAttachmentsListCall) Pages(ctx context.Context, f func(*NetworkAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesSuspendCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.networkAttachments.setIamPolicy": + +type NetworkAttachmentsSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Suspend: This method suspends a running instance, saving its state to -// persistent storage, and allows you to resume the instance at a later -// time. Suspended instances have no compute costs (cores or RAM), and -// incur only storage charges for the saved VM memory and localSSD data. 
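The list call and its Pages helper added above follow the standard generated pattern: Pages repeatedly calls Do and feeds nextPageToken back into the call until the listing is exhausted. Below is a minimal sketch of paging through NetworkAttachments, assuming the compute/v1 import path and placeholder project/region values; the filter string simply mirrors the example given in the generated Filter documentation.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	call := svc.NetworkAttachments.List("my-project", "us-central1").
		Filter("name != example-instance"). // example filter, mirroring the generated docs
		MaxResults(100)                     // per-page size; the API default is 500

	// Pages follows nextPageToken until the listing is exhausted; a non-nil
	// error returned from the callback stops the iteration early.
	err = call.Pages(ctx, func(page *compute.NetworkAttachmentList) error {
		for _, na := range page.Items {
			log.Printf("network attachment: %s", na.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}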
-// Any charged resources the virtual machine was using, such as -// persistent disks and static IP addresses, will continue to be charged -// while the instance is suspended. For more information, see Suspending -// and resuming an instance. +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. // -// - instance: Name of the instance resource to suspend. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) Suspend(project string, zone string, instance string) *InstancesSuspendCall { - c := &InstancesSuspendCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *NetworkAttachmentsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NetworkAttachmentsSetIamPolicyCall { + c := &NetworkAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCall { +func (c *NetworkAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -97238,21 +113012,21 @@ func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSuspendCall) Context(ctx context.Context) *InstancesSuspendCall { +func (c *NetworkAttachmentsSetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSuspendCall) Header() http.Header { +func (c *NetworkAttachmentsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -97260,9 +113034,14 @@ func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/suspend") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -97271,39 +113050,39 @@ func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.suspend" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkAttachments.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -97315,23 +113094,16 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. Suspended instances have no compute costs (cores or RAM), and incur only storage charges for the saved VM memory and localSSD data. Any charged resources the virtual machine was using, such as persistent disks and static IP addresses, will continue to be charged while the instance is suspended. For more information, see Suspending and resuming an instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/suspend", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", // "httpMethod": "POST", - // "id": "compute.instances.suspend", + // "id": "compute.networkAttachments.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to suspend.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -97339,22 +113111,27 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/suspend", + // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -97364,12 +113141,12 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.instances.testIamPermissions": +// method id "compute.networkAttachments.testIamPermissions": -type InstancesTestIamPermissionsCall struct { +type NetworkAttachmentsTestIamPermissionsCall struct { s *Service project string - zone string + region string resource string testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams @@ -97381,12 +113158,12 @@ type InstancesTestIamPermissionsCall struct { // specified resource. // // - project: Project ID for this request. +// - region: The name of the region for this request. // - resource: Name or id of the resource for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { - c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkAttachmentsTestIamPermissionsCall { + c := &NetworkAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone + c.region = region c.resource = resource c.testpermissionsrequest = testpermissionsrequest return c @@ -97395,7 +113172,7 @@ func (r *InstancesService) TestIamPermissions(project string, zone string, resou // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { +func (c *NetworkAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkAttachmentsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -97403,21 +113180,21 @@ func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { +func (c *NetworkAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *NetworkAttachmentsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesTestIamPermissionsCall) Header() http.Header { +func (c *NetworkAttachmentsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -97432,7 +113209,7 @@ func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -97441,37 +113218,37 @@ func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.testIamPermissions" call. +// Do executes the "compute.networkAttachments.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -97486,12 +113263,12 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes return ret, nil // { // "description": "Returns permissions that a caller has on the specified resource.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{resource}/testIamPermissions", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", // "httpMethod": "POST", - // "id": "compute.instances.testIamPermissions", + // "id": "compute.networkAttachments.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", + // "region", // "resource" // ], // "parameters": { @@ -97502,22 +113279,22 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{resource}/testIamPermissions", + // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -97533,166 +113310,212 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } -// method id "compute.instances.update": +// method id "compute.networkEdgeSecurityServices.aggregatedList": -type InstancesUpdateCall struct { - s *Service - project string - zone string - instance string - instance2 *Instance - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEdgeSecurityServicesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates an instance only if the necessary resources are -// available. This method can update only a specific set of instance -// properties. See Updating a running instance for a list of updatable -// instance properties. +// AggregatedList: Retrieves the list of all NetworkEdgeSecurityService +// resources available to the specified project. // -// - instance: Name of the instance resource to update. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. 
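The setIamPolicy and testIamPermissions calls above mirror the other regional IAM helpers in this package. The sketch below checks which permissions the caller holds on a given attachment; it is a hedged example, and the permission names, project, region, and resource values are illustrative assumptions rather than values taken from this change.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	req := &compute.TestPermissionsRequest{
		// Hypothetical permission names to probe; the API echoes back the
		// subset that the caller actually holds on the resource.
		Permissions: []string{
			"compute.networkAttachments.get",
			"compute.networkAttachments.update",
		},
	}

	resp, err := svc.NetworkAttachments.
		TestIamPermissions("my-project", "us-central1", "example-attachment", req).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("granted permissions: %v", resp.Permissions)
}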
-func (r *InstancesService) Update(project string, zone string, instance string, instance2 *Instance) *InstancesUpdateCall { - c := &InstancesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Name of the project scoping this request. +func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *NetworkEdgeSecurityServicesAggregatedListCall { + c := &NetworkEdgeSecurityServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instance2 = instance2 return c } -// MinimalAction sets the optional parameter "minimalAction": Specifies -// the action to take when updating an instance even if the updated -// properties do not require it. If not specified, then Compute Engine -// acts based on the minimum action that the updated properties require. -// -// Possible values: -// "INVALID" -// "NO_EFFECT" - No changes can be made to the instance. -// "REFRESH" - The instance will not restart. -// "RESTART" - The instance will restart. -func (c *InstancesUpdateCall) MinimalAction(minimalAction string) *InstancesUpdateCall { - c.urlParams_.Set("minimalAction", minimalAction) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. 
For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworkEdgeSecurityServicesAggregatedListCall) MaxResults(maxResults int64) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) OrderBy(orderBy string) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) return c } -// MostDisruptiveAllowedAction sets the optional parameter -// "mostDisruptiveAllowedAction": Specifies the most disruptive action -// that can be taken on the instance as part of the update. Compute -// Engine returns an error if the instance properties require a more -// disruptive action as part of the instance update. Valid options from -// lowest to highest are NO_EFFECT, REFRESH, and RESTART. -// -// Possible values: -// "INVALID" -// "NO_EFFECT" - No changes can be made to the instance. -// "REFRESH" - The instance will not restart. -// "RESTART" - The instance will restart. -func (c *InstancesUpdateCall) MostDisruptiveAllowedAction(mostDisruptiveAllowedAction string) *InstancesUpdateCall { - c.urlParams_.Set("mostDisruptiveAllowedAction", mostDisruptiveAllowedAction) +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
+func (c *NetworkEdgeSecurityServicesAggregatedListCall) PageToken(pageToken string) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateCall) RequestId(requestId string) *InstancesUpdateCall { - c.urlParams_.Set("requestId", requestId) +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateCall) Fields(s ...googleapi.Field) *InstancesUpdateCall { +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateCall) Context(ctx context.Context) *InstancesUpdateCall { +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesUpdateCall) Header() http.Header { +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEdgeSecurityServices") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkEdgeSecurityServices.aggregatedList" call. +// Exactly one of *NetworkEdgeSecurityServiceAggregatedList or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *NetworkEdgeSecurityServiceAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityServiceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &NetworkEdgeSecurityServiceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -97704,122 +113527,112 @@ func (c *InstancesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Updates an instance only if the necessary resources are available. This method can update only a specific set of instance properties. See Updating a running instance for a list of updatable instance properties.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}", - // "httpMethod": "PUT", - // "id": "compute.instances.update", + // "description": "Retrieves the list of all NetworkEdgeSecurityService resources available to the specified project.", + // "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", + // "httpMethod": "GET", + // "id": "compute.networkEdgeSecurityServices.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", // "type": "string" // }, - // "minimalAction": { - // "description": "Specifies the action to take when updating an instance even if the updated properties do not require it. If not specified, then Compute Engine acts based on the minimum action that the updated properties require.", - // "enum": [ - // "INVALID", - // "NO_EFFECT", - // "REFRESH", - // "RESTART" - // ], - // "enumDescriptions": [ - // "", - // "No changes can be made to the instance.", - // "The instance will not restart.", - // "The instance will restart." - // ], + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", // "type": "string" // }, - // "mostDisruptiveAllowedAction": { - // "description": "Specifies the most disruptive action that can be taken on the instance as part of the update. Compute Engine returns an error if the instance properties require a more disruptive action as part of the instance update. 
Valid options from lowest to highest are NO_EFFECT, REFRESH, and RESTART.", - // "enum": [ - // "INVALID", - // "NO_EFFECT", - // "REFRESH", - // "RESTART" - // ], - // "enumDescriptions": [ - // "", - // "No changes can be made to the instance.", - // "The instance will not restart.", - // "The instance will restart." - // ], + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "Name of the project scoping this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}", - // "request": { - // "$ref": "Instance" - // }, + // "path": "projects/{project}/aggregated/networkEdgeSecurityServices", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkEdgeSecurityServiceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.updateAccessConfig": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *NetworkEdgeSecurityServicesAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEdgeSecurityServiceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesUpdateAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.networkEdgeSecurityServices.delete": + +type NetworkEdgeSecurityServicesDeleteCall struct { + s *Service + project string + region string + networkEdgeSecurityService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateAccessConfig: Updates the specified access config from an -// instance's network interface with the data included in the request. -// This method supports PATCH semantics and uses the JSON merge patch -// format and processing rules. +// Delete: Deletes the specified service. // -// - instance: The instance name for this request. -// - networkInterface: The name of the network interface where the -// access config is attached. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { - c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkEdgeSecurityService: Name of the network edge security +// service to delete. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *NetworkEdgeSecurityServicesService) Delete(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesDeleteCall { + c := &NetworkEdgeSecurityServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig + c.region = region + c.networkEdgeSecurityService = networkEdgeSecurityService return c } @@ -97834,7 +113647,7 @@ func (r *InstancesService) UpdateAccessConfig(project string, zone string, insta // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *InstancesUpdateAccessConfigCall { +func (c *NetworkEdgeSecurityServicesDeleteCall) RequestId(requestId string) *NetworkEdgeSecurityServicesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -97842,7 +113655,7 @@ func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *Instances // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
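For orientation, an illustrative caller-side sketch of the aggregated listing above, driven through the Pages helper just defined. It assumes the surrounding generated code is the google.golang.org/api/compute/v1 package (imported here as compute), uses a placeholder project ID, and relies on Application Default Credentials via compute.NewService.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// "my-project" is a placeholder project ID.
	call := svc.NetworkEdgeSecurityServices.AggregatedList("my-project")
	pages := 0
	// Pages invokes the callback once per page and follows nextPageToken itself.
	if err := call.Pages(ctx, func(page *compute.NetworkEdgeSecurityServiceAggregatedList) error {
		pages++
		return nil
	}); err != nil {
		log.Fatal(err)
	}
	log.Printf("received %d page(s)", pages)
}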
-func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateAccessConfigCall { +func (c *NetworkEdgeSecurityServicesDeleteCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -97850,21 +113663,21 @@ func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateAccessConfigCall) Context(ctx context.Context) *InstancesUpdateAccessConfigCall { +func (c *NetworkEdgeSecurityServicesDeleteCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateAccessConfigCall) Header() http.Header { +func (c *NetworkEdgeSecurityServicesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -97872,53 +113685,48 @@ func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/updateAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "networkEdgeSecurityService": c.networkEdgeSecurityService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateAccessConfig" call. +// Do executes the "compute.networkEdgeSecurityServices.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -97932,30 +113740,23 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/updateAccessConfig", - // "httpMethod": "POST", - // "id": "compute.instances.updateAccessConfig", + // "description": "Deletes the specified service.", + // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "httpMethod": "DELETE", + // "id": "compute.networkEdgeSecurityServices.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "networkInterface" + // "region", + // "networkEdgeSecurityService" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "networkEdgeSecurityService": { + // "description": "Name of the network edge security service to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface where the access config is attached.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -97963,23 +113764,20 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
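A hedged sketch of caller-side error handling around the gensupport.WrapError change visible in the Do methods above: the intent of the wrapping is that the *googleapi.Error stays reachable, so errors.As and plain status-code checks keep working. The helper name below is invented for illustration.

package example

import (
	"errors"
	"net/http"

	"google.golang.org/api/googleapi"
)

// isNotFound reports whether err carries an HTTP 404 from the API.
// The wrapped error returned by the generated Do methods is expected to
// still match *googleapi.Error via errors.As.
func isNotFound(err error) bool {
	var gerr *googleapi.Error
	if errors.As(err, &gerr) {
		return gerr.Code == http.StatusNotFound
	}
	return false
}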
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/updateAccessConfig", - // "request": { - // "$ref": "AccessConfig" - // }, + // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", // "response": { // "$ref": "Operation" // }, @@ -97991,134 +113789,123 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instances.updateDisplayDevice": +// method id "compute.networkEdgeSecurityServices.get": -type InstancesUpdateDisplayDeviceCall struct { - s *Service - project string - zone string - instance string - displaydevice *DisplayDevice - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEdgeSecurityServicesGetCall struct { + s *Service + project string + region string + networkEdgeSecurityService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// UpdateDisplayDevice: Updates the Display config for a VM instance. -// You can only use this method on a stopped VM instance. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. +// Get: Gets a specified NetworkEdgeSecurityService. // -// - instance: Name of the instance scoping this request. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) UpdateDisplayDevice(project string, zone string, instance string, displaydevice *DisplayDevice) *InstancesUpdateDisplayDeviceCall { - c := &InstancesUpdateDisplayDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkEdgeSecurityService: Name of the network edge security +// service to get. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *NetworkEdgeSecurityServicesService) Get(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesGetCall { + c := &NetworkEdgeSecurityServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.displaydevice = displaydevice - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
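An illustrative sketch of the Delete call defined above, again assuming the google.golang.org/api/compute/v1 package; the project, region, service name and request ID are placeholders supplied by the caller.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// deleteEdgeSecurityService starts deletion of a network edge security
// service and logs the long-running Operation it returns.
func deleteEdgeSecurityService(ctx context.Context, svc *compute.Service, requestID string) error {
	op, err := svc.NetworkEdgeSecurityServices.
		Delete("my-project", "us-central1", "my-edge-security-service").
		RequestId(requestID). // idempotency token; any non-zero UUID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("delete started: operation %s, status %s", op.Name, op.Status)
	return nil
}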
-func (c *InstancesUpdateDisplayDeviceCall) RequestId(requestId string) *InstancesUpdateDisplayDeviceCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.networkEdgeSecurityService = networkEdgeSecurityService return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateDisplayDeviceCall) Fields(s ...googleapi.Field) *InstancesUpdateDisplayDeviceCall { +func (c *NetworkEdgeSecurityServicesGetCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkEdgeSecurityServicesGetCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateDisplayDeviceCall) Context(ctx context.Context) *InstancesUpdateDisplayDeviceCall { +func (c *NetworkEdgeSecurityServicesGetCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateDisplayDeviceCall) Header() http.Header { +func (c *NetworkEdgeSecurityServicesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.displaydevice) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/updateDisplayDevice") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "networkEdgeSecurityService": c.networkEdgeSecurityService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateDisplayDevice" call. -// Exactly one of *Operation or error will be non-nil. 
Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkEdgeSecurityServices.get" call. +// Exactly one of *NetworkEdgeSecurityService or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NetworkEdgeSecurityService.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityService, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &NetworkEdgeSecurityService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -98130,20 +113917,20 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Updates the Display config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/updateDisplayDevice", - // "httpMethod": "PATCH", - // "id": "compute.instances.updateDisplayDevice", + // "description": "Gets a specified NetworkEdgeSecurityService.", + // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "httpMethod": "GET", + // "id": "compute.networkEdgeSecurityServices.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "networkEdgeSecurityService" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "networkEdgeSecurityService": { + // "description": "Name of the network edge security service to get.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -98154,65 +113941,49 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/updateDisplayDevice", - // "request": { - // "$ref": "DisplayDevice" - // }, + // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkEdgeSecurityService" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.updateNetworkInterface": +// method id "compute.networkEdgeSecurityServices.insert": -type InstancesUpdateNetworkInterfaceCall struct { - s *Service - project string - zone string - instance string - networkinterface *NetworkInterface - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEdgeSecurityServicesInsertCall struct { + s *Service + project string + region string + networkedgesecurityservice *NetworkEdgeSecurityService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateNetworkInterface: Updates an instance's network interface. This -// method can only update an interface's alias IP range and attached -// network. See Modifying alias IP ranges for an existing instance for -// instructions on changing alias IP ranges. See Migrating a VM between -// networks for instructions on migrating an interface. This method -// follows PATCH semantics. +// Insert: Creates a new service in the specified project using the data +// included in the request. // -// - instance: The instance name for this request. -// - networkInterface: The name of the network interface to update. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { - c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. 
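A sketch of a conditional read with the Get call and IfNoneMatch option shown above. The etag is assumed to have been captured from an earlier response; the identifiers are placeholders.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getIfChanged re-fetches the service only if it has changed since etag
// was issued; a 304 surfaces as an error that IsNotModified recognizes.
func getIfChanged(ctx context.Context, svc *compute.Service, etag string) (*compute.NetworkEdgeSecurityService, error) {
	res, err := svc.NetworkEdgeSecurityServices.
		Get("my-project", "us-central1", "my-edge-security-service").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// Unchanged; the caller can keep using its cached copy.
		return nil, nil
	}
	return res, err
}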
+func (r *NetworkEdgeSecurityServicesService) Insert(project string, region string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesInsertCall { + c := &NetworkEdgeSecurityServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.networkinterface = networkinterface + c.region = region + c.networkedgesecurityservice = networkedgesecurityservice return c } @@ -98227,15 +113998,22 @@ func (r *InstancesService) UpdateNetworkInterface(project string, zone string, i // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *InstancesUpdateNetworkInterfaceCall { +func (c *NetworkEdgeSecurityServicesInsertCall) RequestId(requestId string) *NetworkEdgeSecurityServicesInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *NetworkEdgeSecurityServicesInsertCall) ValidateOnly(validateOnly bool) *NetworkEdgeSecurityServicesInsertCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesUpdateNetworkInterfaceCall { +func (c *NetworkEdgeSecurityServicesInsertCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -98243,21 +114021,21 @@ func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *Inst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateNetworkInterfaceCall) Context(ctx context.Context) *InstancesUpdateNetworkInterfaceCall { +func (c *NetworkEdgeSecurityServicesInsertCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { +func (c *NetworkEdgeSecurityServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEdgeSecurityServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -98265,53 +114043,52 @@ func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkinterface) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateNetworkInterface" call. +// Do executes the "compute.networkEdgeSecurityServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -98325,34 +114102,26 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Updates an instance's network interface. This method can only update an interface's alias IP range and attached network. See Modifying alias IP ranges for an existing instance for instructions on changing alias IP ranges. See Migrating a VM between networks for instructions on migrating an interface. 
This method follows PATCH semantics.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", - // "httpMethod": "PATCH", - // "id": "compute.instances.updateNetworkInterface", + // "description": "Creates a new service in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + // "httpMethod": "POST", + // "id": "compute.networkEdgeSecurityServices.insert", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "networkInterface" + // "region" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "networkInterface": { - // "description": "The name of the network interface to update.", - // "location": "query", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -98361,17 +114130,15 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", + // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", // "request": { - // "$ref": "NetworkInterface" + // "$ref": "NetworkEdgeSecurityService" // }, // "response": { // "$ref": "Operation" @@ -98384,33 +114151,38 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.instances.updateShieldedInstanceConfig": +// method id "compute.networkEdgeSecurityServices.patch": -type InstancesUpdateShieldedInstanceConfigCall struct { - s *Service - project string - zone string - instance string - shieldedinstanceconfig *ShieldedInstanceConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEdgeSecurityServicesPatchCall struct { + s *Service + project string + region string + networkEdgeSecurityService string + networkedgesecurityservice *NetworkEdgeSecurityService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateShieldedInstanceConfig: Updates the Shielded Instance config -// for an instance. You can only use this method on a stopped instance. -// This method supports PATCH semantics and uses the JSON merge patch -// format and processing rules. +// Patch: Patches the specified policy with the data included in the +// request. 
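Stepping back to the Insert call above for a moment: a hedged sketch of its new ValidateOnly option used as a dry run. The identifiers are placeholders, and the resource's Name field is assumed from the usual Compute resource shape.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// dryRunInsert asks the API to validate the request without creating anything.
func dryRunInsert(ctx context.Context, svc *compute.Service) error {
	nes := &compute.NetworkEdgeSecurityService{
		Name: "my-edge-security-service", // placeholder; Name assumed to exist on the resource
	}
	_, err := svc.NetworkEdgeSecurityServices.
		Insert("my-project", "us-central1", nes).
		ValidateOnly(true). // "If true, the request will not be committed."
		Context(ctx).
		Do()
	return err
}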
// -// - instance: Name or id of the instance scoping this request. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) UpdateShieldedInstanceConfig(project string, zone string, instance string, shieldedinstanceconfig *ShieldedInstanceConfig) *InstancesUpdateShieldedInstanceConfigCall { - c := &InstancesUpdateShieldedInstanceConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkEdgeSecurityService: Name of the network edge security +// service to update. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *NetworkEdgeSecurityServicesService) Patch(project string, region string, networkEdgeSecurityService string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesPatchCall { + c := &NetworkEdgeSecurityServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.shieldedinstanceconfig = shieldedinstanceconfig + c.region = region + c.networkEdgeSecurityService = networkEdgeSecurityService + c.networkedgesecurityservice = networkedgesecurityservice + return c +} + +// Paths sets the optional parameter "paths": +func (c *NetworkEdgeSecurityServicesPatchCall) Paths(paths ...string) *NetworkEdgeSecurityServicesPatchCall { + c.urlParams_.SetMulti("paths", append([]string{}, paths...)) return c } @@ -98425,15 +114197,22 @@ func (r *InstancesService) UpdateShieldedInstanceConfig(project string, zone str // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateShieldedInstanceConfigCall) RequestId(requestId string) *InstancesUpdateShieldedInstanceConfigCall { +func (c *NetworkEdgeSecurityServicesPatchCall) RequestId(requestId string) *NetworkEdgeSecurityServicesPatchCall { c.urlParams_.Set("requestId", requestId) return c } +// UpdateMask sets the optional parameter "updateMask": Indicates fields +// to be updated as part of this request. +func (c *NetworkEdgeSecurityServicesPatchCall) UpdateMask(updateMask string) *NetworkEdgeSecurityServicesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateShieldedInstanceConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateShieldedInstanceConfigCall { +func (c *NetworkEdgeSecurityServicesPatchCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -98441,21 +114220,21 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateShieldedInstanceConfigCall) Context(ctx context.Context) *InstancesUpdateShieldedInstanceConfigCall { +func (c *NetworkEdgeSecurityServicesPatchCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesUpdateShieldedInstanceConfigCall) Header() http.Header { +func (c *NetworkEdgeSecurityServicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEdgeSecurityServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -98463,14 +114242,14 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceconfig) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -98478,38 +114257,38 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "networkEdgeSecurityService": c.networkEdgeSecurityService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateShieldedInstanceConfig" call. +// Do executes the "compute.networkEdgeSecurityServices.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -98523,23 +114302,28 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig", + // "description": "Patches the specified policy with the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", // "httpMethod": "PATCH", - // "id": "compute.instances.updateShieldedInstanceConfig", + // "id": "compute.networkEdgeSecurityServices.patch", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "networkEdgeSecurityService" // ], // "parameters": { - // "instance": { - // "description": "Name or id of the instance scoping this request.", + // "networkEdgeSecurityService": { + // "description": "Name of the network edge security service to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "paths": { + // "location": "query", + // "repeated": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -98547,22 +114331,28 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOpt // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "updateMask": { + // "description": "Indicates fields to be updated as part of this request.", + // "format": "google-fieldmask", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig", + // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", // "request": { - // "$ref": "ShieldedInstanceConfig" + // "$ref": "NetworkEdgeSecurityService" // }, // "response": { // "$ref": "Operation" @@ -98575,9 +114365,9 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOpt } -// method id "compute.interconnectAttachments.aggregatedList": +// method id "compute.networkEndpointGroups.aggregatedList": -type InterconnectAttachmentsAggregatedListCall struct { +type NetworkEndpointGroupsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -98586,30 +114376,33 @@ type InterconnectAttachmentsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of interconnect -// attachments. +// AggregatedList: Retrieves the list of network endpoint groups and +// sorts them by zone. // // - project: Project ID for this request. -func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { - c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { + c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. 
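Rounding out the NetworkEdgeSecurityServices methods, a sketch of the Patch call with the UpdateMask option documented above; the SecurityPolicy field and all identifiers are assumptions for illustration only.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// attachSecurityPolicy updates only the securityPolicy field of an existing
// network edge security service; UpdateMask uses google-fieldmask syntax.
func attachSecurityPolicy(ctx context.Context, svc *compute.Service, policyURL string) error {
	patch := &compute.NetworkEdgeSecurityService{
		SecurityPolicy: policyURL, // assumed field: full URL of the edge security policy
	}
	_, err := svc.NetworkEdgeSecurityServices.
		Patch("my-project", "us-central1", "my-edge-security-service", patch).
		UpdateMask("securityPolicy"). // only this field is changed
		Context(ctx).
		Do()
	return err
}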
The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -98617,8 +114410,18 @@ func (r *InterconnectAttachmentsService) AggregatedList(project string) *Interco // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -98631,7 +114434,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *Inter // response. For resource types which predate this field, if this flag // is omitted or false, only scopes of the scope types where the // resource type is expected to be found will be included. -func (c *InterconnectAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InterconnectAttachmentsAggregatedListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) return c } @@ -98642,7 +114445,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) IncludeAllScopes(includeAllS // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -98656,7 +114459,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -98664,7 +114467,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *Int // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -98673,7 +114476,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsAggregatedListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -98681,7 +114484,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -98691,7 +114494,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -98699,21 +114502,21 @@ func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string // Context sets the context to be used in this call's Do method. 
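A sketch of the NetworkEndpointGroups aggregated listing whose options are documented above, combining Filter, MaxResults and ReturnPartialSuccess; the filter expression is the one from the parameter documentation and the project ID is a placeholder.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listNEGs fetches one page of network endpoint groups across all scopes.
func listNEGs(ctx context.Context, svc *compute.Service) error {
	res, err := svc.NetworkEndpointGroups.
		AggregatedList("my-project").
		Filter("name != example-instance"). // expression taken from the filter docs
		MaxResults(100).
		ReturnPartialSuccess(true).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("next page token: %q", res.NextPageToken)
	return nil
}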
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { +func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -98726,7 +114529,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -98739,34 +114542,34 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.aggregatedList" call. -// Exactly one of *InterconnectAttachmentAggregatedList or error will be +// Do executes the "compute.networkEndpointGroups.aggregatedList" call. +// Exactly one of *NetworkEndpointGroupAggregatedList or error will be // non-nil. Any non-2xx status code is an error. Response headers are in -// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or +// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or // (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was // returned. -func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { +func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InterconnectAttachmentAggregatedList{ + ret := &NetworkEndpointGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -98778,16 +114581,16 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Retrieves an aggregated list of interconnect attachments.", - // "flatPath": "projects/{project}/aggregated/interconnectAttachments", + // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", + // "flatPath": "projects/{project}/aggregated/networkEndpointGroups", // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.aggregatedList", + // "id": "compute.networkEndpointGroups.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -98827,9 +114630,9 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/interconnectAttachments", + // "path": "projects/{project}/aggregated/networkEndpointGroups", // "response": { - // "$ref": "InterconnectAttachmentAggregatedList" + // "$ref": "NetworkEndpointGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -98843,7 +114646,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { +func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -98861,29 +114664,34 @@ func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f } } -// method id "compute.interconnectAttachments.delete": +// method id "compute.networkEndpointGroups.attachNetworkEndpoints": -type InterconnectAttachmentsDeleteCall struct { - s *Service - project string - region string - interconnectAttachment string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified interconnect attachment. 
+// AttachNetworkEndpoints: Attach a list of network endpoints to the +// specified network endpoint group. // -// - interconnectAttachment: Name of the interconnect attachment to -// delete. -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { - c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkEndpointGroup: The name of the network endpoint group where +// you are attaching network endpoints to. It should comply with +// RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. +func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest return c } @@ -98898,7 +114706,377 @@ func (r *InterconnectAttachmentsService) Delete(project string, region string, i // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Attach a list of network endpoints to the specified network endpoint group.", + // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", + // "parameterOrder": [ + // "project", + // "zone", + // "networkEndpointGroup" + // ], + // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are attaching network endpoints to. 
It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "request": { + // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networkEndpointGroups.delete": + +type NetworkEndpointGroupsDeleteCall struct { + s *Service + project string + zone string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified network endpoint group. The network +// endpoints in the NEG and the VM instances they belong to are not +// terminated when the NEG is deleted. Note that the NEG cannot be +// deleted if there are backend services referencing it. +// +// - networkEndpointGroup: The name of the network endpoint group to +// delete. It should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. +func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { + c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. 
The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEndpointGroups.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. 
Note that the NEG cannot be deleted if there are backend services referencing it.", + // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "httpMethod": "DELETE", + // "id": "compute.networkEndpointGroups.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "networkEndpointGroup" + // ], + // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networkEndpointGroups.detachNetworkEndpoints": + +type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DetachNetworkEndpoints: Detach a list of network endpoints from the +// specified network endpoint group. +// +// - networkEndpointGroup: The name of the network endpoint group where +// you are removing network endpoints. It should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. 
+func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -98906,7 +115084,7 @@ func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *Interco // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -98914,21 +115092,21 @@ func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *Interc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -98936,48 +115114,53 @@ func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.delete" call. +// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -98991,20 +115174,19 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Deletes the specified interconnect attachment.", - // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", - // "httpMethod": "DELETE", - // "id": "compute.interconnectAttachments.delete", + // "description": "Detach a list of network endpoints from the specified network endpoint group.", + // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to delete.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -99015,20 +115197,22 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "request": { + // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -99040,37 +115224,39 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.interconnectAttachments.get": +// method id "compute.networkEndpointGroups.get": -type InterconnectAttachmentsGetCall struct { - s *Service - project string - region string - interconnectAttachment string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsGetCall struct { + s *Service + project string + zone string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified interconnect attachment. +// Get: Returns the specified network endpoint group. Gets a list of +// available network endpoint groups by making a list() request. // -// - interconnectAttachment: Name of the interconnect attachment to -// return. -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { - c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkEndpointGroup: The name of the network endpoint group. It +// should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. +func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { + c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { +func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -99080,7 +115266,7 @@ func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *Interconn // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { +func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -99088,21 +115274,21 @@ func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *Intercon // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { +func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsGetCall) Header() http.Header { +func (c *NetworkEndpointGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -99115,7 +115301,7 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -99123,40 +115309,40 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.get" call. -// Exactly one of *InterconnectAttachment or error will be non-nil. Any +// Do executes the "compute.networkEndpointGroups.get" call. +// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InterconnectAttachment.ServerResponse.Header or (if a response was +// *NetworkEndpointGroup.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { +func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InterconnectAttachment{ + ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99168,20 +115354,19 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte } return ret, nil // { - // "description": "Returns the specified interconnect attachment.", - // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.get", + // "id": "compute.networkEndpointGroups.get", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to return.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -99192,17 +115377,16 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", // "response": { - // "$ref": "InterconnectAttachment" + // "$ref": "NetworkEndpointGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -99213,28 +115397,29 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte } -// method id "compute.interconnectAttachments.insert": +// method id "compute.networkEndpointGroups.insert": -type InterconnectAttachmentsInsertCall struct { - s *Service - project string - region string - interconnectattachment *InterconnectAttachment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsInsertCall struct { + s *Service + project string + zone string + networkendpointgroup *NetworkEndpointGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an InterconnectAttachment in the specified project -// using the data included in the request. +// Insert: Creates a network endpoint group in the specified project +// using the parameters that are included in the request. 
// -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { - c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the network +// endpoint group. It should comply with RFC1035. +func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { + c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectattachment = interconnectattachment + c.zone = zone + c.networkendpointgroup = networkendpointgroup return c } @@ -99249,22 +115434,15 @@ func (r *InterconnectAttachmentsService) Insert(project string, region string, i // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { +func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } -// ValidateOnly sets the optional parameter "validateOnly": If true, the -// request will not be committed. -func (c *InterconnectAttachmentsInsertCall) ValidateOnly(validateOnly bool) *InterconnectAttachmentsInsertCall { - c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { +func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -99272,21 +115450,21 @@ func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *Interc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { +func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectAttachmentsInsertCall) Header() http.Header { +func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -99294,14 +115472,14 @@ func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -99310,36 +115488,36 @@ func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Respons req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.insert" call. +// Do executes the "compute.networkEndpointGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -99353,13 +115531,13 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", + // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", // "httpMethod": "POST", - // "id": "compute.interconnectAttachments.insert", + // "id": "compute.networkEndpointGroups.insert", // "parameterOrder": [ // "project", - // "region" + // "zone" // ], // "parameters": { // "project": { @@ -99369,27 +115547,21 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "validateOnly": { - // "description": "If true, the request will not be committed.", - // "location": "query", - // "type": "boolean" + // "zone": { + // "description": "The name of the zone where you want to create the network endpoint group. 
It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/interconnectAttachments", + // "path": "projects/{project}/zones/{zone}/networkEndpointGroups", // "request": { - // "$ref": "InterconnectAttachment" + // "$ref": "NetworkEndpointGroup" // }, // "response": { // "$ref": "Operation" @@ -99402,44 +115574,48 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.interconnectAttachments.list": +// method id "compute.networkEndpointGroups.list": -type InterconnectAttachmentsListCall struct { +type NetworkEndpointGroupsListCall struct { s *Service project string - region string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of interconnect attachments contained within -// the specified region. +// List: Retrieves the list of network endpoint groups that are located +// in the specified project and zone. // -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { - c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. +func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { + c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region + c.zone = zone return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. 
For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -99447,8 +115623,18 @@ func (r *InterconnectAttachmentsService) List(project string, region string) *In // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("filter", filter) return c } @@ -99459,7 +115645,7 @@ func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAtt // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { +func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -99473,7 +115659,7 @@ func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *Intercon // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { +func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -99481,7 +115667,7 @@ func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectA // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
-func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { +func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -99490,7 +115676,7 @@ func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *Interconn // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *InterconnectAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsListCall { +func (c *NetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -99498,7 +115684,7 @@ func (c *InterconnectAttachmentsListCall) ReturnPartialSuccess(returnPartialSucc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { +func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -99508,7 +115694,7 @@ func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *Intercon // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { +func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -99516,21 +115702,21 @@ func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *Interco // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { +func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsListCall) Header() http.Header { +func (c *NetworkEndpointGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -99543,7 +115729,7 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -99552,38 +115738,38 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.list" call. -// Exactly one of *InterconnectAttachmentList or error will be non-nil. +// Do executes the "compute.networkEndpointGroups.list" call. +// Exactly one of *NetworkEndpointGroupList or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *InterconnectAttachmentList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { +func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InterconnectAttachmentList{ + ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99595,17 +115781,17 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Retrieves the list of interconnect attachments contained within the specified region.", - // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", + // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", + // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.list", + // "id": "compute.networkEndpointGroups.list", // "parameterOrder": [ // "project", - // "region" + // "zone" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -99634,22 +115820,21 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "returnPartialSuccess": { // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/interconnectAttachments", + // "path": "projects/{project}/zones/{zone}/networkEndpointGroups", // "response": { - // "$ref": "InterconnectAttachmentList" + // "$ref": "NetworkEndpointGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -99663,7 +115848,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { +func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -99681,56 +115866,123 @@ func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*Int } } -// method id "compute.interconnectAttachments.patch": +// method id "compute.networkEndpointGroups.listNetworkEndpoints": -type InterconnectAttachmentsPatchCall struct { - s *Service - project string - region string - interconnectAttachment string - interconnectattachment *InterconnectAttachment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsListNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified interconnect attachment with the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. +// ListNetworkEndpoints: Lists the network endpoints in the specified +// network endpoint group. // -// - interconnectAttachment: Name of the interconnect attachment to -// patch. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { - c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkEndpointGroup: The name of the network endpoint group from +// which you want to generate a list of included network endpoints. It +// should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. 
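For reviewers of this vendored change, a minimal sketch of how the regenerated zonal NEG list call is typically driven. It assumes the client is imported as google.golang.org/api/compute/v1 and that Application Default Credentials are available; the project, zone, and filter values are illustrative. Filter, MaxResults, and Pages are the setters defined in the hunk above, and Pages follows nextPageToken until the listing is exhausted.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this vendored client
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// Zonal list call with an AIP-160 style filter; Pages handles nextPageToken
	// iteration until the listing is exhausted, as documented on the call above.
	call := svc.NetworkEndpointGroups.List("my-project", "us-central1-a").
		Filter(`name != example-neg`).
		MaxResults(100)
	if err := call.Pages(ctx, func(page *compute.NetworkEndpointGroupList) error {
		for _, neg := range page.Items {
			fmt.Println(neg.Name)
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}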
+func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { + c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment - c.interconnectattachment = interconnectattachment + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -99738,21 +115990,21 @@ func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *Interco // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsPatchCall) Header() http.Header { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -99760,55 +116012,57 @@ func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. +// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. 
+func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &NetworkEndpointGroupsListNetworkEndpoints{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99820,23 +116074,45 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", - // "httpMethod": "PATCH", - // "id": "compute.interconnectAttachments.patch", + // "description": "Lists the network endpoints in the specified network endpoint group.", + // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.listNetworkEndpoints", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to patch.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -99844,148 +116120,165 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", // "request": { - // "$ref": "InterconnectAttachment" + // "$ref": "NetworkEndpointGroupsListEndpointsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.interconnectLocations.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InterconnectLocationsGetCall struct { - s *Service - project string - interconnectLocation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.networkEndpointGroups.testIamPermissions": + +type NetworkEndpointGroupsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the details for the specified interconnect location. -// Gets a list of available interconnect locations by making a list() -// request. +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. // -// - interconnectLocation: Name of the interconnect location to return. // - project: Project ID for this request. -func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { - c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. 
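The listNetworkEndpoints call added above is a POST that takes a request body yet still pages like a GET list, and testIamPermissions follows the usual TestPermissionsRequest pattern. A rough sketch reusing svc and ctx from the previous snippet; the NEG name, the permission string, and the item field names (taken from the NetworkEndpointWithHealthStatus schema, which is not shown in this hunk) are assumptions.

// Page through the endpoints of one NEG. The request body can carry
// listing options; it is left zero-valued here.
req := &compute.NetworkEndpointGroupsListEndpointsRequest{}
err = svc.NetworkEndpointGroups.
	ListNetworkEndpoints("my-project", "us-central1-a", "my-neg", req).
	Pages(ctx, func(page *compute.NetworkEndpointGroupsListNetworkEndpoints) error {
		for _, e := range page.Items {
			fmt.Println(e.NetworkEndpoint.Instance) // field names assumed from the endpoint schema
		}
		return nil
	})
if err != nil {
	log.Fatal(err)
}

// Check what the caller may do with the NEG before mutating it.
perms, err := svc.NetworkEndpointGroups.
	TestIamPermissions("my-project", "us-central1-a", "my-neg",
		&compute.TestPermissionsRequest{Permissions: []string{"compute.networkEndpointGroups.get"}}).
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println(perms.Permissions)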
+func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { + c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnectLocation = interconnectLocation + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectLocationsGetCall) Header() http.Header { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectLocations/{interconnectLocation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnectLocation": c.interconnectLocation, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectLocations.get" call. -// Exactly one of *InterconnectLocation or error will be non-nil. Any +// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InterconnectLocation.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InterconnectLocation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99997,279 +116290,44 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc } return ret, nil // { - // "description": "Returns the details for the specified interconnect location. 
Gets a list of available interconnect locations by making a list() request.", - // "flatPath": "projects/{project}/global/interconnectLocations/{interconnectLocation}", - // "httpMethod": "GET", - // "id": "compute.interconnectLocations.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.testIamPermissions", // "parameterOrder": [ // "project", - // "interconnectLocation" + // "zone", + // "resource" // ], // "parameters": { - // "interconnectLocation": { - // "description": "Name of the interconnect location to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // } - // }, - // "path": "projects/{project}/global/interconnectLocations/{interconnectLocation}", - // "response": { - // "$ref": "InterconnectLocation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.interconnectLocations.list": - -type InterconnectLocationsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of interconnect locations available to the -// specified project. -// -// - project: Project ID for this request. -func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { - c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. 
For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *InterconnectLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectLocationsListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InterconnectLocationsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectLocations") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.interconnectLocations.list" call. -// Exactly one of *InterconnectLocationList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InterconnectLocationList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InterconnectLocationList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of interconnect locations available to the specified project.", - // "flatPath": "projects/{project}/global/interconnectLocations", - // "httpMethod": "GET", - // "id": "compute.interconnectLocations.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/global/interconnectLocations", + // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "InterconnectLocationList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -100280,46 +116338,38 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.interconnects.delete": +// method id "compute.networkFirewallPolicies.addAssociation": -type InterconnectsDeleteCall struct { - s *Service - project string - interconnect string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkFirewallPoliciesAddAssociationCall struct { + s *Service + project string + firewallPolicy string + firewallpolicyassociation *FirewallPolicyAssociation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified interconnect. +// AddAssociation: Inserts an association for the specified firewall +// policy. // -// - interconnect: Name of the interconnect to delete. +// - firewallPolicy: Name of the firewall policy to update. // - project: Project ID for this request. -func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { - c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkFirewallPoliciesService) AddAssociation(project string, firewallPolicy string, firewallpolicyassociation *FirewallPolicyAssociation) *NetworkFirewallPoliciesAddAssociationCall { + c := &NetworkFirewallPoliciesAddAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.firewallPolicy = firewallPolicy + c.firewallpolicyassociation = firewallpolicyassociation + return c +} + +// ReplaceExistingAssociation sets the optional parameter +// "replaceExistingAssociation": Indicates whether or not to replace it +// if an association of the attachment already exists. This is false by +// default, in which case an error will be returned if an association +// already exists. +func (c *NetworkFirewallPoliciesAddAssociationCall) ReplaceExistingAssociation(replaceExistingAssociation bool) *NetworkFirewallPoliciesAddAssociationCall { + c.urlParams_.Set("replaceExistingAssociation", fmt.Sprint(replaceExistingAssociation)) return c } @@ -100334,7 +116384,7 @@ func (r *InterconnectsService) Delete(project string, interconnect string) *Inte // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
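addAssociation is a mutating call, so it combines the standard requestId idempotency token with the new replaceExistingAssociation flag described above. Another hedged sketch continuing the previous ones; the FirewallPolicyAssociation field names and the UUID literal are placeholders rather than values taken from this hunk.

assoc := &compute.FirewallPolicyAssociation{
	Name:             "my-association",                                 // assumed field
	AttachmentTarget: "projects/my-project/global/networks/my-network", // assumed field
}
op, err := svc.NetworkFirewallPolicies.
	AddAssociation("my-project", "my-policy", assoc).
	ReplaceExistingAssociation(true).                   // overwrite instead of failing if one exists
	RequestId("00000000-0000-0000-0000-000000000001").  // any valid, unique, non-zero UUID
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println(op.Status) // returns an Operation; poll or wait on it as needed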
-func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { +func (c *NetworkFirewallPoliciesAddAssociationCall) RequestId(requestId string) *NetworkFirewallPoliciesAddAssociationCall { c.urlParams_.Set("requestId", requestId) return c } @@ -100342,7 +116392,7 @@ func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDele // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { +func (c *NetworkFirewallPoliciesAddAssociationCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesAddAssociationCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -100350,21 +116400,21 @@ func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { +func (c *NetworkFirewallPoliciesAddAssociationCall) Context(ctx context.Context) *NetworkFirewallPoliciesAddAssociationCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsDeleteCall) Header() http.Header { +func (c *NetworkFirewallPoliciesAddAssociationCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesAddAssociationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -100372,47 +116422,52 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyassociation) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.delete" call. +// Do executes the "compute.networkFirewallPolicies.addAssociation" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -100426,17 +116481,17 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified interconnect.", - // "flatPath": "projects/{project}/global/interconnects/{interconnect}", - // "httpMethod": "DELETE", - // "id": "compute.interconnects.delete", + // "description": "Inserts an association for the specified firewall policy.", + // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation", + // "httpMethod": "POST", + // "id": "compute.networkFirewallPolicies.addAssociation", // "parameterOrder": [ // "project", - // "interconnect" + // "firewallPolicy" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to delete.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -100449,13 +116504,21 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "replaceExistingAssociation": { + // "description": "Indicates whether or not to replace it if an association of the attachment already exists. This is false by default, in which case an error will be returned if an association already exists.", + // "location": "query", + // "type": "boolean" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/global/interconnects/{interconnect}", + // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation", + // "request": { + // "$ref": "FirewallPolicyAssociation" + // }, // "response": { // "$ref": "Operation" // }, @@ -100467,119 +116530,145 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.interconnects.get": +// method id "compute.networkFirewallPolicies.addRule": -type InterconnectsGetCall struct { - s *Service - project string - interconnect string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkFirewallPoliciesAddRuleCall struct { + s *Service + project string + firewallPolicy string + firewallpolicyrule *FirewallPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified interconnect. Get a list of available -// interconnects by making a list() request. +// AddRule: Inserts a rule into a firewall policy. // -// - interconnect: Name of the interconnect to return. +// - firewallPolicy: Name of the firewall policy to update. // - project: Project ID for this request. -func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { - c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkFirewallPoliciesService) AddRule(project string, firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *NetworkFirewallPoliciesAddRuleCall { + c := &NetworkFirewallPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.firewallPolicy = firewallPolicy + c.firewallpolicyrule = firewallpolicyrule + return c +} + +// MaxPriority sets the optional parameter "maxPriority": When +// rule.priority is not specified, auto choose a unused priority between +// minPriority and maxPriority>. This field is exclusive with +// rule.priority. +func (c *NetworkFirewallPoliciesAddRuleCall) MaxPriority(maxPriority int64) *NetworkFirewallPoliciesAddRuleCall { + c.urlParams_.Set("maxPriority", fmt.Sprint(maxPriority)) + return c +} + +// MinPriority sets the optional parameter "minPriority": When +// rule.priority is not specified, auto choose a unused priority between +// minPriority and maxPriority>. This field is exclusive with +// rule.priority. +func (c *NetworkFirewallPoliciesAddRuleCall) MinPriority(minPriority int64) *NetworkFirewallPoliciesAddRuleCall { + c.urlParams_.Set("minPriority", fmt.Sprint(minPriority)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. 
The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworkFirewallPoliciesAddRuleCall) RequestId(requestId string) *NetworkFirewallPoliciesAddRuleCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { +func (c *NetworkFirewallPoliciesAddRuleCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesAddRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { +func (c *NetworkFirewallPoliciesAddRuleCall) Context(ctx context.Context) *NetworkFirewallPoliciesAddRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsGetCall) Header() http.Header { +func (c *NetworkFirewallPoliciesAddRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.get" call. -// Exactly one of *Interconnect or error will be non-nil. Any non-2xx +// Do executes the "compute.networkFirewallPolicies.addRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either -// *Interconnect.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { +func (c *NetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Interconnect{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -100591,157 +116680,183 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, } return ret, nil // { - // "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", - // "flatPath": "projects/{project}/global/interconnects/{interconnect}", - // "httpMethod": "GET", - // "id": "compute.interconnects.get", + // "description": "Inserts a rule into a firewall policy.", + // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule", + // "httpMethod": "POST", + // "id": "compute.networkFirewallPolicies.addRule", // "parameterOrder": [ // "project", - // "interconnect" + // "firewallPolicy" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to return.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "maxPriority": { + // "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. This field is exclusive with rule.priority.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "minPriority": { + // "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. This field is exclusive with rule.priority.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/global/interconnects/{interconnect}", + // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule", + // "request": { + // "$ref": "FirewallPolicyRule" + // }, // "response": { - // "$ref": "Interconnect" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.interconnects.getDiagnostics": +// method id "compute.networkFirewallPolicies.cloneRules": -type InterconnectsGetDiagnosticsCall struct { - s *Service - project string - interconnect string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkFirewallPoliciesCloneRulesCall struct { + s *Service + project string + firewallPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetDiagnostics: Returns the interconnectDiagnostics for the specified -// interconnect. +// CloneRules: Copies rules to the specified firewall policy. // -// - interconnect: Name of the interconnect resource to query. +// - firewallPolicy: Name of the firewall policy to update. // - project: Project ID for this request. -func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { - c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkFirewallPoliciesService) CloneRules(project string, firewallPolicy string) *NetworkFirewallPoliciesCloneRulesCall { + c := &NetworkFirewallPoliciesCloneRulesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.firewallPolicy = firewallPolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworkFirewallPoliciesCloneRulesCall) RequestId(requestId string) *NetworkFirewallPoliciesCloneRulesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// SourceFirewallPolicy sets the optional parameter +// "sourceFirewallPolicy": The firewall policy from which to copy rules. +func (c *NetworkFirewallPoliciesCloneRulesCall) SourceFirewallPolicy(sourceFirewallPolicy string) *NetworkFirewallPoliciesCloneRulesCall { + c.urlParams_.Set("sourceFirewallPolicy", sourceFirewallPolicy) return c } // Fields allows partial responses to be retrieved. 
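[Editor's note: the following is an illustrative sketch, not part of the vendored diff. It shows how the generated NetworkFirewallPolicies AddAssociation and AddRule calls introduced above might be used. The project, policy, association, and rule values are assumptions for the example only; only the call builders (AddAssociation, ReplaceExistingAssociation, AddRule, MinPriority, MaxPriority, Context, Do) come from the code in this hunk.]

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated client
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Associate a VPC network with a global network firewall policy.
	assoc := &compute.FirewallPolicyAssociation{
		Name:             "example-association",                            // hypothetical name
		AttachmentTarget: "projects/my-project/global/networks/my-network", // hypothetical target
	}
	op, err := svc.NetworkFirewallPolicies.
		AddAssociation("my-project", "my-policy", assoc).
		ReplaceExistingAssociation(true). // overwrite an existing association instead of erroring
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("addAssociation operation: %s", op.Name)

	// Insert a rule; MinPriority/MaxPriority let the server pick a free
	// priority when rule.Priority is left unset.
	rule := &compute.FirewallPolicyRule{
		Direction: "INGRESS",
		Action:    "allow",
		Match: &compute.FirewallPolicyRuleMatcher{
			SrcIpRanges: []string{"10.0.0.0/8"},
			Layer4Configs: []*compute.FirewallPolicyRuleMatcherLayer4Config{
				{IpProtocol: "tcp", Ports: []string{"443"}},
			},
		},
	}
	op, err = svc.NetworkFirewallPolicies.
		AddRule("my-project", "my-policy", rule).
		MinPriority(1000).MaxPriority(2000).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("addRule operation: %s", op.Name)
}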
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { +func (c *NetworkFirewallPoliciesCloneRulesCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesCloneRulesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { +func (c *NetworkFirewallPoliciesCloneRulesCall) Context(ctx context.Context) *NetworkFirewallPoliciesCloneRulesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { +func (c *NetworkFirewallPoliciesCloneRulesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesCloneRulesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}/getDiagnostics") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/cloneRules") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.getDiagnostics" call. -// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { +// Do executes the "compute.networkFirewallPolicies.cloneRules" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkFirewallPoliciesCloneRulesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InterconnectsGetDiagnosticsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -100753,17 +116868,17 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Returns the interconnectDiagnostics for the specified interconnect.", - // "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", - // "httpMethod": "GET", - // "id": "compute.interconnects.getDiagnostics", + // "description": "Copies rules to the specified firewall policy.", + // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/cloneRules", + // "httpMethod": "POST", + // "id": "compute.networkFirewallPolicies.cloneRules", // "parameterOrder": [ // "project", - // "interconnect" + // "firewallPolicy" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect resource to query.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -100775,40 +116890,49 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sourceFirewallPolicy": { + // "description": "The firewall policy from which to copy rules.", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/cloneRules", // "response": { - // "$ref": "InterconnectsGetDiagnosticsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.interconnects.insert": +// method id "compute.networkFirewallPolicies.delete": -type InterconnectsInsertCall struct { - s *Service - project string - interconnect *Interconnect - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkFirewallPoliciesDeleteCall struct { + s *Service + project string + firewallPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a Interconnect in the specified project using the -// data included in the request. +// Delete: Deletes the specified policy. // +// - firewallPolicy: Name of the firewall policy to delete. // - project: Project ID for this request. -func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { - c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkFirewallPoliciesService) Delete(project string, firewallPolicy string) *NetworkFirewallPoliciesDeleteCall { + c := &NetworkFirewallPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.firewallPolicy = firewallPolicy return c } @@ -100823,7 +116947,7 @@ func (r *InterconnectsService) Insert(project string, interconnect *Interconnect // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { +func (c *NetworkFirewallPoliciesDeleteCall) RequestId(requestId string) *NetworkFirewallPoliciesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -100831,7 +116955,7 @@ func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInse // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { +func (c *NetworkFirewallPoliciesDeleteCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -100839,21 +116963,21 @@ func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
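[Editor's note: an illustrative sketch, not part of the vendored diff. It exercises the CloneRules call added above, copying rules from one global network firewall policy into another. Project and policy names are assumptions; github.com/google/uuid is pulled in only to produce an example requestId.]

package main

import (
	"context"
	"log"

	"github.com/google/uuid"
	compute "google.golang.org/api/compute/v1" // assumed import path for this generated client
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// sourceFirewallPolicy names the policy whose rules are copied into target-policy.
	op, err := svc.NetworkFirewallPolicies.
		CloneRules("my-project", "target-policy").
		SourceFirewallPolicy("projects/my-project/global/firewallPolicies/source-policy").
		RequestId(uuid.NewString()). // idempotency token so a retried request is not applied twice
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("cloneRules operation %s status: %s", op.Name, op.Status)
}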
-func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { +func (c *NetworkFirewallPoliciesDeleteCall) Context(ctx context.Context) *NetworkFirewallPoliciesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsInsertCall) Header() http.Header { +func (c *NetworkFirewallPoliciesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -100861,51 +116985,47 @@ func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.insert" call. +// Do executes the "compute.networkFirewallPolicies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkFirewallPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -100919,14 +117039,22 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a Interconnect in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/global/interconnects", - // "httpMethod": "POST", - // "id": "compute.interconnects.insert", + // "description": "Deletes the specified policy.", + // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}", + // "httpMethod": "DELETE", + // "id": "compute.networkFirewallPolicies.delete", // "parameterOrder": [ - // "project" + // "project", + // "firewallPolicy" // ], // "parameters": { + // "firewallPolicy": { + // "description": "Name of the firewall policy to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -100940,10 +117068,7 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "projects/{project}/global/interconnects", - // "request": { - // "$ref": "Interconnect" - // }, + // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}", // "response": { // "$ref": "Operation" // }, @@ -100955,100 +117080,33 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.interconnects.list": +// method id "compute.networkFirewallPolicies.get": -type InterconnectsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkFirewallPoliciesGetCall struct { + s *Service + project string + firewallPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of interconnect available to the specified -// project. +// Get: Returns the specified network firewall policy. // +// - firewallPolicy: Name of the firewall policy to get. // - project: Project ID for this request. -func (r *InterconnectsService) List(project string) *InterconnectsListCall { - c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkFirewallPoliciesService) Get(project string, firewallPolicy string) *NetworkFirewallPoliciesGetCall { + c := &NetworkFirewallPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. 
For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *InterconnectsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectsListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + c.firewallPolicy = firewallPolicy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { +func (c *NetworkFirewallPoliciesGetCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101058,7 +117116,7 @@ func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { +func (c *NetworkFirewallPoliciesGetCall) IfNoneMatch(entityTag string) *NetworkFirewallPoliciesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -101066,21 +117124,21 @@ func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsList // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { +func (c *NetworkFirewallPoliciesGetCall) Context(ctx context.Context) *NetworkFirewallPoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsListCall) Header() http.Header { +func (c *NetworkFirewallPoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -101093,7 +117151,7 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -101101,38 +117159,39 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.list" call. -// Exactly one of *InterconnectList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InterconnectList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.networkFirewallPolicies.get" call. +// Exactly one of *FirewallPolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *FirewallPolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
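[Editor's note: an illustrative sketch, not part of the vendored diff. It uses the Delete call added above and, because Delete returns a long-running Operation, polls it with GlobalOperations.Wait; that Wait call is not shown in this hunk and is an assumption about the same generated client. Names are placeholders.]

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated client
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	op, err := svc.NetworkFirewallPolicies.Delete("my-project", "my-policy").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	// Delete is asynchronous; block until the global operation reaches DONE.
	for op.Status != "DONE" {
		op, err = svc.GlobalOperations.Wait("my-project", op.Name).Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
	}
	if op.Error != nil {
		log.Fatalf("delete failed: %+v", op.Error.Errors)
	}
	fmt.Println("firewall policy deleted")
}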
-func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { +func (c *NetworkFirewallPoliciesGetCall) Do(opts ...googleapi.CallOption) (*FirewallPolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InterconnectList{ + ret := &FirewallPolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101144,35 +117203,20 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL } return ret, nil // { - // "description": "Retrieves the list of interconnect available to the specified project.", - // "flatPath": "projects/{project}/global/interconnects", + // "description": "Returns the specified network firewall policy.", + // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}", // "httpMethod": "GET", - // "id": "compute.interconnects.list", + // "id": "compute.networkFirewallPolicies.get", // "parameterOrder": [ - // "project" + // "project", + // "firewallPolicy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "firewallPolicy": { + // "description": "Name of the firewall policy to get.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -101181,16 +117225,11 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/global/interconnects", + // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}", // "response": { - // "$ref": "InterconnectList" + // "$ref": "FirewallPolicy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -101201,150 +117240,126 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.interconnects.patch": +// method id "compute.networkFirewallPolicies.getAssociation": -type InterconnectsPatchCall struct { - s *Service - project string - interconnect string - interconnect2 *Interconnect - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkFirewallPoliciesGetAssociationCall struct { + s *Service + project string + firewallPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified interconnect with the data included in -// the request. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. +// GetAssociation: Gets an association with the specified name. // -// - interconnect: Name of the interconnect to update. 
-// - project: Project ID for this request. -func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { - c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - firewallPolicy: Name of the firewall policy to which the queried +// association belongs. +// - project: Project ID for this request. +func (r *NetworkFirewallPoliciesService) GetAssociation(project string, firewallPolicy string) *NetworkFirewallPoliciesGetAssociationCall { + c := &NetworkFirewallPoliciesGetAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect - c.interconnect2 = interconnect2 + c.firewallPolicy = firewallPolicy return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { - c.urlParams_.Set("requestId", requestId) +// Name sets the optional parameter "name": The name of the association +// to get from the firewall policy. +func (c *NetworkFirewallPoliciesGetAssociationCall) Name(name string) *NetworkFirewallPoliciesGetAssociationCall { + c.urlParams_.Set("name", name) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { +func (c *NetworkFirewallPoliciesGetAssociationCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesGetAssociationCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkFirewallPoliciesGetAssociationCall) IfNoneMatch(entityTag string) *NetworkFirewallPoliciesGetAssociationCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { +func (c *NetworkFirewallPoliciesGetAssociationCall) Context(ctx context.Context) *NetworkFirewallPoliciesGetAssociationCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
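[Editor's note: an illustrative sketch, not part of the vendored diff. It fetches a network firewall policy with the Get call added above and trims the response with Fields (partial response). The project/policy names and the chosen field selectors are assumptions for the example only.]

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated client
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	policy, err := svc.NetworkFirewallPolicies.
		Get("my-project", "my-policy").
		Fields("name", "rules"). // partial response: only the fields we need
		Context(ctx).Do()
	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
			log.Fatalf("policy not found: %v", gerr)
		}
		log.Fatal(err)
	}
	fmt.Printf("policy %s has %d rules\n", policy.Name, len(policy.Rules))
}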
-func (c *InterconnectsPatchCall) Header() http.Header { +func (c *NetworkFirewallPoliciesGetAssociationCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesGetAssociationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/getAssociation") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkFirewallPolicies.getAssociation" call. +// Exactly one of *FirewallPolicyAssociation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *FirewallPolicyAssociation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkFirewallPoliciesGetAssociationCall) Do(opts ...googleapi.CallOption) (*FirewallPolicyAssociation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &FirewallPolicyAssociation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101356,81 +117371,83 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/global/interconnects/{interconnect}", - // "httpMethod": "PATCH", - // "id": "compute.interconnects.patch", + // "description": "Gets an association with the specified name.", + // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/getAssociation", + // "httpMethod": "GET", + // "id": "compute.networkFirewallPolicies.getAssociation", // "parameterOrder": [ // "project", - // "interconnect" + // "firewallPolicy" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to update.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to which the queried association belongs.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "name": { + // "description": "The name of the association to get from the firewall policy.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/global/interconnects/{interconnect}", - // "request": { - // "$ref": "Interconnect" - // }, + // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/getAssociation", // "response": { - // "$ref": "Operation" + // "$ref": "FirewallPolicyAssociation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.licenseCodes.get": +// method id "compute.networkFirewallPolicies.getIamPolicy": -type LicenseCodesGetCall struct { +type NetworkFirewallPoliciesGetIamPolicyCall struct { s *Service project string - licenseCode string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Return a specified license code. License codes are mirrored -// across all projects that have permissions to read the License Code. -// *Caution* This resource is intended for use only by third-party -// partners who are creating Cloud Marketplace images. +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. // -// - licenseCode: Number corresponding to the License code resource to -// return. // - project: Project ID for this request. -func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { - c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *NetworkFirewallPoliciesService) GetIamPolicy(project string, resource string) *NetworkFirewallPoliciesGetIamPolicyCall { + c := &NetworkFirewallPoliciesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.licenseCode = licenseCode + c.resource = resource + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *NetworkFirewallPoliciesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NetworkFirewallPoliciesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { +func (c *NetworkFirewallPoliciesGetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101440,7 +117457,7 @@ func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
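[Editor's note: an illustrative sketch, not part of the vendored diff. It reads back a single association with the GetAssociation call added above; the association name matches the hypothetical one used in the AddAssociation sketch, and all identifiers are assumptions.]

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated client
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	assoc, err := svc.NetworkFirewallPolicies.
		GetAssociation("my-project", "my-policy").
		Name("example-association"). // which association on the policy to return
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("association %s -> %s\n", assoc.Name, assoc.AttachmentTarget)
}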
-func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { +func (c *NetworkFirewallPoliciesGetIamPolicyCall) IfNoneMatch(entityTag string) *NetworkFirewallPoliciesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -101448,21 +117465,21 @@ func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { +func (c *NetworkFirewallPoliciesGetIamPolicyCall) Context(ctx context.Context) *NetworkFirewallPoliciesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicenseCodesGetCall) Header() http.Header { +func (c *NetworkFirewallPoliciesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -101475,7 +117492,7 @@ func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenseCodes/{licenseCode}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -101483,39 +117500,39 @@ func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "licenseCode": c.licenseCode, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenseCodes.get" call. -// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LicenseCode.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { +// Do executes the "compute.networkFirewallPolicies.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworkFirewallPoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &LicenseCode{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101527,21 +117544,20 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er } return ret, nil // { - // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", - // "flatPath": "projects/{project}/global/licenseCodes/{licenseCode}", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/global/firewallPolicies/{resource}/getIamPolicy", // "httpMethod": "GET", - // "id": "compute.licenseCodes.get", + // "id": "compute.networkFirewallPolicies.getIamPolicy", // "parameterOrder": [ // "project", - // "licenseCode" + // "resource" // ], // "parameters": { - // "licenseCode": { - // "description": "Number corresponding to the License code resource to return.", - // "location": "path", - // "pattern": "[0-9]{0,61}?", - // "required": true, - // "type": "string" + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" // }, // "project": { // "description": "Project ID for this request.", @@ -101549,11 +117565,18 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/licenseCodes/{licenseCode}", + // "path": "projects/{project}/global/firewallPolicies/{resource}/getIamPolicy", // "response": { - // "$ref": "LicenseCode" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -101564,113 +117587,126 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er } -// method id "compute.licenseCodes.testIamPermissions": +// method id "compute.networkFirewallPolicies.getRule": -type LicenseCodesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkFirewallPoliciesGetRuleCall struct { + s *Service + project string + firewallPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
*Caution* This resource is intended for use only -// by third-party partners who are creating Cloud Marketplace images. +// GetRule: Gets a rule of the specified priority. // -// - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { - c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. +// - project: Project ID for this request. +func (r *NetworkFirewallPoliciesService) GetRule(project string, firewallPolicy string) *NetworkFirewallPoliciesGetRuleCall { + c := &NetworkFirewallPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.firewallPolicy = firewallPolicy + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to get from the firewall policy. +func (c *NetworkFirewallPoliciesGetRuleCall) Priority(priority int64) *NetworkFirewallPoliciesGetRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicenseCodesTestIamPermissionsCall { +func (c *NetworkFirewallPoliciesGetRuleCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesGetRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkFirewallPoliciesGetRuleCall) IfNoneMatch(entityTag string) *NetworkFirewallPoliciesGetRuleCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicenseCodesTestIamPermissionsCall) Context(ctx context.Context) *LicenseCodesTestIamPermissionsCall { +func (c *NetworkFirewallPoliciesGetRuleCall) Context(ctx context.Context) *NetworkFirewallPoliciesGetRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
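[Editor's note, not part of the vendored patch: a hedged sketch of the generated compute.networkFirewallPolicies.getRule call, assuming the same compute/v1 package; project, policy name, and priority are placeholders.]

// Sketch: fetch a single firewall policy rule by priority.
package main

import (
    "context"
    "log"

    compute "google.golang.org/api/compute/v1"
)

func main() {
    ctx := context.Background()
    svc, err := compute.NewService(ctx)
    if err != nil {
        log.Fatalf("compute.NewService: %v", err)
    }
    // Priority is sent as a query parameter and selects which rule to return,
    // matching the Priority setter on the generated call above.
    rule, err := svc.NetworkFirewallPolicies.
        GetRule("my-project", "my-firewall-policy").
        Priority(1000).
        Context(ctx).
        Do()
    if err != nil {
        log.Fatalf("getRule: %v", err)
    }
    log.Printf("rule %d: action=%s direction=%s", rule.Priority, rule.Action, rule.Direction)
}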
-func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { +func (c *NetworkFirewallPoliciesGetRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenseCodes/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/getRule") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenseCodes.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.networkFirewallPolicies.getRule" call. +// Exactly one of *FirewallPolicyRule or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *FirewallPolicyRule.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *NetworkFirewallPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*FirewallPolicyRule, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TestPermissionsResponse{ + ret := &FirewallPolicyRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101682,36 +117718,39 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", - // "flatPath": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", - // "httpMethod": "POST", - // "id": "compute.licenseCodes.testIamPermissions", + // "description": "Gets a rule of the specified priority.", + // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/getRule", + // "httpMethod": "GET", + // "id": "compute.networkFirewallPolicies.getRule", // "parameterOrder": [ // "project", - // "resource" + // "firewallPolicy" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to which the queried rule belongs.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "priority": { + // "description": "The priority of the rule to get from the firewall policy.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/getRule", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "FirewallPolicyRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -101722,27 +117761,25 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.licenses.delete": +// method id "compute.networkFirewallPolicies.insert": -type LicensesDeleteCall struct { - s *Service - project string - license string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkFirewallPoliciesInsertCall struct { + s *Service + project string + firewallpolicy *FirewallPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified license. *Caution* This resource is -// intended for use only by third-party partners who are creating Cloud -// Marketplace images. +// Insert: Creates a new policy in the specified project using the data +// included in the request. // -// - license: Name of the license resource to delete. // - project: Project ID for this request. 
-func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { - c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkFirewallPoliciesService) Insert(project string, firewallpolicy *FirewallPolicy) *NetworkFirewallPoliciesInsertCall { + c := &NetworkFirewallPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.firewallpolicy = firewallpolicy return c } @@ -101757,7 +117794,7 @@ func (r *LicensesService) Delete(project string, license string) *LicensesDelete // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { +func (c *NetworkFirewallPoliciesInsertCall) RequestId(requestId string) *NetworkFirewallPoliciesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -101765,7 +117802,7 @@ func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { +func (c *NetworkFirewallPoliciesInsertCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101773,21 +117810,21 @@ func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { +func (c *NetworkFirewallPoliciesInsertCall) Context(ctx context.Context) *NetworkFirewallPoliciesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesDeleteCall) Header() http.Header { +func (c *NetworkFirewallPoliciesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -101795,47 +117832,51 @@ func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.delete" call. +// Do executes the "compute.networkFirewallPolicies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkFirewallPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -101849,22 +117890,14 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified license. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", - // "flatPath": "projects/{project}/global/licenses/{license}", - // "httpMethod": "DELETE", - // "id": "compute.licenses.delete", + // "description": "Creates a new policy in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/firewallPolicies", + // "httpMethod": "POST", + // "id": "compute.networkFirewallPolicies.insert", // "parameterOrder": [ - // "project", - // "license" + // "project" // ], // "parameters": { - // "license": { - // "description": "Name of the license resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -101878,7 +117911,10 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "projects/{project}/global/licenses/{license}", + // "path": "projects/{project}/global/firewallPolicies", + // "request": { + // "$ref": "FirewallPolicy" + // }, // "response": { // "$ref": "Operation" // }, @@ -101890,35 +117926,113 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.licenses.get": +// method id "compute.networkFirewallPolicies.list": -type LicensesGetCall struct { +type NetworkFirewallPoliciesListCall struct { s *Service project string - license string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified License resource. 
*Caution* This resource -// is intended for use only by third-party partners who are creating -// Cloud Marketplace images. +// List: Lists all the policies that have been configured for the +// specified project. // -// - license: Name of the License resource to return. // - project: Project ID for this request. -func (r *LicensesService) Get(project string, license string) *LicensesGetCall { - c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkFirewallPoliciesService) List(project string) *NetworkFirewallPoliciesListCall { + c := &NetworkFirewallPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NetworkFirewallPoliciesListCall) Filter(filter string) *NetworkFirewallPoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworkFirewallPoliciesListCall) MaxResults(maxResults int64) *NetworkFirewallPoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NetworkFirewallPoliciesListCall) OrderBy(orderBy string) *NetworkFirewallPoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NetworkFirewallPoliciesListCall) PageToken(pageToken string) *NetworkFirewallPoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *NetworkFirewallPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkFirewallPoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { +func (c *NetworkFirewallPoliciesListCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101928,7 +118042,7 @@ func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { +func (c *NetworkFirewallPoliciesListCall) IfNoneMatch(entityTag string) *NetworkFirewallPoliciesListCall { c.ifNoneMatch_ = entityTag return c } @@ -101936,21 +118050,21 @@ func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { +func (c *NetworkFirewallPoliciesListCall) Context(ctx context.Context) *NetworkFirewallPoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *LicensesGetCall) Header() http.Header { +func (c *NetworkFirewallPoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -101963,7 +118077,7 @@ func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -101972,38 +118086,248 @@ func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.get" call. -// Exactly one of *License or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *License.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { +// Do executes the "compute.networkFirewallPolicies.list" call. +// Exactly one of *FirewallPolicyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *FirewallPolicyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkFirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPolicyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &FirewallPolicyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all the policies that have been configured for the specified project.", + // "flatPath": "projects/{project}/global/firewallPolicies", + // "httpMethod": "GET", + // "id": "compute.networkFirewallPolicies.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/global/firewallPolicies", + // "response": { + // "$ref": "FirewallPolicyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworkFirewallPoliciesListCall) Pages(ctx context.Context, f func(*FirewallPolicyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.networkFirewallPolicies.patch": + +type NetworkFirewallPoliciesPatchCall struct { + s *Service + project string + firewallPolicy string + firewallpolicy *FirewallPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified policy with the data included in the +// request. +// +// - firewallPolicy: Name of the firewall policy to update. +// - project: Project ID for this request. +func (r *NetworkFirewallPoliciesService) Patch(project string, firewallPolicy string, firewallpolicy *FirewallPolicy) *NetworkFirewallPoliciesPatchCall { + c := &NetworkFirewallPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewallPolicy = firewallPolicy + c.firewallpolicy = firewallpolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworkFirewallPoliciesPatchCall) RequestId(requestId string) *NetworkFirewallPoliciesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
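[Editor's note, not part of the vendored patch: a sketch of creating a network firewall policy and paging through the project's policies with the generated List call's Pages helper, assuming the compute/v1 package vendored here; the project ID, policy name, and request ID are placeholders.]

// Sketch: insert a policy, then list policies page by page.
package main

import (
    "context"
    "log"

    compute "google.golang.org/api/compute/v1"
)

func main() {
    ctx := context.Background()
    svc, err := compute.NewService(ctx)
    if err != nil {
        log.Fatalf("compute.NewService: %v", err)
    }

    // Insert returns an Operation; a real caller would poll it before
    // depending on the new policy.
    op, err := svc.NetworkFirewallPolicies.
        Insert("my-project", &compute.FirewallPolicy{
            Name:        "example-policy",
            Description: "created from a usage sketch",
        }).
        RequestId("11111111-2222-3333-4444-555555555555"). // makes retries idempotent
        Context(ctx).
        Do()
    if err != nil {
        log.Fatalf("insert: %v", err)
    }
    log.Printf("insert operation: %s", op.Name)

    // Pages drives the maxResults/pageToken loop described above.
    err = svc.NetworkFirewallPolicies.
        List("my-project").
        MaxResults(50).
        Pages(ctx, func(page *compute.FirewallPolicyList) error {
            for _, fp := range page.Items {
                log.Printf("policy: %s", fp.Name)
            }
            return nil
        })
    if err != nil {
        log.Fatalf("list: %v", err)
    }
}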
+func (c *NetworkFirewallPoliciesPatchCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkFirewallPoliciesPatchCall) Context(ctx context.Context) *NetworkFirewallPoliciesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkFirewallPoliciesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkFirewallPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "firewallPolicy": c.firewallPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkFirewallPolicies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkFirewallPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &License{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -102015,17 +118339,17 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { } return ret, nil // { - // "description": "Returns the specified License resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", - // "flatPath": "projects/{project}/global/licenses/{license}", - // "httpMethod": "GET", - // "id": "compute.licenses.get", + // "description": "Patches the specified policy with the data included in the request.", + // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}", + // "httpMethod": "PATCH", + // "id": "compute.networkFirewallPolicies.patch", // "parameterOrder": [ // "project", - // "license" + // "firewallPolicy" // ], // "parameters": { - // "license": { - // "description": "Name of the License resource to return.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -102037,143 +118361,156 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/global/licenses/{license}", + // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}", + // "request": { + // "$ref": "FirewallPolicy" + // }, // "response": { - // "$ref": "License" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.licenses.getIamPolicy": +// method id "compute.networkFirewallPolicies.patchRule": -type LicensesGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkFirewallPoliciesPatchRuleCall struct { + s *Service + project string + firewallPolicy string + firewallpolicyrule *FirewallPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. *Caution* This resource -// is intended for use only by third-party partners who are creating -// Cloud Marketplace images. +// PatchRule: Patches a rule of the specified priority. // +// - firewallPolicy: Name of the firewall policy to update. // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. 
-func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { - c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkFirewallPoliciesService) PatchRule(project string, firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *NetworkFirewallPoliciesPatchRuleCall { + c := &NetworkFirewallPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.firewallPolicy = firewallPolicy + c.firewallpolicyrule = firewallpolicyrule return c } -// OptionsRequestedPolicyVersion sets the optional parameter -// "optionsRequestedPolicyVersion": Requested IAM Policy version. -func (c *LicensesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *LicensesGetIamPolicyCall { - c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) +// Priority sets the optional parameter "priority": The priority of the +// rule to patch. +func (c *NetworkFirewallPoliciesPatchRuleCall) Priority(priority int64) *NetworkFirewallPoliciesPatchRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworkFirewallPoliciesPatchRuleCall) RequestId(requestId string) *NetworkFirewallPoliciesPatchRuleCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { +func (c *NetworkFirewallPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesPatchRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { +func (c *NetworkFirewallPoliciesPatchRuleCall) Context(ctx context.Context) *NetworkFirewallPoliciesPatchRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *LicensesGetIamPolicyCall) Header() http.Header { +func (c *NetworkFirewallPoliciesPatchRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/patchRule") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.networkFirewallPolicies.patchRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkFirewallPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -102185,17 +118522,24 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. 
*Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", - // "flatPath": "projects/{project}/global/licenses/{resource}/getIamPolicy", - // "httpMethod": "GET", - // "id": "compute.licenses.getIamPolicy", + // "description": "Patches a rule of the specified priority.", + // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/patchRule", + // "httpMethod": "POST", + // "id": "compute.networkFirewallPolicies.patchRule", // "parameterOrder": [ // "project", - // "resource" + // "firewallPolicy" // ], // "parameters": { - // "optionsRequestedPolicyVersion": { - // "description": "Requested IAM Policy version.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "priority": { + // "description": "The priority of the rule to patch.", // "format": "int32", // "location": "query", // "type": "integer" @@ -102207,47 +118551,54 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/global/licenses/{resource}/getIamPolicy", + // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/patchRule", + // "request": { + // "$ref": "FirewallPolicyRule" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.licenses.insert": +// method id "compute.networkFirewallPolicies.removeAssociation": -type LicensesInsertCall struct { - s *Service - project string - license *License - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkFirewallPoliciesRemoveAssociationCall struct { + s *Service + project string + firewallPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Create a License resource in the specified project. *Caution* -// This resource is intended for use only by third-party partners who -// are creating Cloud Marketplace images. +// RemoveAssociation: Removes an association for the specified firewall +// policy. // +// - firewallPolicy: Name of the firewall policy to update. 
// - project: Project ID for this request. -func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { - c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkFirewallPoliciesService) RemoveAssociation(project string, firewallPolicy string) *NetworkFirewallPoliciesRemoveAssociationCall { + c := &NetworkFirewallPoliciesRemoveAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.firewallPolicy = firewallPolicy + return c +} + +// Name sets the optional parameter "name": Name for the attachment that +// will be removed. +func (c *NetworkFirewallPoliciesRemoveAssociationCall) Name(name string) *NetworkFirewallPoliciesRemoveAssociationCall { + c.urlParams_.Set("name", name) return c } @@ -102262,7 +118613,7 @@ func (r *LicensesService) Insert(project string, license *License) *LicensesInse // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { +func (c *NetworkFirewallPoliciesRemoveAssociationCall) RequestId(requestId string) *NetworkFirewallPoliciesRemoveAssociationCall { c.urlParams_.Set("requestId", requestId) return c } @@ -102270,7 +118621,7 @@ func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { +func (c *NetworkFirewallPoliciesRemoveAssociationCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesRemoveAssociationCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102278,21 +118629,21 @@ func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { +func (c *NetworkFirewallPoliciesRemoveAssociationCall) Context(ctx context.Context) *NetworkFirewallPoliciesRemoveAssociationCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
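For orientation, the NetworkFirewallPolicies calls introduced in this hunk follow the same builder pattern as the rest of the generated client: construct the call from the service, chain optional query parameters, then invoke Do. A minimal usage sketch, assuming Application Default Credentials and hypothetical project, policy, and attachment names (none of these appear in the diff):

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Remove a named association from a global network firewall policy.
	// "my-project", "my-policy" and "my-attachment" are placeholders.
	op, err := svc.NetworkFirewallPolicies.
		RemoveAssociation("my-project", "my-policy").
		Name("my-attachment").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("removeAssociation operation: %s (%s)", op.Name, op.Status)

	// Remove the rule at priority 1000 from the same policy.
	op, err = svc.NetworkFirewallPolicies.
		RemoveRule("my-project", "my-policy").
		Priority(1000).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("removeRule operation: %s (%s)", op.Name, op.Status)
}

Both calls return a long-running *compute.Operation; production code would typically poll it (for example via GlobalOperations.Wait) before assuming the change is live.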
-func (c *LicensesInsertCall) Header() http.Header { +func (c *NetworkFirewallPoliciesRemoveAssociationCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesRemoveAssociationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -102300,14 +118651,9 @@ func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/removeAssociation") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102315,36 +118661,37 @@ func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.insert" call. +// Do executes the "compute.networkFirewallPolicies.removeAssociation" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkFirewallPoliciesRemoveAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -102358,14 +118705,27 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Create a License resource in the specified project. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", - // "flatPath": "projects/{project}/global/licenses", + // "description": "Removes an association for the specified firewall policy.", + // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/removeAssociation", // "httpMethod": "POST", - // "id": "compute.licenses.insert", + // "id": "compute.networkFirewallPolicies.removeAssociation", // "parameterOrder": [ - // "project" + // "project", + // "firewallPolicy" // ], // "parameters": { + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "name": { + // "description": "Name for the attachment that will be removed.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -102379,208 +118739,139 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "projects/{project}/global/licenses", - // "request": { - // "$ref": "License" - // }, + // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/removeAssociation", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.licenses.list": +// method id "compute.networkFirewallPolicies.removeRule": -type LicensesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkFirewallPoliciesRemoveRuleCall struct { + s *Service + project string + firewallPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of licenses available in the specified -// project. This method does not get any licenses that belong to other -// projects, including licenses attached to publicly-available images, -// like Debian 9. If you want to get a list of publicly-available -// licenses, use this method to make a request to the respective image -// project, such as debian-cloud or windows-cloud. *Caution* This -// resource is intended for use only by third-party partners who are -// creating Cloud Marketplace images. +// RemoveRule: Deletes a rule of the specified priority. // +// - firewallPolicy: Name of the firewall policy to update. // - project: Project ID for this request. -func (r *LicensesService) List(project string) *LicensesListCall { - c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkFirewallPoliciesService) RemoveRule(project string, firewallPolicy string) *NetworkFirewallPoliciesRemoveRuleCall { + c := &NetworkFirewallPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.firewallPolicy = firewallPolicy return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. 
The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *LicensesListCall) Filter(filter string) *LicensesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { - c.urlParams_.Set("pageToken", pageToken) +// Priority sets the optional parameter "priority": The priority of the +// rule to remove from the firewall policy. +func (c *NetworkFirewallPoliciesRemoveRuleCall) Priority(priority int64) *NetworkFirewallPoliciesRemoveRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. 
-func (c *LicensesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *LicensesListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworkFirewallPoliciesRemoveRuleCall) RequestId(requestId string) *NetworkFirewallPoliciesRemoveRuleCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { +func (c *NetworkFirewallPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesRemoveRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { +func (c *NetworkFirewallPoliciesRemoveRuleCall) Context(ctx context.Context) *NetworkFirewallPoliciesRemoveRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesListCall) Header() http.Header { +func (c *NetworkFirewallPoliciesRemoveRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/removeRule") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.list" call. -// Exactly one of *LicensesListResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *LicensesListResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { +// Do executes the "compute.networkFirewallPolicies.removeRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkFirewallPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &LicensesListResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -102592,37 +118883,28 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon } return ret, nil // { - // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", - // "flatPath": "projects/{project}/global/licenses", - // "httpMethod": "GET", - // "id": "compute.licenses.list", + // "description": "Deletes a rule of the specified priority.", + // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/removeRule", + // "httpMethod": "POST", + // "id": "compute.networkFirewallPolicies.removeRule", // "parameterOrder": [ - // "project" + // "project", + // "firewallPolicy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", + // "priority": { + // "description": "The priority of the rule to remove from the firewall policy.", + // "format": "int32", // "location": "query", - // "minimum": "0", // "type": "integer" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -102630,49 +118912,27 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // } // }, - // "path": "projects/{project}/global/licenses", + // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/removeRule", // "response": { - // "$ref": "LicensesListResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.licenses.setIamPolicy": +// method id "compute.networkFirewallPolicies.setIamPolicy": -type LicensesSetIamPolicyCall struct { +type NetworkFirewallPoliciesSetIamPolicyCall struct { s *Service project string resource string @@ -102683,14 +118943,12 @@ type LicensesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. *Caution* This resource is -// intended for use only by third-party partners who are creating Cloud -// Marketplace images. +// resource. Replaces any existing policy. // // - project: Project ID for this request. // - resource: Name or id of the resource for this request. -func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { - c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkFirewallPoliciesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *NetworkFirewallPoliciesSetIamPolicyCall { + c := &NetworkFirewallPoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource c.globalsetpolicyrequest = globalsetpolicyrequest @@ -102700,7 +118958,7 @@ func (r *LicensesService) SetIamPolicy(project string, resource string, globalse // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { +func (c *NetworkFirewallPoliciesSetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102708,21 +118966,21 @@ func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamP // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { +func (c *NetworkFirewallPoliciesSetIamPolicyCall) Context(ctx context.Context) *NetworkFirewallPoliciesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesSetIamPolicyCall) Header() http.Header { +func (c *NetworkFirewallPoliciesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -102737,7 +118995,7 @@ func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102751,31 +119009,31 @@ func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.setIamPolicy" call. +// Do executes the "compute.networkFirewallPolicies.setIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *NetworkFirewallPoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -102789,10 +119047,10 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", - // "flatPath": "projects/{project}/global/licenses/{resource}/setIamPolicy", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/global/firewallPolicies/{resource}/setIamPolicy", // "httpMethod": "POST", - // "id": "compute.licenses.setIamPolicy", + // "id": "compute.networkFirewallPolicies.setIamPolicy", // "parameterOrder": [ // "project", // "resource" @@ -102813,7 +119071,7 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // "type": "string" // } // }, - // "path": "projects/{project}/global/licenses/{resource}/setIamPolicy", + // "path": "projects/{project}/global/firewallPolicies/{resource}/setIamPolicy", // "request": { // "$ref": "GlobalSetPolicyRequest" // }, @@ -102828,9 +119086,9 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } -// method id "compute.licenses.testIamPermissions": +// method id "compute.networkFirewallPolicies.testIamPermissions": -type LicensesTestIamPermissionsCall struct { +type NetworkFirewallPoliciesTestIamPermissionsCall struct { s *Service project string resource string @@ -102841,13 +119099,12 @@ type LicensesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. *Caution* This resource is intended for use only -// by third-party partners who are creating Cloud Marketplace images. +// specified resource. // // - project: Project ID for this request. // - resource: Name or id of the resource for this request. -func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { - c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkFirewallPoliciesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkFirewallPoliciesTestIamPermissionsCall { + c := &NetworkFirewallPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource c.testpermissionsrequest = testpermissionsrequest @@ -102857,7 +119114,7 @@ func (r *LicensesService) TestIamPermissions(project string, resource string, te // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
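A behavioral note that applies to every Do method in this hunk: non-2xx responses are now passed through gensupport.WrapError before being returned, instead of surfacing a bare *googleapi.Error. Callers that inspect errors should therefore prefer errors.As over a direct type assertion. A hedged sketch, reusing a service client like the one constructed above; the project, resource, and permission strings are placeholders:

package computeusage

import (
	"errors"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// checkPermissions sketches compute.networkFirewallPolicies.testIamPermissions
// together with error handling for the wrapped errors returned after this bump.
func checkPermissions(svc *compute.Service) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.networkFirewallPolicies.get"},
	}
	resp, err := svc.NetworkFirewallPolicies.
		TestIamPermissions("my-project", "my-policy", req).
		Do()
	if err != nil {
		var gerr *googleapi.Error
		if errors.As(err, &gerr) { // still matches through the WrapError wrapper
			return fmt.Errorf("compute API error %d: %s", gerr.Code, gerr.Message)
		}
		return err
	}
	fmt.Printf("granted: %v\n", resp.Permissions)
	return nil
}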
-func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { +func (c *NetworkFirewallPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102865,21 +119122,21 @@ func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { +func (c *NetworkFirewallPoliciesTestIamPermissionsCall) Context(ctx context.Context) *NetworkFirewallPoliciesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesTestIamPermissionsCall) Header() http.Header { +func (c *NetworkFirewallPoliciesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkFirewallPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -102894,7 +119151,7 @@ func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102908,31 +119165,31 @@ func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.testIamPermissions" call. +// Do executes the "compute.networkFirewallPolicies.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *NetworkFirewallPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -102946,10 +119203,10 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", - // "flatPath": "projects/{project}/global/licenses/{resource}/testIamPermissions", + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/global/firewallPolicies/{resource}/testIamPermissions", // "httpMethod": "POST", - // "id": "compute.licenses.testIamPermissions", + // "id": "compute.networkFirewallPolicies.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -102970,7 +119227,7 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test // "type": "string" // } // }, - // "path": "projects/{project}/global/licenses/{resource}/testIamPermissions", + // "path": "projects/{project}/global/firewallPolicies/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -102986,26 +119243,27 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } -// method id "compute.machineImages.delete": +// method id "compute.networks.addPeering": -type MachineImagesDeleteCall struct { - s *Service - project string - machineImage string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksAddPeeringCall struct { + s *Service + project string + network string + networksaddpeeringrequest *NetworksAddPeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified machine image. Deleting a machine image -// is permanent and cannot be undone. +// AddPeering: Adds a peering to the specified network. // -// - machineImage: The name of the machine image to delete. +// - network: Name of the network resource to add peering to. // - project: Project ID for this request. -func (r *MachineImagesService) Delete(project string, machineImage string) *MachineImagesDeleteCall { - c := &MachineImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { + c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.machineImage = machineImage + c.network = network + c.networksaddpeeringrequest = networksaddpeeringrequest return c } @@ -103020,7 +119278,7 @@ func (r *MachineImagesService) Delete(project string, machineImage string) *Mach // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
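The networks.addPeering call documented above takes a *compute.NetworksAddPeeringRequest body. A minimal sketch under the same assumptions; the project, network, and peer-network names are placeholders:

package computeusage

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// addPeering sketches compute.networks.addPeering; every name below is a placeholder.
func addPeering(ctx context.Context, svc *compute.Service) error {
	req := &compute.NetworksAddPeeringRequest{
		NetworkPeering: &compute.NetworkPeering{
			Name:                 "my-peering",
			Network:              "projects/other-project/global/networks/other-net",
			ExchangeSubnetRoutes: true,
		},
	}
	op, err := svc.Networks.AddPeering("my-project", "my-net", req).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("addPeering operation: %s (%s)\n", op.Name, op.Status)
	return nil
}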
-func (c *MachineImagesDeleteCall) RequestId(requestId string) *MachineImagesDeleteCall { +func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeeringCall { c.urlParams_.Set("requestId", requestId) return c } @@ -103028,7 +119286,7 @@ func (c *MachineImagesDeleteCall) RequestId(requestId string) *MachineImagesDele // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineImagesDeleteCall) Fields(s ...googleapi.Field) *MachineImagesDeleteCall { +func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103036,21 +119294,21 @@ func (c *MachineImagesDeleteCall) Fields(s ...googleapi.Field) *MachineImagesDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineImagesDeleteCall) Context(ctx context.Context) *MachineImagesDeleteCall { +func (c *NetworksAddPeeringCall) Context(ctx context.Context) *NetworksAddPeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineImagesDeleteCall) Header() http.Header { +func (c *NetworksAddPeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -103058,48 +119316,222 @@ func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksaddpeeringrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{machineImage}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}/addPeering") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "machineImage": c.machineImage, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.delete" call. +// Do executes the "compute.networks.addPeering" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a peering to the specified network.", + // "flatPath": "projects/{project}/global/networks/{network}/addPeering", + // "httpMethod": "POST", + // "id": "compute.networks.addPeering", + // "parameterOrder": [ + // "project", + // "network" + // ], + // "parameters": { + // "network": { + // "description": "Name of the network resource to add peering to.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/networks/{network}/addPeering", + // "request": { + // "$ref": "NetworksAddPeeringRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networks.delete": + +type NetworksDeleteCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified network. +// +// - network: Name of the network to delete. +// - project: Project ID for this request. +func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { + c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "network": c.network, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { return nil, err } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, @@ -103112,17 +119544,17 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified machine image. Deleting a machine image is permanent and cannot be undone.", - // "flatPath": "projects/{project}/global/machineImages/{machineImage}", + // "description": "Deletes the specified network.", + // "flatPath": "projects/{project}/global/networks/{network}", // "httpMethod": "DELETE", - // "id": "compute.machineImages.delete", + // "id": "compute.networks.delete", // "parameterOrder": [ // "project", - // "machineImage" + // "network" // ], // "parameters": { - // "machineImage": { - // "description": "The name of the machine image to delete.", + // "network": { + // "description": "Name of the network to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -103141,7 +119573,7 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "projects/{project}/global/machineImages/{machineImage}", + // "path": "projects/{project}/global/networks/{network}", // "response": { // "$ref": "Operation" // }, @@ -103153,34 +119585,34 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.machineImages.get": +// method id "compute.networks.get": -type MachineImagesGetCall struct { +type NetworksGetCall struct { s *Service project string - machineImage string + network string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified machine image. Gets a list of available -// machine images by making a list() request. +// Get: Returns the specified network. Gets a list of available networks +// by making a list() request. // -// - machineImage: The name of the machine image. +// - network: Name of the network to return. // - project: Project ID for this request. -func (r *MachineImagesService) Get(project string, machineImage string) *MachineImagesGetCall { - c := &MachineImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworksService) Get(project string, network string) *NetworksGetCall { + c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.machineImage = machineImage + c.network = network return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineImagesGetCall) Fields(s ...googleapi.Field) *MachineImagesGetCall { +func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103190,7 +119622,7 @@ func (c *MachineImagesGetCall) Fields(s ...googleapi.Field) *MachineImagesGetCal // getting updates only after the object has changed since the last // request. 
Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *MachineImagesGetCall) IfNoneMatch(entityTag string) *MachineImagesGetCall { +func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -103198,21 +119630,21 @@ func (c *MachineImagesGetCall) IfNoneMatch(entityTag string) *MachineImagesGetCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineImagesGetCall) Context(ctx context.Context) *MachineImagesGetCall { +func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineImagesGetCall) Header() http.Header { +func (c *NetworksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -103225,7 +119657,7 @@ func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{machineImage}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -103233,39 +119665,39 @@ func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "machineImage": c.machineImage, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.get" call. -// Exactly one of *MachineImage or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *MachineImage.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, error) { +// Do executes the "compute.networks.get" call. +// Exactly one of *Network or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Network.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &MachineImage{ + ret := &Network{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103277,17 +119709,17 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, } return ret, nil // { - // "description": "Returns the specified machine image. Gets a list of available machine images by making a list() request.", - // "flatPath": "projects/{project}/global/machineImages/{machineImage}", + // "description": "Returns the specified network. Gets a list of available networks by making a list() request.", + // "flatPath": "projects/{project}/global/networks/{network}", // "httpMethod": "GET", - // "id": "compute.machineImages.get", + // "id": "compute.networks.get", // "parameterOrder": [ // "project", - // "machineImage" + // "network" // ], // "parameters": { - // "machineImage": { - // "description": "The name of the machine image.", + // "network": { + // "description": "Name of the network to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -103301,9 +119733,9 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, // "type": "string" // } // }, - // "path": "projects/{project}/global/machineImages/{machineImage}", + // "path": "projects/{project}/global/networks/{network}", // "response": { - // "$ref": "MachineImage" + // "$ref": "Network" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -103314,41 +119746,34 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, } -// method id "compute.machineImages.getIamPolicy": +// method id "compute.networks.getEffectiveFirewalls": -type MachineImagesGetIamPolicyCall struct { +type NetworksGetEffectiveFirewallsCall struct { s *Service project string - resource string + network string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// GetEffectiveFirewalls: Returns the effective firewalls on a given +// network. // +// - network: Name of the network for this request. // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -func (r *MachineImagesService) GetIamPolicy(project string, resource string) *MachineImagesGetIamPolicyCall { - c := &MachineImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworksService) GetEffectiveFirewalls(project string, network string) *NetworksGetEffectiveFirewallsCall { + c := &NetworksGetEffectiveFirewallsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - return c -} - -// OptionsRequestedPolicyVersion sets the optional parameter -// "optionsRequestedPolicyVersion": Requested IAM Policy version. 
-func (c *MachineImagesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *MachineImagesGetIamPolicyCall { - c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + c.network = network return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesGetIamPolicyCall { +func (c *NetworksGetEffectiveFirewallsCall) Fields(s ...googleapi.Field) *NetworksGetEffectiveFirewallsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103358,7 +119783,7 @@ func (c *MachineImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *MachineIma // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *MachineImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *MachineImagesGetIamPolicyCall { +func (c *NetworksGetEffectiveFirewallsCall) IfNoneMatch(entityTag string) *NetworksGetEffectiveFirewallsCall { c.ifNoneMatch_ = entityTag return c } @@ -103366,21 +119791,21 @@ func (c *MachineImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *MachineIm // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineImagesGetIamPolicyCall) Context(ctx context.Context) *MachineImagesGetIamPolicyCall { +func (c *NetworksGetEffectiveFirewallsCall) Context(ctx context.Context) *NetworksGetEffectiveFirewallsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineImagesGetIamPolicyCall) Header() http.Header { +func (c *NetworksGetEffectiveFirewallsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksGetEffectiveFirewallsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -103393,7 +119818,7 @@ func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}/getEffectiveFirewalls") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -103401,39 +119826,206 @@ func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, e } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.networks.getEffectiveFirewalls" call. +// Exactly one of *NetworksGetEffectiveFirewallsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *NetworksGetEffectiveFirewallsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworksGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*NetworksGetEffectiveFirewallsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NetworksGetEffectiveFirewallsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the effective firewalls on a given network.", + // "flatPath": "projects/{project}/global/networks/{network}/getEffectiveFirewalls", + // "httpMethod": "GET", + // "id": "compute.networks.getEffectiveFirewalls", + // "parameterOrder": [ + // "project", + // "network" + // ], + // "parameters": { + // "network": { + // "description": "Name of the network for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/networks/{network}/getEffectiveFirewalls", + // "response": { + // "$ref": "NetworksGetEffectiveFirewallsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.networks.insert": + +type NetworksInsertCall struct { + s *Service + project string + network *Network + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a network in the specified project using the data +// included in the request. +// +// - project: Project ID for this request. 
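// Illustrative sketch, continuing the assumptions of the example above (a
// *compute.Service named svc and the same imports): counting the firewall
// rules that effectively apply to a network via
// compute.networks.getEffectiveFirewalls.
func printEffectiveFirewallCount(ctx context.Context, svc *compute.Service, project, network string) error {
	resp, err := svc.Networks.GetEffectiveFirewalls(project, network).Context(ctx).Do()
	if err != nil {
		return err
	}
	// resp.Firewalls carries the VPC firewall rules that apply to the network;
	// hierarchical firewall policies, if any, are reported in a separate field.
	fmt.Printf("%d firewall rules effectively apply to %s\n", len(resp.Firewalls), network)
	return nil
}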
+func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { + c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworksInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { return nil, err } - ret := &Policy{ + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103445,21 +120037,14 @@ func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "flatPath": "projects/{project}/global/machineImages/{resource}/getIamPolicy", - // "httpMethod": "GET", - // "id": "compute.machineImages.getIamPolicy", + // "description": "Creates a network in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/global/networks", + // "httpMethod": "POST", + // "id": "compute.networks.insert", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { - // "optionsRequestedPolicyVersion": { - // "description": "Requested IAM Policy version.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -103467,119 +120052,181 @@ func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/global/machineImages/{resource}/getIamPolicy", + // "path": "projects/{project}/global/networks", + // "request": { + // "$ref": "Network" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.machineImages.insert": +// method id "compute.networks.list": -type MachineImagesInsertCall struct { +type NetworksListCall struct { s *Service project string - machineimage *MachineImage urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Insert: Creates a machine image in the specified project using the -// data that is included in the request. If you are creating a new -// machine image to update an existing instance, your new machine image -// should use the same network or, if applicable, the same subnetwork as -// the original instance. +// List: Retrieves the list of networks available to the specified +// project. // // - project: Project ID for this request. -func (r *MachineImagesService) Insert(project string, machineimage *MachineImage) *MachineImagesInsertCall { - c := &MachineImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworksService) List(project string) *NetworksListCall { + c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.machineimage = machineimage return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *MachineImagesInsertCall) RequestId(requestId string) *MachineImagesInsertCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. 
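// Illustrative sketch, same assumptions as above: creating a custom-mode
// network with an idempotency requestId, as described for
// compute.networks.insert. The requestId value is a placeholder UUID, and the
// returned Operation would normally be polled before the network is used.
func insertNetwork(ctx context.Context, svc *compute.Service, project string) (*compute.Operation, error) {
	nw := &compute.Network{
		Name:                  "example-network",
		AutoCreateSubnetworks: false,
		// false is the Go zero value, so force the field onto the wire to
		// request a custom-mode (rather than auto-mode) network explicitly.
		ForceSendFields: []string{"AutoCreateSubnetworks"},
	}
	return svc.Networks.Insert(project, nw).
		RequestId("3d6f3e1a-0e6a-4a52-9f42-3a9a7a1c2b10"). // any non-zero UUID
		Context(ctx).
		Do()
}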
For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NetworksListCall) Filter(filter string) *NetworksListCall { + c.urlParams_.Set("filter", filter) return c } -// SourceInstance sets the optional parameter "sourceInstance": -// Required. Source instance that is used to create the machine image -// from. -func (c *MachineImagesInsertCall) SourceInstance(sourceInstance string) *MachineImagesInsertCall { - c.urlParams_.Set("sourceInstance", sourceInstance) +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
+func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *NetworksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineImagesInsertCall) Fields(s ...googleapi.Field) *MachineImagesInsertCall { +func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineImagesInsertCall) Context(ctx context.Context) *MachineImagesInsertCall { +func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineImagesInsertCall) Header() http.Header { +func (c *NetworksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.machineimage) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -103590,33 +120237,33 @@ func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.networks.list" call. +// Exactly one of *NetworkList or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *NetworkList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &NetworkList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103628,14 +120275,37 @@ func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a machine image in the specified project using the data that is included in the request. If you are creating a new machine image to update an existing instance, your new machine image should use the same network or, if applicable, the same subnetwork as the original instance.", - // "flatPath": "projects/{project}/global/machineImages", - // "httpMethod": "POST", - // "id": "compute.machineImages.insert", + // "description": "Retrieves the list of networks available to the specified project.", + // "flatPath": "projects/{project}/global/networks", + // "httpMethod": "GET", + // "id": "compute.networks.list", // "parameterOrder": [ // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -103643,67 +120313,99 @@ func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "sourceInstance": { - // "description": "Required. Source instance that is used to create the machine image from.", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // } // }, - // "path": "projects/{project}/global/machineImages", - // "request": { - // "$ref": "MachineImage" - // }, + // "path": "projects/{project}/global/networks", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.machineImages.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type MachineImagesListCall struct { +// method id "compute.networks.listPeeringRoutes": + +type NetworksListPeeringRoutesCall struct { s *Service project string + network string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of machine images that are contained within -// the specified project. +// ListPeeringRoutes: Lists the peering routes exchanged over peering +// connection. // +// - network: Name of the network for this request. // - project: Project ID for this request. -func (r *MachineImagesService) List(project string) *MachineImagesListCall { - c := &MachineImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworksService) ListPeeringRoutes(project string, network string) *NetworksListPeeringRoutesCall { + c := &NetworksListPeeringRoutesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.network = network + return c +} + +// Direction sets the optional parameter "direction": The direction of +// the exchanged routes. +// +// Possible values: +// +// "INCOMING" - For routes exported from peer network. +// "OUTGOING" - For routes exported from local network. +func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("direction", direction) return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. 
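// Illustrative sketch, same assumptions as above: paging through
// compute.networks.list with a filter expression, letting the generated Pages
// helper handle nextPageToken. The filter string is only an example of the
// syntax documented above.
func listAutoModeNetworks(ctx context.Context, svc *compute.Service, project string) error {
	return svc.Networks.List(project).
		Filter("autoCreateSubnetworks = true").
		MaxResults(100).
		Pages(ctx, func(page *compute.NetworkList) error {
			for _, n := range page.Items {
				fmt.Println(n.Name)
			}
			return nil // a non-nil error here halts the iteration
		})
}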
For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -103711,8 +120413,18 @@ func (r *MachineImagesService) List(project string) *MachineImagesListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *MachineImagesListCall) Filter(filter string) *MachineImagesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NetworksListPeeringRoutesCall) Filter(filter string) *NetworksListPeeringRoutesCall { c.urlParams_.Set("filter", filter) return c } @@ -103723,7 +120435,7 @@ func (c *MachineImagesListCall) Filter(filter string) *MachineImagesListCall { // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *MachineImagesListCall) MaxResults(maxResults int64) *MachineImagesListCall { +func (c *NetworksListPeeringRoutesCall) MaxResults(maxResults int64) *NetworksListPeeringRoutesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -103737,7 +120449,7 @@ func (c *MachineImagesListCall) MaxResults(maxResults int64) *MachineImagesListC // result first). Use this to sort resources like operations so that the // newest operation is returned first. 
Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *MachineImagesListCall) OrderBy(orderBy string) *MachineImagesListCall { +func (c *NetworksListPeeringRoutesCall) OrderBy(orderBy string) *NetworksListPeeringRoutesCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -103745,16 +120457,31 @@ func (c *MachineImagesListCall) OrderBy(orderBy string) *MachineImagesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *MachineImagesListCall) PageToken(pageToken string) *MachineImagesListCall { +func (c *NetworksListPeeringRoutesCall) PageToken(pageToken string) *NetworksListPeeringRoutesCall { c.urlParams_.Set("pageToken", pageToken) return c } +// PeeringName sets the optional parameter "peeringName": The response +// will show routes exchanged over the given peering connection. +func (c *NetworksListPeeringRoutesCall) PeeringName(peeringName string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("peeringName", peeringName) + return c +} + +// Region sets the optional parameter "region": The region of the +// request. The response will include all subnet routes, static routes +// and dynamic routes in the region. +func (c *NetworksListPeeringRoutesCall) Region(region string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("region", region) + return c +} + // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *MachineImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineImagesListCall { +func (c *NetworksListPeeringRoutesCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworksListPeeringRoutesCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -103762,7 +120489,7 @@ func (c *MachineImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineImagesListCall) Fields(s ...googleapi.Field) *MachineImagesListCall { +func (c *NetworksListPeeringRoutesCall) Fields(s ...googleapi.Field) *NetworksListPeeringRoutesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103772,7 +120499,7 @@ func (c *MachineImagesListCall) Fields(s ...googleapi.Field) *MachineImagesListC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *MachineImagesListCall) IfNoneMatch(entityTag string) *MachineImagesListCall { +func (c *NetworksListPeeringRoutesCall) IfNoneMatch(entityTag string) *NetworksListPeeringRoutesCall { c.ifNoneMatch_ = entityTag return c } @@ -103780,21 +120507,21 @@ func (c *MachineImagesListCall) IfNoneMatch(entityTag string) *MachineImagesList // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *MachineImagesListCall) Context(ctx context.Context) *MachineImagesListCall { +func (c *NetworksListPeeringRoutesCall) Context(ctx context.Context) *NetworksListPeeringRoutesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineImagesListCall) Header() http.Header { +func (c *NetworksListPeeringRoutesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -103807,7 +120534,7 @@ func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}/listPeeringRoutes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -103816,37 +120543,38 @@ func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.list" call. -// Exactly one of *MachineImageList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *MachineImageList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.networks.listPeeringRoutes" call. +// Exactly one of *ExchangedPeeringRoutesList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ExchangedPeeringRoutesList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageList, error) { +func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*ExchangedPeeringRoutesList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &MachineImageList{ + ret := &ExchangedPeeringRoutesList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103858,16 +120586,30 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL } return ret, nil // { - // "description": "Retrieves a list of machine images that are contained within the specified project.", - // "flatPath": "projects/{project}/global/machineImages", + // "description": "Lists the peering routes exchanged over peering connection.", + // "flatPath": "projects/{project}/global/networks/{network}/listPeeringRoutes", // "httpMethod": "GET", - // "id": "compute.machineImages.list", + // "id": "compute.networks.listPeeringRoutes", // "parameterOrder": [ - // "project" + // "project", + // "network" // ], // "parameters": { + // "direction": { + // "description": "The direction of the exchanged routes.", + // "enum": [ + // "INCOMING", + // "OUTGOING" + // ], + // "enumDescriptions": [ + // "For routes exported from peer network.", + // "For routes exported from local network." + // ], + // "location": "query", + // "type": "string" + // }, // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -103879,6 +120621,13 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL // "minimum": "0", // "type": "integer" // }, + // "network": { + // "description": "Name of the network for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", @@ -103889,6 +120638,11 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL // "location": "query", // "type": "string" // }, + // "peeringName": { + // "description": "The response will show routes exchanged over the given peering connection.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -103896,15 +120650,20 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL // "required": true, // "type": "string" // }, + // "region": { + // "description": "The region of the request. 
The response will include all subnet routes, static routes and dynamic routes in the region.", + // "location": "query", + // "type": "string" + // }, // "returnPartialSuccess": { // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } // }, - // "path": "projects/{project}/global/machineImages", + // "path": "projects/{project}/global/networks/{network}/listPeeringRoutes", // "response": { - // "$ref": "MachineImageList" + // "$ref": "ExchangedPeeringRoutesList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -103918,7 +120677,7 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *MachineImagesListCall) Pages(ctx context.Context, f func(*MachineImageList) error) error { +func (c *NetworksListPeeringRoutesCall) Pages(ctx context.Context, f func(*ExchangedPeeringRoutesList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -103936,35 +120695,52 @@ func (c *MachineImagesListCall) Pages(ctx context.Context, f func(*MachineImageL } } -// method id "compute.machineImages.setIamPolicy": +// method id "compute.networks.patch": -type MachineImagesSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksPatchCall struct { + s *Service + project string + network string + network2 *Network + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// Patch: Patches the specified network with the data included in the +// request. Only the following fields can be modified: +// routingConfig.routingMode. // +// - network: Name of the network to update. // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -func (r *MachineImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *MachineImagesSetIamPolicyCall { - c := &MachineImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { + c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.network = network + c.network2 = network2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. 
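// Illustrative sketch, same assumptions as above: inspecting the routes a
// peer exports over one peering connection via
// compute.networks.listPeeringRoutes. The peering name and region are
// placeholders for the query parameters documented above.
func listIncomingPeeringRoutes(ctx context.Context, svc *compute.Service, project, network string) error {
	return svc.Networks.ListPeeringRoutes(project, network).
		Direction("INCOMING"). // routes exported from the peer network
		PeeringName("example-peering").
		Region("us-central1").
		Pages(ctx, func(page *compute.ExchangedPeeringRoutesList) error {
			for _, r := range page.Items {
				fmt.Println(r.DestRange)
			}
			return nil
		})
}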
The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesSetIamPolicyCall { +func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103972,21 +120748,21 @@ func (c *MachineImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *MachineIma // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineImagesSetIamPolicyCall) Context(ctx context.Context) *MachineImagesSetIamPolicyCall { +func (c *NetworksPatchCall) Context(ctx context.Context) *NetworksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineImagesSetIamPolicyCall) Header() http.Header { +func (c *NetworksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -103994,54 +120770,54 @@ func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.networks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104053,15 +120829,22 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "flatPath": "projects/{project}/global/machineImages/{resource}/setIamPolicy", - // "httpMethod": "POST", - // "id": "compute.machineImages.setIamPolicy", + // "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", + // "flatPath": "projects/{project}/global/networks/{network}", + // "httpMethod": "PATCH", + // "id": "compute.networks.patch", // "parameterOrder": [ // "project", - // "resource" + // "network" // ], // "parameters": { + // "network": { + // "description": "Name of the network to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -104069,20 +120852,18 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/global/machineImages/{resource}/setIamPolicy", + // "path": "projects/{project}/global/networks/{network}", // "request": { - // "$ref": "GlobalSetPolicyRequest" + // "$ref": "Network" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -104092,35 +120873,50 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } -// method id "compute.machineImages.testIamPermissions": +// method id "compute.networks.removePeering": -type MachineImagesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksRemovePeeringCall struct { + s *Service + project string + network string + networksremovepeeringrequest *NetworksRemovePeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// RemovePeering: Removes a peering from the specified network. // +// - network: Name of the network resource to remove peering from. // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -func (r *MachineImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *MachineImagesTestIamPermissionsCall { - c := &MachineImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { + c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.network = network + c.networksremovepeeringrequest = networksremovepeeringrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemovePeeringCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
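// Editor's note (illustrative sketch, not part of the vendored diff): the
// snippet below shows how client code might call the generated
// Networks.Patch binding added above, switching a network's routing mode and
// passing a request ID so a retried call is deduplicated. The project name,
// network name and UUID value are placeholders.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// networks.patch only allows routingConfig.routingMode to be modified.
	op, err := svc.Networks.Patch("my-project", "my-network", &compute.Network{
		RoutingConfig: &compute.NetworkRoutingConfig{RoutingMode: "GLOBAL"},
	}).RequestId("11111111-2222-3333-4444-555555555555").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("patch operation started: %s", op.Name)
}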
-func (c *MachineImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *MachineImagesTestIamPermissionsCall { +func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemovePeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104128,21 +120924,21 @@ func (c *MachineImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Mach // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineImagesTestIamPermissionsCall) Context(ctx context.Context) *MachineImagesTestIamPermissionsCall { +func (c *NetworksRemovePeeringCall) Context(ctx context.Context) *NetworksRemovePeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineImagesTestIamPermissionsCall) Header() http.Header { +func (c *NetworksRemovePeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -104150,14 +120946,14 @@ func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksremovepeeringrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}/removePeering") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -104165,39 +120961,39 @@ func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Respo } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.networks.removePeering" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104209,15 +121005,22 @@ func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "flatPath": "projects/{project}/global/machineImages/{resource}/testIamPermissions", + // "description": "Removes a peering from the specified network.", + // "flatPath": "projects/{project}/global/networks/{network}/removePeering", // "httpMethod": "POST", - // "id": "compute.machineImages.testIamPermissions", + // "id": "compute.networks.removePeering", // "parameterOrder": [ // "project", - // "resource" + // "network" // ], // "parameters": { + // "network": { + // "description": "Name of the network resource to remove peering from.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -104225,220 +121028,142 @@ func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/global/machineImages/{resource}/testIamPermissions", + // "path": "projects/{project}/global/networks/{network}/removePeering", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "NetworksRemovePeeringRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.machineTypes.aggregatedList": +// method id "compute.networks.switchToCustomMode": -type MachineTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworksSwitchToCustomModeCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of machine types. +// SwitchToCustomMode: Switches the network mode from auto subnet mode +// to custom subnet mode. // +// - network: Name of the network to be updated. // - project: Project ID for this request. -func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { - c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { + c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.network = network return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. 
For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *MachineTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *MachineTypesAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *MachineTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesAggregatedListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. 
If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSwitchToCustomModeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { +func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { +func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesAggregatedListCall) Header() http.Header { +func (c *NetworksSwitchToCustomModeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}/switchToCustomMode") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.aggregatedList" call. -// Exactly one of *MachineTypeAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. 
Response headers are in either -// *MachineTypeAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { +// Do executes the "compute.networks.switchToCustomMode" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &MachineTypeAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104450,40 +121175,20 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach } return ret, nil // { - // "description": "Retrieves an aggregated list of machine types.", - // "flatPath": "projects/{project}/aggregated/machineTypes", - // "httpMethod": "GET", - // "id": "compute.machineTypes.aggregatedList", + // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", + // "flatPath": "projects/{project}/global/networks/{network}/switchToCustomMode", + // "httpMethod": "POST", + // "id": "compute.networks.switchToCustomMode", // "parameterOrder": [ - // "project" + // "project", + // "network" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "network": { + // "description": "Name of the network to be updated.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -104493,163 +121198,149 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // } // }, - // "path": "projects/{project}/aggregated/machineTypes", + // "path": "projects/{project}/global/networks/{network}/switchToCustomMode", // "response": { - // "$ref": "MachineTypeAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.machineTypes.get": +// method id "compute.networks.updatePeering": -type MachineTypesGetCall struct { - s *Service - project string - zone string - machineType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworksUpdatePeeringCall struct { + s *Service + project string + network string + networksupdatepeeringrequest *NetworksUpdatePeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified machine type. Gets a list of available -// machine types by making a list() request. +// UpdatePeering: Updates the specified network peering with the data +// included in the request. You can only modify the +// NetworkPeering.export_custom_routes field and the +// NetworkPeering.import_custom_routes field. // -// - machineType: Name of the machine type to return. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { - c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - network: Name of the network resource which the updated peering is +// belonging to. +// - project: Project ID for this request. +func (r *NetworksService) UpdatePeering(project string, network string, networksupdatepeeringrequest *NetworksUpdatePeeringRequest) *NetworksUpdatePeeringCall { + c := &NetworksUpdatePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.machineType = machineType + c.network = network + c.networksupdatepeeringrequest = networksupdatepeeringrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. 
If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworksUpdatePeeringCall) RequestId(requestId string) *NetworksUpdatePeeringCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { +func (c *NetworksUpdatePeeringCall) Fields(s ...googleapi.Field) *NetworksUpdatePeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { +func (c *NetworksUpdatePeeringCall) Context(ctx context.Context) *NetworksUpdatePeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesGetCall) Header() http.Header { +func (c *NetworksUpdatePeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksupdatepeeringrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/machineTypes/{machineType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}/updatePeering") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "machineType": c.machineType, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.get" call. -// Exactly one of *MachineType or error will be non-nil. 
Any non-2xx +// Do executes the "compute.networks.updatePeering" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *MachineType.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { +func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &MachineType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104661,18 +121352,17 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er } return ret, nil // { - // "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request.", - // "flatPath": "projects/{project}/zones/{zone}/machineTypes/{machineType}", - // "httpMethod": "GET", - // "id": "compute.machineTypes.get", + // "description": "Updates the specified network peering with the data included in the request. You can only modify the NetworkPeering.export_custom_routes field and the NetworkPeering.import_custom_routes field.", + // "flatPath": "projects/{project}/global/networks/{network}/updatePeering", + // "httpMethod": "PATCH", + // "id": "compute.networks.updatePeering", // "parameterOrder": [ // "project", - // "zone", - // "machineType" + // "network" // ], // "parameters": { - // "machineType": { - // "description": "Name of the machine type to return.", + // "network": { + // "description": "Name of the network resource which the updated peering is belonging to.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -104685,209 +121375,152 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/machineTypes/{machineType}", + // "path": "projects/{project}/global/networks/{network}/updatePeering", + // "request": { + // "$ref": "NetworksUpdatePeeringRequest" + // }, // "response": { - // "$ref": "MachineType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.machineTypes.list": +// method id "compute.nodeGroups.addNodes": -type MachineTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NodeGroupsAddNodesCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of machine types available to the specified -// project. +// AddNodes: Adds specified number of nodes to the node group. // -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { - c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { - c.urlParams_.Set("pageToken", pageToken) +// - nodeGroup: Name of the NodeGroup resource. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *NodeGroupsService) AddNodes(project string, zone string, nodeGroup string, nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest) *NodeGroupsAddNodesCall { + c := &NodeGroupsAddNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.nodeGroup = nodeGroup + c.nodegroupsaddnodesrequest = nodegroupsaddnodesrequest return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *MachineTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsAddNodesCall) RequestId(requestId string) *NodeGroupsAddNodesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
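// Editor's note (illustrative sketch, not part of the vendored diff): a
// minimal example of calling the generated NodeGroups.AddNodes binding shown
// above to grow a sole-tenant node group by two nodes. The project, zone and
// node-group names are placeholders.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	op, err := svc.NodeGroups.AddNodes("my-project", "us-central1-a", "my-node-group",
		&compute.NodeGroupsAddNodesRequest{AdditionalNodeCount: 2},
	).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("addNodes operation: %s", op.Name)
}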
-func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { +func (c *NodeGroupsAddNodesCall) Fields(s ...googleapi.Field) *NodeGroupsAddNodesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { +func (c *NodeGroupsAddNodesCall) Context(ctx context.Context) *NodeGroupsAddNodesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesListCall) Header() http.Header { +func (c *NodeGroupsAddNodesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsaddnodesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.list" call. -// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx +// Do executes the "compute.nodeGroups.addNodes" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *MachineTypeList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &MachineTypeList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104899,36 +121532,21 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis } return ret, nil // { - // "description": "Retrieves a list of machine types available to the specified project.", - // "flatPath": "projects/{project}/zones/{zone}/machineTypes", - // "httpMethod": "GET", - // "id": "compute.machineTypes.list", + // "description": "Adds specified number of nodes to the node group.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.addNodes", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "nodeGroup" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "nodeGroup": { + // "description": "Name of the NodeGroup resource.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -104938,10 +121556,10 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // }, // "zone": { // "description": "The name of the zone for this request.", @@ -104951,43 +121569,24 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/machineTypes", + // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", + // "request": { + // "$ref": "NodeGroupsAddNodesRequest" + // }, // "response": { - // "$ref": "MachineTypeList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networkEndpointGroups.aggregatedList": +// method id "compute.nodeGroups.aggregatedList": -type NetworkEndpointGroupsAggregatedListCall struct { +type NodeGroupsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -104996,30 +121595,33 @@ type NetworkEndpointGroupsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves the list of network endpoint groups and -// sorts them by zone. +// AggregatedList: Retrieves an aggregated list of node groups. Note: +// use nodeGroups.listNodes for more details about each group. // // - project: Project ID for this request. -func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { - c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregatedListCall { + c := &NodeGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. 
+// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -105027,8 +121629,18 @@ func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEn // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NodeGroupsAggregatedListCall) Filter(filter string) *NodeGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -105041,7 +121653,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *Network // response. For resource types which predate this field, if this flag // is omitted or false, only scopes of the scope types where the // resource type is expected to be found will be included. -func (c *NetworkEndpointGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEndpointGroupsAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NodeGroupsAggregatedListCall { c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) return c } @@ -105052,7 +121664,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) IncludeAllScopes(includeAllSco // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) MaxResults(maxResults int64) *NodeGroupsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -105066,7 +121678,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) * // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. 
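// --- Editor's note (not part of the vendored diff) ---
// The expanded Filter doc above describes two styles: AIP-160 expressions
// (field, operator, value) and RE2 regular expressions via `eq`/`ne`. A hedged
// sketch of both, assuming the same imports and *compute.Service setup as the
// addNodes sketch above; the project and filter values are placeholders.
func listFilteredGroups(ctx context.Context, svc *compute.Service, project string) error {
	// AIP-160 style: exclude a group by name.
	if _, err := svc.NodeGroups.AggregatedList(project).
		Filter(`name != example-group`).
		Context(ctx).Do(); err != nil {
		return err
	}

	// Regular-expression style: `ne` against an RE2 pattern that must match
	// the entire field, e.g. exclude groups whose name ends in "-test".
	_, err := svc.NodeGroups.AggregatedList(project).
		Filter(`name ne .*-test`).
		Context(ctx).Do()
	return err
}
// --- end editor's note ---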
-func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) OrderBy(orderBy string) *NodeGroupsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -105074,7 +121686,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *Netwo // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -105083,7 +121695,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *N // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -105091,7 +121703,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPar // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NodeGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -105101,7 +121713,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) * // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NodeGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -105109,21 +121721,21 @@ func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) Context(ctx context.Context) *NodeGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
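// --- Editor's note (not part of the vendored diff) ---
// The setters documented above (OrderBy, ReturnPartialSuccess, Fields,
// IfNoneMatch, Context) compose on the same call value. A hedged sketch of a
// cached, partial-response read; it assumes the imports from the addNodes
// sketch, that NodeGroupAggregatedList exposes an Items map, and that
// savedETag was recorded from an earlier response.
func cachedAggregatedList(ctx context.Context, svc *compute.Service, project, savedETag string) error {
	list, err := svc.NodeGroups.AggregatedList(project).
		OrderBy("creationTimestamp desc").
		ReturnPartialSuccess(true).
		Fields("items", "nextPageToken"). // partial response: only these fields
		IfNoneMatch(savedETag).           // server replies 304 if nothing changed
		Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		log.Println("aggregated list unchanged; reuse the cached copy")
		return nil
	}
	if err != nil {
		return err
	}
	log.Printf("got %d scopes", len(list.Items))
	return nil
}
// --- end editor's note ---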
-func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { +func (c *NodeGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -105136,7 +121748,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.R var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/nodeGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -105149,34 +121761,33 @@ func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.R return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.aggregatedList" call. -// Exactly one of *NetworkEndpointGroupAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { +// Do executes the "compute.nodeGroups.aggregatedList" call. +// Exactly one of *NodeGroupAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NodeGroupAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkEndpointGroupAggregatedList{ + ret := &NodeGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105188,16 +121799,16 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", - // "flatPath": "projects/{project}/aggregated/networkEndpointGroups", + // "description": "Retrieves an aggregated list of node groups. 
Note: use nodeGroups.listNodes for more details about each group.", + // "flatPath": "projects/{project}/aggregated/nodeGroups", // "httpMethod": "GET", - // "id": "compute.networkEndpointGroups.aggregatedList", + // "id": "compute.nodeGroups.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -105237,9 +121848,9 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/networkEndpointGroups", + // "path": "projects/{project}/aggregated/nodeGroups", // "response": { - // "$ref": "NetworkEndpointGroupAggregatedList" + // "$ref": "NodeGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -105253,7 +121864,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { +func (c *NodeGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NodeGroupAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -105271,34 +121882,28 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f f } } -// method id "compute.networkEndpointGroups.attachNetworkEndpoints": +// method id "compute.nodeGroups.delete": -type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { - s *Service - project string - zone string - networkEndpointGroup string - networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsDeleteCall struct { + s *Service + project string + zone string + nodeGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AttachNetworkEndpoints: Attach a list of network endpoints to the -// specified network endpoint group. +// Delete: Deletes the specified NodeGroup resource. // -// - networkEndpointGroup: The name of the network endpoint group where -// you are attaching network endpoints to. It should comply with -// RFC1035. +// - nodeGroup: Name of the NodeGroup resource to delete. // - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. 
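// --- Editor's note (not part of the vendored diff) ---
// The Pages method above drives nextPageToken for the caller and stops when
// the callback returns a non-nil error. A hedged sketch, assuming the same
// setup as the earlier sketches and that NodeGroupsScopedList exposes a
// NodeGroups slice (field names assumed from the generated package).
func printAllNodeGroups(ctx context.Context, svc *compute.Service, project string) error {
	return svc.NodeGroups.AggregatedList(project).
		MaxResults(100). // smaller pages; the documented default is 500
		Pages(ctx, func(page *compute.NodeGroupAggregatedList) error {
			for scope, scoped := range page.Items {
				for _, ng := range scoped.NodeGroups {
					log.Printf("%s: node group %s (size %d)", scope, ng.Name, ng.Size)
				}
			}
			return nil // a non-nil error here halts the iteration
		})
}
// --- end editor's note ---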
-func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { - c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *NodeGroupsService) Delete(project string, zone string, nodeGroup string) *NodeGroupsDeleteCall { + c := &NodeGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.networkEndpointGroup = networkEndpointGroup - c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest + c.nodeGroup = nodeGroup return c } @@ -105313,7 +121918,7 @@ func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zo // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +func (c *NodeGroupsDeleteCall) RequestId(requestId string) *NodeGroupsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -105321,7 +121926,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId st // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +func (c *NodeGroupsDeleteCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -105329,21 +121934,21 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +func (c *NodeGroupsDeleteCall) Context(ctx context.Context) *NodeGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
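// --- Editor's note (not part of the vendored diff) ---
// The RequestId doc above describes the idempotency contract: reusing the same
// UUID lets the server drop a duplicate retry. A hedged sketch of a retriable
// delete; the UUID is a placeholder (generate one per logical request), and
// the ZoneOperations.Wait call is an assumption about this API version used
// only to block until the zonal operation finishes.
func deleteNodeGroup(ctx context.Context, svc *compute.Service, project, zone, name string) error {
	op, err := svc.NodeGroups.Delete(project, zone, name).
		RequestId("3d7e5e3b-8f4c-4a7e-9d2a-0c1b2e3f4a5b"). // placeholder UUID
		Context(ctx).Do()
	if err != nil {
		return err
	}
	_, err = svc.ZoneOperations.Wait(project, zone, op.Name).Context(ctx).Do()
	return err
}
// --- end editor's note ---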
-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { +func (c *NodeGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -105351,53 +121956,48 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. +// Do executes the "compute.nodeGroups.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -105411,19 +122011,20 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Attach a list of network endpoints to the specified network endpoint group.", - // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", - // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", + // "description": "Deletes the specified NodeGroup resource.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "httpMethod": "DELETE", + // "id": "compute.nodeGroups.delete", // "parameterOrder": [ // "project", // "zone", - // "networkEndpointGroup" + // "nodeGroup" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", + // "nodeGroup": { + // "description": "Name of the NodeGroup resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -105440,16 +122041,14 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", - // "request": { - // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" - // }, + // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", // "response": { // "$ref": "Operation" // }, @@ -105461,33 +122060,31 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C } -// method id "compute.networkEndpointGroups.delete": +// method id "compute.nodeGroups.deleteNodes": -type NetworkEndpointGroupsDeleteCall struct { - s *Service - project string - zone string - networkEndpointGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsDeleteNodesCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified network endpoint group. The network -// endpoints in the NEG and the VM instances they belong to are not -// terminated when the NEG is deleted. Note that the NEG cannot be -// deleted if there are backend services referencing it. +// DeleteNodes: Deletes specified nodes from the node group. // -// - networkEndpointGroup: The name of the network endpoint group to -// delete. It should comply with RFC1035. 
-// - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. -func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { - c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - nodeGroup: Name of the NodeGroup resource whose nodes will be +// deleted. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { + c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.networkEndpointGroup = networkEndpointGroup + c.nodeGroup = nodeGroup + c.nodegroupsdeletenodesrequest = nodegroupsdeletenodesrequest return c } @@ -105502,7 +122099,7 @@ func (r *NetworkEndpointGroupsService) Delete(project string, zone string, netwo // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { +func (c *NodeGroupsDeleteNodesCall) RequestId(requestId string) *NodeGroupsDeleteNodesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -105510,7 +122107,7 @@ func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { +func (c *NodeGroupsDeleteNodesCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteNodesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -105518,21 +122115,21 @@ func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkE // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { +func (c *NodeGroupsDeleteNodesCall) Context(ctx context.Context) *NodeGroupsDeleteNodesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
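// --- Editor's note (not part of the vendored diff) ---
// deleteNodes posts a NodeGroupsDeleteNodesRequest naming the nodes to remove.
// A hedged sketch with the same assumptions as the earlier sketches; the node
// names are placeholders and the Nodes field name is assumed from the
// generated request type.
func deleteTwoNodes(ctx context.Context, svc *compute.Service, project, zone, group string) error {
	req := &compute.NodeGroupsDeleteNodesRequest{
		Nodes: []string{"my-node-group-0", "my-node-group-1"},
	}
	op, err := svc.NodeGroups.DeleteNodes(project, zone, group, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("deleteNodes operation %s started", op.Name)
	return nil
}
// --- end editor's note ---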
-func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { +func (c *NodeGroupsDeleteNodesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -105540,48 +122137,53 @@ func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsdeletenodesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.delete" call. +// Do executes the "compute.nodeGroups.deleteNodes" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -105595,19 +122197,20 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. 
Note that the NEG cannot be deleted if there are backend services referencing it.", - // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", - // "httpMethod": "DELETE", - // "id": "compute.networkEndpointGroups.delete", + // "description": "Deletes specified nodes from the node group.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.deleteNodes", // "parameterOrder": [ // "project", // "zone", - // "networkEndpointGroup" + // "nodeGroup" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + // "nodeGroup": { + // "description": "Name of the NodeGroup resource whose nodes will be deleted.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -105624,13 +122227,17 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", + // "request": { + // "$ref": "NodeGroupsDeleteNodesRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -105642,134 +122249,124 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.networkEndpointGroups.detachNetworkEndpoints": +// method id "compute.nodeGroups.get": -type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { - s *Service - project string - zone string - networkEndpointGroup string - networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsGetCall struct { + s *Service + project string + zone string + nodeGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DetachNetworkEndpoints: Detach a list of network endpoints from the -// specified network endpoint group. +// Get: Returns the specified NodeGroup. Get a list of available +// NodeGroups by making a list() request. Note: the "nodes" field should +// not be used. Use nodeGroups.listNodes instead. // -// - networkEndpointGroup: The name of the network endpoint group where -// you are removing network endpoints. It should comply with RFC1035. +// - nodeGroup: Name of the node group to return. // - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. -func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { - c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. 
+func (r *NodeGroupsService) Get(project string, zone string, nodeGroup string) *NodeGroupsGetCall { + c := &NodeGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.networkEndpointGroup = networkEndpointGroup - c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { - c.urlParams_.Set("requestId", requestId) + c.nodeGroup = nodeGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +func (c *NodeGroupsGetCall) Fields(s ...googleapi.Field) *NodeGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeGroupsGetCall) IfNoneMatch(entityTag string) *NodeGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +func (c *NodeGroupsGetCall) Context(ctx context.Context) *NodeGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
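// --- Editor's note (not part of the vendored diff) ---
// Per the updated Get doc comment, the NodeGroup "nodes" field should not be
// relied on; nodeGroups.listNodes is the supported way to inspect individual
// nodes. A hedged sketch with the same assumptions as the earlier sketches;
// the Items field of the listNodes response is assumed from the generated
// package.
func describeNodeGroup(ctx context.Context, svc *compute.Service, project, zone, name string) error {
	ng, err := svc.NodeGroups.Get(project, zone, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("node group %s: status=%s size=%d", ng.Name, ng.Status, ng.Size)

	// Enumerate the nodes via listNodes rather than reading ng.Nodes.
	nodes, err := svc.NodeGroups.ListNodes(project, zone, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("node group %s has %d listed nodes", name, len(nodes.Items))
	return nil
}
// --- end editor's note ---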
-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { +func (c *NodeGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.nodeGroups.get" call. +// Exactly one of *NodeGroup or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *NodeGroup.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &NodeGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105781,19 +122378,20 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Detach a list of network endpoints from the specified network endpoint group.", - // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", - // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", + // "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. Use nodeGroups.listNodes instead.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "httpMethod": "GET", + // "id": "compute.nodeGroups.get", // "parameterOrder": [ // "project", // "zone", - // "networkEndpointGroup" + // "nodeGroup" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", + // "nodeGroup": { + // "description": "Name of the node group to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -105804,66 +122402,65 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", - // "request": { - // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" - // }, + // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "NodeGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networkEndpointGroups.get": +// method id "compute.nodeGroups.getIamPolicy": -type NetworkEndpointGroupsGetCall struct { - s *Service - project string - zone string - networkEndpointGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NodeGroupsGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified network endpoint group. Gets a list of -// available network endpoint groups by making a list() request. +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. // -// - networkEndpointGroup: The name of the network endpoint group. It -// should comply with RFC1035. // - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. -func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { - c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. +func (r *NodeGroupsService) GetIamPolicy(project string, zone string, resource string) *NodeGroupsGetIamPolicyCall { + c := &NodeGroupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.networkEndpointGroup = networkEndpointGroup + c.resource = resource + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *NodeGroupsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NodeGroupsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { +func (c *NodeGroupsGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -105873,7 +122470,7 @@ func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndp // getting updates only after the object has changed since the last // request. 
Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { +func (c *NodeGroupsGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeGroupsGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -105881,21 +122478,21 @@ func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEnd // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { +func (c *NodeGroupsGetIamPolicyCall) Context(ctx context.Context) *NodeGroupsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsGetCall) Header() http.Header { +func (c *NodeGroupsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -105908,7 +122505,7 @@ func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -105916,40 +122513,40 @@ func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.get" call. -// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NetworkEndpointGroup.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { +// Do executes the "compute.nodeGroups.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkEndpointGroup{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105961,21 +122558,21 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ } return ret, nil // { - // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", - // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", // "httpMethod": "GET", - // "id": "compute.networkEndpointGroups.get", + // "id": "compute.nodeGroups.getIamPolicy", // "parameterOrder": [ // "project", // "zone", - // "networkEndpointGroup" + // "resource" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" // }, // "project": { // "description": "Project ID for this request.", @@ -105984,16 +122581,24 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ // "required": true, // "type": "string" // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "path": "projects/{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", // "response": { - // "$ref": "NetworkEndpointGroup" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -106004,29 +122609,30 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ } -// method id "compute.networkEndpointGroups.insert": +// method id "compute.nodeGroups.insert": -type NetworkEndpointGroupsInsertCall struct { - s *Service - project string - zone string - networkendpointgroup *NetworkEndpointGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsInsertCall struct { + s *Service + project string + zone string + nodegroup *NodeGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a network endpoint group in the specified project -// using the parameters that are included in the request. +// Insert: Creates a NodeGroup resource in the specified project using +// the data included in the request. // +// - initialNodeCount: Initial count of nodes in the node group. // - project: Project ID for this request. -// - zone: The name of the zone where you want to create the network -// endpoint group. It should comply with RFC1035. -func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { - c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *NodeGroupsService) Insert(project string, zone string, initialNodeCount int64, nodegroup *NodeGroup) *NodeGroupsInsertCall { + c := &NodeGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.networkendpointgroup = networkendpointgroup + c.urlParams_.Set("initialNodeCount", fmt.Sprint(initialNodeCount)) + c.nodegroup = nodegroup return c } @@ -106041,7 +122647,7 @@ func (r *NetworkEndpointGroupsService) Insert(project string, zone string, netwo // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { +func (c *NodeGroupsInsertCall) RequestId(requestId string) *NodeGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -106049,7 +122655,7 @@ func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
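// --- Editor's note (not part of the vendored diff) ---
// The generated Insert signature above takes the required initialNodeCount
// query parameter as a positional int64 argument ahead of the NodeGroup body.
// A hedged sketch with the same assumptions as the earlier sketches; the
// group name, node-template URL, and UUID are placeholders.
func createNodeGroup(ctx context.Context, svc *compute.Service, project, zone string) error {
	ng := &compute.NodeGroup{
		Name:         "my-node-group",
		NodeTemplate: "projects/my-project/regions/us-central1/nodeTemplates/my-template",
	}
	op, err := svc.NodeGroups.Insert(project, zone, 3 /* initialNodeCount */, ng).
		RequestId("f3b9c2e1-6a47-4c05-9f21-5b8d2c6e7a90"). // placeholder UUID for safe retries
		Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("insert operation %s started", op.Name)
	return nil
}
// --- end editor's note ---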
-func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { +func (c *NodeGroupsInsertCall) Fields(s ...googleapi.Field) *NodeGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106057,21 +122663,21 @@ func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkE // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { +func (c *NodeGroupsInsertCall) Context(ctx context.Context) *NodeGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { +func (c *NodeGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -106079,14 +122685,14 @@ func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -106100,31 +122706,31 @@ func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.insert" call. +// Do executes the "compute.nodeGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -106138,15 +122744,23 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", - // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", + // "description": "Creates a NodeGroup resource in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups", // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.insert", + // "id": "compute.nodeGroups.insert", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "initialNodeCount" // ], // "parameters": { + // "initialNodeCount": { + // "description": "Initial count of nodes in the node group.", + // "format": "int32", + // "location": "query", + // "required": true, + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -106160,15 +122774,16 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the network endpoint group. It should comply with RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/networkEndpointGroups", + // "path": "projects/{project}/zones/{zone}/nodeGroups", // "request": { - // "$ref": "NetworkEndpointGroup" + // "$ref": "NodeGroup" // }, // "response": { // "$ref": "Operation" @@ -106181,9 +122796,9 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.networkEndpointGroups.list": +// method id "compute.nodeGroups.list": -type NetworkEndpointGroupsListCall struct { +type NodeGroupsListCall struct { s *Service project string zone string @@ -106193,33 +122808,36 @@ type NetworkEndpointGroupsListCall struct { header_ http.Header } -// List: Retrieves the list of network endpoint groups that are located -// in the specified project and zone. +// List: Retrieves a list of node groups available to the specified +// project. Note: use nodeGroups.listNodes for more details about each +// group. // // - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. -func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { - c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. 
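A minimal usage sketch for the regenerated NodeGroups.Insert call above, assuming this vendored file builds as the google.golang.org/api/compute/v1 package and that Application Default Credentials are available; the project, zone, node-group name, and template URL are placeholders. It also illustrates that errors now routed through gensupport.WrapError still surface to callers as *googleapi.Error.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	ng := &compute.NodeGroup{
		Name:         "example-node-group",                                              // placeholder
		NodeTemplate: "projects/my-project/regions/us-central1/nodeTemplates/example-t", // placeholder
	}
	// initialNodeCount is now a required positional argument of Insert.
	op, err := svc.NodeGroups.Insert("my-project", "us-central1-a", 1, ng).
		RequestId("00000000-0000-4000-8000-000000000001"). // any valid non-zero UUID makes retries idempotent
		Context(ctx).
		Do()
	if err != nil {
		// Do now passes errors through gensupport.WrapError; they still match
		// *googleapi.Error, so existing error handling keeps working.
		var apiErr *googleapi.Error
		if errors.As(err, &apiErr) {
			log.Fatalf("insert failed: HTTP %d: %v", apiErr.Code, apiErr.Message)
		}
		log.Fatal(err)
	}
	fmt.Println("started zonal operation:", op.Name)
}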
+func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCall { + c := &NodeGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -106227,8 +122845,18 @@ func (r *NetworkEndpointGroupsService) List(project string, zone string) *Networ // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. 
The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NodeGroupsListCall) Filter(filter string) *NodeGroupsListCall { c.urlParams_.Set("filter", filter) return c } @@ -106239,7 +122867,7 @@ func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGr // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { +func (c *NodeGroupsListCall) MaxResults(maxResults int64) *NodeGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -106253,7 +122881,7 @@ func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEnd // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { +func (c *NodeGroupsListCall) OrderBy(orderBy string) *NodeGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -106261,7 +122889,7 @@ func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpoint // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { +func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -106270,7 +122898,7 @@ func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndp // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *NetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListCall { +func (c *NodeGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -106278,7 +122906,7 @@ func (c *NetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSucces // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { +func (c *NodeGroupsListCall) Fields(s ...googleapi.Field) *NodeGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106288,7 +122916,7 @@ func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEnd // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
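The expanded filter documentation above now describes two styles: AIP-160 expressions and RE2 regular expressions via the `eq`/`ne` operators. A small sketch of both forms on the regenerated list call, reusing the filter strings from the documentation itself; the project and zone are placeholders and svc is a *compute.Service as in the earlier sketch.

package nodegroupsexample

import compute "google.golang.org/api/compute/v1"

// filterExamples builds two list calls, one per documented filter style.
func filterExamples(svc *compute.Service) (*compute.NodeGroupsListCall, *compute.NodeGroupsListCall) {
	aip := svc.NodeGroups.List("my-project", "us-central1-a").
		Filter(`name != example-instance`) // AIP-160 style expression
	re := svc.NodeGroups.List("my-project", "us-central1-a").
		Filter(`name ne .*instance`) // RE2 regular expression via the ne operator
	return aip, re
}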
-func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { +func (c *NodeGroupsListCall) IfNoneMatch(entityTag string) *NodeGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -106296,21 +122924,21 @@ func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEn // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { +func (c *NodeGroupsListCall) Context(ctx context.Context) *NodeGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsListCall) Header() http.Header { +func (c *NodeGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -106323,7 +122951,7 @@ func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -106337,33 +122965,33 @@ func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.list" call. -// Exactly one of *NetworkEndpointGroupList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.nodeGroups.list" call. +// Exactly one of *NodeGroupList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NodeGroupList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { +func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkEndpointGroupList{ + ret := &NodeGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -106375,17 +123003,17 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo } return ret, nil // { - // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", - // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", + // "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups", // "httpMethod": "GET", - // "id": "compute.networkEndpointGroups.list", + // "id": "compute.nodeGroups.list", // "parameterOrder": [ // "project", // "zone" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -106420,15 +123048,306 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo // "type": "boolean" // }, // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/nodeGroups", + // "response": { + // "$ref": "NodeGroupList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeGroupsListCall) Pages(ctx context.Context, f func(*NodeGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.nodeGroups.listNodes": + +type NodeGroupsListNodesCall struct { + s *Service + project string + zone string + nodeGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ListNodes: Lists nodes in the node group. +// +// - nodeGroup: Name of the NodeGroup resource whose nodes you want to +// list. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
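The new NodeGroupsListCall keeps the generated paging pattern of the call it replaces in this hunk. A sketch, under the same placeholder assumptions as above, of walking every page of node groups with the Pages helper, which follows nextPageToken until it is empty.

package nodegroupsexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listNodeGroups prints every node group in the zone, one page at a time.
func listNodeGroups(ctx context.Context, svc *compute.Service) error {
	call := svc.NodeGroups.List("my-project", "us-central1-a").MaxResults(100)
	return call.Pages(ctx, func(page *compute.NodeGroupList) error {
		for _, ng := range page.Items {
			fmt.Println(ng.Name, ng.Status)
		}
		return nil // returning a non-nil error halts the iteration
	})
}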
+func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup string) *NodeGroupsListNodesCall { + c := &NodeGroupsListNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.nodeGroup = nodeGroup + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NodeGroupsListNodesCall) Filter(filter string) *NodeGroupsListNodesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NodeGroupsListNodesCall) MaxResults(maxResults int64) *NodeGroupsListNodesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. 
By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NodeGroupsListNodesCall) OrderBy(orderBy string) *NodeGroupsListNodesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NodeGroupsListNodesCall) PageToken(pageToken string) *NodeGroupsListNodesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *NodeGroupsListNodesCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsListNodesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeGroupsListNodesCall) Fields(s ...googleapi.Field) *NodeGroupsListNodesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeGroupsListNodesCall) Context(ctx context.Context) *NodeGroupsListNodesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeGroupsListNodesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeGroups.listNodes" call. +// Exactly one of *NodeGroupsListNodes or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NodeGroupsListNodes.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsListNodes, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NodeGroupsListNodes{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists nodes in the node group.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.listNodes", + // "parameterOrder": [ + // "project", + // "zone", + // "nodeGroup" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "nodeGroup": { + // "description": "Name of the NodeGroup resource whose nodes you want to list.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/networkEndpointGroups", + // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", // "response": { - // "$ref": "NetworkEndpointGroupList" + // "$ref": "NodeGroupsListNodes" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -106442,7 +123361,7 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
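The list description above points to compute.nodeGroups.listNodes for per-node detail, and the listNodes call exposes the same Pages helper (its generated form appears just below). A sketch of paging through the nodes of one group; all identifiers are placeholders.

package nodegroupsexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listNodes pages through the nodes of a single node group.
func listNodes(ctx context.Context, svc *compute.Service) error {
	call := svc.NodeGroups.ListNodes("my-project", "us-central1-a", "example-node-group")
	return call.Pages(ctx, func(page *compute.NodeGroupsListNodes) error {
		for _, node := range page.Items {
			fmt.Println(node.Name, node.Status, len(node.Instances))
		}
		return nil
	})
}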
-func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { +func (c *NodeGroupsListNodesCall) Pages(ctx context.Context, f func(*NodeGroupsListNodes) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -106460,110 +123379,53 @@ func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*Netwo } } -// method id "compute.networkEndpointGroups.listNetworkEndpoints": +// method id "compute.nodeGroups.patch": -type NetworkEndpointGroupsListNetworkEndpointsCall struct { - s *Service - project string - zone string - networkEndpointGroup string - networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsPatchCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroup *NodeGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListNetworkEndpoints: Lists the network endpoints in the specified -// network endpoint group. +// Patch: Updates the specified node group. // -// - networkEndpointGroup: The name of the network endpoint group from -// which you want to generate a list of included network endpoints. It -// should comply with RFC1035. +// - nodeGroup: Name of the NodeGroup resource to update. // - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. -func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { - c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *NodeGroupsService) Patch(project string, zone string, nodeGroup string, nodegroup *NodeGroup) *NodeGroupsPatchCall { + c := &NodeGroupsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.networkEndpointGroup = networkEndpointGroup - c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. 
To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("pageToken", pageToken) + c.nodeGroup = nodeGroup + c.nodegroup = nodegroup return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. 
The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsPatchCall) RequestId(requestId string) *NodeGroupsPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *NodeGroupsPatchCall) Fields(s ...googleapi.Field) *NodeGroupsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106571,21 +123433,21 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *NodeGroupsPatchCall) Context(ctx context.Context) *NodeGroupsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { +func (c *NodeGroupsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -106593,57 +123455,55 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (* } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. -// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. 
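A sketch of the compute.nodeGroups.patch call introduced in this hunk, using the retry-safe request ID documented above so a retried request is not executed twice. The identifiers and the updated field value are placeholders.

package nodegroupsexample

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// patchNodeGroup sends a PATCH with only the fields to change in the NodeGroup body.
func patchNodeGroup(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	update := &compute.NodeGroup{Description: "rebalanced by ops"} // fields to change
	return svc.NodeGroups.Patch("my-project", "us-central1-a", "example-node-group", update).
		RequestId("00000000-0000-4000-8000-000000000002"). // any valid non-zero UUID
		Context(ctx).
		Do()
}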
Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { +// Do executes the "compute.nodeGroups.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeGroupsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkEndpointGroupsListNetworkEndpoints{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -106655,45 +123515,23 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists the network endpoints in the specified network endpoint group.", - // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", - // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.listNetworkEndpoints", + // "description": "Updates the specified node group.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "httpMethod": "PATCH", + // "id": "compute.nodeGroups.patch", // "parameterOrder": [ // "project", // "zone", - // "networkEndpointGroup" + // "nodeGroup" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", + // "nodeGroup": { + // "description": "Name of the NodeGroup resource to update.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -106701,87 +123539,66 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", // "request": { - // "$ref": "NetworkEndpointGroupsListEndpointsRequest" + // "$ref": "NodeGroup" // }, // "response": { - // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networkEndpointGroups.testIamPermissions": +// method id "compute.nodeGroups.setIamPolicy": -type NetworkEndpointGroupsTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. // // - project: Project ID for this request. // - resource: Name or id of the resource for this request. // - zone: The name of the zone for this request. -func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { - c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NodeGroupsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *NodeGroupsSetIamPolicyCall { + c := &NodeGroupsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zonesetpolicyrequest = zonesetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
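The getIamPolicy and setIamPolicy methods added for node groups in this hunk pair up in the usual read-modify-write flow. A sketch of that flow; the GetIamPolicy constructor itself is not shown in this part of the patch but is assumed to follow the standard generated pattern for the compute.nodeGroups.getIamPolicy metadata shown earlier, and the role, member, and identifiers are placeholders.

package nodegroupsexample

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// grantNodeGroupViewer reads the current IAM policy, appends a binding, and
// writes it back through ZoneSetPolicyRequest.
func grantNodeGroupViewer(ctx context.Context, svc *compute.Service) (*compute.Policy, error) {
	policy, err := svc.NodeGroups.GetIamPolicy("my-project", "us-central1-a", "example-node-group").
		Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	policy.Bindings = append(policy.Bindings, &compute.Binding{
		Role:    "roles/compute.viewer",
		Members: []string{"user:alice@example.com"},
	})
	return svc.NodeGroups.SetIamPolicy("my-project", "us-central1-a", "example-node-group",
		&compute.ZoneSetPolicyRequest{Policy: policy}).Context(ctx).Do()
}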
-func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { +func (c *NodeGroupsSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106789,21 +123606,21 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Fiel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { +func (c *NodeGroupsSetIamPolicyCall) Context(ctx context.Context) *NodeGroupsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { +func (c *NodeGroupsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -106811,14 +123628,14 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*ht } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -106833,33 +123650,33 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*ht return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.nodeGroups.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TestPermissionsResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -106871,10 +123688,10 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.testIamPermissions", + // "id": "compute.nodeGroups.setIamPolicy", // "parameterOrder": [ // "project", // "zone", @@ -106903,43 +123720,45 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", + // "path": "projects/{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "ZoneSetPolicyRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.networks.addPeering": +// method id "compute.nodeGroups.setNodeTemplate": -type NetworksAddPeeringCall struct { - s *Service - project string - network string - networksaddpeeringrequest *NetworksAddPeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsSetNodeTemplateCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddPeering: Adds a peering to the specified network. +// SetNodeTemplate: Updates the node template of the node group. // -// - network: Name of the network resource to add peering to. +// - nodeGroup: Name of the NodeGroup resource to update. // - project: Project ID for this request. -func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { - c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. 
+func (r *NodeGroupsService) SetNodeTemplate(project string, zone string, nodeGroup string, nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest) *NodeGroupsSetNodeTemplateCall { + c := &NodeGroupsSetNodeTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksaddpeeringrequest = networksaddpeeringrequest + c.zone = zone + c.nodeGroup = nodeGroup + c.nodegroupssetnodetemplaterequest = nodegroupssetnodetemplaterequest return c } @@ -106954,7 +123773,7 @@ func (r *NetworksService) AddPeering(project string, network string, networksadd // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeeringCall { +func (c *NodeGroupsSetNodeTemplateCall) RequestId(requestId string) *NodeGroupsSetNodeTemplateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -106962,7 +123781,7 @@ func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeering // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeeringCall { +func (c *NodeGroupsSetNodeTemplateCall) Fields(s ...googleapi.Field) *NodeGroupsSetNodeTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106970,21 +123789,21 @@ func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeerin // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksAddPeeringCall) Context(ctx context.Context) *NetworksAddPeeringCall { +func (c *NodeGroupsSetNodeTemplateCall) Context(ctx context.Context) *NodeGroupsSetNodeTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksAddPeeringCall) Header() http.Header { +func (c *NodeGroupsSetNodeTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -106992,14 +123811,14 @@ func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksaddpeeringrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupssetnodetemplaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}/addPeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -107007,37 +123826,38 @@ func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.addPeering" call. +// Do executes the "compute.nodeGroups.setNodeTemplate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -107051,17 +123871,18 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Adds a peering to the specified network.", - // "flatPath": "projects/{project}/global/networks/{network}/addPeering", + // "description": "Updates the node template of the node group.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", // "httpMethod": "POST", - // "id": "compute.networks.addPeering", + // "id": "compute.nodeGroups.setNodeTemplate", // "parameterOrder": [ // "project", - // "network" + // "zone", + // "nodeGroup" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource to add peering to.", + // "nodeGroup": { + // "description": "Name of the NodeGroup resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -107078,11 +123899,18 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/networks/{network}/addPeering", + // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", // "request": { - // "$ref": "NetworksAddPeeringRequest" + // "$ref": "NodeGroupsSetNodeTemplateRequest" // }, // "response": { // "$ref": "Operation" @@ -107095,48 +123923,38 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.networks.delete": +// method id "compute.nodeGroups.testIamPermissions": -type NetworksDeleteCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified network. +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. // -// - network: Name of the network to delete. // - project: Project ID for this request. -func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { - c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. +func (r *NodeGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeGroupsTestIamPermissionsCall { + c := &NodeGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { +func (c *NodeGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107144,21 +123962,21 @@ func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { +func (c *NodeGroupsTestIamPermissionsCall) Context(ctx context.Context) *NodeGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksDeleteCall) Header() http.Header { +func (c *NodeGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -107166,49 +123984,55 @@ func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -107220,75 +124044,173 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified network.", - // "flatPath": "projects/{project}/global/networks/{network}", - // "httpMethod": "DELETE", - // "id": "compute.networks.delete", + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.testIamPermissions", // "parameterOrder": [ // "project", - // "network" + // "zone", + // "resource" // ], // "parameters": { - // "network": { - // "description": "Name of the network to delete.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/networks/{network}", + // "path": "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networks.get": +// method id "compute.nodeTemplates.aggregatedList": -type NetworksGetCall struct { +type NodeTemplatesAggregatedListCall struct { s *Service project string - network string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified network. Gets a list of available networks -// by making a list() request. +// AggregatedList: Retrieves an aggregated list of node templates. // -// - network: Name of the network to return. // - project: Project ID for this request. -func (r *NetworksService) Get(project string, network string) *NetworksGetCall { - c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggregatedListCall { + c := &NodeTemplatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. 
For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NodeTemplatesAggregatedListCall) Filter(filter string) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *NodeTemplatesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NodeTemplatesAggregatedListCall) MaxResults(maxResults int64) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NodeTemplatesAggregatedListCall) OrderBy(orderBy string) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
+func (c *NodeTemplatesAggregatedListCall) PageToken(pageToken string) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *NodeTemplatesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { +func (c *NodeTemplatesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTemplatesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107298,7 +124220,7 @@ func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { +func (c *NodeTemplatesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTemplatesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -107306,21 +124228,21 @@ func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { +func (c *NodeTemplatesAggregatedListCall) Context(ctx context.Context) *NodeTemplatesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksGetCall) Header() http.Header { +func (c *NodeTemplatesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -107333,7 +124255,7 @@ func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/nodeTemplates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -107342,38 +124264,37 @@ func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.get" call. -// Exactly one of *Network or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Network.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { +// Do executes the "compute.nodeTemplates.aggregatedList" call. +// Exactly one of *NodeTemplateAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NodeTemplateAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Network{ + ret := &NodeTemplateAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -107385,17 +124306,230 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { } return ret, nil // { - // "description": "Returns the specified network. Gets a list of available networks by making a list() request.", - // "flatPath": "projects/{project}/global/networks/{network}", + // "description": "Retrieves an aggregated list of node templates.", + // "flatPath": "projects/{project}/aggregated/nodeTemplates", // "httpMethod": "GET", - // "id": "compute.networks.get", + // "id": "compute.nodeTemplates.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/nodeTemplates", + // "response": { + // "$ref": "NodeTemplateAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeTemplatesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTemplateAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.nodeTemplates.delete": + +type NodeTemplatesDeleteCall struct { + s *Service + project string + region string + nodeTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified NodeTemplate resource. +// +// - nodeTemplate: Name of the NodeTemplate resource to delete. +// - project: Project ID for this request. +// - region: The name of the region for this request. +func (r *NodeTemplatesService) Delete(project string, region string, nodeTemplate string) *NodeTemplatesDeleteCall { + c := &NodeTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.nodeTemplate = nodeTemplate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NodeTemplatesDeleteCall) RequestId(requestId string) *NodeTemplatesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeTemplatesDeleteCall) Fields(s ...googleapi.Field) *NodeTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeTemplatesDeleteCall) Context(ctx context.Context) *NodeTemplatesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *NodeTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "nodeTemplate": c.nodeTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeTemplates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified NodeTemplate resource.", + // "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + // "httpMethod": "DELETE", + // "id": "compute.nodeTemplates.delete", // "parameterOrder": [ // "project", - // "network" + // "region", + // "nodeTemplate" // ], // "parameters": { - // "network": { - // "description": "Name of the network to return.", + // "nodeTemplate": { + // "description": "Name of the NodeTemplate resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -107407,49 +124541,63 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/global/networks/{network}", + // "path": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", // "response": { - // "$ref": "Network" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.networks.getEffectiveFirewalls": +// method id "compute.nodeTemplates.get": -type NetworksGetEffectiveFirewallsCall struct { +type NodeTemplatesGetCall struct { s *Service project string - network string + region string + nodeTemplate string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetEffectiveFirewalls: Returns the effective firewalls on a given -// network. +// Get: Returns the specified node template. Gets a list of available +// node templates by making a list() request. // -// - network: Name of the network for this request. +// - nodeTemplate: Name of the node template to return. // - project: Project ID for this request. -func (r *NetworksService) GetEffectiveFirewalls(project string, network string) *NetworksGetEffectiveFirewallsCall { - c := &NetworksGetEffectiveFirewallsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +func (r *NodeTemplatesService) Get(project string, region string, nodeTemplate string) *NodeTemplatesGetCall { + c := &NodeTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.region = region + c.nodeTemplate = nodeTemplate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksGetEffectiveFirewallsCall) Fields(s ...googleapi.Field) *NetworksGetEffectiveFirewallsCall { +func (c *NodeTemplatesGetCall) Fields(s ...googleapi.Field) *NodeTemplatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107459,7 +124607,7 @@ func (c *NetworksGetEffectiveFirewallsCall) Fields(s ...googleapi.Field) *Networ // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworksGetEffectiveFirewallsCall) IfNoneMatch(entityTag string) *NetworksGetEffectiveFirewallsCall { +func (c *NodeTemplatesGetCall) IfNoneMatch(entityTag string) *NodeTemplatesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -107467,21 +124615,21 @@ func (c *NetworksGetEffectiveFirewallsCall) IfNoneMatch(entityTag string) *Netwo // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksGetEffectiveFirewallsCall) Context(ctx context.Context) *NetworksGetEffectiveFirewallsCall { +func (c *NodeTemplatesGetCall) Context(ctx context.Context) *NodeTemplatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksGetEffectiveFirewallsCall) Header() http.Header { +func (c *NodeTemplatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksGetEffectiveFirewallsCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -107494,7 +124642,7 @@ func (c *NetworksGetEffectiveFirewallsCall) doRequest(alt string) (*http.Respons var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}/getEffectiveFirewalls") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -107502,40 +124650,40 @@ func (c *NetworksGetEffectiveFirewallsCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "region": c.region, + "nodeTemplate": c.nodeTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.getEffectiveFirewalls" call. -// Exactly one of *NetworksGetEffectiveFirewallsResponse or error will -// be non-nil. Any non-2xx status code is an error. Response headers are -// in either -// *NetworksGetEffectiveFirewallsResponse.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworksGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*NetworksGetEffectiveFirewallsResponse, error) { +// Do executes the "compute.nodeTemplates.get" call. +// Exactly one of *NodeTemplate or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NodeTemplate.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworksGetEffectiveFirewallsResponse{ + ret := &NodeTemplate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -107547,19 +124695,20 @@ func (c *NetworksGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*N } return ret, nil // { - // "description": "Returns the effective firewalls on a given network.", - // "flatPath": "projects/{project}/global/networks/{network}/getEffectiveFirewalls", + // "description": "Returns the specified node template. Gets a list of available node templates by making a list() request.", + // "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", // "httpMethod": "GET", - // "id": "compute.networks.getEffectiveFirewalls", + // "id": "compute.nodeTemplates.get", // "parameterOrder": [ // "project", - // "network" + // "region", + // "nodeTemplate" // ], // "parameters": { - // "network": { - // "description": "Name of the network for this request.", + // "nodeTemplate": { + // "description": "Name of the node template to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -107569,11 +124718,18 @@ func (c *NetworksGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*N // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/global/networks/{network}/getEffectiveFirewalls", + // "path": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", // "response": { - // "$ref": "NetworksGetEffectiveFirewallsResponse" + // "$ref": "NodeTemplate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -107584,124 +124740,130 @@ func (c *NetworksGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*N } -// method id "compute.networks.insert": +// method id "compute.nodeTemplates.getIamPolicy": -type NetworksInsertCall struct { - s *Service - project string - network *Network - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTemplatesGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a network in the specified project using the data -// included in the request. +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. // // - project: Project ID for this request. 
-func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { - c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *NodeTemplatesService) GetIamPolicy(project string, region string, resource string) *NodeTemplatesGetIamPolicyCall { + c := &NodeTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.region = region + c.resource = resource return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { - c.urlParams_.Set("requestId", requestId) +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *NodeTemplatesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NodeTemplatesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { +func (c *NodeTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeTemplatesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { +func (c *NodeTemplatesGetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksInsertCall) Header() http.Header { +func (c *NodeTemplatesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeTemplates.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -107713,14 +124875,22 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Creates a network in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/global/networks", - // "httpMethod": "POST", - // "id": "compute.networks.insert", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.nodeTemplates.getIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -107728,205 +124898,156 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/networks", - // "request": { - // "$ref": "Network" - // }, + // "path": "projects/{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networks.list": +// method id "compute.nodeTemplates.insert": -type NetworksListCall struct { +type NodeTemplatesInsertCall struct { s *Service project string + region string + nodetemplate *NodeTemplate urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of networks available to the specified -// project. +// Insert: Creates a NodeTemplate resource in the specified project +// using the data included in the request. // // - project: Project ID for this request. -func (r *NetworksService) List(project string) *NetworksListCall { - c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +func (r *NodeTemplatesService) Insert(project string, region string, nodetemplate *NodeTemplate) *NodeTemplatesInsertCall { + c := &NodeTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.nodetemplate = nodetemplate return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. 
For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *NetworksListCall) Filter(filter string) *NetworksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *NetworksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworksListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NodeTemplatesInsertCall) RequestId(requestId string) *NodeTemplatesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { +func (c *NodeTemplatesInsertCall) Fields(s ...googleapi.Field) *NodeTemplatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { +func (c *NodeTemplatesInsertCall) Context(ctx context.Context) *NodeTemplatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksListCall) Header() http.Header { +func (c *NodeTemplatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodetemplate) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.list" call. -// Exactly one of *NetworkList or error will be non-nil. Any non-2xx +// Do executes the "compute.nodeTemplates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *NetworkList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { +func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -107938,37 +125059,15 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error } return ret, nil // { - // "description": "Retrieves the list of networks available to the specified project.", - // "flatPath": "projects/{project}/global/networks", - // "httpMethod": "GET", - // "id": "compute.networks.list", + // "description": "Creates a NodeTemplate resource in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/nodeTemplates", + // "httpMethod": "POST", + // "id": "compute.nodeTemplates.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. 
You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -107976,95 +125075,75 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // } // }, - // "path": "projects/{project}/global/networks", + // "path": "projects/{project}/regions/{region}/nodeTemplates", + // "request": { + // "$ref": "NodeTemplate" + // }, // "response": { - // "$ref": "NetworkList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networks.listPeeringRoutes": +// method id "compute.nodeTemplates.list": -type NetworksListPeeringRoutesCall struct { +type NodeTemplatesListCall struct { s *Service project string - network string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// ListPeeringRoutes: Lists the peering routes exchanged over peering -// connection. +// List: Retrieves a list of node templates available to the specified +// project. // -// - network: Name of the network for this request. // - project: Project ID for this request. -func (r *NetworksService) ListPeeringRoutes(project string, network string) *NetworksListPeeringRoutesCall { - c := &NetworksListPeeringRoutesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +func (r *NodeTemplatesService) List(project string, region string) *NodeTemplatesListCall { + c := &NodeTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - return c -} - -// Direction sets the optional parameter "direction": The direction of -// the exchanged routes. -// -// Possible values: -// "INCOMING" - For routes exported from peer network. -// "OUTGOING" - For routes exported from local network. -func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("direction", direction) + c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. 
The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -108072,8 +125151,18 @@ func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksLis // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *NetworksListPeeringRoutesCall) Filter(filter string) *NetworksListPeeringRoutesCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NodeTemplatesListCall) Filter(filter string) *NodeTemplatesListCall { c.urlParams_.Set("filter", filter) return c } @@ -108084,7 +125173,7 @@ func (c *NetworksListPeeringRoutesCall) Filter(filter string) *NetworksListPeeri // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *NetworksListPeeringRoutesCall) MaxResults(maxResults int64) *NetworksListPeeringRoutesCall { +func (c *NodeTemplatesListCall) MaxResults(maxResults int64) *NodeTemplatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -108098,7 +125187,7 @@ func (c *NetworksListPeeringRoutesCall) MaxResults(maxResults int64) *NetworksLi // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *NetworksListPeeringRoutesCall) OrderBy(orderBy string) *NetworksListPeeringRoutesCall { +func (c *NodeTemplatesListCall) OrderBy(orderBy string) *NodeTemplatesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -108106,31 +125195,16 @@ func (c *NetworksListPeeringRoutesCall) OrderBy(orderBy string) *NetworksListPee // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
-func (c *NetworksListPeeringRoutesCall) PageToken(pageToken string) *NetworksListPeeringRoutesCall { +func (c *NodeTemplatesListCall) PageToken(pageToken string) *NodeTemplatesListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// PeeringName sets the optional parameter "peeringName": The response -// will show routes exchanged over the given peering connection. -func (c *NetworksListPeeringRoutesCall) PeeringName(peeringName string) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("peeringName", peeringName) - return c -} - -// Region sets the optional parameter "region": The region of the -// request. The response will include all subnet routes, static routes -// and dynamic routes in the region. -func (c *NetworksListPeeringRoutesCall) Region(region string) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("region", region) - return c -} - // ReturnPartialSuccess sets the optional parameter // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *NetworksListPeeringRoutesCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworksListPeeringRoutesCall { +func (c *NodeTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTemplatesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -108138,7 +125212,7 @@ func (c *NetworksListPeeringRoutesCall) ReturnPartialSuccess(returnPartialSucces // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksListPeeringRoutesCall) Fields(s ...googleapi.Field) *NetworksListPeeringRoutesCall { +func (c *NodeTemplatesListCall) Fields(s ...googleapi.Field) *NodeTemplatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108148,7 +125222,7 @@ func (c *NetworksListPeeringRoutesCall) Fields(s ...googleapi.Field) *NetworksLi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworksListPeeringRoutesCall) IfNoneMatch(entityTag string) *NetworksListPeeringRoutesCall { +func (c *NodeTemplatesListCall) IfNoneMatch(entityTag string) *NodeTemplatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -108156,21 +125230,21 @@ func (c *NetworksListPeeringRoutesCall) IfNoneMatch(entityTag string) *NetworksL // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksListPeeringRoutesCall) Context(ctx context.Context) *NetworksListPeeringRoutesCall { +func (c *NodeTemplatesListCall) Context(ctx context.Context) *NodeTemplatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksListPeeringRoutesCall) Header() http.Header { +func (c *NodeTemplatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -108183,7 +125257,7 @@ func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}/listPeeringRoutes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -108192,38 +125266,38 @@ func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, e req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.listPeeringRoutes" call. -// Exactly one of *ExchangedPeeringRoutesList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *ExchangedPeeringRoutesList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.nodeTemplates.list" call. +// Exactly one of *NodeTemplateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NodeTemplateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*ExchangedPeeringRoutesList, error) { +func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ExchangedPeeringRoutesList{ + ret := &NodeTemplateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -108235,30 +125309,17 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha } return ret, nil // { - // "description": "Lists the peering routes exchanged over peering connection.", - // "flatPath": "projects/{project}/global/networks/{network}/listPeeringRoutes", + // "description": "Retrieves a list of node templates available to the specified project.", + // "flatPath": "projects/{project}/regions/{region}/nodeTemplates", // "httpMethod": "GET", - // "id": "compute.networks.listPeeringRoutes", + // "id": "compute.nodeTemplates.list", // "parameterOrder": [ // "project", - // "network" + // "region" // ], // "parameters": { - // "direction": { - // "description": "The direction of the exchanged routes.", - // "enum": [ - // "INCOMING", - // "OUTGOING" - // ], - // "enumDescriptions": [ - // "For routes exported from peer network.", - // "For routes exported from local network." - // ], - // "location": "query", - // "type": "string" - // }, // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -108270,13 +125331,6 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha // "minimum": "0", // "type": "integer" // }, - // "network": { - // "description": "Name of the network for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", @@ -108287,11 +125341,6 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha // "location": "query", // "type": "string" // }, - // "peeringName": { - // "description": "The response will show routes exchanged over the given peering connection.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -108300,8 +125349,10 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha // "type": "string" // }, // "region": { - // "description": "The region of the request. 
The response will include all subnet routes, static routes and dynamic routes in the region.", - // "location": "query", + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "returnPartialSuccess": { @@ -108310,9 +125361,9 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha // "type": "boolean" // } // }, - // "path": "projects/{project}/global/networks/{network}/listPeeringRoutes", + // "path": "projects/{project}/regions/{region}/nodeTemplates", // "response": { - // "$ref": "ExchangedPeeringRoutesList" + // "$ref": "NodeTemplateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -108326,7 +125377,7 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NetworksListPeeringRoutesCall) Pages(ctx context.Context, f func(*ExchangedPeeringRoutesList) error) error { +func (c *NodeTemplatesListCall) Pages(ctx context.Context, f func(*NodeTemplateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -108344,52 +125395,38 @@ func (c *NetworksListPeeringRoutesCall) Pages(ctx context.Context, f func(*Excha } } -// method id "compute.networks.patch": +// method id "compute.nodeTemplates.setIamPolicy": -type NetworksPatchCall struct { - s *Service - project string - network string - network2 *Network - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTemplatesSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified network with the data included in the -// request. Only the following fields can be modified: -// routingConfig.routingMode. +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. // -// - network: Name of the network to update. // - project: Project ID for this request. -func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { - c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *NodeTemplatesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NodeTemplatesSetIamPolicyCall { + c := &NodeTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.network2 = network2 - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. 
If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { +func (c *NodeTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108397,21 +125434,21 @@ func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksPatchCall) Context(ctx context.Context) *NetworksPatchCall { +func (c *NodeTemplatesSetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksPatchCall) Header() http.Header { +func (c *NodeTemplatesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -108419,54 +125456,55 @@ func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeTemplates.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -108478,41 +125516,44 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", - // "flatPath": "projects/{project}/global/networks/{network}", - // "httpMethod": "PATCH", - // "id": "compute.networks.patch", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", + // "httpMethod": "POST", + // "id": "compute.nodeTemplates.setIamPolicy", // "parameterOrder": [ // "project", - // "network" + // "region", + // "resource" // ], // "parameters": { - // "network": { - // "description": "Name of the network to update.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/networks/{network}", + // "path": "projects/{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", // "request": { - // "$ref": "Network" + // "$ref": "RegionSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -108522,50 +125563,38 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.networks.removePeering": +// method id "compute.nodeTemplates.testIamPermissions": -type NetworksRemovePeeringCall struct { - s *Service - project string - network string - networksremovepeeringrequest *NetworksRemovePeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTemplatesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemovePeering: Removes a peering from the specified network. +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. // -// - network: Name of the network resource to remove peering from. // - project: Project ID for this request. -func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { - c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *NodeTemplatesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeTemplatesTestIamPermissionsCall { + c := &NodeTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksremovepeeringrequest = networksremovepeeringrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemovePeeringCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemovePeeringCall { +func (c *NodeTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeTemplatesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108573,21 +125602,21 @@ func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemove // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksRemovePeeringCall) Context(ctx context.Context) *NetworksRemovePeeringCall { +func (c *NodeTemplatesTestIamPermissionsCall) Context(ctx context.Context) *NodeTemplatesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksRemovePeeringCall) Header() http.Header { +func (c *NodeTemplatesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -108595,14 +125624,14 @@ func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksremovepeeringrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}/removePeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -108610,39 +125639,40 @@ func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.removePeering" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeTemplates.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -108654,165 +125684,257 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Removes a peering from the specified network.", - // "flatPath": "projects/{project}/global/networks/{network}/removePeering", + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", // "httpMethod": "POST", - // "id": "compute.networks.removePeering", + // "id": "compute.nodeTemplates.testIamPermissions", // "parameterOrder": [ // "project", - // "network" + // "region", + // "resource" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource to remove peering from.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/networks/{network}/removePeering", + // "path": "projects/{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", // "request": { - // "$ref": "NetworksRemovePeeringRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networks.switchToCustomMode": +// method id "compute.nodeTypes.aggregatedList": -type NetworksSwitchToCustomModeCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SwitchToCustomMode: Switches the network mode from auto subnet mode -// to custom subnet mode. +// AggregatedList: Retrieves an aggregated list of node types. // -// - network: Name of the network to be updated. // - project: Project ID for this request. -func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { - c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedListCall { + c := &NodeTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSwitchToCustomModeCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. 
The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NodeTypesAggregatedListCall) Filter(filter string) *NodeTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *NodeTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NodeTypesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NodeTypesAggregatedListCall) MaxResults(maxResults int64) *NodeTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. 
You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NodeTypesAggregatedListCall) OrderBy(orderBy string) *NodeTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *NodeTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { +func (c *NodeTypesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTypesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { +func (c *NodeTypesAggregatedListCall) Context(ctx context.Context) *NodeTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksSwitchToCustomModeCall) Header() http.Header { +func (c *NodeTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}/switchToCustomMode") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/nodeTypes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.switchToCustomMode" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeTypes.aggregatedList" call. +// Exactly one of *NodeTypeAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NodeTypeAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &NodeTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -108824,20 +125946,40 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", - // "flatPath": "projects/{project}/global/networks/{network}/switchToCustomMode", - // "httpMethod": "POST", - // "id": "compute.networks.switchToCustomMode", + // "description": "Retrieves an aggregated list of node types.", + // "flatPath": "projects/{project}/aggregated/nodeTypes", + // "httpMethod": "GET", + // "id": "compute.nodeTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { - // "network": { - // "description": "Name of the network to be updated.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -108847,149 +125989,163 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // } // }, - // "path": "projects/{project}/global/networks/{network}/switchToCustomMode", + // "path": "projects/{project}/aggregated/nodeTypes", // "response": { - // "$ref": "Operation" + // "$ref": "NodeTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networks.updatePeering": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeTypesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NetworksUpdatePeeringCall struct { - s *Service - project string - network string - networksupdatepeeringrequest *NetworksUpdatePeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.nodeTypes.get": + +type NodeTypesGetCall struct { + s *Service + project string + zone string + nodeType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// UpdatePeering: Updates the specified network peering with the data -// included in the request. You can only modify the -// NetworkPeering.export_custom_routes field and the -// NetworkPeering.import_custom_routes field. +// Get: Returns the specified node type. Gets a list of available node +// types by making a list() request. // -// - network: Name of the network resource which the updated peering is -// belonging to. +// - nodeType: Name of the node type to return. // - project: Project ID for this request. -func (r *NetworksService) UpdatePeering(project string, network string, networksupdatepeeringrequest *NetworksUpdatePeeringRequest) *NetworksUpdatePeeringCall { - c := &NetworksUpdatePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - zone: The name of the zone for this request. +func (r *NodeTypesService) Get(project string, zone string, nodeType string) *NodeTypesGetCall { + c := &NodeTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksupdatepeeringrequest = networksupdatepeeringrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *NetworksUpdatePeeringCall) RequestId(requestId string) *NetworksUpdatePeeringCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.nodeType = nodeType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksUpdatePeeringCall) Fields(s ...googleapi.Field) *NetworksUpdatePeeringCall { +func (c *NodeTypesGetCall) Fields(s ...googleapi.Field) *NodeTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTypesGetCall) IfNoneMatch(entityTag string) *NodeTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksUpdatePeeringCall) Context(ctx context.Context) *NetworksUpdatePeeringCall { +func (c *NodeTypesGetCall) Context(ctx context.Context) *NodeTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksUpdatePeeringCall) Header() http.Header { +func (c *NodeTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksupdatepeeringrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/networks/{network}/updatePeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeTypes/{nodeType}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "zone": c.zone, + "nodeType": c.nodeType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.updatePeering" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.nodeTypes.get" call. +// Exactly one of *NodeType or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *NodeType.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &NodeType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -109001,17 +126157,18 @@ func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the specified network peering with the data included in the request. You can only modify the NetworkPeering.export_custom_routes field and the NetworkPeering.import_custom_routes field.", - // "flatPath": "projects/{project}/global/networks/{network}/updatePeering", - // "httpMethod": "PATCH", - // "id": "compute.networks.updatePeering", + // "description": "Returns the specified node type. Gets a list of available node types by making a list() request.", + // "flatPath": "projects/{project}/zones/{zone}/nodeTypes/{nodeType}", + // "httpMethod": "GET", + // "id": "compute.nodeTypes.get", // "parameterOrder": [ // "project", - // "network" + // "zone", + // "nodeType" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource which the updated peering is belonging to.", + // "nodeType": { + // "description": "Name of the node type to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -109024,152 +126181,222 @@ func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/networks/{network}/updatePeering", - // "request": { - // "$ref": "NetworksUpdatePeeringRequest" - // }, + // "path": "projects/{project}/zones/{zone}/nodeTypes/{nodeType}", // "response": { - // "$ref": "Operation" + // "$ref": "NodeType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeGroups.addNodes": +// method id "compute.nodeTypes.list": -type NodeGroupsAddNodesCall struct { - s *Service - project string - zone string - nodeGroup string - nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AddNodes: Adds specified number of nodes to the node group. +// List: Retrieves a list of node types available to the specified +// project. // -// - nodeGroup: Name of the NodeGroup resource. // - project: Project ID for this request. // - zone: The name of the zone for this request. -func (r *NodeGroupsService) AddNodes(project string, zone string, nodeGroup string, nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest) *NodeGroupsAddNodesCall { - c := &NodeGroupsAddNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall { + c := &NodeTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.nodeGroup = nodeGroup - c.nodegroupsaddnodesrequest = nodegroupsaddnodesrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsAddNodesCall) RequestId(requestId string) *NodeGroupsAddNodesCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. 
The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NodeTypesListCall) Filter(filter string) *NodeTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NodeTypesListCall) MaxResults(maxResults int64) *NodeTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NodeTypesListCall) OrderBy(orderBy string) *NodeTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *NodeTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsAddNodesCall) Fields(s ...googleapi.Field) *NodeGroupsAddNodesCall { +func (c *NodeTypesListCall) Fields(s ...googleapi.Field) *NodeTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTypesListCall) IfNoneMatch(entityTag string) *NodeTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsAddNodesCall) Context(ctx context.Context) *NodeGroupsAddNodesCall { +func (c *NodeTypesListCall) Context(ctx context.Context) *NodeTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsAddNodesCall) Header() http.Header { +func (c *NodeTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsaddnodesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeTypes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.addNodes" call. 
-// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.nodeTypes.list" call. +// Exactly one of *NodeTypeList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *NodeTypeList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &NodeTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -109181,21 +126408,36 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Adds specified number of nodes to the node group.", - // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.addNodes", + // "description": "Retrieves a list of node types available to the specified project.", + // "flatPath": "projects/{project}/zones/{zone}/nodeTypes", + // "httpMethod": "GET", + // "id": "compute.nodeTypes.list", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "zone" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -109205,10 +126447,10 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // }, // "zone": { // "description": "The name of the zone for this request.", @@ -109218,24 +126460,43 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", - // "request": { - // "$ref": "NodeGroupsAddNodesRequest" - // }, + // "path": "projects/{project}/zones/{zone}/nodeTypes", // "response": { - // "$ref": "Operation" + // "$ref": "NodeTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeGroups.aggregatedList": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeTypesListCall) Pages(ctx context.Context, f func(*NodeTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NodeGroupsAggregatedListCall struct { +// method id "compute.packetMirrorings.aggregatedList": + +type PacketMirroringsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -109244,30 +126505,32 @@ type NodeGroupsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of node groups. Note: -// use nodeGroups.listNodes for more details about each group. +// AggregatedList: Retrieves an aggregated list of packetMirrorings. // // - project: Project ID for this request. -func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregatedListCall { - c := &NodeGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *PacketMirroringsService) AggregatedList(project string) *PacketMirroringsAggregatedListCall { + c := &PacketMirroringsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -109275,8 +126538,18 @@ func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregated // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *NodeGroupsAggregatedListCall) Filter(filter string) *NodeGroupsAggregatedListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *PacketMirroringsAggregatedListCall) Filter(filter string) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -109289,7 +126562,7 @@ func (c *NodeGroupsAggregatedListCall) Filter(filter string) *NodeGroupsAggregat // response. For resource types which predate this field, if this flag // is omitted or false, only scopes of the scope types where the // resource type is expected to be found will be included. -func (c *NodeGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NodeGroupsAggregatedListCall { +func (c *PacketMirroringsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) return c } @@ -109300,7 +126573,7 @@ func (c *NodeGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) * // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *NodeGroupsAggregatedListCall) MaxResults(maxResults int64) *NodeGroupsAggregatedListCall { +func (c *PacketMirroringsAggregatedListCall) MaxResults(maxResults int64) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -109314,7 +126587,7 @@ func (c *NodeGroupsAggregatedListCall) MaxResults(maxResults int64) *NodeGroupsA // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *NodeGroupsAggregatedListCall) OrderBy(orderBy string) *NodeGroupsAggregatedListCall { +func (c *PacketMirroringsAggregatedListCall) OrderBy(orderBy string) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -109322,7 +126595,7 @@ func (c *NodeGroupsAggregatedListCall) OrderBy(orderBy string) *NodeGroupsAggreg // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAggregatedListCall { +func (c *PacketMirroringsAggregatedListCall) PageToken(pageToken string) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -109331,7 +126604,7 @@ func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAg // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *NodeGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsAggregatedListCall { +func (c *PacketMirroringsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -109339,7 +126612,7 @@ func (c *NodeGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NodeGroupsAggregatedListCall { +func (c *PacketMirroringsAggregatedListCall) Fields(s ...googleapi.Field) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -109349,7 +126622,7 @@ func (c *NodeGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NodeGroupsA // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NodeGroupsAggregatedListCall { +func (c *PacketMirroringsAggregatedListCall) IfNoneMatch(entityTag string) *PacketMirroringsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -109357,21 +126630,21 @@ func (c *NodeGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NodeGroups // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
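
A minimal sketch of how the aggregated-list builder methods in this hunk (Filter, OrderBy, MaxResults, Context, Do) chain together. It assumes the file belongs to the compute/v1 client at google.golang.org/api/compute/v1 and that Application Default Credentials are available; the project ID and the filter value are placeholders, not values from the patch.

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Chain the optional query parameters documented above onto the call builder.
	resp, err := svc.PacketMirrorings.AggregatedList("my-project").
		Filter(`name != example-mirroring`). // AIP-160 style expression
		OrderBy("creationTimestamp desc").   // newest resources first
		MaxResults(100).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	// HTTPStatusCode is promoted from the embedded googleapi.ServerResponse.
	fmt.Println("HTTP status:", resp.HTTPStatusCode)
}
```
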
-func (c *NodeGroupsAggregatedListCall) Context(ctx context.Context) *NodeGroupsAggregatedListCall { +func (c *PacketMirroringsAggregatedListCall) Context(ctx context.Context) *PacketMirroringsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsAggregatedListCall) Header() http.Header { +func (c *PacketMirroringsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *PacketMirroringsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -109384,7 +126657,7 @@ func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/nodeGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/packetMirrorings") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -109397,33 +126670,33 @@ func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.aggregatedList" call. -// Exactly one of *NodeGroupAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NodeGroupAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.packetMirrorings.aggregatedList" call. +// Exactly one of *PacketMirroringAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *PacketMirroringAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGroupAggregatedList, error) { +func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (*PacketMirroringAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NodeGroupAggregatedList{ + ret := &PacketMirroringAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -109435,16 +126708,16 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr } return ret, nil // { - // "description": "Retrieves an aggregated list of node groups. 
Note: use nodeGroups.listNodes for more details about each group.", - // "flatPath": "projects/{project}/aggregated/nodeGroups", + // "description": "Retrieves an aggregated list of packetMirrorings.", + // "flatPath": "projects/{project}/aggregated/packetMirrorings", // "httpMethod": "GET", - // "id": "compute.nodeGroups.aggregatedList", + // "id": "compute.packetMirrorings.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -109484,9 +126757,9 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/nodeGroups", + // "path": "projects/{project}/aggregated/packetMirrorings", // "response": { - // "$ref": "NodeGroupAggregatedList" + // "$ref": "PacketMirroringAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -109500,7 +126773,7 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NodeGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NodeGroupAggregatedList) error) error { +func (c *PacketMirroringsAggregatedListCall) Pages(ctx context.Context, f func(*PacketMirroringAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -109518,28 +126791,28 @@ func (c *NodeGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NodeGr } } -// method id "compute.nodeGroups.delete": +// method id "compute.packetMirrorings.delete": -type NodeGroupsDeleteCall struct { - s *Service - project string - zone string - nodeGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type PacketMirroringsDeleteCall struct { + s *Service + project string + region string + packetMirroring string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified NodeGroup resource. +// Delete: Deletes the specified PacketMirroring resource. // -// - nodeGroup: Name of the NodeGroup resource to delete. +// - packetMirroring: Name of the PacketMirroring resource to delete. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *NodeGroupsService) Delete(project string, zone string, nodeGroup string) *NodeGroupsDeleteCall { - c := &NodeGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *PacketMirroringsService) Delete(project string, region string, packetMirroring string) *PacketMirroringsDeleteCall { + c := &PacketMirroringsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup + c.region = region + c.packetMirroring = packetMirroring return c } @@ -109554,7 +126827,7 @@ func (r *NodeGroupsService) Delete(project string, zone string, nodeGroup string // clients from accidentally creating duplicate commitments. 
The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsDeleteCall) RequestId(requestId string) *NodeGroupsDeleteCall { +func (c *PacketMirroringsDeleteCall) RequestId(requestId string) *PacketMirroringsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -109562,7 +126835,7 @@ func (c *NodeGroupsDeleteCall) RequestId(requestId string) *NodeGroupsDeleteCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsDeleteCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteCall { +func (c *PacketMirroringsDeleteCall) Fields(s ...googleapi.Field) *PacketMirroringsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -109570,21 +126843,21 @@ func (c *NodeGroupsDeleteCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsDeleteCall) Context(ctx context.Context) *NodeGroupsDeleteCall { +func (c *PacketMirroringsDeleteCall) Context(ctx context.Context) *PacketMirroringsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsDeleteCall) Header() http.Header { +func (c *PacketMirroringsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *PacketMirroringsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -109594,7 +126867,7 @@ func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -109602,38 +126875,38 @@ func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "region": c.region, + "packetMirroring": c.packetMirroring, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.delete" call. +// Do executes the "compute.packetMirrorings.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
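
A sketch of the delete pattern with the optional requestId described above: reusing the same request ID on a retry lets the server drop the duplicate. It assumes the svc and ctx from the previous sketch plus imports of errors, log, and google.golang.org/api/googleapi; the project, region, resource name, and UUID are placeholders. errors.As is used so the check still works if Do returns a wrapped API error.

```go
// deleteMirroring starts deletion of a packet mirroring and inspects a
// structured API error on failure. Placeholder identifiers throughout.
func deleteMirroring(ctx context.Context, svc *compute.Service) error {
	op, err := svc.PacketMirrorings.
		Delete("my-project", "us-central1", "example-mirroring").
		RequestId("8a6e0804-2bd0-4672-b79d-d97027f9071a"). // reuse the same ID when retrying
		Context(ctx).
		Do()
	if err != nil {
		var gerr *googleapi.Error
		if errors.As(err, &gerr) {
			log.Printf("delete failed: HTTP %d: %s", gerr.Code, gerr.Message)
		}
		return err
	}
	log.Printf("started operation %s", op.Name)
	return nil
}
```
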
-func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *PacketMirroringsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -109647,18 +126920,18 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Deletes the specified NodeGroup resource.", - // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "description": "Deletes the specified PacketMirroring resource.", + // "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", // "httpMethod": "DELETE", - // "id": "compute.nodeGroups.delete", + // "id": "compute.packetMirrorings.delete", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "region", + // "packetMirroring" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource to delete.", + // "packetMirroring": { + // "description": "Name of the PacketMirroring resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -109671,20 +126944,20 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", // "response": { // "$ref": "Operation" // }, @@ -109696,132 +126969,122 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.nodeGroups.deleteNodes": +// method id "compute.packetMirrorings.get": -type NodeGroupsDeleteNodesCall struct { - s *Service - project string - zone string - nodeGroup string - nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type PacketMirroringsGetCall struct { + s *Service + project string + region string + packetMirroring string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DeleteNodes: Deletes specified nodes from the node group. +// Get: Returns the specified PacketMirroring resource. // -// - nodeGroup: Name of the NodeGroup resource whose nodes will be -// deleted. +// - packetMirroring: Name of the PacketMirroring resource to return. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { - c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *PacketMirroringsService) Get(project string, region string, packetMirroring string) *PacketMirroringsGetCall { + c := &PacketMirroringsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup - c.nodegroupsdeletenodesrequest = nodegroupsdeletenodesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsDeleteNodesCall) RequestId(requestId string) *NodeGroupsDeleteNodesCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.packetMirroring = packetMirroring return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NodeGroupsDeleteNodesCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteNodesCall { +func (c *PacketMirroringsGetCall) Fields(s ...googleapi.Field) *PacketMirroringsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *PacketMirroringsGetCall) IfNoneMatch(entityTag string) *PacketMirroringsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsDeleteNodesCall) Context(ctx context.Context) *NodeGroupsDeleteNodesCall { +func (c *PacketMirroringsGetCall) Context(ctx context.Context) *PacketMirroringsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsDeleteNodesCall) Header() http.Header { +func (c *PacketMirroringsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { +func (c *PacketMirroringsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsdeletenodesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "region": c.region, + "packetMirroring": c.packetMirroring, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.deleteNodes" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.packetMirrorings.get" call. +// Exactly one of *PacketMirroring or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
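
The conditional-read behavior described above (IfNoneMatch plus googleapi.IsNotModified) can be sketched as follows, again reusing svc, ctx, and the googleapi import from the earlier sketches. The entity tag is assumed to come from a previously cached response; the project, region, and resource names are placeholders.

```go
// getIfChanged fetches the packet mirroring only if it has changed since the
// response identified by etag; on a 304 it signals "use the cached copy".
func getIfChanged(ctx context.Context, svc *compute.Service, etag string) (*compute.PacketMirroring, error) {
	pm, err := svc.PacketMirrorings.
		Get("my-project", "us-central1", "example-mirroring").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// Resource unchanged since the tagged response; keep the cached copy.
		return nil, nil
	}
	return pm, err
}
```
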
-func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *PacketMirroring.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *PacketMirroringsGetCall) Do(opts ...googleapi.CallOption) (*PacketMirroring, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &PacketMirroring{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -109833,18 +127096,18 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes specified nodes from the node group.", - // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.deleteNodes", + // "description": "Returns the specified PacketMirroring resource.", + // "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", + // "httpMethod": "GET", + // "id": "compute.packetMirrorings.get", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "region", + // "packetMirroring" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource whose nodes will be deleted.", + // "packetMirroring": { + // "description": "Name of the PacketMirroring resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -109857,152 +127120,149 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", - // "request": { - // "$ref": "NodeGroupsDeleteNodesRequest" - // }, + // "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", // "response": { - // "$ref": "Operation" + // "$ref": "PacketMirroring" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeGroups.get": +// method id "compute.packetMirrorings.insert": -type NodeGroupsGetCall struct { - s *Service - project string - zone string - nodeGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type PacketMirroringsInsertCall struct { + s *Service + project string + region string + packetmirroring *PacketMirroring + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified NodeGroup. Get a list of available -// NodeGroups by making a list() request. Note: the "nodes" field should -// not be used. Use nodeGroups.listNodes instead. +// Insert: Creates a PacketMirroring resource in the specified project +// and region using the data included in the request. // -// - nodeGroup: Name of the node group to return. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *NodeGroupsService) Get(project string, zone string, nodeGroup string) *NodeGroupsGetCall { - c := &NodeGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *PacketMirroringsService) Insert(project string, region string, packetmirroring *PacketMirroring) *PacketMirroringsInsertCall { + c := &PacketMirroringsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup + c.region = region + c.packetmirroring = packetmirroring + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *PacketMirroringsInsertCall) RequestId(requestId string) *PacketMirroringsInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsGetCall) Fields(s ...googleapi.Field) *NodeGroupsGetCall { +func (c *PacketMirroringsInsertCall) Fields(s ...googleapi.Field) *PacketMirroringsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeGroupsGetCall) IfNoneMatch(entityTag string) *NodeGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsGetCall) Context(ctx context.Context) *NodeGroupsGetCall { +func (c *PacketMirroringsInsertCall) Context(ctx context.Context) *PacketMirroringsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsGetCall) Header() http.Header { +func (c *PacketMirroringsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *PacketMirroringsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.packetmirroring) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.get" call. -// Exactly one of *NodeGroup or error will be non-nil. Any non-2xx +// Do executes the "compute.packetMirrorings.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *NodeGroup.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
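
A hedged sketch of the insert pattern above. Only Name is set on the PacketMirroring value, which is very unlikely to be enough for the API to accept the request (the resource's required fields are not shown in this hunk); the point is how RequestId makes a retried insert idempotent. Names and the UUID are placeholders, and svc/ctx come from the first sketch.

```go
// createMirroring issues an idempotent insert for a minimal, illustrative
// PacketMirroring. Required fields beyond Name are intentionally omitted.
func createMirroring(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	pm := &compute.PacketMirroring{Name: "example-mirroring"} // incomplete resource, for illustration only
	return svc.PacketMirrorings.
		Insert("my-project", "us-central1", pm).
		RequestId("7e57ed00-0000-4000-8000-000000000001"). // placeholder idempotency key
		Context(ctx).
		Do()
}
```
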
-func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) { +func (c *PacketMirroringsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NodeGroup{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -110014,23 +127274,15 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) } return ret, nil // { - // "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. Use nodeGroups.listNodes instead.", - // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", - // "httpMethod": "GET", - // "id": "compute.nodeGroups.get", + // "description": "Creates a PacketMirroring resource in the specified project and region using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/packetMirrorings", + // "httpMethod": "POST", + // "id": "compute.packetMirrorings.insert", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "region" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the node group to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -110038,65 +127290,144 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "path": "projects/{project}/regions/{region}/packetMirrorings", + // "request": { + // "$ref": "PacketMirroring" + // }, // "response": { - // "$ref": "NodeGroup" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeGroups.getIamPolicy": +// method id "compute.packetMirrorings.list": -type NodeGroupsGetIamPolicyCall struct { +type PacketMirroringsListCall struct { s *Service project string - zone string - resource string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// List: Retrieves a list of PacketMirroring resources available to the +// specified project and region. // // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -// - zone: The name of the zone for this request. -func (r *NodeGroupsService) GetIamPolicy(project string, zone string, resource string) *NodeGroupsGetIamPolicyCall { - c := &NodeGroupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *PacketMirroringsService) List(project string, region string) *PacketMirroringsListCall { + c := &PacketMirroringsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource + c.region = region return c } -// OptionsRequestedPolicyVersion sets the optional parameter -// "optionsRequestedPolicyVersion": Requested IAM Policy version. -func (c *NodeGroupsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NodeGroupsGetIamPolicyCall { - c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. 
You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *PacketMirroringsListCall) Filter(filter string) *PacketMirroringsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *PacketMirroringsListCall) MaxResults(maxResults int64) *PacketMirroringsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *PacketMirroringsListCall) OrderBy(orderBy string) *PacketMirroringsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *PacketMirroringsListCall) PageToken(pageToken string) *PacketMirroringsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *PacketMirroringsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PacketMirroringsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
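
A sketch of trimming the list response with the Fields selector described above. The selector string follows the partial-response syntax from the documentation URL referenced in the doc comment and is illustrative only; svc and ctx are again assumed from the first sketch.

```go
// listNamesOnly asks the server to return only each item's name plus the
// paging token, reducing response size for large lists.
func listNamesOnly(ctx context.Context, svc *compute.Service) (*compute.PacketMirroringList, error) {
	return svc.PacketMirrorings.
		List("my-project", "us-central1").
		Fields("items(name),nextPageToken"). // illustrative partial-response selector
		Context(ctx).
		Do()
}
```
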
-func (c *NodeGroupsGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsGetIamPolicyCall { +func (c *PacketMirroringsListCall) Fields(s ...googleapi.Field) *PacketMirroringsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -110106,7 +127437,7 @@ func (c *NodeGroupsGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsGet // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeGroupsGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeGroupsGetIamPolicyCall { +func (c *PacketMirroringsListCall) IfNoneMatch(entityTag string) *PacketMirroringsListCall { c.ifNoneMatch_ = entityTag return c } @@ -110114,21 +127445,21 @@ func (c *NodeGroupsGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeGroupsGe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsGetIamPolicyCall) Context(ctx context.Context) *NodeGroupsGetIamPolicyCall { +func (c *PacketMirroringsListCall) Context(ctx context.Context) *PacketMirroringsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsGetIamPolicyCall) Header() http.Header { +func (c *PacketMirroringsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *PacketMirroringsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -110141,7 +127472,7 @@ func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -110149,40 +127480,39 @@ func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.packetMirrorings.list" call. +// Exactly one of *PacketMirroringList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *PacketMirroringList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirroringList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &PacketMirroringList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -110194,22 +127524,38 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", + // "description": "Retrieves a list of PacketMirroring resources available to the specified project and region.", + // "flatPath": "projects/{project}/regions/{region}/packetMirrorings", // "httpMethod": "GET", - // "id": "compute.nodeGroups.getIamPolicy", + // "id": "compute.packetMirrorings.list", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "region" // ], // "parameters": { - // "optionsRequestedPolicyVersion": { - // "description": "Requested IAM Policy version.", - // "format": "int32", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", // "type": "integer" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -110217,24 +127563,22 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", + // "path": "projects/{project}/regions/{region}/packetMirrorings", // "response": { - // "$ref": "Policy" + // "$ref": "PacketMirroringList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -110245,30 +127589,53 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } -// method id "compute.nodeGroups.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *PacketMirroringsListCall) Pages(ctx context.Context, f func(*PacketMirroringList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NodeGroupsInsertCall struct { - s *Service - project string - zone string - nodegroup *NodeGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.packetMirrorings.patch": + +type PacketMirroringsPatchCall struct { + s *Service + project string + region string + packetMirroring string + packetmirroring *PacketMirroring + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a NodeGroup resource in the specified project using -// the data included in the request. +// Patch: Patches the specified PacketMirroring resource with the data +// included in the request. This method supports PATCH semantics and +// uses JSON merge patch format and processing rules. // -// - initialNodeCount: Initial count of nodes in the node group. +// - packetMirroring: Name of the PacketMirroring resource to patch. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *NodeGroupsService) Insert(project string, zone string, initialNodeCount int64, nodegroup *NodeGroup) *NodeGroupsInsertCall { - c := &NodeGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *PacketMirroringsService) Patch(project string, region string, packetMirroring string, packetmirroring *PacketMirroring) *PacketMirroringsPatchCall { + c := &PacketMirroringsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.urlParams_.Set("initialNodeCount", fmt.Sprint(initialNodeCount)) - c.nodegroup = nodegroup + c.region = region + c.packetMirroring = packetMirroring + c.packetmirroring = packetmirroring return c } @@ -110283,7 +127650,7 @@ func (r *NodeGroupsService) Insert(project string, zone string, initialNodeCount // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
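(Not part of the vendored diff.) For orientation while reviewing this hunk: the regenerated list call above now exposes the Pages helper shown at the end of the hunk. The sketch below is a minimal, hypothetical usage example; it assumes the vendored package is google.golang.org/api/compute/v1, that Application Default Credentials are available, and that "my-project" and "us-central1" are placeholder values.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService resolves credentials via Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// List packet mirrorings in one region, walking every page of results.
	call := svc.PacketMirrorings.List("my-project", "us-central1")
	err = call.Pages(ctx, func(page *compute.PacketMirroringList) error {
		for _, pm := range page.Items {
			fmt.Println(pm.Name)
		}
		return nil // returning a non-nil error stops the iteration
	})
	if err != nil {
		log.Fatal(err)
	}
}

The Pages helper handles nextPageToken bookkeeping itself, so callers only supply the per-page callback; Filter, MaxResults and OrderBy can still be chained onto the call before Pages is invoked.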
-func (c *NodeGroupsInsertCall) RequestId(requestId string) *NodeGroupsInsertCall { +func (c *PacketMirroringsPatchCall) RequestId(requestId string) *PacketMirroringsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -110291,7 +127658,7 @@ func (c *NodeGroupsInsertCall) RequestId(requestId string) *NodeGroupsInsertCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsInsertCall) Fields(s ...googleapi.Field) *NodeGroupsInsertCall { +func (c *PacketMirroringsPatchCall) Fields(s ...googleapi.Field) *PacketMirroringsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -110299,21 +127666,21 @@ func (c *NodeGroupsInsertCall) Fields(s ...googleapi.Field) *NodeGroupsInsertCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsInsertCall) Context(ctx context.Context) *NodeGroupsInsertCall { +func (c *PacketMirroringsPatchCall) Context(ctx context.Context) *PacketMirroringsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsInsertCall) Header() http.Header { +func (c *PacketMirroringsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *PacketMirroringsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -110321,52 +127688,53 @@ func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroup) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.packetmirroring) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "region": c.region, + "packetMirroring": c.packetMirroring, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.insert" call. +// Do executes the "compute.packetMirrorings.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -110380,22 +127748,22 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Creates a NodeGroup resource in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/zones/{zone}/nodeGroups", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.insert", + // "description": "Patches the specified PacketMirroring resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", + // "httpMethod": "PATCH", + // "id": "compute.packetMirrorings.patch", // "parameterOrder": [ // "project", - // "zone", - // "initialNodeCount" + // "region", + // "packetMirroring" // ], // "parameters": { - // "initialNodeCount": { - // "description": "Initial count of nodes in the node group.", - // "format": "int32", - // "location": "query", + // "packetMirroring": { + // "description": "Name of the PacketMirroring resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, - // "type": "integer" + // "type": "string" // }, // "project": { // "description": "Project ID for this request.", @@ -110404,22 +127772,22 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeGroups", + // "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", // "request": { - // "$ref": "NodeGroup" + // "$ref": "PacketMirroring" // }, // "response": { // "$ref": "Operation" @@ -110432,189 +127800,116 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.nodeGroups.list": +// method id "compute.packetMirrorings.testIamPermissions": -type NodeGroupsListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type PacketMirroringsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of node groups available to the specified -// project. Note: use nodeGroups.listNodes for more details about each -// group. +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. // // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCall { - c := &NodeGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *PacketMirroringsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *PacketMirroringsTestIamPermissionsCall { + c := &PacketMirroringsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. 
To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *NodeGroupsListCall) Filter(filter string) *NodeGroupsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *NodeGroupsListCall) MaxResults(maxResults int64) *NodeGroupsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *NodeGroupsListCall) OrderBy(orderBy string) *NodeGroupsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *NodeGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsListCall) Fields(s ...googleapi.Field) *NodeGroupsListCall { +func (c *PacketMirroringsTestIamPermissionsCall) Fields(s ...googleapi.Field) *PacketMirroringsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *NodeGroupsListCall) IfNoneMatch(entityTag string) *NodeGroupsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsListCall) Context(ctx context.Context) *NodeGroupsListCall { +func (c *PacketMirroringsTestIamPermissionsCall) Context(ctx context.Context) *PacketMirroringsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsListCall) Header() http.Header { +func (c *PacketMirroringsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *PacketMirroringsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.list" call. -// Exactly one of *NodeGroupList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *NodeGroupList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.packetMirrorings.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, error) { +func (c *PacketMirroringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NodeGroupList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -110626,38 +127921,16 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e } return ret, nil // { - // "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group.", - // "flatPath": "projects/{project}/zones/{zone}/nodeGroups", - // "httpMethod": "GET", - // "id": "compute.nodeGroups.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.packetMirrorings.testIamPermissions", // "parameterOrder": [ // "project", - // "zone" + // "region", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -110665,22 +127938,27 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeGroups", + // "path": "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "NodeGroupList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -110691,126 +127969,45 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NodeGroupsListCall) Pages(ctx context.Context, f func(*NodeGroupList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.nodeGroups.listNodes": +// method id "compute.projects.disableXpnHost": -type NodeGroupsListNodesCall struct { +type ProjectsDisableXpnHostCall struct { s *Service project string - zone string - nodeGroup string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// ListNodes: Lists nodes in the node group. +// DisableXpnHost: Disable this project as a shared VPC host project. // -// - nodeGroup: Name of the NodeGroup resource whose nodes you want to -// list. // - project: Project ID for this request. -// - zone: The name of the zone for this request. 
-func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup string) *NodeGroupsListNodesCall { - c := &NodeGroupsListNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { + c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *NodeGroupsListNodesCall) Filter(filter string) *NodeGroupsListNodesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *NodeGroupsListNodesCall) MaxResults(maxResults int64) *NodeGroupsListNodesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *NodeGroupsListNodesCall) OrderBy(orderBy string) *NodeGroupsListNodesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. 
Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *NodeGroupsListNodesCall) PageToken(pageToken string) *NodeGroupsListNodesCall { - c.urlParams_.Set("pageToken", pageToken) return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *NodeGroupsListNodesCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsListNodesCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisableXpnHostCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsListNodesCall) Fields(s ...googleapi.Field) *NodeGroupsListNodesCall { +func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnHostCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -110818,21 +128015,21 @@ func (c *NodeGroupsListNodesCall) Fields(s ...googleapi.Field) *NodeGroupsListNo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsListNodesCall) Context(ctx context.Context) *NodeGroupsListNodesCall { +func (c *ProjectsDisableXpnHostCall) Context(ctx context.Context) *ProjectsDisableXpnHostCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsListNodesCall) Header() http.Header { +func (c *ProjectsDisableXpnHostCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -110842,7 +128039,7 @@ func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/disableXpnHost") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -110850,40 +128047,38 @@ func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.listNodes" call. -// Exactly one of *NodeGroupsListNodes or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NodeGroupsListNodes.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsListNodes, error) { +// Do executes the "compute.projects.disableXpnHost" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NodeGroupsListNodes{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -110895,46 +128090,14 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL } return ret, nil // { - // "description": "Lists nodes in the node group.", - // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", + // "description": "Disable this project as a shared VPC host project.", + // "flatPath": "projects/{project}/disableXpnHost", // "httpMethod": "POST", - // "id": "compute.nodeGroups.listNodes", + // "id": "compute.projects.disableXpnHost", // "parameterOrder": [ - // "project", - // "zone", - // "nodeGroup" + // "project" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "nodeGroup": { - // "description": "Name of the NodeGroup resource whose nodes you want to list.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -110942,77 +128105,43 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", + // "path": "projects/{project}/disableXpnHost", // "response": { - // "$ref": "NodeGroupsListNodes" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NodeGroupsListNodesCall) Pages(ctx context.Context, f func(*NodeGroupsListNodes) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.nodeGroups.patch": +// method id "compute.projects.disableXpnResource": -type NodeGroupsPatchCall struct { - s *Service - project string - zone string - nodeGroup string - nodegroup *NodeGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsDisableXpnResourceCall struct { + s *Service + project string + projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified node group. +// DisableXpnResource: Disable a service resource (also known as service +// project) associated with this host project. // -// - nodeGroup: Name of the NodeGroup resource to update. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *NodeGroupsService) Patch(project string, zone string, nodeGroup string, nodegroup *NodeGroup) *NodeGroupsPatchCall { - c := &NodeGroupsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { + c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup - c.nodegroup = nodegroup + c.projectsdisablexpnresourcerequest = projectsdisablexpnresourcerequest return c } @@ -111027,7 +128156,7 @@ func (r *NodeGroupsService) Patch(project string, zone string, nodeGroup string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
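(Not part of the vendored diff.) The hunk above finishes the regenerated compute.projects.disableXpnHost call and begins disableXpnResource. As a hedged sketch of how these shared VPC (XPN) project calls might be driven, assuming the same google.golang.org/api/compute/v1 import and a *compute.Service constructed as in the earlier sketch; the project name and request ID below are placeholders, not values from this patch.

// disableSharedVPCHost turns off shared VPC hosting for a project.
// The RequestId makes retries idempotent, per the generated method comments.
func disableSharedVPCHost(ctx context.Context, svc *compute.Service, project string) error {
	op, err := svc.Projects.DisableXpnHost(project).
		RequestId("2d1f32a8-8c3e-4a56-9d3b-0f6f2f1c9e77"). // placeholder UUID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("started operation %s (status %s)\n", op.Name, op.Status)
	return nil
}

The call returns an Operation rather than blocking, so production code would typically poll the returned operation (or use the zone/region/global operations services) before treating the project as disabled.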
-func (c *NodeGroupsPatchCall) RequestId(requestId string) *NodeGroupsPatchCall { +func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDisableXpnResourceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -111035,7 +128164,7 @@ func (c *NodeGroupsPatchCall) RequestId(requestId string) *NodeGroupsPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsPatchCall) Fields(s ...googleapi.Field) *NodeGroupsPatchCall { +func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnResourceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -111043,21 +128172,21 @@ func (c *NodeGroupsPatchCall) Fields(s ...googleapi.Field) *NodeGroupsPatchCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsPatchCall) Context(ctx context.Context) *NodeGroupsPatchCall { +func (c *ProjectsDisableXpnResourceCall) Context(ctx context.Context) *ProjectsDisableXpnResourceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsPatchCall) Header() http.Header { +func (c *ProjectsDisableXpnResourceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -111065,53 +128194,51 @@ func (c *NodeGroupsPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroup) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsdisablexpnresourcerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/disableXpnResource") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.patch" call. +// Do executes the "compute.projects.disableXpnResource" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *NodeGroupsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -111125,23 +128252,14 @@ func (c *NodeGroupsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Updates the specified node group.", - // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", - // "httpMethod": "PATCH", - // "id": "compute.nodeGroups.patch", + // "description": "Disable a service resource (also known as service project) associated with this host project.", + // "flatPath": "projects/{project}/disableXpnResource", + // "httpMethod": "POST", + // "id": "compute.projects.disableXpnResource", // "parameterOrder": [ - // "project", - // "zone", - // "nodeGroup" + // "project" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -111153,18 +128271,11 @@ func (c *NodeGroupsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "path": "projects/{project}/disableXpnResource", // "request": { - // "$ref": "NodeGroup" + // "$ref": "ProjectsDisableXpnResourceRequest" // }, // "response": { // "$ref": "Operation" @@ -111177,38 +128288,45 @@ func (c *NodeGroupsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.nodeGroups.setIamPolicy": +// method id "compute.projects.enableXpnHost": -type NodeGroupsSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsEnableXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// EnableXpnHost: Enable this project as a shared VPC host project. // // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -// - zone: The name of the zone for this request. -func (r *NodeGroupsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *NodeGroupsSetIamPolicyCall { - c := &NodeGroupsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { + c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableXpnHostCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NodeGroupsSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsSetIamPolicyCall { +func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnHostCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -111216,21 +128334,21 @@ func (c *NodeGroupsSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsSet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsSetIamPolicyCall) Context(ctx context.Context) *NodeGroupsSetIamPolicyCall { +func (c *ProjectsEnableXpnHostCall) Context(ctx context.Context) *ProjectsEnableXpnHostCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsSetIamPolicyCall) Header() http.Header { +func (c *ProjectsEnableXpnHostCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -111238,14 +128356,9 @@ func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/enableXpnHost") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -111253,40 +128366,38 @@ func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.projects.enableXpnHost" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -111298,14 +128409,12 @@ func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", + // "description": "Enable this project as a shared VPC host project.", + // "flatPath": "projects/{project}/enableXpnHost", // "httpMethod": "POST", - // "id": "compute.nodeGroups.setIamPolicy", + // "id": "compute.projects.enableXpnHost", // "parameterOrder": [ - // "project", - // "zone", - // "resource" + // "project" // ], // "parameters": { // "project": { @@ -111315,27 +128424,15 @@ func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", - // "request": { - // "$ref": "ZoneSetPolicyRequest" - // }, + // "path": "projects/{project}/enableXpnHost", // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -111345,30 +128442,26 @@ func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } -// method id "compute.nodeGroups.setNodeTemplate": +// method id "compute.projects.enableXpnResource": -type NodeGroupsSetNodeTemplateCall struct { +type ProjectsEnableXpnResourceCall struct { s *Service project string - zone string - nodeGroup string - nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest + projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetNodeTemplate: Updates the node template of the node group. +// EnableXpnResource: Enable service resource (a.k.a service project) +// for a host project, so that subnets in the host project can be used +// by instances in the service project. // -// - nodeGroup: Name of the NodeGroup resource to update. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *NodeGroupsService) SetNodeTemplate(project string, zone string, nodeGroup string, nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest) *NodeGroupsSetNodeTemplateCall { - c := &NodeGroupsSetNodeTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { + c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup - c.nodegroupssetnodetemplaterequest = nodegroupssetnodetemplaterequest + c.projectsenablexpnresourcerequest = projectsenablexpnresourcerequest return c } @@ -111383,7 +128476,7 @@ func (r *NodeGroupsService) SetNodeTemplate(project string, zone string, nodeGro // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsSetNodeTemplateCall) RequestId(requestId string) *NodeGroupsSetNodeTemplateCall { +func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEnableXpnResourceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -111391,7 +128484,7 @@ func (c *NodeGroupsSetNodeTemplateCall) RequestId(requestId string) *NodeGroupsS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsSetNodeTemplateCall) Fields(s ...googleapi.Field) *NodeGroupsSetNodeTemplateCall { +func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnResourceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -111399,21 +128492,21 @@ func (c *NodeGroupsSetNodeTemplateCall) Fields(s ...googleapi.Field) *NodeGroups // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsSetNodeTemplateCall) Context(ctx context.Context) *NodeGroupsSetNodeTemplateCall { +func (c *ProjectsEnableXpnResourceCall) Context(ctx context.Context) *ProjectsEnableXpnResourceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsSetNodeTemplateCall) Header() http.Header { +func (c *ProjectsEnableXpnResourceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -111421,14 +128514,14 @@ func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupssetnodetemplaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsenablexpnresourcerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/enableXpnResource") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -111436,38 +128529,36 @@ func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, e } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.setNodeTemplate" call. +// Do executes the "compute.projects.enableXpnResource" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -111481,23 +128572,14 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Updates the node template of the node group.", - // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", + // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", + // "flatPath": "projects/{project}/enableXpnResource", // "httpMethod": "POST", - // "id": "compute.nodeGroups.setNodeTemplate", + // "id": "compute.projects.enableXpnResource", // "parameterOrder": [ - // "project", - // "zone", - // "nodeGroup" + // "project" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -111509,18 +128591,11 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", + // "path": "projects/{project}/enableXpnResource", // "request": { - // "$ref": "NodeGroupsSetNodeTemplateRequest" + // "$ref": "ProjectsEnableXpnResourceRequest" // }, // "response": { // "$ref": "Operation" @@ -111533,116 +128608,121 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.nodeGroups.testIamPermissions": +// method id "compute.projects.get": -type NodeGroupsTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsGetCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// Get: Returns the specified Project resource. To decrease latency for +// this method, you can optionally omit any unneeded information from +// the response by using a field mask. This practice is especially +// recommended for unused quota information (the `quotas` field). To +// exclude one or more fields, set your request's `fields` query +// parameter to only include the fields you need. For example, to only +// include the `id` and `selfLink` fields, add the query parameter +// `?fields=id,selfLink` to your request. // // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -// - zone: The name of the zone for this request. -func (r *NodeGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeGroupsTestIamPermissionsCall { - c := &NodeGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ProjectsService) Get(project string) *ProjectsGetCall { + c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeGroupsTestIamPermissionsCall { +func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsTestIamPermissionsCall) Context(ctx context.Context) *NodeGroupsTestIamPermissionsCall { +func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsTestIamPermissionsCall) Header() http.Header { +func (c *ProjectsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.projects.get" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TestPermissionsResponse{ + ret := &Project{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -111654,14 +128734,12 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.testIamPermissions", + // "description": "Returns the specified Project resource. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", + // "flatPath": "projects/{project}", + // "httpMethod": "GET", + // "id": "compute.projects.get", // "parameterOrder": [ - // "project", - // "zone", - // "resource" + // "project" // ], // "parameters": { // "project": { @@ -111670,41 +128748,172 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // } + // }, + // "path": "projects/{project}", + // "response": { + // "$ref": "Project" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.projects.getXpnHost": + +type ProjectsGetXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetXpnHost: Gets the shared VPC host project that this project links +// to. May be empty if no link exists. +// +// - project: Project ID for this request. +func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { + c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHostCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHostCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsGetXpnHostCall) Context(ctx context.Context) *ProjectsGetXpnHostCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsGetXpnHostCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/getXpnHost") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.getXpnHost" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Project{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the shared VPC host project that this project links to. 
May be empty if no link exists.", + // "flatPath": "projects/{project}/getXpnHost", + // "httpMethod": "GET", + // "id": "compute.projects.getXpnHost", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "projects/{project}/getXpnHost", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Project" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeTemplates.aggregatedList": +// method id "compute.projects.getXpnResources": -type NodeTemplatesAggregatedListCall struct { +type ProjectsGetXpnResourcesCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -111713,29 +128922,33 @@ type NodeTemplatesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of node templates. +// GetXpnResources: Gets service resources (a.k.a service project) +// associated with this host project. // // - project: Project ID for this request. -func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggregatedListCall { - c := &NodeTemplatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { + c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. 
For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -111743,32 +128956,29 @@ func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggr // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *NodeTemplatesAggregatedListCall) Filter(filter string) *NodeTemplatesAggregatedListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("filter", filter) return c } -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *NodeTemplatesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NodeTemplatesAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than `maxResults`, Compute Engine returns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *NodeTemplatesAggregatedListCall) MaxResults(maxResults int64) *NodeTemplatesAggregatedListCall { +func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -111782,7 +128992,7 @@ func (c *NodeTemplatesAggregatedListCall) MaxResults(maxResults int64) *NodeTemp // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *NodeTemplatesAggregatedListCall) OrderBy(orderBy string) *NodeTemplatesAggregatedListCall { +func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -111790,7 +129000,7 @@ func (c *NodeTemplatesAggregatedListCall) OrderBy(orderBy string) *NodeTemplates // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *NodeTemplatesAggregatedListCall) PageToken(pageToken string) *NodeTemplatesAggregatedListCall { +func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -111799,7 +129009,7 @@ func (c *NodeTemplatesAggregatedListCall) PageToken(pageToken string) *NodeTempl // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *NodeTemplatesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTemplatesAggregatedListCall { +func (c *ProjectsGetXpnResourcesCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -111807,7 +129017,7 @@ func (c *NodeTemplatesAggregatedListCall) ReturnPartialSuccess(returnPartialSucc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTemplatesAggregatedListCall { +func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -111817,7 +129027,7 @@ func (c *NodeTemplatesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTemp // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeTemplatesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTemplatesAggregatedListCall { +func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGetXpnResourcesCall { c.ifNoneMatch_ = entityTag return c } @@ -111825,21 +129035,21 @@ func (c *NodeTemplatesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTem // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *NodeTemplatesAggregatedListCall) Context(ctx context.Context) *NodeTemplatesAggregatedListCall { +func (c *ProjectsGetXpnResourcesCall) Context(ctx context.Context) *ProjectsGetXpnResourcesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesAggregatedListCall) Header() http.Header { +func (c *ProjectsGetXpnResourcesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -111852,7 +129062,7 @@ func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/nodeTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/getXpnResources") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -111865,33 +129075,33 @@ func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.aggregatedList" call. -// Exactly one of *NodeTemplateAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *NodeTemplateAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.projects.getXpnResources" call. +// Exactly one of *ProjectsGetXpnResources or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ProjectsGetXpnResources.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateAggregatedList, error) { +func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*ProjectsGetXpnResources, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NodeTemplateAggregatedList{ + ret := &ProjectsGetXpnResources{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -111903,24 +129113,19 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod } return ret, nil // { - // "description": "Retrieves an aggregated list of node templates.", - // "flatPath": "projects/{project}/aggregated/nodeTemplates", + // "description": "Gets service resources (a.k.a service project) associated with this host project.", + // "flatPath": "projects/{project}/getXpnResources", // "httpMethod": "GET", - // "id": "compute.nodeTemplates.aggregatedList", + // "id": "compute.projects.getXpnResources", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -111952,14 +129157,13 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/nodeTemplates", + // "path": "projects/{project}/getXpnResources", // "response": { - // "$ref": "NodeTemplateAggregatedList" + // "$ref": "ProjectsGetXpnResources" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -111968,7 +129172,7 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *NodeTemplatesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTemplateAggregatedList) error) error { +func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*ProjectsGetXpnResources) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -111986,51 +129190,114 @@ func (c *NodeTemplatesAggregatedListCall) Pages(ctx context.Context, f func(*Nod } } -// method id "compute.nodeTemplates.delete": +// method id "compute.projects.listXpnHosts": -type NodeTemplatesDeleteCall struct { - s *Service - project string - region string - nodeTemplate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsListXpnHostsCall struct { + s *Service + project string + projectslistxpnhostsrequest *ProjectsListXpnHostsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified NodeTemplate resource. +// ListXpnHosts: Lists all shared VPC host projects visible to the user +// in an organization. // -// - nodeTemplate: Name of the NodeTemplate resource to delete. // - project: Project ID for this request. -// - region: The name of the region for this request. -func (r *NodeTemplatesService) Delete(project string, region string, nodeTemplate string) *NodeTemplatesDeleteCall { - c := &NodeTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { + c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.nodeTemplate = nodeTemplate + c.projectslistxpnhostsrequest = projectslistxpnhostsrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *NodeTemplatesDeleteCall) RequestId(requestId string) *NodeTemplatesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. 
For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
+func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *ProjectsListXpnHostsCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsListXpnHostsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesDeleteCall) Fields(s ...googleapi.Field) *NodeTemplatesDeleteCall { +func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpnHostsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -112038,21 +129305,21 @@ func (c *NodeTemplatesDeleteCall) Fields(s ...googleapi.Field) *NodeTemplatesDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesDeleteCall) Context(ctx context.Context) *NodeTemplatesDeleteCall { +func (c *ProjectsListXpnHostsCall) Context(ctx context.Context) *ProjectsListXpnHostsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesDeleteCall) Header() http.Header { +func (c *ProjectsListXpnHostsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -112060,50 +129327,53 @@ func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectslistxpnhostsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/listXpnHosts") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "nodeTemplate": c.nodeTemplate, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.projects.listXpnHosts" call. +// Exactly one of *XpnHostList or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *XpnHostList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &XpnHostList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -112115,21 +129385,35 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified NodeTemplate resource.", - // "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", - // "httpMethod": "DELETE", - // "id": "compute.nodeTemplates.delete", + // "description": "Lists all shared VPC host projects visible to the user in an organization.", + // "flatPath": "projects/{project}/listXpnHosts", + // "httpMethod": "POST", + // "id": "compute.projects.listXpnHosts", // "parameterOrder": [ - // "project", - // "region", - // "nodeTemplate" + // "project" // ], // "parameters": { - // "nodeTemplate": { - // "description": "Name of the NodeTemplate resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -112139,22 +129423,18 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + // "path": "projects/{project}/listXpnHosts", + // "request": { + // "$ref": "ProjectsListXpnHostsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "XpnHostList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -112164,123 +129444,144 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.nodeTemplates.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NodeTemplatesGetCall struct { - s *Service - project string - region string - nodeTemplate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.projects.moveDisk": + +type ProjectsMoveDiskCall struct { + s *Service + project string + diskmoverequest *DiskMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified node template. Gets a list of available -// node templates by making a list() request. +// MoveDisk: Moves a persistent disk from one zone to another. // -// - nodeTemplate: Name of the node template to return. // - project: Project ID for this request. -// - region: The name of the region for this request. -func (r *NodeTemplatesService) Get(project string, region string, nodeTemplate string) *NodeTemplatesGetCall { - c := &NodeTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { + c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.nodeTemplate = nodeTemplate + c.diskmoverequest = diskmoverequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
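The ProjectsListXpnHostsCall generated above includes a Pages helper that follows nextPageToken automatically. A minimal usage sketch, assuming the published google.golang.org/api/compute/v1 package and application-default credentials; the project ID and printed field are illustrative and not part of this diff:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listXpnHosts prints the shared VPC host projects visible from the given
// project, one page at a time, using the generated Pages helper.
func listXpnHosts(ctx context.Context, project string) error {
	svc, err := compute.NewService(ctx) // assumes application-default credentials
	if err != nil {
		return err
	}
	call := svc.Projects.ListXpnHosts(project, &compute.ProjectsListXpnHostsRequest{})
	return call.Pages(ctx, func(page *compute.XpnHostList) error {
		for _, host := range page.Items {
			fmt.Println(host.Name)
		}
		return nil // returning a non-nil error here halts the iteration
	})
}

func main() {
	if err := listXpnHosts(context.Background(), "my-project"); err != nil { // placeholder project ID
		log.Fatal(err)
	}
}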
-func (c *NodeTemplatesGetCall) Fields(s ...googleapi.Field) *NodeTemplatesGetCall { +func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeTemplatesGetCall) IfNoneMatch(entityTag string) *NodeTemplatesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesGetCall) Context(ctx context.Context) *NodeTemplatesGetCall { +func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesGetCall) Header() http.Header { +func (c *ProjectsMoveDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/moveDisk") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "nodeTemplate": c.nodeTemplate, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.get" call. -// Exactly one of *NodeTemplate or error will be non-nil. Any non-2xx +// Do executes the "compute.projects.moveDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *NodeTemplate.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, error) { +func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NodeTemplate{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -112292,23 +129593,14 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, } return ret, nil // { - // "description": "Returns the specified node template. Gets a list of available node templates by making a list() request.", - // "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", - // "httpMethod": "GET", - // "id": "compute.nodeTemplates.get", + // "description": "Moves a persistent disk from one zone to another.", + // "flatPath": "projects/{project}/moveDisk", + // "httpMethod": "POST", + // "id": "compute.projects.moveDisk", // "parameterOrder": [ - // "project", - // "region", - // "nodeTemplate" + // "project" // ], // "parameters": { - // "nodeTemplate": { - // "description": "Name of the node template to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -112316,151 +129608,149 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + // "path": "projects/{project}/moveDisk", + // "request": { + // "$ref": "DiskMoveRequest" + // }, // "response": { - // "$ref": "NodeTemplate" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeTemplates.getIamPolicy": +// method id "compute.projects.moveInstance": -type NodeTemplatesGetIamPolicyCall struct { - s *Service - project string - region string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsMoveInstanceCall struct { + s *Service + project string + instancemoverequest *InstanceMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// MoveInstance: Moves an instance and its attached persistent disks +// from one zone to another. *Note*: Moving VMs or disks by using this +// method might cause unexpected behavior. For more information, see the +// known issue +// (/compute/docs/troubleshooting/known-issues#moving_vms_or_disks_using_ +// the_moveinstance_api_or_the_causes_unexpected_behavior). // // - project: Project ID for this request. -// - region: The name of the region for this request. -// - resource: Name or id of the resource for this request. -func (r *NodeTemplatesService) GetIamPolicy(project string, region string, resource string) *NodeTemplatesGetIamPolicyCall { - c := &NodeTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { + c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource + c.instancemoverequest = instancemoverequest return c } -// OptionsRequestedPolicyVersion sets the optional parameter -// "optionsRequestedPolicyVersion": Requested IAM Policy version. -func (c *NodeTemplatesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NodeTemplatesGetIamPolicyCall { - c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). 
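The Do methods in this version wrap non-2xx responses with gensupport.WrapError instead of returning a bare *googleapi.Error, so callers should recover the typed error with errors.As rather than a direct type assertion. A minimal sketch, assuming svc is a *compute.Service constructed as in the pagination example above and that the "errors", "log", and "google.golang.org/api/googleapi" imports are added; the disk and zone values are placeholders:

// moveDisk starts a zone-to-zone disk move and reports any typed API error.
func moveDisk(svc *compute.Service, project string) error {
	op, err := svc.Projects.MoveDisk(project, &compute.DiskMoveRequest{
		TargetDisk:      "zones/us-central1-a/disks/example-disk", // placeholder
		DestinationZone: "zones/us-central1-b",                    // placeholder
	}).Do()
	if err != nil {
		var gerr *googleapi.Error
		if errors.As(err, &gerr) { // the wrapped error still exposes *googleapi.Error
			log.Printf("compute API error %d: %s", gerr.Code, gerr.Message)
		}
		return err
	}
	log.Printf("moveDisk operation %s started", op.Name)
	return nil
}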
+func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesGetIamPolicyCall { +func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeTemplatesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesGetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesGetIamPolicyCall { +func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesGetIamPolicyCall) Header() http.Header { +func (c *ProjectsMoveInstanceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/moveInstance") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
-func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.projects.moveInstance" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -112472,22 +129762,14 @@ func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", - // "httpMethod": "GET", - // "id": "compute.nodeTemplates.getIamPolicy", + // "description": "Moves an instance and its attached persistent disks from one zone to another. *Note*: Moving VMs or disks by using this method might cause unexpected behavior. For more information, see the [known issue](/compute/docs/troubleshooting/known-issues#moving_vms_or_disks_using_the_moveinstance_api_or_the_causes_unexpected_behavior).", + // "flatPath": "projects/{project}/moveInstance", + // "httpMethod": "POST", + // "id": "compute.projects.moveInstance", // "parameterOrder": [ - // "project", - // "region", - // "resource" + // "project" // ], // "parameters": { - // "optionsRequestedPolicyVersion": { - // "description": "Requested IAM Policy version.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -112495,56 +129777,46 @@ func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", + // "path": "projects/{project}/moveInstance", + // "request": { + // "$ref": "InstanceMoveRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeTemplates.insert": +// method id "compute.projects.setCommonInstanceMetadata": -type NodeTemplatesInsertCall struct { - s *Service - project string - region string - nodetemplate *NodeTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsSetCommonInstanceMetadataCall struct { + s *Service + project string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a NodeTemplate resource in the specified project -// using the data included in the request. +// SetCommonInstanceMetadata: Sets metadata common to all instances +// within the specified project using the data included in the request. // // - project: Project ID for this request. -// - region: The name of the region for this request. -func (r *NodeTemplatesService) Insert(project string, region string, nodetemplate *NodeTemplate) *NodeTemplatesInsertCall { - c := &NodeTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { + c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.nodetemplate = nodetemplate + c.metadata = metadata return c } @@ -112559,7 +129831,7 @@ func (r *NodeTemplatesService) Insert(project string, region string, nodetemplat // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *NodeTemplatesInsertCall) RequestId(requestId string) *NodeTemplatesInsertCall { +func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall { c.urlParams_.Set("requestId", requestId) return c } @@ -112567,7 +129839,7 @@ func (c *NodeTemplatesInsertCall) RequestId(requestId string) *NodeTemplatesInse // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NodeTemplatesInsertCall) Fields(s ...googleapi.Field) *NodeTemplatesInsertCall { +func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -112575,21 +129847,21 @@ func (c *NodeTemplatesInsertCall) Fields(s ...googleapi.Field) *NodeTemplatesIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesInsertCall) Context(ctx context.Context) *NodeTemplatesInsertCall { +func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesInsertCall) Header() http.Header { +func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -112597,14 +129869,14 @@ func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodetemplate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setCommonInstanceMetadata") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -112613,36 +129885,35 @@ func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.insert" call. +// Do executes the "compute.projects.setCommonInstanceMetadata" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -112656,13 +129927,12 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a NodeTemplate resource in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/nodeTemplates", + // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + // "flatPath": "projects/{project}/setCommonInstanceMetadata", // "httpMethod": "POST", - // "id": "compute.nodeTemplates.insert", + // "id": "compute.projects.setCommonInstanceMetadata", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "project": { @@ -112672,22 +129942,15 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/nodeTemplates", + // "path": "projects/{project}/setCommonInstanceMetadata", // "request": { - // "$ref": "NodeTemplate" + // "$ref": "Metadata" // }, // "response": { // "$ref": "Operation" @@ -112700,188 +129963,126 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.nodeTemplates.list": +// method id "compute.projects.setDefaultNetworkTier": -type NodeTemplatesListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsSetDefaultNetworkTierCall struct { + s *Service + project string + projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of node templates available to the specified -// project. +// SetDefaultNetworkTier: Sets the default network tier of the project. 
+// The default network tier is used when an +// address/forwardingRule/instance is created without specifying the +// network tier field. // // - project: Project ID for this request. -// - region: The name of the region for this request. -func (r *NodeTemplatesService) List(project string, region string) *NodeTemplatesListCall { - c := &NodeTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall { + c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *NodeTemplatesListCall) Filter(filter string) *NodeTemplatesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *NodeTemplatesListCall) MaxResults(maxResults int64) *NodeTemplatesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. 
Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *NodeTemplatesListCall) OrderBy(orderBy string) *NodeTemplatesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *NodeTemplatesListCall) PageToken(pageToken string) *NodeTemplatesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.projectssetdefaultnetworktierrequest = projectssetdefaultnetworktierrequest return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *NodeTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTemplatesListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *ProjectsSetDefaultNetworkTierCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesListCall) Fields(s ...googleapi.Field) *NodeTemplatesListCall { +func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultNetworkTierCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeTemplatesListCall) IfNoneMatch(entityTag string) *NodeTemplatesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesListCall) Context(ctx context.Context) *NodeTemplatesListCall { +func (c *ProjectsSetDefaultNetworkTierCall) Context(ctx context.Context) *ProjectsSetDefaultNetworkTierCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeTemplatesListCall) Header() http.Header { +func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultnetworktierrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setDefaultNetworkTier") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.list" call. -// Exactly one of *NodeTemplateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NodeTemplateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateList, error) { +// Do executes the "compute.projects.setDefaultNetworkTier" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NodeTemplateList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -112893,38 +130094,14 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL } return ret, nil // { - // "description": "Retrieves a list of node templates available to the specified project.", - // "flatPath": "projects/{project}/regions/{region}/nodeTemplates", - // "httpMethod": "GET", - // "id": "compute.nodeTemplates.list", + // "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", + // "flatPath": "projects/{project}/setDefaultNetworkTier", + // "httpMethod": "POST", + // "id": "compute.projects.setDefaultNetworkTier", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -112932,85 +130109,71 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/nodeTemplates", + // "path": "projects/{project}/setDefaultNetworkTier", + // "request": { + // "$ref": "ProjectsSetDefaultNetworkTierRequest" + // }, // "response": { - // "$ref": "NodeTemplateList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *NodeTemplatesListCall) Pages(ctx context.Context, f func(*NodeTemplateList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.nodeTemplates.setIamPolicy": +// method id "compute.projects.setUsageExportBucket": -type NodeTemplatesSetIamPolicyCall struct { - s *Service - project string - region string - resource string - regionsetpolicyrequest *RegionSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsSetUsageExportBucketCall struct { + s *Service + project string + usageexportlocation *UsageExportLocation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// SetUsageExportBucket: Enables the usage export feature and sets the +// usage export bucket where reports are stored. If you provide an empty +// request body using this method, the usage export feature will be +// disabled. // // - project: Project ID for this request. -// - region: The name of the region for this request. -// - resource: Name or id of the resource for this request. -func (r *NodeTemplatesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NodeTemplatesSetIamPolicyCall { - c := &NodeTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { + c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.regionsetpolicyrequest = regionsetpolicyrequest + c.usageexportlocation = usageexportlocation + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NodeTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesSetIamPolicyCall { +func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -113018,21 +130181,21 @@ func (c *NodeTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTempla // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesSetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesSetIamPolicyCall { +func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesSetIamPolicyCall) Header() http.Header { +func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -113040,14 +130203,14 @@ func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setUsageExportBucket") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -113055,40 +130218,38 @@ func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, e } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.projects.setUsageExportBucket" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -113100,14 +130261,12 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", + // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + // "flatPath": "projects/{project}/setUsageExportBucket", // "httpMethod": "POST", - // "id": "compute.nodeTemplates.setIamPolicy", + // "id": "compute.projects.setUsageExportBucket", // "parameterOrder": [ - // "project", - // "region", - // "resource" + // "project" // ], // "parameters": { // "project": { @@ -113117,68 +130276,73 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", + // "path": "projects/{project}/setUsageExportBucket", // "request": { - // "$ref": "RegionSetPolicyRequest" + // "$ref": "UsageExportLocation" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.nodeTemplates.testIamPermissions": +// method id "compute.publicAdvertisedPrefixes.delete": -type NodeTemplatesTestIamPermissionsCall struct { +type PublicAdvertisedPrefixesDeleteCall struct { s *Service project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest + publicAdvertisedPrefix string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// Delete: Deletes the specified PublicAdvertisedPrefix // -// - project: Project ID for this request. -// - region: The name of the region for this request. -// - resource: Name or id of the resource for this request. -func (r *NodeTemplatesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeTemplatesTestIamPermissionsCall { - c := &NodeTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource +// to delete. +func (r *PublicAdvertisedPrefixesService) Delete(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesDeleteCall { + c := &PublicAdvertisedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.publicAdvertisedPrefix = publicAdvertisedPrefix + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *PublicAdvertisedPrefixesDeleteCall) RequestId(requestId string) *PublicAdvertisedPrefixesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NodeTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeTemplatesTestIamPermissionsCall { +func (c *PublicAdvertisedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -113186,21 +130350,21 @@ func (c *NodeTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Node // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesTestIamPermissionsCall) Context(ctx context.Context) *NodeTemplatesTestIamPermissionsCall { +func (c *PublicAdvertisedPrefixesDeleteCall) Context(ctx context.Context) *PublicAdvertisedPrefixesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesTestIamPermissionsCall) Header() http.Header { +func (c *PublicAdvertisedPrefixesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicAdvertisedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -113208,55 +130372,49 @@ func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "publicAdvertisedPrefix": c.publicAdvertisedPrefix, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.publicAdvertisedPrefixes.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -113268,14 +130426,13 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", - // "httpMethod": "POST", - // "id": "compute.nodeTemplates.testIamPermissions", + // "description": "Deletes the specified PublicAdvertisedPrefix", + // "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + // "httpMethod": "DELETE", + // "id": "compute.publicAdvertisedPrefixes.delete", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "publicAdvertisedPrefix" // ], // "parameters": { // "project": { @@ -113285,143 +130442,59 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "publicAdvertisedPrefix": { + // "description": "Name of the PublicAdvertisedPrefix resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeTypes.aggregatedList": +// method id "compute.publicAdvertisedPrefixes.get": -type NodeTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type PublicAdvertisedPrefixesGetCall struct { + s *Service + project string + publicAdvertisedPrefix string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of node types. +// Get: Returns the specified PublicAdvertisedPrefix resource. // -// - project: Project ID for this request. -func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedListCall { - c := &NodeTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource +// to return. +func (r *PublicAdvertisedPrefixesService) Get(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesGetCall { + c := &PublicAdvertisedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. 
For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *NodeTypesAggregatedListCall) Filter(filter string) *NodeTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *NodeTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NodeTypesAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *NodeTypesAggregatedListCall) MaxResults(maxResults int64) *NodeTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *NodeTypesAggregatedListCall) OrderBy(orderBy string) *NodeTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *NodeTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTypesAggregatedListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + c.publicAdvertisedPrefix = publicAdvertisedPrefix return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NodeTypesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTypesAggregatedListCall { +func (c *PublicAdvertisedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -113431,7 +130504,7 @@ func (c *NodeTypesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTypesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeTypesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTypesAggregatedListCall { +func (c *PublicAdvertisedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -113439,21 +130512,21 @@ func (c *NodeTypesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTypesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTypesAggregatedListCall) Context(ctx context.Context) *NodeTypesAggregatedListCall { +func (c *PublicAdvertisedPrefixesGetCall) Context(ctx context.Context) *PublicAdvertisedPrefixesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTypesAggregatedListCall) Header() http.Header { +func (c *PublicAdvertisedPrefixesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicAdvertisedPrefixesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -113466,7 +130539,7 @@ func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/nodeTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -113474,38 +130547,39 @@ func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "publicAdvertisedPrefix": c.publicAdvertisedPrefix, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTypes.aggregatedList" call. -// Exactly one of *NodeTypeAggregatedList or error will be non-nil. Any +// Do executes the "compute.publicAdvertisedPrefixes.get" call. +// Exactly one of *PublicAdvertisedPrefix or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *NodeTypeAggregatedList.ServerResponse.Header or (if a response was +// *PublicAdvertisedPrefix.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTypeAggregatedList, error) { +func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefix, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NodeTypeAggregatedList{ + ret := &PublicAdvertisedPrefix{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -113517,42 +130591,15 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of node types.", - // "flatPath": "projects/{project}/aggregated/nodeTypes", + // "description": "Returns the specified PublicAdvertisedPrefix resource.", + // "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", // "httpMethod": "GET", - // "id": "compute.nodeTypes.aggregatedList", + // "id": "compute.publicAdvertisedPrefixes.get", // "parameterOrder": [ - // "project" + // "project", + // "publicAdvertisedPrefix" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -113560,15 +130607,17 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" + // "publicAdvertisedPrefix": { + // "description": "Name of the PublicAdvertisedPrefix resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/aggregated/nodeTypes", + // "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", // "response": { - // "$ref": "NodeTypeAggregatedList" + // "$ref": "PublicAdvertisedPrefix" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -113579,144 +130628,124 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *NodeTypesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.nodeTypes.get": +// method id "compute.publicAdvertisedPrefixes.insert": -type NodeTypesGetCall struct { - s *Service - project string - zone string - nodeType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type PublicAdvertisedPrefixesInsertCall struct { + s *Service + project string + publicadvertisedprefix *PublicAdvertisedPrefix + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified node type. Gets a list of available node -// types by making a list() request. +// Insert: Creates a PublicAdvertisedPrefix in the specified project +// using the parameters that are included in the request. // -// - nodeType: Name of the node type to return. // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *NodeTypesService) Get(project string, zone string, nodeType string) *NodeTypesGetCall { - c := &NodeTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *PublicAdvertisedPrefixesService) Insert(project string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesInsertCall { + c := &PublicAdvertisedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeType = nodeType + c.publicadvertisedprefix = publicadvertisedprefix + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *PublicAdvertisedPrefixesInsertCall) RequestId(requestId string) *PublicAdvertisedPrefixesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTypesGetCall) Fields(s ...googleapi.Field) *NodeTypesGetCall { +func (c *PublicAdvertisedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *NodeTypesGetCall) IfNoneMatch(entityTag string) *NodeTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTypesGetCall) Context(ctx context.Context) *NodeTypesGetCall { +func (c *PublicAdvertisedPrefixesInsertCall) Context(ctx context.Context) *PublicAdvertisedPrefixesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTypesGetCall) Header() http.Header { +func (c *PublicAdvertisedPrefixesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicAdvertisedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeTypes/{nodeType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeType": c.nodeType, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTypes.get" call. -// Exactly one of *NodeType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *NodeType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.publicAdvertisedPrefixes.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { +func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NodeType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -113728,23 +130757,14 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { } return ret, nil // { - // "description": "Returns the specified node type. Gets a list of available node types by making a list() request.", - // "flatPath": "projects/{project}/zones/{zone}/nodeTypes/{nodeType}", - // "httpMethod": "GET", - // "id": "compute.nodeTypes.get", + // "description": "Creates a PublicAdvertisedPrefix in the specified project using the parameters that are included in the request.", + // "flatPath": "projects/{project}/global/publicAdvertisedPrefixes", + // "httpMethod": "POST", + // "id": "compute.publicAdvertisedPrefixes.insert", // "parameterOrder": [ - // "project", - // "zone", - // "nodeType" + // "project" // ], // "parameters": { - // "nodeType": { - // "description": "Name of the node type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -113752,65 +130772,64 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeTypes/{nodeType}", + // "path": "projects/{project}/global/publicAdvertisedPrefixes", + // "request": { + // "$ref": "PublicAdvertisedPrefix" + // }, // "response": { - // "$ref": "NodeType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeTypes.list": +// method id "compute.publicAdvertisedPrefixes.list": -type NodeTypesListCall struct { +type PublicAdvertisedPrefixesListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of node types available to the specified -// project. +// List: Lists the PublicAdvertisedPrefixes for a project. // // - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall { - c := &NodeTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *PublicAdvertisedPrefixesService) List(project string) *PublicAdvertisedPrefixesListCall { + c := &PublicAdvertisedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. 
For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -113818,8 +130837,18 @@ func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *NodeTypesListCall) Filter(filter string) *NodeTypesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *PublicAdvertisedPrefixesListCall) Filter(filter string) *PublicAdvertisedPrefixesListCall { c.urlParams_.Set("filter", filter) return c } @@ -113830,7 +130859,7 @@ func (c *NodeTypesListCall) Filter(filter string) *NodeTypesListCall { // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *NodeTypesListCall) MaxResults(maxResults int64) *NodeTypesListCall { +func (c *PublicAdvertisedPrefixesListCall) MaxResults(maxResults int64) *PublicAdvertisedPrefixesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -113844,7 +130873,7 @@ func (c *NodeTypesListCall) MaxResults(maxResults int64) *NodeTypesListCall { // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *NodeTypesListCall) OrderBy(orderBy string) *NodeTypesListCall { +func (c *PublicAdvertisedPrefixesListCall) OrderBy(orderBy string) *PublicAdvertisedPrefixesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -113852,7 +130881,7 @@ func (c *NodeTypesListCall) OrderBy(orderBy string) *NodeTypesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
-func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { +func (c *PublicAdvertisedPrefixesListCall) PageToken(pageToken string) *PublicAdvertisedPrefixesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -113861,7 +130890,7 @@ func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *NodeTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTypesListCall { +func (c *PublicAdvertisedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicAdvertisedPrefixesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -113869,7 +130898,7 @@ func (c *NodeTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *Nod // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTypesListCall) Fields(s ...googleapi.Field) *NodeTypesListCall { +func (c *PublicAdvertisedPrefixesListCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -113879,7 +130908,7 @@ func (c *NodeTypesListCall) Fields(s ...googleapi.Field) *NodeTypesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeTypesListCall) IfNoneMatch(entityTag string) *NodeTypesListCall { +func (c *PublicAdvertisedPrefixesListCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesListCall { c.ifNoneMatch_ = entityTag return c } @@ -113887,21 +130916,21 @@ func (c *NodeTypesListCall) IfNoneMatch(entityTag string) *NodeTypesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTypesListCall) Context(ctx context.Context) *NodeTypesListCall { +func (c *PublicAdvertisedPrefixesListCall) Context(ctx context.Context) *PublicAdvertisedPrefixesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTypesListCall) Header() http.Header { +func (c *PublicAdvertisedPrefixesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicAdvertisedPrefixesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -113914,7 +130943,7 @@ func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -113923,38 +130952,37 @@ func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTypes.list" call. -// Exactly one of *NodeTypeList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *NodeTypeList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, error) { +// Do executes the "compute.publicAdvertisedPrefixes.list" call. +// Exactly one of *PublicAdvertisedPrefixList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *PublicAdvertisedPrefixList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefixList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NodeTypeList{ + ret := &PublicAdvertisedPrefixList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -113966,17 +130994,16 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err } return ret, nil // { - // "description": "Retrieves a list of node types available to the specified project.", - // "flatPath": "projects/{project}/zones/{zone}/nodeTypes", + // "description": "Lists the PublicAdvertisedPrefixes for a project.", + // "flatPath": "projects/{project}/global/publicAdvertisedPrefixes", // "httpMethod": "GET", - // "id": "compute.nodeTypes.list", + // "id": "compute.publicAdvertisedPrefixes.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -114009,18 +131036,11 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/nodeTypes", + // "path": "projects/{project}/global/publicAdvertisedPrefixes", // "response": { - // "$ref": "NodeTypeList" + // "$ref": "PublicAdvertisedPrefixList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -114034,7 +131054,7 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NodeTypesListCall) Pages(ctx context.Context, f func(*NodeTypeList) error) error { +func (c *PublicAdvertisedPrefixesListCall) Pages(ctx context.Context, f func(*PublicAdvertisedPrefixList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -114052,9 +131072,188 @@ func (c *NodeTypesListCall) Pages(ctx context.Context, f func(*NodeTypeList) err } } -// method id "compute.packetMirrorings.aggregatedList": +// method id "compute.publicAdvertisedPrefixes.patch": -type PacketMirroringsAggregatedListCall struct { +type PublicAdvertisedPrefixesPatchCall struct { + s *Service + project string + publicAdvertisedPrefix string + publicadvertisedprefix *PublicAdvertisedPrefix + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified Router resource with the data included +// in the request. This method supports PATCH semantics and uses JSON +// merge patch format and processing rules. +// +// - project: Project ID for this request. +// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource +// to patch. +func (r *PublicAdvertisedPrefixesService) Patch(project string, publicAdvertisedPrefix string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesPatchCall { + c := &PublicAdvertisedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.publicAdvertisedPrefix = publicAdvertisedPrefix + c.publicadvertisedprefix = publicadvertisedprefix + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *PublicAdvertisedPrefixesPatchCall) RequestId(requestId string) *PublicAdvertisedPrefixesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *PublicAdvertisedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PublicAdvertisedPrefixesPatchCall) Context(ctx context.Context) *PublicAdvertisedPrefixesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PublicAdvertisedPrefixesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *PublicAdvertisedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "publicAdvertisedPrefix": c.publicAdvertisedPrefix, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.publicAdvertisedPrefixes.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified Router resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + // "httpMethod": "PATCH", + // "id": "compute.publicAdvertisedPrefixes.patch", + // "parameterOrder": [ + // "project", + // "publicAdvertisedPrefix" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "publicAdvertisedPrefix": { + // "description": "Name of the PublicAdvertisedPrefix resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + // "request": { + // "$ref": "PublicAdvertisedPrefix" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.publicDelegatedPrefixes.aggregatedList": + +type PublicDelegatedPrefixesAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -114063,29 +131262,33 @@ type PacketMirroringsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of packetMirrorings. +// AggregatedList: Lists all PublicDelegatedPrefix resources owned by +// the specific project across all scopes. // -// - project: Project ID for this request. -func (r *PacketMirroringsService) AggregatedList(project string) *PacketMirroringsAggregatedListCall { - c := &PacketMirroringsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Name of the project scoping this request. +func (r *PublicDelegatedPrefixesService) AggregatedList(project string) *PublicDelegatedPrefixesAggregatedListCall { + c := &PublicDelegatedPrefixesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. 
For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -114093,8 +131296,18 @@ func (r *PacketMirroringsService) AggregatedList(project string) *PacketMirrorin // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *PacketMirroringsAggregatedListCall) Filter(filter string) *PacketMirroringsAggregatedListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *PublicDelegatedPrefixesAggregatedListCall) Filter(filter string) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -114107,7 +131320,7 @@ func (c *PacketMirroringsAggregatedListCall) Filter(filter string) *PacketMirror // response. 
For resource types which predate this field, if this flag // is omitted or false, only scopes of the scope types where the // resource type is expected to be found will be included. -func (c *PacketMirroringsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *PacketMirroringsAggregatedListCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) return c } @@ -114118,7 +131331,7 @@ func (c *PacketMirroringsAggregatedListCall) IncludeAllScopes(includeAllScopes b // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *PacketMirroringsAggregatedListCall) MaxResults(maxResults int64) *PacketMirroringsAggregatedListCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -114132,7 +131345,7 @@ func (c *PacketMirroringsAggregatedListCall) MaxResults(maxResults int64) *Packe // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *PacketMirroringsAggregatedListCall) OrderBy(orderBy string) *PacketMirroringsAggregatedListCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -114140,7 +131353,7 @@ func (c *PacketMirroringsAggregatedListCall) OrderBy(orderBy string) *PacketMirr // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *PacketMirroringsAggregatedListCall) PageToken(pageToken string) *PacketMirroringsAggregatedListCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) PageToken(pageToken string) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -114149,7 +131362,7 @@ func (c *PacketMirroringsAggregatedListCall) PageToken(pageToken string) *Packet // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *PacketMirroringsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PacketMirroringsAggregatedListCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -114157,7 +131370,7 @@ func (c *PacketMirroringsAggregatedListCall) ReturnPartialSuccess(returnPartialS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *PacketMirroringsAggregatedListCall) Fields(s ...googleapi.Field) *PacketMirroringsAggregatedListCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -114167,7 +131380,7 @@ func (c *PacketMirroringsAggregatedListCall) Fields(s ...googleapi.Field) *Packe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *PacketMirroringsAggregatedListCall) IfNoneMatch(entityTag string) *PacketMirroringsAggregatedListCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -114175,21 +131388,21 @@ func (c *PacketMirroringsAggregatedListCall) IfNoneMatch(entityTag string) *Pack // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PacketMirroringsAggregatedListCall) Context(ctx context.Context) *PacketMirroringsAggregatedListCall { +func (c *PublicDelegatedPrefixesAggregatedListCall) Context(ctx context.Context) *PublicDelegatedPrefixesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PacketMirroringsAggregatedListCall) Header() http.Header { +func (c *PublicDelegatedPrefixesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PacketMirroringsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicDelegatedPrefixesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -114202,7 +131415,7 @@ func (c *PacketMirroringsAggregatedListCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/packetMirrorings") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/publicDelegatedPrefixes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -114215,33 +131428,34 @@ func (c *PacketMirroringsAggregatedListCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.packetMirrorings.aggregatedList" call. -// Exactly one of *PacketMirroringAggregatedList or error will be +// Do executes the "compute.publicDelegatedPrefixes.aggregatedList" call. +// Exactly one of *PublicDelegatedPrefixAggregatedList or error will be // non-nil. Any non-2xx status code is an error. Response headers are in -// either *PacketMirroringAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (*PacketMirroringAggregatedList, error) { +// either *PublicDelegatedPrefixAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &PacketMirroringAggregatedList{ + ret := &PublicDelegatedPrefixAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -114253,16 +131467,16 @@ func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of packetMirrorings.", - // "flatPath": "projects/{project}/aggregated/packetMirrorings", + // "description": "Lists all PublicDelegatedPrefix resources owned by the specific project across all scopes.", + // "flatPath": "projects/{project}/aggregated/publicDelegatedPrefixes", // "httpMethod": "GET", - // "id": "compute.packetMirrorings.aggregatedList", + // "id": "compute.publicDelegatedPrefixes.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -114290,7 +131504,7 @@ func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "Name of the project scoping this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, @@ -114302,9 +131516,9 @@ func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/packetMirrorings", + // "path": "projects/{project}/aggregated/publicDelegatedPrefixes", // "response": { - // "$ref": "PacketMirroringAggregatedList" + // "$ref": "PublicDelegatedPrefixAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -114318,7 +131532,7 @@ func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
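(Aside, not part of the vendored diff: a minimal sketch of how the Pages helper documented above is typically driven. It assumes google.golang.org/api/compute/v1 is imported as compute, that Application Default Credentials are available, and that "my-project" is a placeholder project ID.)

package main

import (
    "context"
    "fmt"
    "log"

    compute "google.golang.org/api/compute/v1"
)

func main() {
    ctx := context.Background()
    // NewService picks up Application Default Credentials.
    svc, err := compute.NewService(ctx)
    if err != nil {
        log.Fatal(err)
    }
    // Visit every page of the aggregated listing; returning a non-nil
    // error from the callback halts the iteration, as documented above.
    err = svc.PublicDelegatedPrefixes.AggregatedList("my-project").
        Pages(ctx, func(page *compute.PublicDelegatedPrefixAggregatedList) error {
            for scope := range page.Items {
                fmt.Println("scope:", scope)
            }
            return nil
        })
    if err != nil {
        log.Fatal(err)
    }
}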
-func (c *PacketMirroringsAggregatedListCall) Pages(ctx context.Context, f func(*PacketMirroringAggregatedList) error) error { +func (c *PublicDelegatedPrefixesAggregatedListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -114336,28 +131550,30 @@ func (c *PacketMirroringsAggregatedListCall) Pages(ctx context.Context, f func(* } } -// method id "compute.packetMirrorings.delete": +// method id "compute.publicDelegatedPrefixes.delete": -type PacketMirroringsDeleteCall struct { - s *Service - project string - region string - packetMirroring string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type PublicDelegatedPrefixesDeleteCall struct { + s *Service + project string + region string + publicDelegatedPrefix string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified PacketMirroring resource. +// Delete: Deletes the specified PublicDelegatedPrefix in the given +// region. // -// - packetMirroring: Name of the PacketMirroring resource to delete. -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *PacketMirroringsService) Delete(project string, region string, packetMirroring string) *PacketMirroringsDeleteCall { - c := &PacketMirroringsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to delete. +// - region: Name of the region of this request. +func (r *PublicDelegatedPrefixesService) Delete(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesDeleteCall { + c := &PublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.packetMirroring = packetMirroring + c.publicDelegatedPrefix = publicDelegatedPrefix return c } @@ -114372,7 +131588,7 @@ func (r *PacketMirroringsService) Delete(project string, region string, packetMi // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *PacketMirroringsDeleteCall) RequestId(requestId string) *PacketMirroringsDeleteCall { +func (c *PublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *PublicDelegatedPrefixesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -114380,7 +131596,7 @@ func (c *PacketMirroringsDeleteCall) RequestId(requestId string) *PacketMirrorin // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PacketMirroringsDeleteCall) Fields(s ...googleapi.Field) *PacketMirroringsDeleteCall { +func (c *PublicDelegatedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -114388,21 +131604,21 @@ func (c *PacketMirroringsDeleteCall) Fields(s ...googleapi.Field) *PacketMirrori // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *PacketMirroringsDeleteCall) Context(ctx context.Context) *PacketMirroringsDeleteCall { +func (c *PublicDelegatedPrefixesDeleteCall) Context(ctx context.Context) *PublicDelegatedPrefixesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PacketMirroringsDeleteCall) Header() http.Header { +func (c *PublicDelegatedPrefixesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PacketMirroringsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -114412,7 +131628,7 @@ func (c *PacketMirroringsDeleteCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -114420,38 +131636,38 @@ func (c *PacketMirroringsDeleteCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "packetMirroring": c.packetMirroring, + "project": c.project, + "region": c.region, + "publicDelegatedPrefix": c.publicDelegatedPrefix, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.packetMirrorings.delete" call. +// Do executes the "compute.publicDelegatedPrefixes.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *PacketMirroringsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -114465,32 +131681,32 @@ func (c *PacketMirroringsDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified PacketMirroring resource.", - // "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", + // "description": "Deletes the specified PublicDelegatedPrefix in the given region.", + // "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "httpMethod": "DELETE", - // "id": "compute.packetMirrorings.delete", + // "id": "compute.publicDelegatedPrefixes.delete", // "parameterOrder": [ // "project", // "region", - // "packetMirroring" + // "publicDelegatedPrefix" // ], // "parameters": { - // "packetMirroring": { - // "description": "Name of the PacketMirroring resource to delete.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "publicDelegatedPrefix": { + // "description": "Name of the PublicDelegatedPrefix resource to delete.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region of this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -114502,7 +131718,7 @@ func (c *PacketMirroringsDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", + // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "response": { // "$ref": "Operation" // }, @@ -114514,36 +131730,38 @@ func (c *PacketMirroringsDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.packetMirrorings.get": +// method id "compute.publicDelegatedPrefixes.get": -type PacketMirroringsGetCall struct { - s *Service - project string - region string - packetMirroring string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type PublicDelegatedPrefixesGetCall struct { + s *Service + project string + region string + publicDelegatedPrefix string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified PacketMirroring resource. +// Get: Returns the specified PublicDelegatedPrefix resource in the +// given region. 
// -// - packetMirroring: Name of the PacketMirroring resource to return. -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *PacketMirroringsService) Get(project string, region string, packetMirroring string) *PacketMirroringsGetCall { - c := &PacketMirroringsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to return. +// - region: Name of the region of this request. +func (r *PublicDelegatedPrefixesService) Get(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesGetCall { + c := &PublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.packetMirroring = packetMirroring + c.publicDelegatedPrefix = publicDelegatedPrefix return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PacketMirroringsGetCall) Fields(s ...googleapi.Field) *PacketMirroringsGetCall { +func (c *PublicDelegatedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -114553,7 +131771,7 @@ func (c *PacketMirroringsGetCall) Fields(s ...googleapi.Field) *PacketMirrorings // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *PacketMirroringsGetCall) IfNoneMatch(entityTag string) *PacketMirroringsGetCall { +func (c *PublicDelegatedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -114561,21 +131779,21 @@ func (c *PacketMirroringsGetCall) IfNoneMatch(entityTag string) *PacketMirroring // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PacketMirroringsGetCall) Context(ctx context.Context) *PacketMirroringsGetCall { +func (c *PublicDelegatedPrefixesGetCall) Context(ctx context.Context) *PublicDelegatedPrefixesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PacketMirroringsGetCall) Header() http.Header { +func (c *PublicDelegatedPrefixesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PacketMirroringsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -114588,7 +131806,7 @@ func (c *PacketMirroringsGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -114596,40 +131814,40 @@ func (c *PacketMirroringsGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "packetMirroring": c.packetMirroring, + "project": c.project, + "region": c.region, + "publicDelegatedPrefix": c.publicDelegatedPrefix, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.packetMirrorings.get" call. -// Exactly one of *PacketMirroring or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *PacketMirroring.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.publicDelegatedPrefixes.get" call. +// Exactly one of *PublicDelegatedPrefix or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *PublicDelegatedPrefix.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *PacketMirroringsGetCall) Do(opts ...googleapi.CallOption) (*PacketMirroring, error) { +func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefix, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &PacketMirroring{ + ret := &PublicDelegatedPrefix{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -114641,41 +131859,41 @@ func (c *PacketMirroringsGetCall) Do(opts ...googleapi.CallOption) (*PacketMirro } return ret, nil // { - // "description": "Returns the specified PacketMirroring resource.", - // "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", + // "description": "Returns the specified PublicDelegatedPrefix resource in the given region.", + // "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "httpMethod": "GET", - // "id": "compute.packetMirrorings.get", + // "id": "compute.publicDelegatedPrefixes.get", // "parameterOrder": [ // "project", // "region", - // "packetMirroring" + // "publicDelegatedPrefix" // ], // "parameters": { - // "packetMirroring": { - // "description": "Name of the PacketMirroring resource to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "publicDelegatedPrefix": { + // "description": "Name of the PublicDelegatedPrefix resource to return.", // "location": "path", - // "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region of this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", + // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", // "response": { - // "$ref": "PacketMirroring" + // "$ref": "PublicDelegatedPrefix" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -114686,28 +131904,29 @@ func (c *PacketMirroringsGetCall) Do(opts ...googleapi.CallOption) (*PacketMirro } -// method id "compute.packetMirrorings.insert": +// method id "compute.publicDelegatedPrefixes.insert": -type PacketMirroringsInsertCall struct { - s *Service - project string - region string - packetmirroring *PacketMirroring - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type PublicDelegatedPrefixesInsertCall struct { + s *Service + project string + region string + publicdelegatedprefix *PublicDelegatedPrefix + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a PacketMirroring resource in the specified project -// and region using the data included in the request. +// Insert: Creates a PublicDelegatedPrefix in the specified project in +// the given region using the parameters that are included in the +// request. // // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *PacketMirroringsService) Insert(project string, region string, packetmirroring *PacketMirroring) *PacketMirroringsInsertCall { - c := &PacketMirroringsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region of this request. +func (r *PublicDelegatedPrefixesService) Insert(project string, region string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesInsertCall { + c := &PublicDelegatedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.packetmirroring = packetmirroring + c.publicdelegatedprefix = publicdelegatedprefix return c } @@ -114722,7 +131941,7 @@ func (r *PacketMirroringsService) Insert(project string, region string, packetmi // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *PacketMirroringsInsertCall) RequestId(requestId string) *PacketMirroringsInsertCall { +func (c *PublicDelegatedPrefixesInsertCall) RequestId(requestId string) *PublicDelegatedPrefixesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -114730,7 +131949,7 @@ func (c *PacketMirroringsInsertCall) RequestId(requestId string) *PacketMirrorin // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *PacketMirroringsInsertCall) Fields(s ...googleapi.Field) *PacketMirroringsInsertCall { +func (c *PublicDelegatedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -114738,21 +131957,21 @@ func (c *PacketMirroringsInsertCall) Fields(s ...googleapi.Field) *PacketMirrori // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PacketMirroringsInsertCall) Context(ctx context.Context) *PacketMirroringsInsertCall { +func (c *PublicDelegatedPrefixesInsertCall) Context(ctx context.Context) *PublicDelegatedPrefixesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PacketMirroringsInsertCall) Header() http.Header { +func (c *PublicDelegatedPrefixesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PacketMirroringsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -114760,14 +131979,14 @@ func (c *PacketMirroringsInsertCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.packetmirroring) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -114781,31 +132000,31 @@ func (c *PacketMirroringsInsertCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.packetMirrorings.insert" call. +// Do executes the "compute.publicDelegatedPrefixes.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *PacketMirroringsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -114819,10 +132038,10 @@ func (c *PacketMirroringsInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a PacketMirroring resource in the specified project and region using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/packetMirrorings", + // "description": "Creates a PublicDelegatedPrefix in the specified project in the given region using the parameters that are included in the request.", + // "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes", // "httpMethod": "POST", - // "id": "compute.packetMirrorings.insert", + // "id": "compute.publicDelegatedPrefixes.insert", // "parameterOrder": [ // "project", // "region" @@ -114836,7 +132055,7 @@ func (c *PacketMirroringsInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region of this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -114848,9 +132067,9 @@ func (c *PacketMirroringsInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/packetMirrorings", + // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes", // "request": { - // "$ref": "PacketMirroring" + // "$ref": "PublicDelegatedPrefix" // }, // "response": { // "$ref": "Operation" @@ -114863,9 +132082,9 @@ func (c *PacketMirroringsInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.packetMirrorings.list": +// method id "compute.publicDelegatedPrefixes.list": -type PacketMirroringsListCall struct { +type PublicDelegatedPrefixesListCall struct { s *Service project string region string @@ -114875,32 +132094,35 @@ type PacketMirroringsListCall struct { header_ http.Header } -// List: Retrieves a list of PacketMirroring resources available to the -// specified project and region. +// List: Lists the PublicDelegatedPrefixes for a project in the given +// region. // // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *PacketMirroringsService) List(project string, region string) *PacketMirroringsListCall { - c := &PacketMirroringsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region of this request. +func (r *PublicDelegatedPrefixesService) List(project string, region string) *PublicDelegatedPrefixesListCall { + c := &PublicDelegatedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. 
The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -114908,8 +132130,18 @@ func (r *PacketMirroringsService) List(project string, region string) *PacketMir // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *PacketMirroringsListCall) Filter(filter string) *PacketMirroringsListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. 
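(A small follow-on sketch, again not part of the vendored diff and under the same assumptions as the snippet above — an already-constructed *compute.Service plus the context and fmt imports — showing the regional List call combined with the `ne` regular-expression filter described in the doc comment; the filter value and the project/region parameters are illustrative placeholders.)

// listNonTestPrefixes prints PublicDelegatedPrefixes in one region whose
// names do not end in "test", using the `ne` operator with an RE2 pattern
// that must match the entire field.
func listNonTestPrefixes(ctx context.Context, svc *compute.Service, project, region string) error {
    return svc.PublicDelegatedPrefixes.List(project, region).
        Filter(`name ne .*test`).
        MaxResults(100).
        Pages(ctx, func(page *compute.PublicDelegatedPrefixList) error {
            for _, pdp := range page.Items {
                fmt.Println(pdp.Name)
            }
            return nil
        })
}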
+func (c *PublicDelegatedPrefixesListCall) Filter(filter string) *PublicDelegatedPrefixesListCall { c.urlParams_.Set("filter", filter) return c } @@ -114920,7 +132152,7 @@ func (c *PacketMirroringsListCall) Filter(filter string) *PacketMirroringsListCa // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *PacketMirroringsListCall) MaxResults(maxResults int64) *PacketMirroringsListCall { +func (c *PublicDelegatedPrefixesListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -114934,7 +132166,7 @@ func (c *PacketMirroringsListCall) MaxResults(maxResults int64) *PacketMirroring // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *PacketMirroringsListCall) OrderBy(orderBy string) *PacketMirroringsListCall { +func (c *PublicDelegatedPrefixesListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -114942,7 +132174,7 @@ func (c *PacketMirroringsListCall) OrderBy(orderBy string) *PacketMirroringsList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *PacketMirroringsListCall) PageToken(pageToken string) *PacketMirroringsListCall { +func (c *PublicDelegatedPrefixesListCall) PageToken(pageToken string) *PublicDelegatedPrefixesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -114951,7 +132183,7 @@ func (c *PacketMirroringsListCall) PageToken(pageToken string) *PacketMirrorings // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *PacketMirroringsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PacketMirroringsListCall { +func (c *PublicDelegatedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -114959,7 +132191,7 @@ func (c *PacketMirroringsListCall) ReturnPartialSuccess(returnPartialSuccess boo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PacketMirroringsListCall) Fields(s ...googleapi.Field) *PacketMirroringsListCall { +func (c *PublicDelegatedPrefixesListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -114969,7 +132201,7 @@ func (c *PacketMirroringsListCall) Fields(s ...googleapi.Field) *PacketMirroring // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *PacketMirroringsListCall) IfNoneMatch(entityTag string) *PacketMirroringsListCall { +func (c *PublicDelegatedPrefixesListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesListCall { c.ifNoneMatch_ = entityTag return c } @@ -114977,21 +132209,21 @@ func (c *PacketMirroringsListCall) IfNoneMatch(entityTag string) *PacketMirrorin // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PacketMirroringsListCall) Context(ctx context.Context) *PacketMirroringsListCall { +func (c *PublicDelegatedPrefixesListCall) Context(ctx context.Context) *PublicDelegatedPrefixesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PacketMirroringsListCall) Header() http.Header { +func (c *PublicDelegatedPrefixesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PacketMirroringsListCall) doRequest(alt string) (*http.Response, error) { +func (c *PublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -115004,7 +132236,7 @@ func (c *PacketMirroringsListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -115018,33 +132250,258 @@ func (c *PacketMirroringsListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.packetMirrorings.list" call. -// Exactly one of *PacketMirroringList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *PacketMirroringList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirroringList, error) { +// Do executes the "compute.publicDelegatedPrefixes.list" call. +// Exactly one of *PublicDelegatedPrefixList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *PublicDelegatedPrefixList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &PublicDelegatedPrefixList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the PublicDelegatedPrefixes for a project in the given region.", + // "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes", + // "httpMethod": "GET", + // "id": "compute.publicDelegatedPrefixes.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region of this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes", + // "response": { + // "$ref": "PublicDelegatedPrefixList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *PublicDelegatedPrefixesListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.publicDelegatedPrefixes.patch": + +type PublicDelegatedPrefixesPatchCall struct { + s *Service + project string + region string + publicDelegatedPrefix string + publicdelegatedprefix *PublicDelegatedPrefix + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified PublicDelegatedPrefix resource with the +// data included in the request. This method supports PATCH semantics +// and uses JSON merge patch format and processing rules. +// +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to patch. 
+// - region: Name of the region for this request. +func (r *PublicDelegatedPrefixesService) Patch(project string, region string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesPatchCall { + c := &PublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.publicDelegatedPrefix = publicDelegatedPrefix + c.publicdelegatedprefix = publicdelegatedprefix + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *PublicDelegatedPrefixesPatchCall) RequestId(requestId string) *PublicDelegatedPrefixesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PublicDelegatedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PublicDelegatedPrefixesPatchCall) Context(ctx context.Context) *PublicDelegatedPrefixesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PublicDelegatedPrefixesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *PublicDelegatedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "publicDelegatedPrefix": c.publicDelegatedPrefix, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.publicDelegatedPrefixes.patch" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &PacketMirroringList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -115056,38 +132513,16 @@ func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirr } return ret, nil // { - // "description": "Retrieves a list of PacketMirroring resources available to the specified project and region.", - // "flatPath": "projects/{project}/regions/{region}/packetMirrorings", - // "httpMethod": "GET", - // "id": "compute.packetMirrorings.list", + // "description": "Patches the specified PublicDelegatedPrefix resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", + // "httpMethod": "PATCH", + // "id": "compute.publicDelegatedPrefixes.patch", // "parameterOrder": [ // "project", - // "region" + // "region", + // "publicDelegatedPrefix" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -115095,6 +132530,13 @@ func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirr // "required": true, // "type": "string" // }, + // "publicDelegatedPrefix": { + // "description": "Name of the PublicDelegatedPrefix resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "region": { // "description": "Name of the region for this request.", // "location": "path", @@ -115102,72 +132544,49 @@ func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirr // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/packetMirrorings", + // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", + // "request": { + // "$ref": "PublicDelegatedPrefix" + // }, // "response": { - // "$ref": "PacketMirroringList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *PacketMirroringsListCall) Pages(ctx context.Context, f func(*PacketMirroringList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.packetMirrorings.patch": +// method id "compute.regionAutoscalers.delete": -type PacketMirroringsPatchCall struct { - s *Service - project string - region string - packetMirroring string - packetmirroring *PacketMirroring - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersDeleteCall struct { + s *Service + project string + region string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified PacketMirroring resource with the data -// included in the request. This method supports PATCH semantics and -// uses JSON merge patch format and processing rules. +// Delete: Deletes the specified autoscaler. // -// - packetMirroring: Name of the PacketMirroring resource to patch. +// - autoscaler: Name of the autoscaler to delete. // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *PacketMirroringsService) Patch(project string, region string, packetMirroring string, packetmirroring *PacketMirroring) *PacketMirroringsPatchCall { - c := &PacketMirroringsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { + c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.packetMirroring = packetMirroring - c.packetmirroring = packetmirroring + c.autoscaler = autoscaler return c } @@ -115182,7 +132601,7 @@ func (r *PacketMirroringsService) Patch(project string, region string, packetMir // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *PacketMirroringsPatchCall) RequestId(requestId string) *PacketMirroringsPatchCall { +func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -115190,7 +132609,7 @@ func (c *PacketMirroringsPatchCall) RequestId(requestId string) *PacketMirroring // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PacketMirroringsPatchCall) Fields(s ...googleapi.Field) *PacketMirroringsPatchCall { +func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -115198,21 +132617,21 @@ func (c *PacketMirroringsPatchCall) Fields(s ...googleapi.Field) *PacketMirrorin // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PacketMirroringsPatchCall) Context(ctx context.Context) *PacketMirroringsPatchCall { +func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PacketMirroringsPatchCall) Header() http.Header { +func (c *RegionAutoscalersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PacketMirroringsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -115220,53 +132639,48 @@ func (c *PacketMirroringsPatchCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.packetmirroring) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "packetMirroring": c.packetMirroring, + "project": c.project, + "region": c.region, + "autoscaler": c.autoscaler, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.packetMirrorings.patch" call. +// Do executes the "compute.regionAutoscalers.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -115280,18 +132694,18 @@ func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Patches the specified PacketMirroring resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", - // "httpMethod": "PATCH", - // "id": "compute.packetMirrorings.patch", + // "description": "Deletes the specified autoscaler.", + // "flatPath": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", + // "httpMethod": "DELETE", + // "id": "compute.regionAutoscalers.delete", // "parameterOrder": [ // "project", // "region", - // "packetMirroring" + // "autoscaler" // ], // "parameters": { - // "packetMirroring": { - // "description": "Name of the PacketMirroring resource to patch.", + // "autoscaler": { + // "description": "Name of the autoscaler to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -115305,7 +132719,7 @@ func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -115317,10 +132731,7 @@ func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/packetMirrorings/{packetMirroring}", - // "request": { - // "$ref": "PacketMirroring" - // }, + // "path": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", // "response": { // "$ref": "Operation" // }, @@ -115332,116 +132743,122 @@ func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.packetMirrorings.testIamPermissions": +// method id "compute.regionAutoscalers.get": -type PacketMirroringsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersGetCall struct { + s *Service + project string + region string + autoscaler string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// Get: Returns the specified autoscaler. // +// - autoscaler: Name of the autoscaler to return. // - project: Project ID for this request. 
-// - region: The name of the region for this request. -// - resource: Name or id of the resource for this request. -func (r *PacketMirroringsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *PacketMirroringsTestIamPermissionsCall { - c := &PacketMirroringsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { + c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.autoscaler = autoscaler return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PacketMirroringsTestIamPermissionsCall) Fields(s ...googleapi.Field) *PacketMirroringsTestIamPermissionsCall { +func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PacketMirroringsTestIamPermissionsCall) Context(ctx context.Context) *PacketMirroringsTestIamPermissionsCall { +func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PacketMirroringsTestIamPermissionsCall) Header() http.Header { +func (c *RegionAutoscalersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PacketMirroringsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "autoscaler": c.autoscaler, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.packetMirrorings.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *PacketMirroringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.regionAutoscalers.get" call. +// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Autoscaler.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TestPermissionsResponse{ + ret := &Autoscaler{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -115453,16 +132870,23 @@ func (c *PacketMirroringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "flatPath": "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions", - // "httpMethod": "POST", - // "id": "compute.packetMirrorings.testIamPermissions", + // "description": "Returns the specified autoscaler.", + // "flatPath": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", + // "httpMethod": "GET", + // "id": "compute.regionAutoscalers.get", // "parameterOrder": [ // "project", // "region", - // "resource" + // "autoscaler" // ], // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -115471,26 +132895,16 @@ func (c *PacketMirroringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Autoscaler" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -115501,22 +132915,28 @@ func (c *PacketMirroringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption } -// method id "compute.projects.disableXpnHost": +// method id "compute.regionAutoscalers.insert": -type ProjectsDisableXpnHostCall struct { +type RegionAutoscalersInsertCall struct { s *Service project string + region string + autoscaler *Autoscaler urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// DisableXpnHost: Disable this project as a shared VPC host project. +// Insert: Creates an autoscaler in the specified project using the data +// included in the request. // // - project: Project ID for this request. -func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { - c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { + c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.autoscaler = autoscaler return c } @@ -115531,7 +132951,7 @@ func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHost // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisableXpnHostCall { +func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -115539,7 +132959,7 @@ func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisabl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnHostCall { +func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -115547,21 +132967,21 @@ func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisab // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsDisableXpnHostCall) Context(ctx context.Context) *ProjectsDisableXpnHostCall { +func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsDisableXpnHostCall) Header() http.Header { +func (c *RegionAutoscalersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -115569,9 +132989,14 @@ func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/disableXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -115580,35 +133005,36 @@ func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, erro req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.disableXpnHost" call. +// Do executes the "compute.regionAutoscalers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -115622,12 +133048,13 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Disable this project as a shared VPC host project.", - // "flatPath": "projects/{project}/disableXpnHost", + // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/autoscalers", // "httpMethod": "POST", - // "id": "compute.projects.disableXpnHost", + // "id": "compute.regionAutoscalers.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "project": { @@ -115637,13 +133064,23 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/disableXpnHost", + // "path": "projects/{project}/regions/{region}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, // "response": { // "$ref": "Operation" // }, @@ -115655,124 +133092,201 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.projects.disableXpnResource": +// method id "compute.regionAutoscalers.list": -type ProjectsDisableXpnResourceCall struct { - s *Service - project string - projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DisableXpnResource: Disable a service resource (also known as service -// project) associated with this host project. +// List: Retrieves a list of autoscalers contained within the specified +// region. // // - project: Project ID for this request. 
-func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { - c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { + c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectsdisablexpnresourcerequest = projectsdisablexpnresourcerequest + c.region = region return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDisableXpnResourceCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionAutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionAutoscalersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnResourceCall { +func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsDisableXpnResourceCall) Context(ctx context.Context) *ProjectsDisableXpnResourceCall { +func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsDisableXpnResourceCall) Header() http.Header { +func (c *RegionAutoscalersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsdisablexpnresourcerequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/disableXpnResource") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.disableXpnResource" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionAutoscalers.list" call. +// Exactly one of *RegionAutoscalerList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RegionAutoscalerList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &RegionAutoscalerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -115784,14 +133298,38 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Disable a service resource (also known as service project) associated with this host project.", - // "flatPath": "projects/{project}/disableXpnResource", - // "httpMethod": "POST", - // "id": "compute.projects.disableXpnResource", + // "description": "Retrieves a list of autoscalers contained within the specified region.", + // "flatPath": "projects/{project}/regions/{region}/autoscalers", + // "httpMethod": "GET", + // "id": "compute.regionAutoscalers.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -115799,43 +133337,83 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/disableXpnResource", - // "request": { - // "$ref": "ProjectsDisableXpnResourceRequest" - // }, + // "path": "projects/{project}/regions/{region}/autoscalers", // "response": { - // "$ref": "Operation" + // "$ref": "RegionAutoscalerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.enableXpnHost": +// Pages invokes f for each page of results. 
+// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsEnableXpnHostCall struct { +// method id "compute.regionAutoscalers.patch": + +type RegionAutoscalersPatchCall struct { s *Service project string + region string + autoscaler *Autoscaler urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// EnableXpnHost: Enable this project as a shared VPC host project. +// Patch: Updates an autoscaler in the specified project using the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. // // - project: Project ID for this request. -func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { - c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { + c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to patch. +func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall { + c.urlParams_.Set("autoscaler", autoscaler) return c } @@ -115850,7 +133428,7 @@ func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCa // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableXpnHostCall { +func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -115858,7 +133436,7 @@ func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableX // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnHostCall { +func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -115866,21 +133444,21 @@ func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnable // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsEnableXpnHostCall) Context(ctx context.Context) *ProjectsEnableXpnHostCall { +func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
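// The Pages helper added above drives the paginated compute.regionAutoscalers.list
// call: it re-issues Do with each NextPageToken and stops when the token is empty or
// the callback returns a non-nil error. A minimal usage sketch, not part of the
// generated code, assuming the v1 client at google.golang.org/api/compute/v1 with its
// usual NewService constructor, a RegionAutoscalers.List(project, region) method, an
// Items slice on RegionAutoscalerList, and placeholder project/region names:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// Returning a non-nil error from the callback halts the iteration early.
	err = svc.RegionAutoscalers.List("my-project", "us-central1").
		Pages(ctx, func(page *compute.RegionAutoscalerList) error {
			for _, a := range page.Items {
				fmt.Println(a.Name)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}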
-func (c *ProjectsEnableXpnHostCall) Header() http.Header { +func (c *RegionAutoscalersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -115888,46 +133466,52 @@ func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/enableXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.enableXpnHost" call. +// Do executes the "compute.regionAutoscalers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -115941,14 +133525,21 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Enable this project as a shared VPC host project.", - // "flatPath": "projects/{project}/enableXpnHost", - // "httpMethod": "POST", - // "id": "compute.projects.enableXpnHost", + // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/regions/{region}/autoscalers", + // "httpMethod": "PATCH", + // "id": "compute.regionAutoscalers.patch", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to patch.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -115956,13 +133547,23 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/enableXpnHost", + // "path": "projects/{project}/regions/{region}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, // "response": { // "$ref": "Operation" // }, @@ -115974,26 +133575,35 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.projects.enableXpnResource": +// method id "compute.regionAutoscalers.update": -type ProjectsEnableXpnResourceCall struct { - s *Service - project string - projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersUpdateCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// EnableXpnResource: Enable service resource (a.k.a service project) -// for a host project, so that subnets in the host project can be used -// by instances in the service project. +// Update: Updates an autoscaler in the specified project using the data +// included in the request. // // - project: Project ID for this request. -func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { - c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. 
+func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { + c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectsenablexpnresourcerequest = projectsenablexpnresourcerequest + c.region = region + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to update. +func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) return c } @@ -116008,7 +133618,7 @@ func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnres // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEnableXpnResourceCall { +func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -116016,7 +133626,7 @@ func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEna // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnResourceCall { +func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -116024,21 +133634,21 @@ func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEn // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsEnableXpnResourceCall) Context(ctx context.Context) *ProjectsEnableXpnResourceCall { +func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsEnableXpnResourceCall) Header() http.Header { +func (c *RegionAutoscalersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -116046,51 +133656,52 @@ func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsenablexpnresourcerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/enableXpnResource") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.enableXpnResource" call. +// Do executes the "compute.regionAutoscalers.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -116104,14 +133715,21 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", - // "flatPath": "projects/{project}/enableXpnResource", - // "httpMethod": "POST", - // "id": "compute.projects.enableXpnResource", + // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/autoscalers", + // "httpMethod": "PUT", + // "id": "compute.regionAutoscalers.update", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -116119,15 +133737,22 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/enableXpnResource", + // "path": "projects/{project}/regions/{region}/autoscalers", // "request": { - // "$ref": "ProjectsEnableXpnResourceRequest" + // "$ref": "Autoscaler" // }, // "response": { // "$ref": "Operation" @@ -116140,121 +133765,124 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.projects.get": +// method id "compute.regionBackendServices.delete": -type ProjectsGetCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionBackendServicesDeleteCall struct { + s *Service + project string + region string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Project resource. To decrease latency for -// this method, you can optionally omit any unneeded information from -// the response by using a field mask. This practice is especially -// recommended for unused quota information (the `quotas` field). To -// exclude one or more fields, set your request's `fields` query -// parameter to only include the fields you need. For example, to only -// include the `id` and `selfLink` fields, add the query parameter -// `?fields=id,selfLink` to your request. +// Delete: Deletes the specified regional BackendService resource. // +// - backendService: Name of the BackendService resource to delete. // - project: Project ID for this request. -func (r *ProjectsService) Get(project string) *ProjectsGetCall { - c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { + c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.backendService = backendService + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBackendServicesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
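// The two autoscaler calls above differ only in HTTP semantics: compute.regionAutoscalers.patch
// sends a PATCH using JSON merge-patch rules, while compute.regionAutoscalers.update sends a PUT.
// In both, the autoscaler name is passed as the optional "autoscaler" query parameter via the
// generated Autoscaler setter, and RequestId makes retries idempotent. A minimal sketch, not part
// of the generated code, assuming google.golang.org/api/compute/v1 and placeholder names:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// PATCH: only the fields set on the request body are merged into the autoscaler.
	op, err := svc.RegionAutoscalers.
		Patch("my-project", "us-central1", &compute.Autoscaler{Description: "patched via merge patch"}).
		Autoscaler("web-autoscaler").                        // name of the autoscaler to patch (query parameter)
		RequestId("6ba7b810-9dad-11d1-80b4-00c04fd430c8").   // optional; any valid, unique, non-zero UUID
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, op.Status)
}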
-func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { +func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBackendServicesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { +func (c *RegionBackendServicesDeleteCall) Context(ctx context.Context) *RegionBackendServicesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGetCall) Header() http.Header { +func (c *RegionBackendServicesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.get" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "compute.regionBackendServices.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Project{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -116266,60 +133894,85 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } return ret, nil // { - // "description": "Returns the specified Project resource. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", - // "flatPath": "projects/{project}", - // "httpMethod": "GET", - // "id": "compute.projects.get", + // "description": "Deletes the specified regional BackendService resource.", + // "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", + // "httpMethod": "DELETE", + // "id": "compute.regionBackendServices.delete", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "backendService" // ], // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}", + // "path": "projects/{project}/regions/{region}/backendServices/{backendService}", // "response": { - // "$ref": "Project" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.projects.getXpnHost": +// method id "compute.regionBackendServices.get": -type ProjectsGetXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionBackendServicesGetCall struct { + s *Service + project string + region string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// GetXpnHost: Gets the shared VPC host project that this project links -// to. May be empty if no link exists. +// Get: Returns the specified regional BackendService resource. // +// - backendService: Name of the BackendService resource to return. // - project: Project ID for this request. -func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { - c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { + c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.backendService = backendService return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHostCall { +func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -116329,7 +133982,7 @@ func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHos // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHostCall { +func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -116337,21 +133990,21 @@ func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetXpnHostCall) Context(ctx context.Context) *ProjectsGetXpnHostCall { +func (c *RegionBackendServicesGetCall) Context(ctx context.Context) *RegionBackendServicesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsGetXpnHostCall) Header() http.Header { +func (c *RegionBackendServicesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -116364,7 +134017,7 @@ func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/getXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -116372,38 +134025,40 @@ func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.getXpnHost" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "compute.regionBackendServices.get" call. +// Exactly one of *BackendService or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *BackendService.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Project{ + ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -116415,212 +134070,162 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err } return ret, nil // { - // "description": "Gets the shared VPC host project that this project links to. 
May be empty if no link exists.", - // "flatPath": "projects/{project}/getXpnHost", + // "description": "Returns the specified regional BackendService resource.", + // "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", // "httpMethod": "GET", - // "id": "compute.projects.getXpnHost", + // "id": "compute.regionBackendServices.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "backendService" // ], // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // } - // }, - // "path": "projects/{project}/getXpnHost", - // "response": { - // "$ref": "Project" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.projects.getXpnResources": - -type ProjectsGetXpnResourcesCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetXpnResources: Gets service resources (a.k.a service project) -// associated with this host project. -// -// - project: Project ID for this request. -func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { - c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. 
For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("filter", filter) - return c -} + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/backendServices/{backendService}", + // "response": { + // "$ref": "BackendService" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} +// method id "compute.regionBackendServices.getHealth": -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("pageToken", pageToken) - return c +type RegionBackendServicesGetHealthCall struct { + s *Service + project string + region string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *ProjectsGetXpnResourcesCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// GetHealth: Gets the most recent health check results for this +// regional BackendService. +// +// - backendService: Name of the BackendService resource for which to +// get health. +// - project: . +// - region: Name of the region scoping this request. 
+func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { + c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetXpnResourcesCall { +func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetHealthCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGetXpnResourcesCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetXpnResourcesCall) Context(ctx context.Context) *ProjectsGetXpnResourcesCall { +func (c *RegionBackendServicesGetHealthCall) Context(ctx context.Context) *RegionBackendServicesGetHealthCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGetXpnResourcesCall) Header() http.Header { +func (c *RegionBackendServicesGetHealthCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/getXpnResources") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.getXpnResources" call. -// Exactly one of *ProjectsGetXpnResources or error will be non-nil. 
Any -// non-2xx status code is an error. Response headers are in either -// *ProjectsGetXpnResources.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionBackendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*ProjectsGetXpnResources, error) { +func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ProjectsGetXpnResources{ + ret := &BackendServiceGroupHealth{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -116632,254 +134237,177 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project } return ret, nil // { - // "description": "Gets service resources (a.k.a service project) associated with this host project.", - // "flatPath": "projects/{project}/getXpnResources", - // "httpMethod": "GET", - // "id": "compute.projects.getXpnResources", + // "description": "Gets the most recent health check results for this regional BackendService.", + // "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", + // "httpMethod": "POST", + // "id": "compute.regionBackendServices.getHealth", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "backendService" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "backendService": { + // "description": "Name of the BackendService resource for which to get health.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/getXpnResources", + // "path": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", + // "request": { + // "$ref": "ResourceGroupReference" + // }, // "response": { - // "$ref": "ProjectsGetXpnResources" + // "$ref": "BackendServiceGroupHealth" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
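// compute.regionBackendServices.getHealth above is a POST that takes a ResourceGroupReference
// naming the instance group to probe and returns a BackendServiceGroupHealth. A minimal sketch,
// not part of the generated code, assuming google.golang.org/api/compute/v1, placeholder resource
// names, and the usual Instance/HealthState fields on the returned HealthStatus entries:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	ref := &compute.ResourceGroupReference{
		// URL of the instance group whose backends should be health-checked (placeholder).
		Group: "projects/my-project/zones/us-central1-a/instanceGroups/web-ig",
	}
	health, err := svc.RegionBackendServices.
		GetHealth("my-project", "us-central1", "web-backend", ref).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, hs := range health.HealthStatus {
		fmt.Println(hs.Instance, hs.HealthState)
	}
}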
-func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*ProjectsGetXpnResources) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.projects.listXpnHosts": +// method id "compute.regionBackendServices.getIamPolicy": -type ProjectsListXpnHostsCall struct { - s *Service - project string - projectslistxpnhostsrequest *ProjectsListXpnHostsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListXpnHosts: Lists all shared VPC host projects visible to the user -// in an organization. +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. // // - project: Project ID for this request. -func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { - c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *RegionBackendServicesService) GetIamPolicy(project string, region string, resource string) *RegionBackendServicesGetIamPolicyCall { + c := &RegionBackendServicesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectslistxpnhostsrequest = projectslistxpnhostsrequest - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. 
For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.resource = resource return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *ProjectsListXpnHostsCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsListXpnHostsCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *RegionBackendServicesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *RegionBackendServicesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpnHostsCall { +func (c *RegionBackendServicesGetIamPolicyCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
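// compute.regionBackendServices.getIamPolicy above is a plain GET; the generated
// OptionsRequestedPolicyVersion setter lets the caller ask for a specific IAM policy schema
// version, and IfNoneMatch makes the read conditional on an ETag. A minimal sketch, not part
// of the generated code, assuming google.golang.org/api/compute/v1, placeholder names, and
// the usual Etag and Bindings fields on the returned Policy:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	policy, err := svc.RegionBackendServices.
		GetIamPolicy("my-project", "us-central1", "web-backend").
		OptionsRequestedPolicyVersion(3). // request the version 3 policy schema
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("etag %s, %d bindings\n", policy.Etag, len(policy.Bindings))
}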
+func (c *RegionBackendServicesGetIamPolicyCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsListXpnHostsCall) Context(ctx context.Context) *ProjectsListXpnHostsCall { +func (c *RegionBackendServicesGetIamPolicyCall) Context(ctx context.Context) *RegionBackendServicesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsListXpnHostsCall) Header() http.Header { +func (c *RegionBackendServicesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectslistxpnhostsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/listXpnHosts") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.listXpnHosts" call. -// Exactly one of *XpnHostList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *XpnHostList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostList, error) { +// Do executes the "compute.regionBackendServices.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionBackendServicesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &XpnHostList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -116891,37 +134419,22 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis } return ret, nil // { - // "description": "Lists all shared VPC host projects visible to the user in an organization.", - // "flatPath": "projects/{project}/listXpnHosts", - // "httpMethod": "POST", - // "id": "compute.projects.listXpnHosts", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.regionBackendServices.getIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", // "location": "query", - // "minimum": "0", // "type": "integer" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -116929,66 +134442,57 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/listXpnHosts", - // "request": { - // "$ref": "ProjectsListXpnHostsRequest" - // }, + // "path": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", // "response": { - // "$ref": "XpnHostList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.projects.moveDisk": +// method id "compute.regionBackendServices.insert": -type ProjectsMoveDiskCall struct { - s *Service - project string - diskmoverequest *DiskMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesInsertCall struct { + s *Service + project string + region string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MoveDisk: Moves a persistent disk from one zone to another. +// Insert: Creates a regional BackendService resource in the specified +// project using the data included in the request. For more information, +// see Backend services overview. 
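For orientation, a minimal usage sketch of the regionBackendServices.getIamPolicy builder completed above and the insert builder introduced next. This is illustrative only and not part of the vendored diff: the project, region, and resource names are placeholders, and the compute/v1 import path and NewService constructor are assumptions about how this generated package is consumed.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated package
)

func main() {
	ctx := context.Background()
	// NewService is assumed to pick up Application Default Credentials here.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Fetch the IAM policy of a regional backend service (placeholder names).
	policy, err := svc.RegionBackendServices.
		GetIamPolicy("my-project", "us-central1", "my-backend-service").
		OptionsRequestedPolicyVersion(3).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("policy etag:", policy.Etag)

	// Create a regional backend service; Do returns a long-running Operation.
	op, err := svc.RegionBackendServices.
		Insert("my-project", "us-central1", &compute.BackendService{Name: "example-bes"}).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("insert operation:", op.Name)
}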
// // - project: Project ID for this request. -func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { - c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { + c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.diskmoverequest = diskmoverequest + c.region = region + c.backendservice = backendservice return c } @@ -117003,7 +134507,7 @@ func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequ // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall { +func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBackendServicesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -117011,7 +134515,7 @@ func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { +func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBackendServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -117019,21 +134523,21 @@ func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { +func (c *RegionBackendServicesInsertCall) Context(ctx context.Context) *RegionBackendServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsMoveDiskCall) Header() http.Header { +func (c *RegionBackendServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -117041,14 +134545,14 @@ func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/moveDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -117057,35 +134561,36 @@ func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.moveDisk" call. +// Do executes the "compute.regionBackendServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -117099,12 +134604,13 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Moves a persistent disk from one zone to another.", - // "flatPath": "projects/{project}/moveDisk", + // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", + // "flatPath": "projects/{project}/regions/{region}/backendServices", // "httpMethod": "POST", - // "id": "compute.projects.moveDisk", + // "id": "compute.regionBackendServices.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "project": { @@ -117114,15 +134620,22 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/moveDisk", + // "path": "projects/{project}/regions/{region}/backendServices", // "request": { - // "$ref": "DiskMoveRequest" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -117135,128 +134648,201 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.projects.moveInstance": +// method id "compute.regionBackendServices.list": -type ProjectsMoveInstanceCall struct { - s *Service - project string - instancemoverequest *InstanceMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// MoveInstance: Moves an instance and its attached persistent disks -// from one zone to another. *Note*: Moving VMs or disks by using this -// method might cause unexpected behavior. For more information, see the -// known issue -// (/compute/docs/troubleshooting/known-issues#moving_vms_or_disks_using_ -// the_moveinstance_api_or_the_causes_unexpected_behavior). +// List: Retrieves the list of regional BackendService resources +// available to the specified project in the given region. // // - project: Project ID for this request. -func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { - c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { + c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instancemoverequest = instancemoverequest + c.region = region return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. 
The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBackendServicesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendServicesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
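The filter, maxResults, orderBy, and pageToken setters above feed the list call; in practice the Pages helper added further down in this diff drives the nextPageToken loop for you. A hedged sketch, reusing the svc/ctx setup from the earlier example; the filter expression and names are assumptions.

// listRegionBackendServices prints matching backend services, letting Pages
// handle the pageToken/nextPageToken loop described above.
func listRegionBackendServices(ctx context.Context, svc *compute.Service) error {
	return svc.RegionBackendServices.
		List("my-project", "us-central1").
		Filter(`loadBalancingScheme = "INTERNAL"`). // AIP-160 style filter; placeholder value
		MaxResults(100).
		Pages(ctx, func(page *compute.BackendServiceList) error {
			for _, bes := range page.Items {
				fmt.Println(bes.Name)
			}
			return nil
		})
}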
Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBackendServicesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionBackendServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionBackendServicesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { +func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBackendServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBackendServicesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { +func (c *RegionBackendServicesListCall) Context(ctx context.Context) *RegionBackendServicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsMoveInstanceCall) Header() http.Header { +func (c *RegionBackendServicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/moveInstance") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.moveInstance" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionBackendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendServiceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -117268,14 +134854,38 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Moves an instance and its attached persistent disks from one zone to another. *Note*: Moving VMs or disks by using this method might cause unexpected behavior. For more information, see the [known issue](/compute/docs/troubleshooting/known-issues#moving_vms_or_disks_using_the_moveinstance_api_or_the_causes_unexpected_behavior).", - // "flatPath": "projects/{project}/moveInstance", - // "httpMethod": "POST", - // "id": "compute.projects.moveInstance", + // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", + // "flatPath": "projects/{project}/regions/{region}/backendServices", + // "httpMethod": "GET", + // "id": "compute.regionBackendServices.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -117283,46 +134893,80 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/moveInstance", - // "request": { - // "$ref": "InstanceMoveRequest" - // }, + // "path": "projects/{project}/regions/{region}/backendServices", // "response": { - // "$ref": "Operation" + // "$ref": "BackendServiceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.setCommonInstanceMetadata": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsSetCommonInstanceMetadataCall struct { - s *Service - project string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionBackendServices.patch": + +type RegionBackendServicesPatchCall struct { + s *Service + project string + region string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetCommonInstanceMetadata: Sets metadata common to all instances -// within the specified project using the data included in the request. +// Patch: Updates the specified regional BackendService resource with +// the data included in the request. 
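A hedged sketch of the patch builder just described: per the method documentation it uses JSON merge-patch semantics, so only the fields set on the request body are changed, and requestId makes a retried request idempotent. Setup (svc, ctx) and all names are assumed, as in the earlier example.

// patchBackendServiceTimeout changes a single field via merge-patch semantics.
func patchBackendServiceTimeout(ctx context.Context, svc *compute.Service) error {
	op, err := svc.RegionBackendServices.
		Patch("my-project", "us-central1", "my-backend-service", &compute.BackendService{
			TimeoutSec: 60, // only this field is sent and merged
		}).
		RequestId("3c7d61f0-0000-4000-8000-000000000001"). // placeholder UUID for safe retries
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("patch operation:", op.Name)
	return nil
}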
For more information, see +// Understanding backend services This method supports PATCH semantics +// and uses the JSON merge patch format and processing rules. // +// - backendService: Name of the BackendService resource to patch. // - project: Project ID for this request. -func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { - c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { + c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.metadata = metadata + c.region = region + c.backendService = backendService + c.backendservice = backendservice return c } @@ -117337,7 +134981,7 @@ func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Me // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall { +func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBackendServicesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -117345,7 +134989,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *Pro // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { +func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBackendServicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -117353,21 +134997,21 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *Pr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { +func (c *RegionBackendServicesPatchCall) Context(ctx context.Context) *RegionBackendServicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { +func (c *RegionBackendServicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -117375,51 +135019,53 @@ func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setCommonInstanceMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setCommonInstanceMetadata" call. +// Do executes the "compute.regionBackendServices.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -117433,14 +135079,23 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", - // "flatPath": "projects/{project}/setCommonInstanceMetadata", - // "httpMethod": "POST", - // "id": "compute.projects.setCommonInstanceMetadata", + // "description": "Updates the specified regional BackendService resource with the data included in the request. 
For more information, see Understanding backend services This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", + // "httpMethod": "PATCH", + // "id": "compute.regionBackendServices.patch", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "backendService" // ], // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -117448,15 +135103,22 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/setCommonInstanceMetadata", + // "path": "projects/{project}/regions/{region}/backendServices/{backendService}", // "request": { - // "$ref": "Metadata" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -117469,50 +135131,38 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) } -// method id "compute.projects.setDefaultNetworkTier": +// method id "compute.regionBackendServices.setIamPolicy": -type ProjectsSetDefaultNetworkTierCall struct { - s *Service - project string - projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetDefaultNetworkTier: Sets the default network tier of the project. -// The default network tier is used when an -// address/forwardingRule/instance is created without specifying the -// network tier field. +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. // // - project: Project ID for this request. 
-func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall { - c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *RegionBackendServicesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionBackendServicesSetIamPolicyCall { + c := &RegionBackendServicesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectssetdefaultnetworktierrequest = projectssetdefaultnetworktierrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *ProjectsSetDefaultNetworkTierCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultNetworkTierCall { +func (c *RegionBackendServicesSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionBackendServicesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -117520,21 +135170,21 @@ func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *Projec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetDefaultNetworkTierCall) Context(ctx context.Context) *ProjectsSetDefaultNetworkTierCall { +func (c *RegionBackendServicesSetIamPolicyCall) Context(ctx context.Context) *RegionBackendServicesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { +func (c *RegionBackendServicesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -117542,14 +135192,14 @@ func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultnetworktierrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setDefaultNetworkTier") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -117557,38 +135207,40 @@ func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setDefaultNetworkTier" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionBackendServices.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionBackendServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -117600,12 +135252,14 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", - // "flatPath": "projects/{project}/setDefaultNetworkTier", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", // "httpMethod": "POST", - // "id": "compute.projects.setDefaultNetworkTier", + // "id": "compute.regionBackendServices.setIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { // "project": { @@ -117615,18 +135269,27 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/setDefaultNetworkTier", + // "path": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", // "request": { - // "$ref": "ProjectsSetDefaultNetworkTierRequest" + // "$ref": "RegionSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -117636,27 +135299,32 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.projects.setUsageExportBucket": +// method id "compute.regionBackendServices.update": -type ProjectsSetUsageExportBucketCall struct { - s *Service - project string - usageexportlocation *UsageExportLocation - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesUpdateCall struct { + s *Service + project string + region string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetUsageExportBucket: Enables the usage export feature and sets the -// usage export bucket where reports are stored. If you provide an empty -// request body using this method, the usage export feature will be -// disabled. +// Update: Updates the specified regional BackendService resource with +// the data included in the request. For more information, see Backend +// services overview . // +// - backendService: Name of the BackendService resource to update. // - project: Project ID for this request. -func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { - c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { + c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.usageexportlocation = usageexportlocation + c.region = region + c.backendService = backendService + c.backendservice = backendservice return c } @@ -117671,7 +135339,7 @@ func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocati // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
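Finally, a hedged sketch pairing the setIamPolicy builder completed above with the update builder introduced here; update uses PUT and replaces the whole resource, unlike patch. The role, member, resource names, and the shape of the desired BackendService are assumptions for illustration only.

// grantViewerAndUpdate shows the two builders side by side (illustrative only;
// reuses the svc/ctx setup from the earlier sketch).
func grantViewerAndUpdate(ctx context.Context, svc *compute.Service, desired *compute.BackendService) error {
	// Replace the existing IAM policy on the backend service.
	_, err := svc.RegionBackendServices.
		SetIamPolicy("my-project", "us-central1", "my-backend-service", &compute.RegionSetPolicyRequest{
			Policy: &compute.Policy{
				Bindings: []*compute.Binding{
					{Role: "roles/compute.viewer", Members: []string{"user:alice@example.com"}},
				},
			},
		}).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}

	// Full-resource update (PUT): desired must carry every field you want to keep.
	op, err := svc.RegionBackendServices.
		Update("my-project", "us-central1", "my-backend-service", desired).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("update operation:", op.Name)
	return nil
}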
-func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall { +func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBackendServicesUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -117679,7 +135347,7 @@ func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *Projects // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { +func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBackendServicesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -117687,21 +135355,21 @@ func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *Project // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { +func (c *RegionBackendServicesUpdateCall) Context(ctx context.Context) *RegionBackendServicesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { +func (c *RegionBackendServicesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -117709,51 +135377,53 @@ func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setUsageExportBucket") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setUsageExportBucket" call. +// Do executes the "compute.regionBackendServices.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -117767,14 +135437,23 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", - // "flatPath": "projects/{project}/setUsageExportBucket", - // "httpMethod": "POST", - // "id": "compute.projects.setUsageExportBucket", + // "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Backend services overview .", + // "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", + // "httpMethod": "PUT", + // "id": "compute.regionBackendServices.update", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "backendService" // ], // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -117782,145 +135461,238 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/setUsageExportBucket", + // "path": "projects/{project}/regions/{region}/backendServices/{backendService}", // "request": { - // "$ref": "UsageExportLocation" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.publicAdvertisedPrefixes.delete": +// method id "compute.regionCommitments.aggregatedList": + +type RegionCommitmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of commitments by +// region. +// +// - project: Project ID for this request. +func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { + c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *RegionCommitmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} -type PublicAdvertisedPrefixesDeleteCall struct { - s *Service - project string - publicAdvertisedPrefix string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c } -// Delete: Deletes the specified PublicAdvertisedPrefix -// -// - project: Project ID for this request. -// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource -// to delete. -func (r *PublicAdvertisedPrefixesService) Delete(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesDeleteCall { - c := &PublicAdvertisedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.publicAdvertisedPrefix = publicAdvertisedPrefix +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *PublicAdvertisedPrefixesDeleteCall) RequestId(requestId string) *PublicAdvertisedPrefixesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionCommitmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PublicAdvertisedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesDeleteCall { +func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *RegionCommitmentsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PublicAdvertisedPrefixesDeleteCall) Context(ctx context.Context) *PublicAdvertisedPrefixesDeleteCall { +func (c *RegionCommitmentsAggregatedListCall) Context(ctx context.Context) *RegionCommitmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
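The chainable setters above (Filter, OrderBy, MaxResults, PageToken, ReturnPartialSuccess) plus the nextPageToken in each response are all that manual pagination needs. A hedged sketch under the same assumptions as the earlier example; the Items, Commitments and NextPageToken field names come from list types that are not shown in this hunk:

import (
    "context"
    "log"

    compute "google.golang.org/api/compute/v1"
)

// listCommitmentsByPage walks the aggregated commitment list one page at a
// time, feeding each response's nextPageToken back in as the next pageToken.
func listCommitmentsByPage(ctx context.Context, svc *compute.Service, project string) error {
    call := svc.RegionCommitments.AggregatedList(project).
        Filter(`name != example-commitment`). // expression syntax as described above
        OrderBy("creationTimestamp desc").
        MaxResults(100).
        ReturnPartialSuccess(true).
        Context(ctx)
    pageToken := ""
    for {
        if pageToken != "" {
            call.PageToken(pageToken)
        }
        page, err := call.Do()
        if err != nil {
            return err
        }
        // Items and Commitments are fields of the aggregated list types defined
        // elsewhere in this generated file; treat the names as assumptions.
        for scope, scoped := range page.Items {
            for _, c := range scoped.Commitments {
                log.Printf("%s: %s", scope, c.Name)
            }
        }
        if page.NextPageToken == "" {
            return nil
        }
        pageToken = page.NextPageToken
    }
}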
-func (c *PublicAdvertisedPrefixesDeleteCall) Header() http.Header { +func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PublicAdvertisedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/commitments") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "publicAdvertisedPrefix": c.publicAdvertisedPrefix, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.publicAdvertisedPrefixes.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionCommitments.aggregatedList" call. +// Exactly one of *CommitmentAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *CommitmentAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*CommitmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &CommitmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -117932,15 +135704,42 @@ func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Deletes the specified PublicAdvertisedPrefix", - // "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", - // "httpMethod": "DELETE", - // "id": "compute.publicAdvertisedPrefixes.delete", + // "description": "Retrieves an aggregated list of commitments by region.", + // "flatPath": "projects/{project}/aggregated/commitments", + // "httpMethod": "GET", + // "id": "compute.regionCommitments.aggregatedList", // "parameterOrder": [ - // "project", - // "publicAdvertisedPrefix" + // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -117948,59 +135747,77 @@ func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, - // "publicAdvertisedPrefix": { - // "description": "Name of the PublicAdvertisedPrefix resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // } // }, - // "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + // "path": "projects/{project}/aggregated/commitments", // "response": { - // "$ref": "Operation" + // "$ref": "CommitmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.publicAdvertisedPrefixes.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func(*CommitmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type PublicAdvertisedPrefixesGetCall struct { - s *Service - project string - publicAdvertisedPrefix string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.regionCommitments.get": + +type RegionCommitmentsGetCall struct { + s *Service + project string + region string + commitment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified PublicAdvertisedPrefix resource. +// Get: Returns the specified commitment resource. Gets a list of +// available commitments by making a list() request. // +// - commitment: Name of the commitment to return. // - project: Project ID for this request. -// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource -// to return. -func (r *PublicAdvertisedPrefixesService) Get(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesGetCall { - c := &PublicAdvertisedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { + c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.publicAdvertisedPrefix = publicAdvertisedPrefix + c.region = region + c.commitment = commitment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PublicAdvertisedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesGetCall { +func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -118010,7 +135827,7 @@ func (c *PublicAdvertisedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicAd // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
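The Pages helper added above removes the manual token loop entirely: it invokes the callback once per page and stops when nextPageToken is empty or the callback returns an error. A sketch under the same assumptions; the status filter field and the Items and Plan names are guesses from the schema defined elsewhere in this file:

import (
    "context"
    "log"

    compute "google.golang.org/api/compute/v1"
)

// logActiveCommitments lets Pages drive the pageToken loop; the callback is
// called once per CommitmentAggregatedList page.
func logActiveCommitments(ctx context.Context, svc *compute.Service, project string) error {
    return svc.RegionCommitments.AggregatedList(project).
        Filter(`status = "ACTIVE"`). // the status field name is an assumption
        Pages(ctx, func(page *compute.CommitmentAggregatedList) error {
            for _, scoped := range page.Items {
                for _, c := range scoped.Commitments {
                    log.Printf("active commitment: %s (plan %s)", c.Name, c.Plan)
                }
            }
            return nil // a non-nil error here would stop the iteration
        })
}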
-func (c *PublicAdvertisedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesGetCall { +func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -118018,21 +135835,21 @@ func (c *PublicAdvertisedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicA // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PublicAdvertisedPrefixesGetCall) Context(ctx context.Context) *PublicAdvertisedPrefixesGetCall { +func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PublicAdvertisedPrefixesGetCall) Header() http.Header { +func (c *RegionCommitmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PublicAdvertisedPrefixesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -118045,7 +135862,7 @@ func (c *PublicAdvertisedPrefixesGetCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments/{commitment}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -118053,39 +135870,40 @@ func (c *PublicAdvertisedPrefixesGetCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "publicAdvertisedPrefix": c.publicAdvertisedPrefix, + "project": c.project, + "region": c.region, + "commitment": c.commitment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.publicAdvertisedPrefixes.get" call. -// Exactly one of *PublicAdvertisedPrefix or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *PublicAdvertisedPrefix.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefix, error) { +// Do executes the "compute.regionCommitments.get" call. +// Exactly one of *Commitment or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Commitment.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &PublicAdvertisedPrefix{ + ret := &Commitment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -118097,15 +135915,23 @@ func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Pub } return ret, nil // { - // "description": "Returns the specified PublicAdvertisedPrefix resource.", - // "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + // "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", + // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", // "httpMethod": "GET", - // "id": "compute.publicAdvertisedPrefixes.get", + // "id": "compute.regionCommitments.get", // "parameterOrder": [ // "project", - // "publicAdvertisedPrefix" + // "region", + // "commitment" // ], // "parameters": { + // "commitment": { + // "description": "Name of the commitment to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -118113,17 +135939,17 @@ func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Pub // "required": true, // "type": "string" // }, - // "publicAdvertisedPrefix": { - // "description": "Name of the PublicAdvertisedPrefix resource to return.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + // "path": "projects/{project}/regions/{region}/commitments/{commitment}", // "response": { - // "$ref": "PublicAdvertisedPrefix" + // "$ref": "Commitment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -118134,25 +135960,28 @@ func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Pub } -// method id "compute.publicAdvertisedPrefixes.insert": +// method id "compute.regionCommitments.insert": -type PublicAdvertisedPrefixesInsertCall struct { - s *Service - project string - publicadvertisedprefix *PublicAdvertisedPrefix - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionCommitmentsInsertCall struct { + s *Service + project string + region string + commitment *Commitment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a PublicAdvertisedPrefix in the specified project -// using the parameters that are included in the request. +// Insert: Creates a commitment in the specified project using the data +// included in the request. // // - project: Project ID for this request. 
-func (r *PublicAdvertisedPrefixesService) Insert(project string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesInsertCall { - c := &PublicAdvertisedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { + c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.publicadvertisedprefix = publicadvertisedprefix + c.region = region + c.commitment = commitment return c } @@ -118167,7 +135996,7 @@ func (r *PublicAdvertisedPrefixesService) Insert(project string, publicadvertise // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *PublicAdvertisedPrefixesInsertCall) RequestId(requestId string) *PublicAdvertisedPrefixesInsertCall { +func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -118175,7 +136004,7 @@ func (c *PublicAdvertisedPrefixesInsertCall) RequestId(requestId string) *Public // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PublicAdvertisedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesInsertCall { +func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -118183,21 +136012,21 @@ func (c *PublicAdvertisedPrefixesInsertCall) Fields(s ...googleapi.Field) *Publi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PublicAdvertisedPrefixesInsertCall) Context(ctx context.Context) *PublicAdvertisedPrefixesInsertCall { +func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *PublicAdvertisedPrefixesInsertCall) Header() http.Header { +func (c *RegionCommitmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PublicAdvertisedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -118205,14 +136034,14 @@ func (c *PublicAdvertisedPrefixesInsertCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -118221,35 +136050,36 @@ func (c *PublicAdvertisedPrefixesInsertCall) doRequest(alt string) (*http.Respon req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.publicAdvertisedPrefixes.insert" call. +// Do executes the "compute.regionCommitments.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -118263,12 +136093,13 @@ func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Creates a PublicAdvertisedPrefix in the specified project using the parameters that are included in the request.", - // "flatPath": "projects/{project}/global/publicAdvertisedPrefixes", + // "description": "Creates a commitment in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/commitments", // "httpMethod": "POST", - // "id": "compute.publicAdvertisedPrefixes.insert", + // "id": "compute.regionCommitments.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "project": { @@ -118278,15 +136109,22 @@ func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/global/publicAdvertisedPrefixes", + // "path": "projects/{project}/regions/{region}/commitments", // "request": { - // "$ref": "PublicAdvertisedPrefix" + // "$ref": "Commitment" // }, // "response": { // "$ref": "Operation" @@ -118299,40 +136137,47 @@ func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.publicAdvertisedPrefixes.list": +// method id "compute.regionCommitments.list": -type PublicAdvertisedPrefixesListCall struct { +type RegionCommitmentsListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists the PublicAdvertisedPrefixes for a project. +// List: Retrieves a list of commitments contained within the specified +// region. // // - project: Project ID for this request. -func (r *PublicAdvertisedPrefixesService) List(project string) *PublicAdvertisedPrefixesListCall { - c := &PublicAdvertisedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. 
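One visible theme of these hunks is that every Do method now routes errors through gensupport.WrapError instead of returning a bare *googleapi.Error. A hedged sketch of how a caller might create a commitment and still inspect the HTTP error: the Commitment and ResourceCommitment field values are invented, and the claim that errors.As reaches the wrapped *googleapi.Error is an assumption about the wrapper, not something this hunk states:

import (
    "context"
    "errors"
    "log"

    compute "google.golang.org/api/compute/v1"
    "google.golang.org/api/googleapi"
)

// createCommitment inserts a one-year commitment and inspects any HTTP error.
func createCommitment(ctx context.Context, svc *compute.Service, project, region string) error {
    c := &compute.Commitment{
        // Name, Plan and Resources are assumed from the Commitment schema
        // defined elsewhere in this generated file.
        Name: "example-commitment",
        Plan: "TWELVE_MONTH",
        Resources: []*compute.ResourceCommitment{
            {Type: "VCPU", Amount: 4},
        },
    }
    op, err := svc.RegionCommitments.Insert(project, region, c).Context(ctx).Do()
    if err != nil {
        var gerr *googleapi.Error
        if errors.As(err, &gerr) { // assumes the wrapped error still unwraps to *googleapi.Error
            log.Printf("insert failed: HTTP %d: %s", gerr.Code, gerr.Message)
        }
        return err
    }
    log.Printf("insert accepted: operation %s", op.Name)
    return nil
}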
+func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { + c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -118340,8 +136185,18 @@ func (r *PublicAdvertisedPrefixesService) List(project string) *PublicAdvertised // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *PublicAdvertisedPrefixesListCall) Filter(filter string) *PublicAdvertisedPrefixesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { c.urlParams_.Set("filter", filter) return c } @@ -118352,7 +136207,7 @@ func (c *PublicAdvertisedPrefixesListCall) Filter(filter string) *PublicAdvertis // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *PublicAdvertisedPrefixesListCall) MaxResults(maxResults int64) *PublicAdvertisedPrefixesListCall { +func (c *RegionCommitmentsListCall) MaxResults(maxResults int64) *RegionCommitmentsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -118366,7 +136221,7 @@ func (c *PublicAdvertisedPrefixesListCall) MaxResults(maxResults int64) *PublicA // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *PublicAdvertisedPrefixesListCall) OrderBy(orderBy string) *PublicAdvertisedPrefixesListCall { +func (c *RegionCommitmentsListCall) OrderBy(orderBy string) *RegionCommitmentsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -118374,7 +136229,7 @@ func (c *PublicAdvertisedPrefixesListCall) OrderBy(orderBy string) *PublicAdvert // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *PublicAdvertisedPrefixesListCall) PageToken(pageToken string) *PublicAdvertisedPrefixesListCall { +func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmentsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -118383,7 +136238,7 @@ func (c *PublicAdvertisedPrefixesListCall) PageToken(pageToken string) *PublicAd // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *PublicAdvertisedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicAdvertisedPrefixesListCall { +func (c *RegionCommitmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionCommitmentsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -118391,7 +136246,7 @@ func (c *PublicAdvertisedPrefixesListCall) ReturnPartialSuccess(returnPartialSuc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PublicAdvertisedPrefixesListCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesListCall { +func (c *RegionCommitmentsListCall) Fields(s ...googleapi.Field) *RegionCommitmentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -118401,7 +136256,7 @@ func (c *PublicAdvertisedPrefixesListCall) Fields(s ...googleapi.Field) *PublicA // getting updates only after the object has changed since the last // request. 
Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *PublicAdvertisedPrefixesListCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesListCall { +func (c *RegionCommitmentsListCall) IfNoneMatch(entityTag string) *RegionCommitmentsListCall { c.ifNoneMatch_ = entityTag return c } @@ -118409,21 +136264,21 @@ func (c *PublicAdvertisedPrefixesListCall) IfNoneMatch(entityTag string) *Public // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PublicAdvertisedPrefixesListCall) Context(ctx context.Context) *PublicAdvertisedPrefixesListCall { +func (c *RegionCommitmentsListCall) Context(ctx context.Context) *RegionCommitmentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PublicAdvertisedPrefixesListCall) Header() http.Header { +func (c *RegionCommitmentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PublicAdvertisedPrefixesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -118436,7 +136291,7 @@ func (c *PublicAdvertisedPrefixesListCall) doRequest(alt string) (*http.Response var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -118445,37 +136300,38 @@ func (c *PublicAdvertisedPrefixesListCall) doRequest(alt string) (*http.Response req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.publicAdvertisedPrefixes.list" call. -// Exactly one of *PublicAdvertisedPrefixList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *PublicAdvertisedPrefixList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionCommitments.list" call. +// Exactly one of *CommitmentList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *CommitmentList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefixList, error) { +func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*CommitmentList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &PublicAdvertisedPrefixList{ + ret := &CommitmentList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -118487,16 +136343,17 @@ func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pu } return ret, nil // { - // "description": "Lists the PublicAdvertisedPrefixes for a project.", - // "flatPath": "projects/{project}/global/publicAdvertisedPrefixes", + // "description": "Retrieves a list of commitments contained within the specified region.", + // "flatPath": "projects/{project}/regions/{region}/commitments", // "httpMethod": "GET", - // "id": "compute.publicAdvertisedPrefixes.list", + // "id": "compute.regionCommitments.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -118525,15 +136382,22 @@ func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pu // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "returnPartialSuccess": { // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" // } // }, - // "path": "projects/{project}/global/publicAdvertisedPrefixes", + // "path": "projects/{project}/regions/{region}/commitments", // "response": { - // "$ref": "PublicAdvertisedPrefixList" + // "$ref": "CommitmentList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -118547,7 +136411,7 @@ func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pu // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *PublicAdvertisedPrefixesListCall) Pages(ctx context.Context, f func(*PublicAdvertisedPrefixList) error) error { +func (c *RegionCommitmentsListCall) Pages(ctx context.Context, f func(*CommitmentList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -118565,30 +136429,40 @@ func (c *PublicAdvertisedPrefixesListCall) Pages(ctx context.Context, f func(*Pu } } -// method id "compute.publicAdvertisedPrefixes.patch": +// method id "compute.regionCommitments.update": -type PublicAdvertisedPrefixesPatchCall struct { - s *Service - project string - publicAdvertisedPrefix string - publicadvertisedprefix *PublicAdvertisedPrefix - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionCommitmentsUpdateCall struct { + s *Service + project string + region string + commitment string + commitment2 *Commitment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified Router resource with the data included -// in the request. This method supports PATCH semantics and uses JSON -// merge patch format and processing rules. +// Update: Updates the specified commitment with the data included in +// the request. Update is performed only on selected fields included as +// part of update-mask. Only the following fields can be modified: +// auto_renew. // -// - project: Project ID for this request. -// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource -// to patch. -func (r *PublicAdvertisedPrefixesService) Patch(project string, publicAdvertisedPrefix string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesPatchCall { - c := &PublicAdvertisedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - commitment: Name of the commitment for which auto renew is being +// updated. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *RegionCommitmentsService) Update(project string, region string, commitment string, commitment2 *Commitment) *RegionCommitmentsUpdateCall { + c := &RegionCommitmentsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.publicAdvertisedPrefix = publicAdvertisedPrefix - c.publicadvertisedprefix = publicadvertisedprefix + c.region = region + c.commitment = commitment + c.commitment2 = commitment2 + return c +} + +// Paths sets the optional parameter "paths": +func (c *RegionCommitmentsUpdateCall) Paths(paths ...string) *RegionCommitmentsUpdateCall { + c.urlParams_.SetMulti("paths", append([]string{}, paths...)) return c } @@ -118603,15 +136477,22 @@ func (r *PublicAdvertisedPrefixesService) Patch(project string, publicAdvertised // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *PublicAdvertisedPrefixesPatchCall) RequestId(requestId string) *PublicAdvertisedPrefixesPatchCall { +func (c *RegionCommitmentsUpdateCall) RequestId(requestId string) *RegionCommitmentsUpdateCall { c.urlParams_.Set("requestId", requestId) return c } +// UpdateMask sets the optional parameter "updateMask": update_mask +// indicates fields to be updated as part of this request. 
+func (c *RegionCommitmentsUpdateCall) UpdateMask(updateMask string) *RegionCommitmentsUpdateCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PublicAdvertisedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesPatchCall { +func (c *RegionCommitmentsUpdateCall) Fields(s ...googleapi.Field) *RegionCommitmentsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -118619,21 +136500,21 @@ func (c *PublicAdvertisedPrefixesPatchCall) Fields(s ...googleapi.Field) *Public // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PublicAdvertisedPrefixesPatchCall) Context(ctx context.Context) *PublicAdvertisedPrefixesPatchCall { +func (c *RegionCommitmentsUpdateCall) Context(ctx context.Context) *RegionCommitmentsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PublicAdvertisedPrefixesPatchCall) Header() http.Header { +func (c *RegionCommitmentsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PublicAdvertisedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -118641,14 +136522,14 @@ func (c *PublicAdvertisedPrefixesPatchCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments/{commitment}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -118656,37 +136537,38 @@ func (c *PublicAdvertisedPrefixesPatchCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "publicAdvertisedPrefix": c.publicAdvertisedPrefix, + "project": c.project, + "region": c.region, + "commitment": c.commitment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.publicAdvertisedPrefixes.patch" call. +// Do executes the "compute.regionCommitments.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionCommitmentsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -118700,15 +136582,28 @@ func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + // "description": "Updates the specified commitment with the data included in the request. Update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: auto_renew.", + // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", // "httpMethod": "PATCH", - // "id": "compute.publicAdvertisedPrefixes.patch", + // "id": "compute.regionCommitments.update", // "parameterOrder": [ // "project", - // "publicAdvertisedPrefix" + // "region", + // "commitment" // ], // "parameters": { + // "commitment": { + // "description": "Name of the commitment for which auto renew is being updated.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "paths": { + // "location": "query", + // "repeated": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -118716,10 +136611,10 @@ func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "publicAdvertisedPrefix": { - // "description": "Name of the PublicAdvertisedPrefix resource to patch.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -118727,11 +136622,17 @@ func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*O // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "updateMask": { + // "description": "update_mask indicates fields to be updated as part of this request.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", + // "path": "projects/{project}/regions/{region}/commitments/{commitment}", // "request": { - // "$ref": "PublicAdvertisedPrefix" + // "$ref": "Commitment" // }, // "response": { // "$ref": "Operation" @@ -118744,41 +136645,220 @@ func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.publicDelegatedPrefixes.aggregatedList": +// method id "compute.regionDiskTypes.get": -type PublicDelegatedPrefixesAggregatedListCall struct { +type RegionDiskTypesGetCall struct { s *Service project string + region string + diskType string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Lists all PublicDelegatedPrefix resources owned by -// the specific project across all scopes. +// Get: Returns the specified regional disk type. Gets a list of +// available disk types by making a list() request. // -// - project: Name of the project scoping this request. -func (r *PublicDelegatedPrefixesService) AggregatedList(project string) *PublicDelegatedPrefixesAggregatedListCall { - c := &PublicDelegatedPrefixesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - diskType: Name of the disk type to return. +// - project: Project ID for this request. +// - region: The name of the region for this request. +func (r *RegionDiskTypesService) Get(project string, region string, diskType string) *RegionDiskTypesGetCall { + c := &RegionDiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.diskType = diskType + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionDiskTypesGetCall) Fields(s ...googleapi.Field) *RegionDiskTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionDiskTypesGetCall) IfNoneMatch(entityTag string) *RegionDiskTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionDiskTypesGetCall) Context(ctx context.Context) *RegionDiskTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
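A companion sketch for the new compute.regionCommitments.update call above, which the discovery metadata says may only modify auto_renew; the helper name, the "autoRenew" field-mask spelling, and the resource names are assumptions, and the returned Operation still has to be polled to completion.

package main

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// updateAutoRenew flips auto-renew on an existing commitment (hypothetical helper).
func updateAutoRenew(ctx context.Context, svc *compute.Service, project, region, name string, enable bool) (*compute.Operation, error) {
	body := &compute.Commitment{
		AutoRenew: enable,
		// ForceSendFields makes sure a false value is still serialized.
		ForceSendFields: []string{"AutoRenew"},
	}
	return svc.RegionCommitments.
		Update(project, region, name, body).
		UpdateMask("autoRenew"). // assumed google-fieldmask spelling of auto_renew
		Context(ctx).
		Do()
}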
+func (c *RegionDiskTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/diskTypes/{diskType}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "diskType": c.diskType, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionDiskTypes.get" call. +// Exactly one of *DiskType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &DiskType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified regional disk type. 
Gets a list of available disk types by making a list() request.", + // "flatPath": "projects/{project}/regions/{region}/diskTypes/{diskType}", + // "httpMethod": "GET", + // "id": "compute.regionDiskTypes.get", + // "parameterOrder": [ + // "project", + // "region", + // "diskType" + // ], + // "parameters": { + // "diskType": { + // "description": "Name of the disk type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/diskTypes/{diskType}", + // "response": { + // "$ref": "DiskType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionDiskTypes.list": + +type RegionDiskTypesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of regional disk types available to the +// specified project. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +func (r *RegionDiskTypesService) List(project string, region string) *RegionDiskTypesListCall { + c := &RegionDiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. 
For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -118786,32 +136866,29 @@ func (r *PublicDelegatedPrefixesService) AggregatedList(project string) *PublicD // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *PublicDelegatedPrefixesAggregatedListCall) Filter(filter string) *PublicDelegatedPrefixesAggregatedListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall { c.urlParams_.Set("filter", filter) return c } -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *PublicDelegatedPrefixesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *PublicDelegatedPrefixesAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than `maxResults`, Compute Engine returns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *PublicDelegatedPrefixesAggregatedListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesAggregatedListCall { +func (c *RegionDiskTypesListCall) MaxResults(maxResults int64) *RegionDiskTypesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -118825,7 +136902,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) MaxResults(maxResults int64) // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *PublicDelegatedPrefixesAggregatedListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesAggregatedListCall { +func (c *RegionDiskTypesListCall) OrderBy(orderBy string) *RegionDiskTypesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -118833,7 +136910,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) OrderBy(orderBy string) *Pub // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *PublicDelegatedPrefixesAggregatedListCall) PageToken(pageToken string) *PublicDelegatedPrefixesAggregatedListCall { +func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -118842,7 +136919,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) PageToken(pageToken string) // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *PublicDelegatedPrefixesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesAggregatedListCall { +func (c *RegionDiskTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionDiskTypesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -118850,7 +136927,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) ReturnPartialSuccess(returnP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PublicDelegatedPrefixesAggregatedListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesAggregatedListCall { +func (c *RegionDiskTypesListCall) Fields(s ...googleapi.Field) *RegionDiskTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -118860,7 +136937,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Fields(s ...googleapi.Field) // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *PublicDelegatedPrefixesAggregatedListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesAggregatedListCall { +func (c *RegionDiskTypesListCall) IfNoneMatch(entityTag string) *RegionDiskTypesListCall { c.ifNoneMatch_ = entityTag return c } @@ -118868,21 +136945,21 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) IfNoneMatch(entityTag string // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *PublicDelegatedPrefixesAggregatedListCall) Context(ctx context.Context) *PublicDelegatedPrefixesAggregatedListCall { +func (c *RegionDiskTypesListCall) Context(ctx context.Context) *RegionDiskTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PublicDelegatedPrefixesAggregatedListCall) Header() http.Header { +func (c *RegionDiskTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PublicDelegatedPrefixesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -118895,7 +136972,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) doRequest(alt string) (*http var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/publicDelegatedPrefixes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/diskTypes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -118904,38 +136981,38 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) doRequest(alt string) (*http req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.publicDelegatedPrefixes.aggregatedList" call. -// Exactly one of *PublicDelegatedPrefixAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *PublicDelegatedPrefixAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixAggregatedList, error) { +// Do executes the "compute.regionDiskTypes.list" call. +// Exactly one of *RegionDiskTypeList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RegionDiskTypeList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
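Likewise, a short sketch of the regionDiskTypes.get and regionDiskTypes.list calls added in these hunks; "pd-ssd" and the helper name are illustrative placeholders rather than anything defined by the patch.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// showRegionDiskTypes fetches one disk type and then lists all of them (example helper).
func showRegionDiskTypes(ctx context.Context, svc *compute.Service, project, region string) error {
	dt, err := svc.RegionDiskTypes.Get(project, region, "pd-ssd").Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println(dt.Name, "default size GB:", dt.DefaultDiskSizeGb)

	return svc.RegionDiskTypes.List(project, region).Pages(ctx, func(page *compute.RegionDiskTypeList) error {
		for _, item := range page.Items {
			fmt.Println(item.Name)
		}
		return nil
	})
}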
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &PublicDelegatedPrefixAggregatedList{ + ret := &RegionDiskTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -118947,24 +137024,20 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Lists all PublicDelegatedPrefix resources owned by the specific project across all scopes.", - // "flatPath": "projects/{project}/aggregated/publicDelegatedPrefixes", + // "description": "Retrieves a list of regional disk types available to the specified project.", + // "flatPath": "projects/{project}/regions/{region}/diskTypes", // "httpMethod": "GET", - // "id": "compute.publicDelegatedPrefixes.aggregatedList", + // "id": "compute.regionDiskTypes.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -118984,21 +137057,28 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "project": { - // "description": "Name of the project scoping this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "returnPartialSuccess": { // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/publicDelegatedPrefixes", + // "path": "projects/{project}/regions/{region}/diskTypes", // "response": { - // "$ref": "PublicDelegatedPrefixAggregatedList" + // "$ref": "RegionDiskTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -119012,7 +137092,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOpt // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *PublicDelegatedPrefixesAggregatedListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixAggregatedList) error) error { +func (c *RegionDiskTypesListCall) Pages(ctx context.Context, f func(*RegionDiskTypeList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -119030,30 +137110,32 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Pages(ctx context.Context, f } } -// method id "compute.publicDelegatedPrefixes.delete": +// method id "compute.regionDisks.addResourcePolicies": -type PublicDelegatedPrefixesDeleteCall struct { - s *Service - project string - region string - publicDelegatedPrefix string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksAddResourcePoliciesCall struct { + s *Service + project string + region string + disk string + regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified PublicDelegatedPrefix in the given -// region. +// AddResourcePolicies: Adds existing resource policies to a regional +// disk. You can only add one policy which will be applied to this disk +// for scheduling snapshot creation. // +// - disk: The disk name for this request. // - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to delete. -// - region: Name of the region of this request. -func (r *PublicDelegatedPrefixesService) Delete(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesDeleteCall { - c := &PublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +func (r *RegionDisksService) AddResourcePolicies(project string, region string, disk string, regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest) *RegionDisksAddResourcePoliciesCall { + c := &RegionDisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.publicDelegatedPrefix = publicDelegatedPrefix + c.disk = disk + c.regiondisksaddresourcepoliciesrequest = regiondisksaddresourcepoliciesrequest return c } @@ -119068,7 +137150,7 @@ func (r *PublicDelegatedPrefixesService) Delete(project string, region string, p // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *PublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *PublicDelegatedPrefixesDeleteCall { +func (c *RegionDisksAddResourcePoliciesCall) RequestId(requestId string) *RegionDisksAddResourcePoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -119076,7 +137158,7 @@ func (c *PublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *PublicD // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PublicDelegatedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesDeleteCall { +func (c *RegionDisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksAddResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -119084,21 +137166,21 @@ func (c *PublicDelegatedPrefixesDeleteCall) Fields(s ...googleapi.Field) *Public // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PublicDelegatedPrefixesDeleteCall) Context(ctx context.Context) *PublicDelegatedPrefixesDeleteCall { +func (c *RegionDisksAddResourcePoliciesCall) Context(ctx context.Context) *RegionDisksAddResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PublicDelegatedPrefixesDeleteCall) Header() http.Header { +func (c *RegionDisksAddResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -119106,48 +137188,53 @@ func (c *PublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksaddresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "publicDelegatedPrefix": c.publicDelegatedPrefix, + "project": c.project, + "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.publicDelegatedPrefixes.delete" call. +// Do executes the "compute.regionDisks.addResourcePolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -119161,32 +137248,32 @@ func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Deletes the specified PublicDelegatedPrefix in the given region.", - // "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", - // "httpMethod": "DELETE", - // "id": "compute.publicDelegatedPrefixes.delete", + // "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", + // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies", + // "httpMethod": "POST", + // "id": "compute.regionDisks.addResourcePolicies", // "parameterOrder": [ // "project", // "region", - // "publicDelegatedPrefix" + // "disk" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "disk": { + // "description": "The disk name for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "publicDelegatedPrefix": { - // "description": "Name of the PublicDelegatedPrefix resource to delete.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region of this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -119198,7 +137285,10 @@ func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*O // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", + // "path": "projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies", + // "request": { + // "$ref": "RegionDisksAddResourcePoliciesRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -119210,124 +137300,134 @@ func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.publicDelegatedPrefixes.get": +// method id "compute.regionDisks.createSnapshot": -type 
PublicDelegatedPrefixesGetCall struct { - s *Service - project string - region string - publicDelegatedPrefix string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionDisksCreateSnapshotCall struct { + s *Service + project string + region string + disk string + snapshot *Snapshot + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified PublicDelegatedPrefix resource in the -// given region. +// CreateSnapshot: Creates a snapshot of a specified persistent disk. +// For regular snapshot creation, consider using snapshots.insert +// instead, as that method supports more features, such as creating +// snapshots in a project different from the source disk project. // +// - disk: Name of the regional persistent disk to snapshot. // - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to return. -// - region: Name of the region of this request. -func (r *PublicDelegatedPrefixesService) Get(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesGetCall { - c := &PublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *RegionDisksService) CreateSnapshot(project string, region string, disk string, snapshot *Snapshot) *RegionDisksCreateSnapshotCall { + c := &RegionDisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.publicDelegatedPrefix = publicDelegatedPrefix + c.disk = disk + c.snapshot = snapshot + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionDisksCreateSnapshotCall) RequestId(requestId string) *RegionDisksCreateSnapshotCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PublicDelegatedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesGetCall { +func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisksCreateSnapshotCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *PublicDelegatedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PublicDelegatedPrefixesGetCall) Context(ctx context.Context) *PublicDelegatedPrefixesGetCall { +func (c *RegionDisksCreateSnapshotCall) Context(ctx context.Context) *RegionDisksCreateSnapshotCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PublicDelegatedPrefixesGetCall) Header() http.Header { +func (c *RegionDisksCreateSnapshotCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/createSnapshot") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "publicDelegatedPrefix": c.publicDelegatedPrefix, + "project": c.project, + "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.publicDelegatedPrefixes.get" call. -// Exactly one of *PublicDelegatedPrefix or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *PublicDelegatedPrefix.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefix, error) { +// Do executes the "compute.regionDisks.createSnapshot" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
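Finally, a sketch of the compute.regionDisks.createSnapshot call from the hunk above; the snapshot name is a placeholder and the returned long-running Operation is not waited on here.

package main

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// snapshotRegionDisk snapshots a regional persistent disk (example helper).
func snapshotRegionDisk(ctx context.Context, svc *compute.Service, project, region, disk string) (*compute.Operation, error) {
	snap := &compute.Snapshot{Name: "example-snapshot-of-" + disk}
	return svc.RegionDisks.
		CreateSnapshot(project, region, disk, snap).
		Context(ctx).
		Do()
}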
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &PublicDelegatedPrefix{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -119339,74 +137439,83 @@ func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Publ } return ret, nil // { - // "description": "Returns the specified PublicDelegatedPrefix resource in the given region.", - // "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", - // "httpMethod": "GET", - // "id": "compute.publicDelegatedPrefixes.get", + // "description": "Creates a snapshot of a specified persistent disk. For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", + // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", + // "httpMethod": "POST", + // "id": "compute.regionDisks.createSnapshot", // "parameterOrder": [ // "project", // "region", - // "publicDelegatedPrefix" + // "disk" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "disk": { + // "description": "Name of the regional persistent disk to snapshot.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "publicDelegatedPrefix": { - // "description": "Name of the PublicDelegatedPrefix resource to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region of this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", + // "path": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", + // "request": { + // "$ref": "Snapshot" + // }, // "response": { - // "$ref": "PublicDelegatedPrefix" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.publicDelegatedPrefixes.insert": +// method id "compute.regionDisks.delete": -type PublicDelegatedPrefixesInsertCall struct { - s *Service - project string - region string - publicdelegatedprefix *PublicDelegatedPrefix - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksDeleteCall struct { + s *Service + project string + region string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a PublicDelegatedPrefix in the specified project in -// the given region using the parameters that are included in the -// request. +// Delete: Deletes the specified regional persistent disk. Deleting a +// regional disk removes all the replicas of its data permanently and is +// irreversible. However, deleting a disk does not delete any snapshots +// previously made from the disk. You must separately delete snapshots. // +// - disk: Name of the regional persistent disk to delete. // - project: Project ID for this request. -// - region: Name of the region of this request. -func (r *PublicDelegatedPrefixesService) Insert(project string, region string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesInsertCall { - c := &PublicDelegatedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *RegionDisksService) Delete(project string, region string, disk string) *RegionDisksDeleteCall { + c := &RegionDisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.publicdelegatedprefix = publicdelegatedprefix + c.disk = disk return c } @@ -119421,7 +137530,7 @@ func (r *PublicDelegatedPrefixesService) Insert(project string, region string, p // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *PublicDelegatedPrefixesInsertCall) RequestId(requestId string) *PublicDelegatedPrefixesInsertCall { +func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -119429,7 +137538,7 @@ func (c *PublicDelegatedPrefixesInsertCall) RequestId(requestId string) *PublicD // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
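A minimal usage sketch for the compute.regionDisks.createSnapshot call generated above. It assumes the vendored package is imported as compute "google.golang.org/api/compute/v1" and that Application Default Credentials are available; the project, region, disk and snapshot names are placeholders, not values taken from this change.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials by default.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Start a snapshot of a regional persistent disk; the call returns a
	// long-running Operation rather than the finished Snapshot.
	op, err := svc.RegionDisks.CreateSnapshot("my-project", "us-central1", "my-disk",
		&compute.Snapshot{Name: "my-disk-snapshot"}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("snapshot operation %s is %s", op.Name, op.Status)
}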
-func (c *PublicDelegatedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesInsertCall { +func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -119437,21 +137546,21 @@ func (c *PublicDelegatedPrefixesInsertCall) Fields(s ...googleapi.Field) *Public // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PublicDelegatedPrefixesInsertCall) Context(ctx context.Context) *PublicDelegatedPrefixesInsertCall { +func (c *RegionDisksDeleteCall) Context(ctx context.Context) *RegionDisksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PublicDelegatedPrefixesInsertCall) Header() http.Header { +func (c *RegionDisksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -119459,16 +137568,11 @@ func (c *PublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -119476,35 +137580,36 @@ func (c *PublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Respons googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.publicDelegatedPrefixes.insert" call. +// Do executes the "compute.regionDisks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -119518,15 +137623,22 @@ func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Creates a PublicDelegatedPrefix in the specified project in the given region using the parameters that are included in the request.", - // "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes", - // "httpMethod": "POST", - // "id": "compute.publicDelegatedPrefixes.insert", + // "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + // "flatPath": "projects/{project}/regions/{region}/disks/{disk}", + // "httpMethod": "DELETE", + // "id": "compute.regionDisks.delete", // "parameterOrder": [ // "project", - // "region" + // "region", + // "disk" // ], // "parameters": { + // "disk": { + // "description": "Name of the regional persistent disk to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -119535,7 +137647,7 @@ func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*O // "type": "string" // }, // "region": { - // "description": "Name of the region of this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -119547,10 +137659,7 @@ func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*O // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes", - // "request": { - // "$ref": "PublicDelegatedPrefix" - // }, + // "path": "projects/{project}/regions/{region}/disks/{disk}", // "response": { // "$ref": "Operation" // }, @@ -119562,103 +137671,36 @@ func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.publicDelegatedPrefixes.list": +// method id "compute.regionDisks.get": -type PublicDelegatedPrefixesListCall struct { +type RegionDisksGetCall struct { s *Service project string region string + disk string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists the PublicDelegatedPrefixes for a project in the given -// region. +// Get: Returns a specified regional persistent disk. // +// - disk: Name of the regional persistent disk to return. // - project: Project ID for this request. -// - region: Name of the region of this request. -func (r *PublicDelegatedPrefixesService) List(project string, region string) *PublicDelegatedPrefixesListCall { - c := &PublicDelegatedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. 
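A similar hedged sketch for compute.regionDisks.delete. Here svc is a *compute.Service constructed as in the previous example (same assumed imports of context, log and the compute package), and the request ID shown is only an illustration of the idempotency parameter documented above.

func deleteRegionDisk(ctx context.Context, svc *compute.Service) error {
	// Deleting a regional disk is irreversible; snapshots made from it are
	// kept and must be deleted separately, as the method description notes.
	op, err := svc.RegionDisks.Delete("my-project", "us-central1", "my-disk").
		RequestId("11111111-2222-3333-4444-555555555555"). // optional idempotency token
		Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("delete operation %s is %s", op.Name, op.Status)
	return nil
}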
+func (r *RegionDisksService) Get(project string, region string, disk string) *RegionDisksGetCall { + c := &RegionDisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *PublicDelegatedPrefixesListCall) Filter(filter string) *PublicDelegatedPrefixesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *PublicDelegatedPrefixesListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *PublicDelegatedPrefixesListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. 
-func (c *PublicDelegatedPrefixesListCall) PageToken(pageToken string) *PublicDelegatedPrefixesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *PublicDelegatedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + c.disk = disk return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PublicDelegatedPrefixesListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesListCall { +func (c *RegionDisksGetCall) Fields(s ...googleapi.Field) *RegionDisksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -119668,7 +137710,7 @@ func (c *PublicDelegatedPrefixesListCall) Fields(s ...googleapi.Field) *PublicDe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *PublicDelegatedPrefixesListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesListCall { +func (c *RegionDisksGetCall) IfNoneMatch(entityTag string) *RegionDisksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -119676,21 +137718,21 @@ func (c *PublicDelegatedPrefixesListCall) IfNoneMatch(entityTag string) *PublicD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PublicDelegatedPrefixesListCall) Context(ctx context.Context) *PublicDelegatedPrefixesListCall { +func (c *RegionDisksGetCall) Context(ctx context.Context) *RegionDisksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PublicDelegatedPrefixesListCall) Header() http.Header { +func (c *RegionDisksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -119703,7 +137745,7 @@ func (c *PublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -119713,37 +137755,38 @@ func (c *PublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response, googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.publicDelegatedPrefixes.list" call. -// Exactly one of *PublicDelegatedPrefixList or error will be non-nil. 
-// Any non-2xx status code is an error. Response headers are in either -// *PublicDelegatedPrefixList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixList, error) { +// Do executes the "compute.regionDisks.get" call. +// Exactly one of *Disk or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Disk.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &PublicDelegatedPrefixList{ + ret := &Disk{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -119755,36 +137798,21 @@ func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pub } return ret, nil // { - // "description": "Lists the PublicDelegatedPrefixes for a project in the given region.", - // "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes", + // "description": "Returns a specified regional persistent disk.", + // "flatPath": "projects/{project}/regions/{region}/disks/{disk}", // "httpMethod": "GET", - // "id": "compute.publicDelegatedPrefixes.list", + // "id": "compute.regionDisks.get", // "parameterOrder": [ // "project", - // "region" + // "region", + // "disk" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "disk": { + // "description": "Name of the regional persistent disk to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -119795,21 +137823,16 @@ func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pub // "type": "string" // }, // "region": { - // "description": "Name of the region of this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes", + // "path": "projects/{project}/regions/{region}/disks/{disk}", // "response": { - // "$ref": "PublicDelegatedPrefixList" + // "$ref": "Disk" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -119820,155 +137843,130 @@ func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pub } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
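A short sketch for compute.regionDisks.get, reusing the same assumed svc and additionally assuming the import "google.golang.org/api/googleapi". Get returns the Disk resource itself rather than an Operation, and IfNoneMatch can be used to skip re-fetching an unchanged resource.

func getRegionDisk(ctx context.Context, svc *compute.Service, lastEtag string) {
	call := svc.RegionDisks.Get("my-project", "us-central1", "my-disk").Context(ctx)
	if lastEtag != "" {
		call = call.IfNoneMatch(lastEtag) // ask the server to skip the body if nothing changed
	}
	disk, err := call.Do()
	if googleapi.IsNotModified(err) {
		log.Println("disk unchanged since last fetch")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("disk %s is %d GB", disk.Name, disk.SizeGb)
}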
-func (c *PublicDelegatedPrefixesListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.publicDelegatedPrefixes.patch": +// method id "compute.regionDisks.getIamPolicy": -type PublicDelegatedPrefixesPatchCall struct { - s *Service - project string - region string - publicDelegatedPrefix string - publicdelegatedprefix *PublicDelegatedPrefix - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified PublicDelegatedPrefix resource with the -// data included in the request. This method supports PATCH semantics -// and uses JSON merge patch format and processing rules. +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. // // - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to patch. -// - region: Name of the region for this request. -func (r *PublicDelegatedPrefixesService) Patch(project string, region string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesPatchCall { - c := &PublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *RegionDisksService) GetIamPolicy(project string, region string, resource string) *RegionDisksGetIamPolicyCall { + c := &RegionDisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.publicDelegatedPrefix = publicDelegatedPrefix - c.publicdelegatedprefix = publicdelegatedprefix + c.resource = resource return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *PublicDelegatedPrefixesPatchCall) RequestId(requestId string) *PublicDelegatedPrefixesPatchCall { - c.urlParams_.Set("requestId", requestId) +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. 
+func (c *RegionDisksGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *RegionDisksGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PublicDelegatedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesPatchCall { +func (c *RegionDisksGetIamPolicyCall) Fields(s ...googleapi.Field) *RegionDisksGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionDisksGetIamPolicyCall) IfNoneMatch(entityTag string) *RegionDisksGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PublicDelegatedPrefixesPatchCall) Context(ctx context.Context) *PublicDelegatedPrefixesPatchCall { +func (c *RegionDisksGetIamPolicyCall) Context(ctx context.Context) *RegionDisksGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PublicDelegatedPrefixesPatchCall) Header() http.Header { +func (c *RegionDisksGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PublicDelegatedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "publicDelegatedPrefix": c.publicDelegatedPrefix, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.publicDelegatedPrefixes.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionDisks.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionDisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -119980,16 +137978,22 @@ func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Patches the specified PublicDelegatedPrefix resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", - // "httpMethod": "PATCH", - // "id": "compute.publicDelegatedPrefixes.patch", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/regions/{region}/disks/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.regionDisks.getIamPolicy", // "parameterOrder": [ // "project", // "region", - // "publicDelegatedPrefix" + // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -119997,63 +138001,56 @@ func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "publicDelegatedPrefix": { - // "description": "Name of the PublicDelegatedPrefix resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", - // "request": { - // "$ref": "PublicDelegatedPrefix" - // }, + // "path": "projects/{project}/regions/{region}/disks/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionAutoscalers.delete": +// method id "compute.regionDisks.insert": -type RegionAutoscalersDeleteCall struct { +type RegionDisksInsertCall struct { s *Service project string region string - autoscaler string + disk *Disk urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified autoscaler. +// Insert: Creates a persistent regional disk in the specified project +// using the data included in the request. // -// - autoscaler: Name of the autoscaler to delete. // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { - c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *RegionDisksService) Insert(project string, region string, disk *Disk) *RegionDisksInsertCall { + c := &RegionDisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler + c.disk = disk return c } @@ -120068,15 +138065,22 @@ func (r *RegionAutoscalersService) Delete(project string, region string, autosca // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall { +func (c *RegionDisksInsertCall) RequestId(requestId string) *RegionDisksInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// SourceImage sets the optional parameter "sourceImage": Source image +// to restore onto a disk. This field is optional. +func (c *RegionDisksInsertCall) SourceImage(sourceImage string) *RegionDisksInsertCall { + c.urlParams_.Set("sourceImage", sourceImage) + return c +} + // Fields allows partial responses to be retrieved. 
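A hedged sketch for compute.regionDisks.getIamPolicy with the optionsRequestedPolicyVersion query parameter shown above; svc is the same assumed *compute.Service and the resource name is a placeholder.

func showDiskPolicy(ctx context.Context, svc *compute.Service) {
	policy, err := svc.RegionDisks.GetIamPolicy("my-project", "us-central1", "my-disk").
		OptionsRequestedPolicyVersion(3). // request the v3 policy schema
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range policy.Bindings {
		log.Printf("%s -> %v", b.Role, b.Members)
	}
}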
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall { +func (c *RegionDisksInsertCall) Fields(s ...googleapi.Field) *RegionDisksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -120084,21 +138088,21 @@ func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall { +func (c *RegionDisksInsertCall) Context(ctx context.Context) *RegionDisksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersDeleteCall) Header() http.Header { +func (c *RegionDisksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -120106,48 +138110,52 @@ func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "autoscaler": c.autoscaler, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.delete" call. +// Do executes the "compute.regionDisks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -120161,23 +138169,15 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified autoscaler.", - // "flatPath": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", - // "httpMethod": "DELETE", - // "id": "compute.regionAutoscalers.delete", + // "description": "Creates a persistent regional disk in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/disks", + // "httpMethod": "POST", + // "id": "compute.regionDisks.insert", // "parameterOrder": [ // "project", - // "region", - // "autoscaler" + // "region" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -120186,7 +138186,7 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -120196,9 +138196,17 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "sourceImage": { + // "description": "Source image to restore onto a disk. 
This field is optional.", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", + // "path": "projects/{project}/regions/{region}/disks", + // "request": { + // "$ref": "Disk" + // }, // "response": { // "$ref": "Operation" // }, @@ -120210,36 +138218,116 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionAutoscalers.get": +// method id "compute.regionDisks.list": -type RegionAutoscalersGetCall struct { +type RegionDisksListCall struct { s *Service project string region string - autoscaler string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified autoscaler. +// List: Retrieves the list of persistent disks contained within the +// specified region. // -// - autoscaler: Name of the autoscaler to return. // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { - c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *RegionDisksService) List(project string, region string) *RegionDisksListCall { + c := &RegionDisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
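A sketch for compute.regionDisks.insert under the same assumptions. A regional disk replicates across zones, which is expressed on the Disk resource rather than on this call, so the Disk fields and zone URLs below are illustrative assumptions.

func createRegionDisk(ctx context.Context, svc *compute.Service) {
	disk := &compute.Disk{
		Name:   "my-regional-disk",
		SizeGb: 100,
		// Regional disks replicate across two zones in the region; these
		// zone URLs are placeholders.
		ReplicaZones: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a",
			"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b",
		},
	}
	op, err := svc.RegionDisks.Insert("my-project", "us-central1", disk).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("insert operation %s is %s", op.Name, op.Status)
}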
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionDisksListCall) MaxResults(maxResults int64) *RegionDisksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionDisksListCall) OrderBy(orderBy string) *RegionDisksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionDisksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionDisksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall { +func (c *RegionDisksListCall) Fields(s ...googleapi.Field) *RegionDisksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -120249,7 +138337,7 @@ func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscale // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall { +func (c *RegionDisksListCall) IfNoneMatch(entityTag string) *RegionDisksListCall { c.ifNoneMatch_ = entityTag return c } @@ -120257,21 +138345,21 @@ func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall { +func (c *RegionDisksListCall) Context(ctx context.Context) *RegionDisksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersGetCall) Header() http.Header { +func (c *RegionDisksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -120284,7 +138372,7 @@ func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -120292,40 +138380,39 @@ func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "autoscaler": c.autoscaler, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionDisks.list" call. +// Exactly one of *DiskList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { +func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Autoscaler{ + ret := &DiskList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -120337,21 +138424,36 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler } return ret, nil // { - // "description": "Returns the specified autoscaler.", - // "flatPath": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", + // "description": "Retrieves the list of persistent disks contained within the specified region.", + // "flatPath": "projects/{project}/regions/{region}/disks", // "httpMethod": "GET", - // "id": "compute.regionAutoscalers.get", + // "id": "compute.regionDisks.list", // "parameterOrder": [ // "project", - // "region", - // "autoscaler" + // "region" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -120362,16 +138464,21 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/autoscalers/{autoscaler}", + // "path": "projects/{project}/regions/{region}/disks", // "response": { - // "$ref": "Autoscaler" + // "$ref": "DiskList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -120382,28 +138489,52 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler } -// method id "compute.regionAutoscalers.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
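The Pages helper implemented below drives the pageToken/nextPageToken loop described in the list parameters. A hedged sketch of how it is typically called, with the filter string purely illustrative:

func listRegionDisks(ctx context.Context, svc *compute.Service) error {
	return svc.RegionDisks.List("my-project", "us-central1").
		Filter("name != scratch-disk"). // placeholder filter expression
		MaxResults(100).
		Pages(ctx, func(page *compute.DiskList) error {
			for _, d := range page.Items {
				log.Printf("%s (%d GB)", d.Name, d.SizeGb)
			}
			return nil // returning an error here stops the iteration
		})
}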
+func (c *RegionDisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionAutoscalersInsertCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionDisks.removeResourcePolicies": + +type RegionDisksRemoveResourcePoliciesCall struct { + s *Service + project string + region string + disk string + regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. +// RemoveResourcePolicies: Removes resource policies from a regional +// disk. // +// - disk: The disk name for this request. // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { - c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +func (r *RegionDisksService) RemoveResourcePolicies(project string, region string, disk string, regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest) *RegionDisksRemoveResourcePoliciesCall { + c := &RegionDisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler + c.disk = disk + c.regiondisksremoveresourcepoliciesrequest = regiondisksremoveresourcepoliciesrequest return c } @@ -120418,7 +138549,7 @@ func (r *RegionAutoscalersService) Insert(project string, region string, autosca // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall { +func (c *RegionDisksRemoveResourcePoliciesCall) RequestId(requestId string) *RegionDisksRemoveResourcePoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -120426,7 +138557,7 @@ func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutosca // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall { +func (c *RegionDisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksRemoveResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -120434,21 +138565,21 @@ func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
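For orientation, the hunk above adds a Pages helper to the new RegionDisksListCall. A minimal consumption sketch, assuming the vendored google.golang.org/api/compute/v1 client this generated file belongs to; the project and region values are placeholders, not taken from this patch:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Hypothetical identifiers for illustration only.
	projectID, region := "my-project", "us-central1"

	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Pages walks nextPageToken for the caller, delivering one DiskList per page.
	err = svc.RegionDisks.List(projectID, region).MaxResults(500).
		Pages(ctx, func(page *compute.DiskList) error {
			for _, d := range page.Items {
				fmt.Printf("%s\t%d GB\n", d.Name, d.SizeGb)
			}
			return nil // a non-nil error halts the iteration
		})
	if err != nil {
		log.Fatal(err)
	}
}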
-func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall { +func (c *RegionDisksRemoveResourcePoliciesCall) Context(ctx context.Context) *RegionDisksRemoveResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersInsertCall) Header() http.Header { +func (c *RegionDisksRemoveResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -120456,14 +138587,14 @@ func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksremoveresourcepoliciesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -120473,35 +138604,36 @@ func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, err googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.insert" call. +// Do executes the "compute.regionDisks.removeResourcePolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -120515,15 +138647,23 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/autoscalers", + // "description": "Removes resource policies from a regional disk.", + // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies", // "httpMethod": "POST", - // "id": "compute.regionAutoscalers.insert", + // "id": "compute.regionDisks.removeResourcePolicies", // "parameterOrder": [ // "project", - // "region" + // "region", + // "disk" // ], // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -120532,7 +138672,7 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -120544,9 +138684,9 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/autoscalers", + // "path": "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies", // "request": { - // "$ref": "Autoscaler" + // "$ref": "RegionDisksRemoveResourcePoliciesRequest" // }, // "response": { // "$ref": "Operation" @@ -120559,150 +138699,92 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionAutoscalers.list": +// method id "compute.regionDisks.resize": -type RegionAutoscalersListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionDisksResizeCall struct { + s *Service + project string + region string + disk string + regiondisksresizerequest *RegionDisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of autoscalers contained within the specified -// region. +// Resize: Resizes the specified regional persistent disk. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { - c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - disk: Name of the regional persistent disk. +// - project: The project ID for this request. +// - region: Name of the region for this request. 
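The recurring change in these Do methods is that error returns now pass through gensupport.WrapError. A hedged sketch of what that means for callers, assuming the vendored google.golang.org/api/compute/v1 and google.golang.org/api/googleapi packages; the disk name and the status check are illustrative:

package main

import (
	"context"
	"errors"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	// Hypothetical identifiers for illustration only.
	projectID, region, disk := "my-project", "us-central1", "data-disk-1"

	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	_, err = svc.RegionDisks.Get(projectID, region, disk).Context(ctx).Do()

	// WrapError layers a richer error around the *googleapi.Error, and the
	// wrapper unwraps back to it, so errors.As keeps working for status checks.
	var gerr *googleapi.Error
	switch {
	case errors.As(err, &gerr) && gerr.Code == http.StatusNotFound:
		log.Printf("disk %q not found in %s", disk, region)
	case err != nil:
		log.Fatal(err)
	}
}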
+func (r *RegionDisksService) Resize(project string, region string, disk string, regiondisksresizerequest *RegionDisksResizeRequest) *RegionDisksResizeCall { + c := &RegionDisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.disk = disk + c.regiondisksresizerequest = regiondisksresizerequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. 
-func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *RegionAutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionAutoscalersListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall { +func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall { +func (c *RegionDisksResizeCall) Context(ctx context.Context) *RegionDisksResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionAutoscalersListCall) Header() http.Header { +func (c *RegionDisksResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksresizerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/resize") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -120710,37 +138792,38 @@ func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.list" call. -// Exactly one of *RegionAutoscalerList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RegionAutoscalerList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) { +// Do executes the "compute.regionDisks.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &RegionAutoscalerList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -120752,145 +138835,90 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified region.", - // "flatPath": "projects/{project}/regions/{region}/autoscalers", - // "httpMethod": "GET", - // "id": "compute.regionAutoscalers.list", + // "description": "Resizes the specified regional persistent disk.", + // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/resize", + // "httpMethod": "POST", + // "id": "compute.regionDisks.resize", // "parameterOrder": [ // "project", - // "region" + // "region", + // "disk" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. 
You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "disk": { + // "description": "Name of the regional persistent disk.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "The project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/autoscalers", + // "path": "projects/{project}/regions/{region}/disks/{disk}/resize", + // "request": { + // "$ref": "RegionDisksResizeRequest" + // }, // "response": { - // "$ref": "RegionAutoscalerList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
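The resize hunk above returns an Operation and documents the optional requestId for idempotent retries. A minimal sketch of a resize call under those semantics, assuming the same vendored compute/v1 client; project, region, disk name, size, and the UUID are placeholders:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Hypothetical identifiers for illustration only.
	projectID, region, disk := "my-project", "us-central1", "data-disk-1"

	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// The requestId lets a retried call be recognized as a duplicate, per the
	// parameter description above; any valid non-zero UUID works.
	op, err := svc.RegionDisks.Resize(projectID, region, disk,
		&compute.RegionDisksResizeRequest{SizeGb: 500},
	).RequestId("3f8a5d0e-1b2c-4d5e-8f90-123456789abc").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	// Optionally block until the regional operation completes.
	if _, err := svc.RegionOperations.Wait(projectID, region, op.Name).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}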
-func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionAutoscalers.patch": - -type RegionAutoscalersPatchCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +// method id "compute.regionDisks.setIamPolicy": -// Patch: Updates an autoscaler in the specified project using the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. +type RegionDisksSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. // // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { - c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *RegionDisksService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionDisksSetIamPolicyCall { + c := &RegionDisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall { +func (c *RegionDisksSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionDisksSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -120898,21 +138926,21 @@ func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutosca // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall { +func (c *RegionDisksSetIamPolicyCall) Context(ctx context.Context) *RegionDisksSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersPatchCall) Header() http.Header { +func (c *RegionDisksSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -120920,54 +138948,55 @@ func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionDisks.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *RegionDisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -120979,21 +139008,16 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/regions/{region}/autoscalers", - // "httpMethod": "PATCH", - // "id": "compute.regionAutoscalers.patch", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", + // "httpMethod": "POST", + // "id": "compute.regionDisks.setIamPolicy", // "parameterOrder": [ // "project", - // "region" + // "region", + // "resource" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -121002,24 +139026,26 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/autoscalers", + // "path": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", // "request": { - // "$ref": "Autoscaler" + // "$ref": "RegionSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -121029,35 +139055,30 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.regionAutoscalers.update": +// method id "compute.regionDisks.setLabels": -type RegionAutoscalersUpdateCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates an autoscaler in the specified project using the data -// included in the request. +// SetLabels: Sets the labels on the target regional disk. // // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { - c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The region for this request. +// - resource: Name or id of the resource for this request. +func (r *RegionDisksService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionDisksSetLabelsCall { + c := &RegionDisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. -func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest return c } @@ -121072,7 +139093,7 @@ func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutos // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall { +func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -121080,7 +139101,7 @@ func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutosca // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
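The setIamPolicy method above replaces the whole policy, so the usual pattern is read-modify-write. A sketch under the same assumptions as the earlier examples; the role and member below are placeholders, not values from this patch:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Hypothetical identifiers for illustration only.
	projectID, region, disk := "my-project", "us-central1", "data-disk-1"

	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Read-modify-write: fetch the current policy (including its etag),
	// append a binding, then write it back via setIamPolicy.
	policy, err := svc.RegionDisks.GetIamPolicy(projectID, region, disk).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	policy.Bindings = append(policy.Bindings, &compute.Binding{
		Role:    "roles/compute.storageAdmin",
		Members: []string{"user:alice@example.com"},
	})

	if _, err := svc.RegionDisks.SetIamPolicy(projectID, region, disk,
		&compute.RegionSetPolicyRequest{Policy: policy},
	).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}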
-func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall { +func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -121088,21 +139109,21 @@ func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall { +func (c *RegionDisksSetLabelsCall) Context(ctx context.Context) *RegionDisksSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersUpdateCall) Header() http.Header { +func (c *RegionDisksSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -121110,52 +139131,53 @@ func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.update" call. +// Do executes the "compute.regionDisks.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -121169,21 +139191,16 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/autoscalers", - // "httpMethod": "PUT", - // "id": "compute.regionAutoscalers.update", + // "description": "Sets the labels on the target regional disk.", + // "flatPath": "projects/{project}/regions/{region}/disks/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.regionDisks.setLabels", // "parameterOrder": [ // "project", - // "region" + // "region", + // "resource" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -121192,7 +139209,7 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -121202,11 +139219,18 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/autoscalers", + // "path": "projects/{project}/regions/{region}/disks/{resource}/setLabels", // "request": { - // "$ref": "Autoscaler" + // "$ref": "RegionSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -121219,51 +139243,38 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionBackendServices.delete": +// method id "compute.regionDisks.testIamPermissions": -type RegionBackendServicesDeleteCall struct { - s *Service - project string - region string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified regional BackendService resource. +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. // -// - backendService: Name of the BackendService resource to delete. // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { - c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *RegionDisksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionDisksTestIamPermissionsCall { + c := &RegionDisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBackendServicesDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
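The setLabels hunk above takes a RegionSetLabelsRequest, which, like other label APIs in this client, carries the current label fingerprint to guard against concurrent edits. A sketch under the same assumptions; the label key and value are placeholders:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Hypothetical identifiers for illustration only.
	projectID, region, disk := "my-project", "us-central1", "data-disk-1"

	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// SetLabels is a read-modify-write: fetch the disk first so the request
	// can echo back its current LabelFingerprint.
	d, err := svc.RegionDisks.Get(projectID, region, disk).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	labels := d.Labels
	if labels == nil {
		labels = map[string]string{}
	}
	labels["env"] = "staging"

	op, err := svc.RegionDisks.SetLabels(projectID, region, disk,
		&compute.RegionSetLabelsRequest{
			LabelFingerprint: d.LabelFingerprint,
			Labels:           labels,
		},
	).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("setLabels operation: %s (%s)", op.Name, op.Status)
}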
-func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBackendServicesDeleteCall { +func (c *RegionDisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionDisksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -121271,21 +139282,21 @@ func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesDeleteCall) Context(ctx context.Context) *RegionBackendServicesDeleteCall { +func (c *RegionDisksTestIamPermissionsCall) Context(ctx context.Context) *RegionDisksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesDeleteCall) Header() http.Header { +func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -121293,50 +139304,55 @@ func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionDisks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -121348,23 +139364,16 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified regional BackendService resource.", - // "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", - // "httpMethod": "DELETE", - // "id": "compute.regionBackendServices.delete", + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.regionDisks.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "resource" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -121373,146 +139382,155 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/backendServices/{backendService}", + // "path": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionBackendServices.get": +// method id "compute.regionHealthCheckServices.delete": -type RegionBackendServicesGetCall struct { - s *Service - project string - region string - backendService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionHealthCheckServicesDeleteCall struct { + s *Service + project string + region string + healthCheckService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified regional BackendService resource. +// Delete: Deletes the specified regional HealthCheckService. // -// - backendService: Name of the BackendService resource to return. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { - c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - healthCheckService: Name of the HealthCheckService to delete. The +// name must be 1-63 characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionHealthCheckServicesService) Delete(project string, region string, healthCheckService string) *RegionHealthCheckServicesDeleteCall { + c := &RegionHealthCheckServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService + c.healthCheckService = healthCheckService + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). 
+func (c *RegionHealthCheckServicesDeleteCall) RequestId(requestId string) *RegionHealthCheckServicesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetCall { +func (c *RegionHealthCheckServicesDeleteCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesGetCall) Context(ctx context.Context) *RegionBackendServicesGetCall { +func (c *RegionHealthCheckServicesDeleteCall) Context(ctx context.Context) *RegionHealthCheckServicesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesGetCall) Header() http.Header { +func (c *RegionHealthCheckServicesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "healthCheckService": c.healthCheckService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.get" call. -// Exactly one of *BackendService or error will be non-nil. Any non-2xx +// Do executes the "compute.regionHealthCheckServices.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &BackendService{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -121524,20 +139542,19 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen } return ret, nil // { - // "description": "Returns the specified regional BackendService resource.", - // "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", - // "httpMethod": "GET", - // "id": "compute.regionBackendServices.get", + // "description": "Deletes the specified regional HealthCheckService.", + // "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + // "httpMethod": "DELETE", + // "id": "compute.regionHealthCheckServices.delete", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "healthCheckService" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", + // "healthCheckService": { + // "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -121554,132 +139571,142 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/backendServices/{backendService}", + // "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", // "response": { - // "$ref": "BackendService" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionBackendServices.getHealth": +// method id "compute.regionHealthCheckServices.get": -type RegionBackendServicesGetHealthCall struct { - s *Service - project string - region string - backendService string - resourcegroupreference *ResourceGroupReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthCheckServicesGetCall struct { + s *Service + project string + region string + healthCheckService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// GetHealth: Gets the most recent health check results for this -// regional BackendService. +// Get: Returns the specified regional HealthCheckService resource. // -// - backendService: Name of the BackendService resource for which to -// get health. -// - project: . -// - region: Name of the region scoping this request. -func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { - c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - healthCheckService: Name of the HealthCheckService to update. The +// name must be 1-63 characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionHealthCheckServicesService) Get(project string, region string, healthCheckService string) *RegionHealthCheckServicesGetCall { + c := &RegionHealthCheckServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference + c.healthCheckService = healthCheckService return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetHealthCall { +func (c *RegionHealthCheckServicesGetCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionHealthCheckServicesGetCall) IfNoneMatch(entityTag string) *RegionHealthCheckServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesGetHealthCall) Context(ctx context.Context) *RegionBackendServicesGetHealthCall { +func (c *RegionHealthCheckServicesGetCall) Context(ctx context.Context) *RegionHealthCheckServicesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesGetHealthCall) Header() http.Header { +func (c *RegionHealthCheckServicesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "healthCheckService": c.healthCheckService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.getHealth" call. -// Exactly one of *BackendServiceGroupHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionHealthCheckServices.get" call. +// Exactly one of *HealthCheckService or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HealthCheckService.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { +func (c *RegionHealthCheckServicesGetCall) Do(opts ...googleapi.CallOption) (*HealthCheckService, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &BackendServiceGroupHealth{ + ret := &HealthCheckService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -121691,24 +139718,24 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets the most recent health check results for this regional BackendService.", - // "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", - // "httpMethod": "POST", - // "id": "compute.regionBackendServices.getHealth", + // "description": "Returns the specified regional HealthCheckService resource.", + // "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + // "httpMethod": "GET", + // "id": "compute.regionHealthCheckServices.get", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "healthCheckService" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource for which to get health.", + // "healthCheckService": { + // "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, // "project": { + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, @@ -121722,12 +139749,9 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/backendServices/{backendService}/getHealth", - // "request": { - // "$ref": "ResourceGroupReference" - // }, + // "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", // "response": { - // "$ref": "BackendServiceGroupHealth" + // "$ref": "HealthCheckService" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -121738,29 +139762,28 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionBackendServices.insert": +// method id "compute.regionHealthCheckServices.insert": -type RegionBackendServicesInsertCall struct { - s *Service - project string - region string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthCheckServicesInsertCall struct { + s *Service + project string + region string + healthcheckservice *HealthCheckService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a regional BackendService resource in the specified -// project using the data included in the request. For more information, -// see Backend services overview. 
+// Insert: Creates a regional HealthCheckService resource in the +// specified project and region using the data included in the request. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { - c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionHealthCheckServicesService) Insert(project string, region string, healthcheckservice *HealthCheckService) *RegionHealthCheckServicesInsertCall { + c := &RegionHealthCheckServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendservice = backendservice + c.healthcheckservice = healthcheckservice return c } @@ -121775,7 +139798,7 @@ func (r *RegionBackendServicesService) Insert(project string, region string, bac // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBackendServicesInsertCall { +func (c *RegionHealthCheckServicesInsertCall) RequestId(requestId string) *RegionHealthCheckServicesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -121783,7 +139806,7 @@ func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBackendServicesInsertCall { +func (c *RegionHealthCheckServicesInsertCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -121791,21 +139814,21 @@ func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesInsertCall) Context(ctx context.Context) *RegionBackendServicesInsertCall { +func (c *RegionHealthCheckServicesInsertCall) Context(ctx context.Context) *RegionHealthCheckServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionBackendServicesInsertCall) Header() http.Header { +func (c *RegionHealthCheckServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -121813,14 +139836,14 @@ func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheckservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -121834,31 +139857,31 @@ func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.insert" call. +// Do executes the "compute.regionHealthCheckServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthCheckServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -121872,10 +139895,10 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. 
For more information, see Backend services overview.", - // "flatPath": "projects/{project}/regions/{region}/backendServices", + // "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/healthCheckServices", // "httpMethod": "POST", - // "id": "compute.regionBackendServices.insert", + // "id": "compute.regionHealthCheckServices.insert", // "parameterOrder": [ // "project", // "region" @@ -121901,9 +139924,9 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/backendServices", + // "path": "projects/{project}/regions/{region}/healthCheckServices", // "request": { - // "$ref": "BackendService" + // "$ref": "HealthCheckService" // }, // "response": { // "$ref": "Operation" @@ -121916,9 +139939,9 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionBackendServices.list": +// method id "compute.regionHealthCheckServices.list": -type RegionBackendServicesListCall struct { +type RegionHealthCheckServicesListCall struct { s *Service project string region string @@ -121928,32 +139951,35 @@ type RegionBackendServicesListCall struct { header_ http.Header } -// List: Retrieves the list of regional BackendService resources -// available to the specified project in the given region. +// List: Lists all the HealthCheckService resources that have been +// configured for the specified project in the given region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { - c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionHealthCheckServicesService) List(project string, region string) *RegionHealthCheckServicesListCall { + c := &RegionHealthCheckServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. 
If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -121961,8 +139987,18 @@ func (r *RegionBackendServicesService) List(project string, region string) *Regi // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionHealthCheckServicesListCall) Filter(filter string) *RegionHealthCheckServicesListCall { c.urlParams_.Set("filter", filter) return c } @@ -121973,7 +140009,7 @@ func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServ // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBackendServicesListCall { +func (c *RegionHealthCheckServicesListCall) MaxResults(maxResults int64) *RegionHealthCheckServicesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -121987,7 +140023,7 @@ func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBack // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. 
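Illustrative usage (editorial note, not part of the vendored patch): a short sketch combining the List builder methods documented in this hunk (Filter, OrderBy, MaxResults, Pages). The filter string and OrderBy value are taken from the doc comments above; the project, region, and the svc.RegionHealthCheckServices field name are assumptions.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // Application Default Credentials assumed.
	if err != nil {
		log.Fatal(err)
	}
	call := svc.RegionHealthCheckServices.List("my-project", "us-central1").
		Filter(`name != example-instance`). // filter expression syntax per the Filter doc above
		OrderBy("creationTimestamp desc").  // newest first, per the OrderBy doc above
		MaxResults(100)
	// Pages follows nextPageToken internally, invoking the callback once per page.
	err = call.Pages(ctx, func(page *compute.HealthCheckServicesList) error {
		for _, hcs := range page.Items {
			log.Printf("health check service: %s", hcs.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}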
-func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendServicesListCall { +func (c *RegionHealthCheckServicesListCall) OrderBy(orderBy string) *RegionHealthCheckServicesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -121995,7 +140031,7 @@ func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendSe // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBackendServicesListCall { +func (c *RegionHealthCheckServicesListCall) PageToken(pageToken string) *RegionHealthCheckServicesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -122004,7 +140040,7 @@ func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBacke // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionBackendServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionBackendServicesListCall { +func (c *RegionHealthCheckServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionHealthCheckServicesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -122012,7 +140048,7 @@ func (c *RegionBackendServicesListCall) ReturnPartialSuccess(returnPartialSucces // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBackendServicesListCall { +func (c *RegionHealthCheckServicesListCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -122022,7 +140058,7 @@ func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBack // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBackendServicesListCall { +func (c *RegionHealthCheckServicesListCall) IfNoneMatch(entityTag string) *RegionHealthCheckServicesListCall { c.ifNoneMatch_ = entityTag return c } @@ -122030,21 +140066,21 @@ func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesListCall) Context(ctx context.Context) *RegionBackendServicesListCall { +func (c *RegionHealthCheckServicesListCall) Context(ctx context.Context) *RegionHealthCheckServicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionBackendServicesListCall) Header() http.Header { +func (c *RegionHealthCheckServicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -122057,7 +140093,7 @@ func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -122071,33 +140107,33 @@ func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.list" call. -// Exactly one of *BackendServiceList or error will be non-nil. Any +// Do executes the "compute.regionHealthCheckServices.list" call. +// Exactly one of *HealthCheckServicesList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was +// *HealthCheckServicesList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { +func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*HealthCheckServicesList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &BackendServiceList{ + ret := &HealthCheckServicesList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -122109,17 +140145,17 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe } return ret, nil // { - // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", - // "flatPath": "projects/{project}/regions/{region}/backendServices", + // "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", + // "flatPath": "projects/{project}/regions/{region}/healthCheckServices", // "httpMethod": "GET", - // "id": "compute.regionBackendServices.list", + // "id": "compute.regionHealthCheckServices.list", // "parameterOrder": [ // "project", // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -122161,9 +140197,9 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/backendServices", + // "path": "projects/{project}/regions/{region}/healthCheckServices", // "response": { - // "$ref": "BackendServiceList" + // "$ref": "HealthCheckServicesList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -122177,7 +140213,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { +func (c *RegionHealthCheckServicesListCall) Pages(ctx context.Context, f func(*HealthCheckServicesList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -122195,134 +140231,491 @@ func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*Backe } } -// method id "compute.regionBackendServices.patch": +// method id "compute.regionHealthCheckServices.patch": + +type RegionHealthCheckServicesPatchCall struct { + s *Service + project string + region string + healthCheckService string + healthcheckservice *HealthCheckService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified regional HealthCheckService resource +// with the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +// +// - healthCheckService: Name of the HealthCheckService to update. The +// name must be 1-63 characters long, and comply with RFC1035. 
+// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionHealthCheckServicesService) Patch(project string, region string, healthCheckService string, healthcheckservice *HealthCheckService) *RegionHealthCheckServicesPatchCall { + c := &RegionHealthCheckServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.healthCheckService = healthCheckService + c.healthcheckservice = healthcheckservice + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionHealthCheckServicesPatchCall) RequestId(requestId string) *RegionHealthCheckServicesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthCheckServicesPatchCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthCheckServicesPatchCall) Context(ctx context.Context) *RegionHealthCheckServicesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionHealthCheckServicesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthCheckServicesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheckservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "healthCheckService": c.healthCheckService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthCheckServices.patch" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthCheckServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + // "httpMethod": "PATCH", + // "id": "compute.regionHealthCheckServices.patch", + // "parameterOrder": [ + // "project", + // "region", + // "healthCheckService" + // ], + // "parameters": { + // "healthCheckService": { + // "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + // "request": { + // "$ref": "HealthCheckService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionHealthChecks.delete": + +type RegionHealthChecksDeleteCall struct { + s *Service + project string + region string + healthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified HealthCheck resource. +// +// - healthCheck: Name of the HealthCheck resource to delete. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionHealthChecksService) Delete(project string, region string, healthCheck string) *RegionHealthChecksDeleteCall { + c := &RegionHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.healthCheck = healthCheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionHealthChecksDeleteCall) RequestId(requestId string) *RegionHealthChecksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthChecksDeleteCall) Fields(s ...googleapi.Field) *RegionHealthChecksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthChecksDeleteCall) Context(ctx context.Context) *RegionHealthChecksDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionHealthChecksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthChecks/{healthCheck}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthChecks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified HealthCheck resource.", + // "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + // "httpMethod": "DELETE", + // "id": "compute.regionHealthChecks.delete", + // "parameterOrder": [ + // "project", + // "region", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionHealthChecks.get": -type RegionBackendServicesPatchCall struct { - s *Service - project string - region string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthChecksGetCall struct { + s *Service + project string + region string + healthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified regional BackendService resource with -// the data included in the request. For more information, see -// Understanding backend services This method supports PATCH semantics -// and uses the JSON merge patch format and processing rules. +// Get: Returns the specified HealthCheck resource. Gets a list of +// available health checks by making a list() request. // -// - backendService: Name of the BackendService resource to patch. +// - healthCheck: Name of the HealthCheck resource to return. // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { - c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionHealthChecksService) Get(project string, region string, healthCheck string) *RegionHealthChecksGetCall { + c := &RegionHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.backendservice = backendservice - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
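Illustrative usage (editorial note, not part of the vendored patch): a sketch of a conditional read with the RegionHealthChecks Get builder's IfNoneMatch, checking the 304 case with googleapi.IsNotModified as the Do doc comment above recommends. The helper name, the cached etag value, and the svc.RegionHealthChecks field are assumptions.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getIfChanged refetches a regional HealthCheck only when it differs from the
// copy identified by etag; it returns (nil, nil) when the server answers 304.
func getIfChanged(ctx context.Context, svc *compute.Service, project, region, name, etag string) (*compute.HealthCheck, error) {
	hc, err := svc.RegionHealthChecks.Get(project, region, name).
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // cached copy is still current
	}
	return hc, err
}

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // Application Default Credentials assumed.
	if err != nil {
		log.Fatal(err)
	}
	hc, err := getIfChanged(ctx, svc, "my-project", "us-central1", "example-check", "cached-etag")
	if err != nil {
		log.Fatal(err)
	}
	if hc != nil {
		log.Printf("health check changed: %s", hc.Name)
	}
}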
-func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBackendServicesPatchCall { - c.urlParams_.Set("requestId", requestId) + c.healthCheck = healthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBackendServicesPatchCall { +func (c *RegionHealthChecksGetCall) Fields(s ...googleapi.Field) *RegionHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionHealthChecksGetCall) IfNoneMatch(entityTag string) *RegionHealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesPatchCall) Context(ctx context.Context) *RegionBackendServicesPatchCall { +func (c *RegionHealthChecksGetCall) Context(ctx context.Context) *RegionHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesPatchCall) Header() http.Header { +func (c *RegionHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionHealthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *HealthCheck.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -122334,18 +140727,18 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Understanding backend services This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", - // "httpMethod": "PATCH", - // "id": "compute.regionBackendServices.patch", + // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + // "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + // "httpMethod": "GET", + // "id": "compute.regionHealthChecks.get", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "healthCheck" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -122364,54 +140757,43 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, + // "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", // "response": { - // "$ref": "Operation" + // "$ref": "HealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionBackendServices.update": +// method id "compute.regionHealthChecks.insert": -type RegionBackendServicesUpdateCall struct { - s *Service - project string - region string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthChecksInsertCall struct { + s *Service + project string + region string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified regional BackendService resource with -// the data included in the request. For more information, see Backend -// services overview . +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. // -// - backendService: Name of the BackendService resource to update. // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { - c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionHealthChecksService) Insert(project string, region string, healthcheck *HealthCheck) *RegionHealthChecksInsertCall { + c := &RegionHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.backendservice = backendservice + c.healthcheck = healthcheck return c } @@ -122426,7 +140808,7 @@ func (r *RegionBackendServicesService) Update(project string, region string, bac // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBackendServicesUpdateCall { +func (c *RegionHealthChecksInsertCall) RequestId(requestId string) *RegionHealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -122434,7 +140816,7 @@ func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBackendServicesUpdateCall { +func (c *RegionHealthChecksInsertCall) Fields(s ...googleapi.Field) *RegionHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -122442,21 +140824,21 @@ func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesUpdateCall) Context(ctx context.Context) *RegionBackendServicesUpdateCall { +func (c *RegionHealthChecksInsertCall) Context(ctx context.Context) *RegionHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesUpdateCall) Header() http.Header { +func (c *RegionHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -122464,53 +140846,52 @@ func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthChecks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.update" call. +// Do executes the "compute.regionHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -122524,23 +140905,15 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Backend services overview .", - // "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", - // "httpMethod": "PUT", - // "id": "compute.regionBackendServices.update", + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/healthChecks", + // "httpMethod": "POST", + // "id": "compute.regionHealthChecks.insert", // "parameterOrder": [ // "project", - // "region", - // "backendService" + // "region" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -122561,9 +140934,9 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/backendServices/{backendService}", + // "path": "projects/{project}/regions/{region}/healthChecks", // "request": { - // "$ref": "BackendService" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -122576,41 +140949,47 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionCommitments.aggregatedList": +// method id "compute.regionHealthChecks.list": -type RegionCommitmentsAggregatedListCall struct { +type RegionHealthChecksListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of commitments by -// region. +// List: Retrieves the list of HealthCheck resources available to the +// specified project. // // - project: Project ID for this request. -func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { - c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionHealthChecksService) List(project string, region string) *RegionHealthChecksListCall { + c := &RegionHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. 
The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -122618,32 +140997,29 @@ func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitm // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. 
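// Editor's note: a minimal, hypothetical caller-side sketch (not part of the
// vendored diff) of the filter syntax described in the doc comment above,
// using the regional health-check List call defined in this file. It assumes
// an already-constructed *compute.Service named svc and the import
// compute "google.golang.org/api/compute/v1".
//
//	hcs, err := svc.RegionHealthChecks.List("my-project", "us-central1").
//		Filter("name != example-hc"). // exclude a single health check by name
//		Do()
//	if err != nil {
//		// handle the error
//	}
//	_ = hcs // *compute.HealthCheckList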
+func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } -// IncludeAllScopes sets the optional parameter "includeAllScopes": -// Indicates whether every visible scope for each scope type (zone, -// region, global) should be included in the response. For new resource -// types added after this field, the flag has no effect as new resource -// types will always include every visible scope for each scope type in -// response. For resource types which predate this field, if this flag -// is omitted or false, only scopes of the scope types where the -// resource type is expected to be found will be included. -func (c *RegionCommitmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *RegionCommitmentsAggregatedListCall { - c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) - return c -} - // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than `maxResults`, Compute Engine returns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -122657,7 +141033,7 @@ func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *Regi // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -122665,7 +141041,7 @@ func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCom // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -122674,7 +141050,7 @@ func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *Regio // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionCommitmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionHealthChecksListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -122682,7 +141058,7 @@ func (c *RegionCommitmentsAggregatedListCall) ReturnPartialSuccess(returnPartial // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthChecksListCall) Fields(s ...googleapi.Field) *RegionHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -122692,7 +141068,7 @@ func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *Regi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthChecksListCall) IfNoneMatch(entityTag string) *RegionHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -122700,21 +141076,21 @@ func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsAggregatedListCall) Context(ctx context.Context) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthChecksListCall) Context(ctx context.Context) *RegionHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { +func (c *RegionHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -122727,7 +141103,7 @@ func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Respo var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -122736,37 +141112,38 @@ func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Respo req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.aggregatedList" call. -// Exactly one of *CommitmentAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *CommitmentAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionHealthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
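// Editor's note: a hedged usage sketch (not part of the vendored diff) showing
// how a caller might consume the paged list results via the Pages helper
// defined a little further on, which invokes the callback once per result
// page. It assumes a configured *compute.Service named svc and that
// HealthCheckList exposes an Items slice, as in the published compute/v1
// package. Assumed imports: context, fmt, compute "google.golang.org/api/compute/v1".
//
//	func listRegionHealthChecks(ctx context.Context, svc *compute.Service) error {
//		return svc.RegionHealthChecks.List("my-project", "us-central1").
//			Pages(ctx, func(page *compute.HealthCheckList) error {
//				for _, hc := range page.Items {
//					fmt.Println(hc.Name) // one line per health check in the region
//				}
//				return nil // returning a non-nil error halts the iteration
//			})
//	}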
-func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*CommitmentAggregatedList, error) { +func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &CommitmentAggregatedList{ + ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -122778,24 +141155,20 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves an aggregated list of commitments by region.", - // "flatPath": "projects/{project}/aggregated/commitments", + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", + // "flatPath": "projects/{project}/regions/{region}/healthChecks", // "httpMethod": "GET", - // "id": "compute.regionCommitments.aggregatedList", + // "id": "compute.regionHealthChecks.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, - // "includeAllScopes": { - // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - // "location": "query", - // "type": "boolean" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -122821,15 +141194,22 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "returnPartialSuccess": { // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/commitments", + // "path": "projects/{project}/regions/{region}/healthChecks", // "response": { - // "$ref": "CommitmentAggregatedList" + // "$ref": "HealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -122843,7 +141223,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func(*CommitmentAggregatedList) error) error { +func (c *RegionHealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -122861,123 +141241,133 @@ func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func( } } -// method id "compute.regionCommitments.get": +// method id "compute.regionHealthChecks.patch": -type RegionCommitmentsGetCall struct { - s *Service - project string - region string - commitment string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionHealthChecksPatchCall struct { + s *Service + project string + region string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified commitment resource. Gets a list of -// available commitments by making a list() request. +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. // -// - commitment: Name of the commitment to return. +// - healthCheck: Name of the HealthCheck resource to patch. // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { - c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionHealthChecksService) Patch(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksPatchCall { + c := &RegionHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.commitment = commitment + c.healthCheck = healthCheck + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. 
The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionHealthChecksPatchCall) RequestId(requestId string) *RegionHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { +func (c *RegionHealthChecksPatchCall) Fields(s ...googleapi.Field) *RegionHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { +func (c *RegionHealthChecksPatchCall) Context(ctx context.Context) *RegionHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsGetCall) Header() http.Header { +func (c *RegionHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments/{commitment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "commitment": c.commitment, + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.get" call. -// Exactly one of *Commitment or error will be non-nil. Any non-2xx +// Do executes the "compute.regionHealthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either -// *Commitment.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { +func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Commitment{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -122989,18 +141379,18 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment } return ret, nil // { - // "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", - // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", - // "httpMethod": "GET", - // "id": "compute.regionCommitments.get", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + // "httpMethod": "PATCH", + // "id": "compute.regionHealthChecks.patch", // "parameterOrder": [ // "project", // "region", - // "commitment" + // "healthCheck" // ], // "parameters": { - // "commitment": { - // "description": "Name of the commitment to return.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -123014,48 +141404,58 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/commitments/{commitment}", + // "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + // "request": { + // "$ref": "HealthCheck" + // }, // "response": { - // "$ref": "Commitment" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionCommitments.insert": +// method id "compute.regionHealthChecks.update": -type RegionCommitmentsInsertCall struct { - s *Service - project string - region string - commitment *Commitment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthChecksUpdateCall struct { + s *Service + project string + region string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a commitment in the specified project using the data -// included in the request. +// Update: Updates a HealthCheck resource in the specified project using +// the data included in the request. // +// - healthCheck: Name of the HealthCheck resource to update. // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { - c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionHealthChecksService) Update(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksUpdateCall { + c := &RegionHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.commitment = commitment + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -123070,7 +141470,7 @@ func (r *RegionCommitmentsService) Insert(project string, region string, commitm // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { +func (c *RegionHealthChecksUpdateCall) RequestId(requestId string) *RegionHealthChecksUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -123078,7 +141478,7 @@ func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitm // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { +func (c *RegionHealthChecksUpdateCall) Fields(s ...googleapi.Field) *RegionHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -123086,21 +141486,21 @@ func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommit // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { +func (c *RegionHealthChecksUpdateCall) Context(ctx context.Context) *RegionHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsInsertCall) Header() http.Header { +func (c *RegionHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -123108,52 +141508,53 @@ func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.insert" call. +// Do executes the "compute.regionHealthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -123167,15 +141568,23 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a commitment in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/commitments", - // "httpMethod": "POST", - // "id": "compute.regionCommitments.insert", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + // "httpMethod": "PUT", + // "id": "compute.regionHealthChecks.update", // "parameterOrder": [ // "project", - // "region" + // "region", + // "healthCheck" // ], // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -123184,7 +141593,7 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -123196,9 +141605,9 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/commitments", + // "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", // "request": { - // "$ref": "Commitment" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -123211,188 +141620,317 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionCommitments.list": +// method id "compute.regionInstanceGroupManagers.abandonInstances": -type RegionCommitmentsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of commitments contained within the specified -// region. +// AbandonInstances: Flags the specified instances to be immediately +// removed from the managed instance group. Abandoning an instance does +// not delete the instance, but it does remove the instance from any +// target pools that are applied by the managed instance group. 
This +// method reduces the targetSize of the managed instance group by the +// number of instances that you abandon. This operation is marked as +// DONE when the action is scheduled even if the instances have not yet +// been removed from the group. You must separately verify the status of +// the abandoning action with the listmanagedinstances method. If the +// group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. +// You can specify a maximum of 1000 instances with this method per +// request. // +// - instanceGroupManager: Name of the managed instance group. // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { - c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { + c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { - c.urlParams_.Set("filter", filter) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *RegionCommitmentsListCall) MaxResults(maxResults int64) *RegionCommitmentsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *RegionCommitmentsListCall) OrderBy(orderBy string) *RegionCommitmentsListCall { - c.urlParams_.Set("orderBy", orderBy) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { + c.ctx_ = ctx return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmentsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *RegionCommitmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionCommitmentsListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.abandonInstances", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.applyUpdatesToInstances": + +type RegionInstanceGroupManagersApplyUpdatesToInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ApplyUpdatesToInstances: Apply updates to selected instances the +// managed instance group. +// +// - instanceGroupManager: The name of the managed instance group, +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. 
+func (r *RegionInstanceGroupManagersService) ApplyUpdatesToInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { + c := &RegionInstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersapplyupdatesrequest = regioninstancegroupmanagersapplyupdatesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsListCall) Fields(s ...googleapi.Field) *RegionCommitmentsListCall { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionCommitmentsListCall) IfNoneMatch(entityTag string) *RegionCommitmentsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsListCall) Context(ctx context.Context) *RegionCommitmentsListCall { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsListCall) Header() http.Header { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersapplyupdatesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.list" call. -// Exactly one of *CommitmentList or error will be non-nil. Any non-2xx +// Do executes the "compute.regionInstanceGroupManagers.applyUpdatesToInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *CommitmentList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*CommitmentList, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &CommitmentList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -123404,36 +141942,20 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen } return ret, nil // { - // "description": "Retrieves a list of commitments contained within the specified region.", - // "flatPath": "projects/{project}/regions/{region}/commitments", - // "httpMethod": "GET", - // "id": "compute.regionCommitments.list", + // "description": "Apply updates to selected instances the managed instance group.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the managed instance group, should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -123444,86 +141966,59 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/commitments", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" + // }, // "response": { - // "$ref": "CommitmentList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionCommitmentsListCall) Pages(ctx context.Context, f func(*CommitmentList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionCommitments.update": +// method id "compute.regionInstanceGroupManagers.createInstances": -type RegionCommitmentsUpdateCall struct { - s *Service - project string - region string - commitment string - commitment2 *Commitment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersCreateInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified commitment with the data included in -// the request. Update is performed only on selected fields included as -// part of update-mask. Only the following fields can be modified: -// auto_renew. -// -// - commitment: Name of the commitment for which auto renew is being -// updated. -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionCommitmentsService) Update(project string, region string, commitment string, commitment2 *Commitment) *RegionCommitmentsUpdateCall { - c := &RegionCommitmentsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateInstances: Creates instances with per-instance configurations +// in this regional managed instance group. Instances are created using +// the current instance template. The create instances operation is +// marked DONE if the createInstances request is successful. The +// underlying actions take additional time. You must separately verify +// the status of the creating or actions with the listmanagedinstances +// method. +// +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the managed instance group is +// located. It should conform to RFC1035. 
+func (r *RegionInstanceGroupManagersService) CreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest) *RegionInstanceGroupManagersCreateInstancesCall { + c := &RegionInstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.commitment = commitment - c.commitment2 = commitment2 - return c -} - -// Paths sets the optional parameter "paths": -func (c *RegionCommitmentsUpdateCall) Paths(paths ...string) *RegionCommitmentsUpdateCall { - c.urlParams_.SetMulti("paths", append([]string{}, paths...)) + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerscreateinstancesrequest = regioninstancegroupmanagerscreateinstancesrequest return c } @@ -123534,26 +142029,18 @@ func (c *RegionCommitmentsUpdateCall) Paths(paths ...string) *RegionCommitmentsU // situation where you make an initial request and the request times // out. If you make the request again with the same request ID, the // server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not +// received, and if so, will ignore the second request. The request ID +// must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionCommitmentsUpdateCall) RequestId(requestId string) *RegionCommitmentsUpdateCall { +func (c *RegionInstanceGroupManagersCreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersCreateInstancesCall { c.urlParams_.Set("requestId", requestId) return c } -// UpdateMask sets the optional parameter "updateMask": update_mask -// indicates fields to be updated as part of this request. -func (c *RegionCommitmentsUpdateCall) UpdateMask(updateMask string) *RegionCommitmentsUpdateCall { - c.urlParams_.Set("updateMask", updateMask) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsUpdateCall) Fields(s ...googleapi.Field) *RegionCommitmentsUpdateCall { +func (c *RegionInstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersCreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -123561,21 +142048,21 @@ func (c *RegionCommitmentsUpdateCall) Fields(s ...googleapi.Field) *RegionCommit // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsUpdateCall) Context(ctx context.Context) *RegionCommitmentsUpdateCall { +func (c *RegionInstanceGroupManagersCreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersCreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionCommitmentsUpdateCall) Header() http.Header { +func (c *RegionInstanceGroupManagersCreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -123583,53 +142070,53 @@ func (c *RegionCommitmentsUpdateCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerscreateinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments/{commitment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "commitment": c.commitment, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.update" call. +// Do executes the "compute.regionInstanceGroupManagers.createInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionCommitmentsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -123643,28 +142130,22 @@ func (c *RegionCommitmentsUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates the specified commitment with the data included in the request. Update is performed only on selected fields included as part of update-mask. 
Only the following fields can be modified: auto_renew.", - // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", - // "httpMethod": "PATCH", - // "id": "compute.regionCommitments.update", + // "description": "Creates instances with per-instance configurations in this regional managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.createInstances", // "parameterOrder": [ // "project", // "region", - // "commitment" + // "instanceGroupManager" // ], // "parameters": { - // "commitment": { - // "description": "Name of the commitment for which auto renew is being updated.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "paths": { - // "location": "query", - // "repeated": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -123673,27 +142154,20 @@ func (c *RegionCommitmentsUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "The name of the region where the managed instance group is located. It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "updateMask": { - // "description": "update_mask indicates fields to be updated as part of this request.", - // "format": "google-fieldmask", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/commitments/{commitment}", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", // "request": { - // "$ref": "Commitment" + // "$ref": "RegionInstanceGroupManagersCreateInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -123706,123 +142180,125 @@ func (c *RegionCommitmentsUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionDiskTypes.get": +// method id "compute.regionInstanceGroupManagers.delete": -type RegionDiskTypesGetCall struct { - s *Service - project string - region string - diskType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersDeleteCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified regional disk type. Gets a list of -// available disk types by making a list() request. +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. // -// - diskType: Name of the disk type to return. +// - instanceGroupManager: Name of the managed instance group to delete. // - project: Project ID for this request. -// - region: The name of the region for this request. -func (r *RegionDiskTypesService) Get(project string, region string, diskType string) *RegionDiskTypesGetCall { - c := &RegionDiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { + c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.diskType = diskType + c.instanceGroupManager = instanceGroupManager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionDiskTypesGetCall) Fields(s ...googleapi.Field) *RegionDiskTypesGetCall { +func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionDiskTypesGetCall) IfNoneMatch(entityTag string) *RegionDiskTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDiskTypesGetCall) Context(ctx context.Context) *RegionDiskTypesGetCall { +func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDiskTypesGetCall) Header() http.Header { +func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/diskTypes/{diskType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "diskType": c.diskType, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDiskTypes.get" call. -// Exactly one of *DiskType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionInstanceGroupManagers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { +func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &DiskType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -123834,20 +142310,19 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er } return ret, nil // { - // "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", - // "flatPath": "projects/{project}/regions/{region}/diskTypes/{diskType}", - // "httpMethod": "GET", - // "id": "compute.regionDiskTypes.get", + // "description": "Deletes the specified managed instance group and all of the instances in that group.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "httpMethod": "DELETE", + // "id": "compute.regionInstanceGroupManagers.delete", // "parameterOrder": [ // "project", // "region", - // "diskType" + // "instanceGroupManager" // ], // "parameters": { - // "diskType": { - // "description": "Name of the disk type to return.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -123859,208 +142334,166 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/diskTypes/{diskType}", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "DiskType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionDiskTypes.list": +// method id "compute.regionInstanceGroupManagers.deleteInstances": -type RegionDiskTypesListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of regional disk types available to the -// specified project. +// DeleteInstances: Flags the specified instances in the managed +// instance group to be immediately deleted. The instances are also +// removed from any target pools of which they were a member. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you delete. The deleteInstances operation is +// marked DONE if the deleteInstances request is successful. The +// underlying actions take additional time. You must separately verify +// the status of the deleting action with the listmanagedinstances +// method. If the group is part of a backend service that has enabled +// connection draining, it can take up to 60 seconds after the +// connection draining duration has elapsed before the VM instance is +// removed or deleted. You can specify a maximum of 1000 instances with +// this method per request. // +// - instanceGroupManager: Name of the managed instance group. // - project: Project ID for this request. -// - region: The name of the region for this request. -func (r *RegionDiskTypesService) List(project string, region string) *RegionDiskTypesListCall { - c := &RegionDiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { + c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. 
The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *RegionDiskTypesListCall) MaxResults(maxResults int64) *RegionDiskTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *RegionDiskTypesListCall) OrderBy(orderBy string) *RegionDiskTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *RegionDiskTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionDiskTypesListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDiskTypesListCall) Fields(s ...googleapi.Field) *RegionDiskTypesListCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionDiskTypesListCall) IfNoneMatch(entityTag string) *RegionDiskTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDiskTypesListCall) Context(ctx context.Context) *RegionDiskTypesListCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDiskTypesListCall) Header() http.Header { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDiskTypes.list" call. -// Exactly one of *RegionDiskTypeList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RegionDiskTypeList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskTypeList, error) { +// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &RegionDiskTypeList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -124072,36 +142505,20 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT } return ret, nil // { - // "description": "Retrieves a list of regional disk types available to the specified project.", - // "flatPath": "projects/{project}/regions/{region}/diskTypes", - // "httpMethod": "GET", - // "id": "compute.regionDiskTypes.list", + // "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. 
You can specify a maximum of 1000 instances with this method per request.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.deleteInstances", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -124112,101 +142529,66 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/diskTypes", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" + // }, // "response": { - // "$ref": "RegionDiskTypeList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
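The hunk above swaps the removed regionDiskTypes.list plumbing for the new compute.regionInstanceGroupManagers.deleteInstances call. As an editorial sketch (not part of the vendored patch), the following shows how a caller would typically drive this generated method, assuming the google.golang.org/api/compute/v1 import path; the project, region, group and instance names and the request ID below are placeholders.

// Editorial illustration only; not part of the vendored file.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // authenticates via Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	req := &compute.RegionInstanceGroupManagersDeleteInstancesRequest{
		// URLs (full or partial) of the instances to delete; at most 1000 per call.
		Instances: []string{"zones/us-central1-a/instances/example-instance"},
	}

	op, err := svc.RegionInstanceGroupManagers.
		DeleteInstances("my-project", "us-central1", "example-mig", req).
		RequestId("b1c2d3e4-0000-4aaa-8bbb-000000000000"). // optional idempotency key; any valid non-zero UUID
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}

	// Do returns an Operation handle; the deletions complete asynchronously,
	// so poll the operation or listManagedInstances to confirm the result.
	log.Printf("started operation %s (status %s)", op.Name, op.Status)
}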
-func (c *RegionDiskTypesListCall) Pages(ctx context.Context, f func(*RegionDiskTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionDisks.addResourcePolicies": +// method id "compute.regionInstanceGroupManagers.deletePerInstanceConfigs": -type RegionDisksAddResourcePoliciesCall struct { - s *Service - project string - region string - disk string - regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersDeletePerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddResourcePolicies: Adds existing resource policies to a regional -// disk. You can only add one policy which will be applied to this disk -// for scheduling snapshot creation. +// DeletePerInstanceConfigs: Deletes selected per-instance +// configurations for the managed instance group. // -// - disk: The disk name for this request. -// - project: Project ID for this request. -// - region: The name of the region for this request. -func (r *RegionDisksService) AddResourcePolicies(project string, region string, disk string, regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest) *RegionDisksAddResourcePoliciesCall { - c := &RegionDisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. +func (r *RegionInstanceGroupManagersService) DeletePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { + c := &RegionInstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.regiondisksaddresourcepoliciesrequest = regiondisksaddresourcepoliciesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *RegionDisksAddResourcePoliciesCall) RequestId(requestId string) *RegionDisksAddResourcePoliciesCall { - c.urlParams_.Set("requestId", requestId) + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerdeleteinstanceconfigreq = regioninstancegroupmanagerdeleteinstanceconfigreq return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksAddResourcePoliciesCall { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -124214,21 +142596,21 @@ func (c *RegionDisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *Regio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksAddResourcePoliciesCall) Context(ctx context.Context) *RegionDisksAddResourcePoliciesCall { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksAddResourcePoliciesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -124236,14 +142618,14 @@ func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksaddresourcepoliciesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerdeleteinstanceconfigreq) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -124251,38 +142633,38 @@ func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.addResourcePolicies" call. +// Do executes the "compute.regionInstanceGroupManagers.deletePerInstanceConfigs" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -124296,20 +142678,19 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", - // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies", + // "description": "Deletes selected per-instance configurations for the managed instance group.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", // "httpMethod": "POST", - // "id": "compute.regionDisks.addResourcePolicies", + // "id": "compute.regionInstanceGroupManagers.deletePerInstanceConfigs", // "parameterOrder": [ // "project", // "region", - // "disk" + // "instanceGroupManager" // ], // "parameters": { - // "disk": { - // "description": "The disk name for this request.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -124321,21 +142702,15 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", // "request": { - // "$ref": "RegionDisksAddResourcePoliciesRequest" + // "$ref": "RegionInstanceGroupManagerDeleteInstanceConfigReq" // }, // "response": { // "$ref": "Operation" @@ -124348,134 +142723,123 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionDisks.createSnapshot": +// method id "compute.regionInstanceGroupManagers.get": -type RegionDisksCreateSnapshotCall struct { - s *Service - project string - region string - disk string - snapshot *Snapshot - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersGetCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// CreateSnapshot: Creates a snapshot of a specified persistent disk. -// For regular snapshot creation, consider using snapshots.insert -// instead, as that method supports more features, such as creating -// snapshots in a project different from the source disk project. +// Get: Returns all of the details about the specified managed instance +// group. // -// - disk: Name of the regional persistent disk to snapshot. +// - instanceGroupManager: Name of the managed instance group to return. // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionDisksService) CreateSnapshot(project string, region string, disk string, snapshot *Snapshot) *RegionDisksCreateSnapshotCall { - c := &RegionDisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { + c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.snapshot = snapshot - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionDisksCreateSnapshotCall) RequestId(requestId string) *RegionDisksCreateSnapshotCall { - c.urlParams_.Set("requestId", requestId) + c.instanceGroupManager = instanceGroupManager return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisksCreateSnapshotCall { +func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksCreateSnapshotCall) Context(ctx context.Context) *RegionDisksCreateSnapshotCall { +func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksCreateSnapshotCall) Header() http.Header { +func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/createSnapshot") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.createSnapshot" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -124487,20 +142851,19 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Creates a snapshot of a specified persistent disk. For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", - // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", - // "httpMethod": "POST", - // "id": "compute.regionDisks.createSnapshot", + // "description": "Returns all of the details about the specified managed instance group.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroupManagers.get", // "parameterOrder": [ // "project", // "region", - // "disk" + // "instanceGroupManager" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk to snapshot.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -124512,58 +142875,53 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", - // "request": { - // "$ref": "Snapshot" - // }, + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupManager" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionDisks.delete": +// method id "compute.regionInstanceGroupManagers.insert": -type RegionDisksDeleteCall struct { - s *Service - project string - region string - disk string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersInsertCall struct { + s *Service + project string + region string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified regional persistent disk. Deleting a -// regional disk removes all the replicas of its data permanently and is -// irreversible. However, deleting a disk does not delete any snapshots -// previously made from the disk. You must separately delete snapshots. +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, instances in +// the group are created using the specified instance template. This +// operation is marked as DONE when the group is created even if the +// instances in the group have not yet been created. You must separately +// verify the status of the individual instances with the +// listmanagedinstances method. A regional managed instance group can +// contain up to 2000 instances. // -// - disk: Name of the regional persistent disk to delete. // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionDisksService) Delete(project string, region string, disk string) *RegionDisksDeleteCall { - c := &RegionDisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { + c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk + c.instancegroupmanager = instancegroupmanager return c } @@ -124578,7 +142936,7 @@ func (r *RegionDisksService) Delete(project string, region string, disk string) // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCall { +func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *RegionInstanceGroupManagersInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -124586,7 +142944,7 @@ func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteCall { +func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -124594,21 +142952,21 @@ func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksDeleteCall) Context(ctx context.Context) *RegionDisksDeleteCall { +func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksDeleteCall) Header() http.Header { +func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -124616,11 +142974,16 @@ func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -124628,36 +142991,35 @@ func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.delete" call. +// Do executes the "compute.regionInstanceGroupManagers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
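Throughout these hunks the generated Do methods now route errors through gensupport.WrapError instead of returning a bare *googleapi.Error. A minimal caller-side sketch (editorial, not part of the patch) of the pattern the doc comments above describe: a conditional Get using the newly added IfNoneMatch option, checked with googleapi.IsNotModified and errors.As. That the wrapped errors still satisfy these checks is an assumption based on WrapError attaching detail to, rather than replacing, the *googleapi.Error; the resource names and ETag are placeholders.

// Editorial illustration only; not part of the vendored file.
package main

import (
	"context"
	"errors"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// lastEtag would normally come from a previous response's headers.
	lastEtag := `"abc123"`

	igm, err := svc.RegionInstanceGroupManagers.
		Get("my-project", "us-central1", "example-mig").
		IfNoneMatch(lastEtag).
		Context(ctx).
		Do()
	switch {
	case googleapi.IsNotModified(err):
		log.Println("group unchanged since last fetch")
	case err != nil:
		var gerr *googleapi.Error
		if errors.As(err, &gerr) {
			log.Fatalf("compute API error: HTTP %d: %s", gerr.Code, gerr.Message)
		}
		log.Fatal(err)
	default:
		log.Printf("group %s has target size %d", igm.Name, igm.TargetSize)
	}
}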
-func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -124671,22 +143033,15 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", - // "flatPath": "projects/{project}/regions/{region}/disks/{disk}", - // "httpMethod": "DELETE", - // "id": "compute.regionDisks.delete", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method. A regional managed instance group can contain up to 2000 instances.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.insert", // "parameterOrder": [ // "project", - // "region", - // "disk" + // "region" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -124695,9 +143050,8 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -124707,7 +143061,10 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{disk}", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers", + // "request": { + // "$ref": "InstanceGroupManager" + // }, // "response": { // "$ref": "Operation" // }, @@ -124719,36 +143076,116 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.regionDisks.get": +// method id "compute.regionInstanceGroupManagers.list": -type RegionDisksGetCall struct { +type RegionInstanceGroupManagersListCall struct { s *Service project string region string - disk string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header 
} -// Get: Returns a specified regional persistent disk. +// List: Retrieves the list of managed instance groups that are +// contained within the specified region. // -// - disk: Name of the regional persistent disk to return. // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionDisksService) Get(project string, region string, disk string) *RegionDisksGetCall { - c := &RegionDisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { + c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
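The insert hunk a little further up documents that creating a regional managed instance group is marked DONE before its instances exist and that such a group can hold up to 2000 instances. A hedged sketch of that call (editorial illustration, not part of the vendored file; the template URL, names and sizes are placeholders, and svc is a *compute.Service constructed as in the earlier sketches):

// Editorial illustration only; not part of the vendored file.
package migexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// createRegionalMIG creates a managed instance group from an existing
// instance template.
func createRegionalMIG(ctx context.Context, svc *compute.Service) error {
	igm := &compute.InstanceGroupManager{
		Name:             "example-mig",
		BaseInstanceName: "example",
		InstanceTemplate: "global/instanceTemplates/example-template",
		TargetSize:       3, // a regional MIG can hold up to 2000 instances
	}
	op, err := svc.RegionInstanceGroupManagers.
		Insert("my-project", "us-central1", igm).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// The operation reports DONE once the group exists, even though the
	// instances may still be creating; verify them with listManagedInstances.
	fmt.Printf("insert operation: %s (%s)\n", op.Name, op.Status)
	return nil
}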
If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionInstanceGroupManagersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksGetCall) Fields(s ...googleapi.Field) *RegionDisksGetCall { +func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -124758,7 +143195,7 @@ func (c *RegionDisksGetCall) Fields(s ...googleapi.Field) *RegionDisksGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionDisksGetCall) IfNoneMatch(entityTag string) *RegionDisksGetCall { +func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { c.ifNoneMatch_ = entityTag return c } @@ -124766,21 +143203,21 @@ func (c *RegionDisksGetCall) IfNoneMatch(entityTag string) *RegionDisksGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksGetCall) Context(ctx context.Context) *RegionDisksGetCall { +func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksGetCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -124793,7 +143230,7 @@ func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -124803,38 +143240,37 @@ func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.get" call. -// Exactly one of *Disk or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Disk.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { +// Do executes the "compute.regionInstanceGroupManagers.list" call. +// Exactly one of *RegionInstanceGroupManagerList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Disk{ + ret := &RegionInstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -124846,21 +143282,36 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified regional persistent disk.", - // "flatPath": "projects/{project}/regions/{region}/disks/{disk}", + // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers", // "httpMethod": "GET", - // "id": "compute.regionDisks.get", + // "id": "compute.regionInstanceGroupManagers.list", // "parameterOrder": [ // "project", - // "region", - // "disk" + // "region" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -124871,16 +143322,20 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{disk}", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers", // "response": { - // "$ref": "Disk" + // "$ref": "RegionInstanceGroupManagerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -124891,44 +143346,145 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } -// method id "compute.regionDisks.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionDisksGetIamPolicyCall struct { - s *Service - project string - region string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.regionInstanceGroupManagers.listErrors": + +type RegionInstanceGroupManagersListErrorsCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// ListErrors: Lists all errors thrown by actions on instances for a +// given regional managed instance group. The filter and orderBy query +// parameters are not supported. // -// - project: Project ID for this request. -// - region: The name of the region for this request. -// - resource: Name or id of the resource for this request. -func (r *RegionDisksService) GetIamPolicy(project string, region string, resource string) *RegionDisksGetIamPolicyCall { - c := &RegionDisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. It +// must be a string that meets the requirements in RFC1035, or an +// unsigned long integer: must match regexp pattern: (?:a-z +// (?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. This should +// conform to RFC1035. +func (r *RegionInstanceGroupManagersService) ListErrors(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListErrorsCall { + c := &RegionInstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource + c.instanceGroupManager = instanceGroupManager return c } -// OptionsRequestedPolicyVersion sets the optional parameter -// "optionsRequestedPolicyVersion": Requested IAM Policy version. -func (c *RegionDisksGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *RegionDisksGetIamPolicyCall { - c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. 
For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionInstanceGroupManagersListErrorsCall) Filter(filter string) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionInstanceGroupManagersListErrorsCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionInstanceGroupManagersListErrorsCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
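The new RegionInstanceGroupManagersListCall above exposes Filter, MaxResults, OrderBy, PageToken and ReturnPartialSuccess, plus a Pages helper that walks every page for the caller. A short editorial sketch of page-wise listing with an AIP-160-style filter (not part of the vendored patch; the project, region and filter values are placeholders):

// Editorial illustration only; not part of the vendored file.
package migexample

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listRegionalMIGs prints every managed instance group in the region,
// fetching successive pages transparently via the generated Pages helper.
func listRegionalMIGs(ctx context.Context, svc *compute.Service) error {
	return svc.RegionInstanceGroupManagers.
		List("my-project", "us-central1").
		Filter("name != example-instance"). // filter syntax as documented in the hunks above
		MaxResults(100).
		Pages(ctx, func(page *compute.RegionInstanceGroupManagerList) error {
			for _, mgr := range page.Items {
				log.Printf("%s: target size %d", mgr.Name, mgr.TargetSize)
			}
			return nil // a non-nil error here would halt the iteration
		})
}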
+func (c *RegionInstanceGroupManagersListErrorsCall) PageToken(pageToken string) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionInstanceGroupManagersListErrorsCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksGetIamPolicyCall) Fields(s ...googleapi.Field) *RegionDisksGetIamPolicyCall { +func (c *RegionInstanceGroupManagersListErrorsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListErrorsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -124938,7 +143494,7 @@ func (c *RegionDisksGetIamPolicyCall) Fields(s ...googleapi.Field) *RegionDisksG // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionDisksGetIamPolicyCall) IfNoneMatch(entityTag string) *RegionDisksGetIamPolicyCall { +func (c *RegionInstanceGroupManagersListErrorsCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListErrorsCall { c.ifNoneMatch_ = entityTag return c } @@ -124946,21 +143502,21 @@ func (c *RegionDisksGetIamPolicyCall) IfNoneMatch(entityTag string) *RegionDisks // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksGetIamPolicyCall) Context(ctx context.Context) *RegionDisksGetIamPolicyCall { +func (c *RegionInstanceGroupManagersListErrorsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListErrorsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksGetIamPolicyCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListErrorsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -124973,7 +143529,7 @@ func (c *RegionDisksGetIamPolicyCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -124981,40 +143537,42 @@ func (c *RegionDisksGetIamPolicyCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RegionDisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.regionInstanceGroupManagers.listErrors" call. +// Exactly one of *RegionInstanceGroupManagersListErrorsResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *RegionInstanceGroupManagersListErrorsResponse.ServerResponse.Header +// or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListErrorsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &RegionInstanceGroupManagersListErrorsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -125026,21 +143584,44 @@ func (c *RegionDisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "flatPath": "projects/{project}/regions/{region}/disks/{resource}/getIamPolicy", + // "description": "Lists all errors thrown by actions on instances for a given regional managed instance group. The filter and orderBy query parameters are not supported.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", // "httpMethod": "GET", - // "id": "compute.regionDisks.getIamPolicy", + // "id": "compute.regionInstanceGroupManagers.listErrors", // "parameterOrder": [ // "project", // "region", - // "resource" + // "instanceGroupManager" // ], // "parameters": { - // "optionsRequestedPolicyVersion": { - // "description": "Requested IAM Policy version.", - // "format": "int32", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", - // "type": "integer" + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" // }, // "project": { // "description": "Project ID for this request.", @@ -125050,23 +143631,20 @@ func (c *RegionDisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request. This should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{resource}/getIamPolicy", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", // "response": { - // "$ref": "Policy" + // "$ref": "RegionInstanceGroupManagersListErrorsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -125077,58 +143655,144 @@ func (c *RegionDisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } -// method id "compute.regionDisks.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupManagersListErrorsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListErrorsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionDisksInsertCall struct { - s *Service - project string - region string - disk *Disk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionInstanceGroupManagers.listManagedInstances": + +type RegionInstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a persistent regional disk in the specified project -// using the data included in the request. +// ListManagedInstances: Lists the instances in the managed instance +// group and instances that are scheduled to be created. The list +// includes any current actions that the group has scheduled for its +// instances. The orderBy query parameter is not supported. 
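// Illustrative usage sketch for the listErrors call whose builders appear above:
// paging through the errors reported for a regional managed instance group.
// Assumes the compute/v1 package, Application Default Credentials, and placeholder
// project/region/MIG names; the Items field on the response is an assumed field name.
// Note that listErrors does not support filter or orderBy (see its description),
// so only the page size and partial-success behaviour are tuned here.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// NewService authenticates via Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	call := svc.RegionInstanceGroupManagers.
		ListErrors("my-project", "us-central1", "my-regional-mig").
		MaxResults(100).
		ReturnPartialSuccess(true)

	// Pages drives the pageToken/nextPageToken loop and invokes f once per page.
	err = call.Pages(ctx, func(page *compute.RegionInstanceGroupManagersListErrorsResponse) error {
		for _, e := range page.Items { // Items: per-instance errors (assumed field name)
			log.Printf("instance error: %+v", e)
		}
		return nil
	})
	if err != nil {
		log.Fatalf("listErrors: %v", err)
	}
}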
The +// `pageToken` query parameter is supported only in the alpha and beta +// API and only if the group's `listManagedInstancesResults` field is +// set to `PAGINATED`. // +// - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionDisksService) Insert(project string, region string, disk *Disk) *RegionDisksInsertCall { - c := &RegionDisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { + c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk + c.instanceGroupManager = instanceGroupManager return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionDisksInsertCall) RequestId(requestId string) *RegionDisksInsertCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. 
For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("filter", filter) return c } -// SourceImage sets the optional parameter "sourceImage": Source image -// to restore onto a disk. This field is optional. -func (c *RegionDisksInsertCall) SourceImage(sourceImage string) *RegionDisksInsertCall { - c.urlParams_.Set("sourceImage", sourceImage) +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. 
+func (c *RegionInstanceGroupManagersListManagedInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksInsertCall) Fields(s ...googleapi.Field) *RegionDisksInsertCall { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -125136,21 +143800,21 @@ func (c *RegionDisksInsertCall) Fields(s ...googleapi.Field) *RegionDisksInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksInsertCall) Context(ctx context.Context) *RegionDisksInsertCall { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksInsertCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -125158,14 +143822,9 @@ func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -125173,39 +143832,42 @@ func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. +// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade +// r or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &RegionInstanceGroupManagersListInstancesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -125217,15 +143879,45 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a persistent regional disk in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/disks", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", - // "id": "compute.regionDisks.insert", + // "id": "compute.regionInstanceGroupManagers.listManagedInstances", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -125234,76 +143926,97 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "sourceImage": { - // "description": "Source image to restore onto a disk. This field is optional.", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/disks", - // "request": { - // "$ref": "Disk" - // }, + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupManagersListInstancesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionDisks.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionDisksListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.regionInstanceGroupManagers.listPerInstanceConfigs": + +type RegionInstanceGroupManagersListPerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of persistent disks contained within the -// specified region. +// ListPerInstanceConfigs: Lists all of the per-instance configurations +// defined for the managed instance group. The orderBy query parameter +// is not supported. // -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionDisksService) List(project string, region string) *RegionDisksListCall { - c := &RegionDisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. 
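// Illustrative usage sketch for the listManagedInstances call defined above,
// combining a server-side filter with automatic paging. Assumes the compute/v1
// package, Application Default Credentials, and placeholder names; the
// ManagedInstances, Instance, and CurrentAction fields are taken from the generated
// response types, and the filtered field name (instanceStatus) is illustrative only.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	call := svc.RegionInstanceGroupManagers.
		ListManagedInstances("my-project", "us-central1", "my-regional-mig").
		Filter(`instanceStatus = "RUNNING"`). // filter expression syntax as documented above
		MaxResults(500)

	// Pages handles the pageToken/nextPageToken plumbing described in the doc comments.
	err = call.Pages(ctx, func(page *compute.RegionInstanceGroupManagersListInstancesResponse) error {
		for _, mi := range page.ManagedInstances {
			log.Printf("%s (current action: %s)", mi.Instance, mi.CurrentAction)
		}
		return nil
	})
	if err != nil {
		log.Fatalf("listManagedInstances: %v", err)
	}
}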
+func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c := &RegionInstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.instanceGroupManager = instanceGroupManager return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -125311,8 +144024,18 @@ func (r *RegionDisksService) List(project string, region string) *RegionDisksLis // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("filter", filter) return c } @@ -125323,7 +144046,7 @@ func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionDisksListCall) MaxResults(maxResults int64) *RegionDisksListCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -125337,7 +144060,7 @@ func (c *RegionDisksListCall) MaxResults(maxResults int64) *RegionDisksListCall // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *RegionDisksListCall) OrderBy(orderBy string) *RegionDisksListCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -125345,7 +144068,7 @@ func (c *RegionDisksListCall) OrderBy(orderBy string) *RegionDisksListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -125354,7 +144077,7 @@ func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionDisksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionDisksListCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -125362,92 +144085,82 @@ func (c *RegionDisksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *R // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionDisksListCall) Fields(s ...googleapi.Field) *RegionDisksListCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionDisksListCall) IfNoneMatch(entityTag string) *RegionDisksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksListCall) Context(ctx context.Context) *RegionDisksListCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksListCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.list" call. -// Exactly one of *DiskList or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { +// Do executes the "compute.regionInstanceGroupManagers.listPerInstanceConfigs" call. +// Exactly one of *RegionInstanceGroupManagersListInstanceConfigsResp or +// error will be non-nil. Any non-2xx status code is an error. 
Response +// headers are in either +// *RegionInstanceGroupManagersListInstanceConfigsResp.ServerResponse.Hea +// der or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstanceConfigsResp, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &DiskList{ + ret := &RegionInstanceGroupManagersListInstanceConfigsResp{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -125459,20 +144172,27 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error } return ret, nil // { - // "description": "Retrieves the list of persistent disks contained within the specified region.", - // "flatPath": "projects/{project}/regions/{region}/disks", - // "httpMethod": "GET", - // "id": "compute.regionDisks.list", + // "description": "Lists all of the per-instance configurations defined for the managed instance group. The orderBy query parameter is not supported.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", @@ -125499,9 +144219,8 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -125511,9 +144230,9 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/disks", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", // "response": { - // "$ref": "DiskList" + // "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -125527,7 +144246,7 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionDisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstanceConfigsResp) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -125545,31 +144264,40 @@ func (c *RegionDisksListCall) Pages(ctx context.Context, f func(*DiskList) error } } -// method id "compute.regionDisks.removeResourcePolicies": +// method id "compute.regionInstanceGroupManagers.patch": -type RegionDisksRemoveResourcePoliciesCall struct { - s *Service - project string - region string - disk string - regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersPatchCall struct { + s *Service + project string + region string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveResourcePolicies: Removes resource policies from a regional -// disk. +// Patch: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is patched even if the instances in the group are still in the +// process of being patched. You must separately verify the status of +// the individual instances with the listmanagedinstances method. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. If you update your group to specify a new +// template or instance configuration, it's possible that your intended +// specification for each VM in the group is different from the current +// state of that VM. To learn how to apply an updated configuration to +// the VMs in a MIG, see Updating instances in a MIG. // -// - disk: The disk name for this request. +// - instanceGroupManager: The name of the instance group manager. // - project: Project ID for this request. -// - region: The name of the region for this request. 
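// Illustrative sketch of the manual pageToken loop that the doc comments above
// describe, using the listPerInstanceConfigs call (equivalent to what Pages does
// internally). Assumes the compute/v1 package, Application Default Credentials,
// and placeholder names; the Items field on the response is an assumed field name.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	call := svc.RegionInstanceGroupManagers.
		ListPerInstanceConfigs("my-project", "us-central1", "my-regional-mig").
		MaxResults(50).
		Context(ctx)

	// Feed each nextPageToken back in via PageToken until the server returns an
	// empty token, as the PageToken doc comment describes.
	pageToken := ""
	for {
		if pageToken != "" {
			call.PageToken(pageToken)
		}
		resp, err := call.Do()
		if err != nil {
			log.Fatalf("listPerInstanceConfigs: %v", err)
		}
		for _, cfg := range resp.Items { // Items: per-instance configs (assumed field name)
			log.Printf("per-instance config: %+v", cfg)
		}
		if resp.NextPageToken == "" {
			break
		}
		pageToken = resp.NextPageToken
	}
}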
-func (r *RegionDisksService) RemoveResourcePolicies(project string, region string, disk string, regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest) *RegionDisksRemoveResourcePoliciesCall { - c := &RegionDisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { + c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.regiondisksremoveresourcepoliciesrequest = regiondisksremoveresourcepoliciesrequest + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager return c } @@ -125584,7 +144312,7 @@ func (r *RegionDisksService) RemoveResourcePolicies(project string, region strin // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionDisksRemoveResourcePoliciesCall) RequestId(requestId string) *RegionDisksRemoveResourcePoliciesCall { +func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -125592,7 +144320,7 @@ func (c *RegionDisksRemoveResourcePoliciesCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksRemoveResourcePoliciesCall { +func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -125600,21 +144328,21 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksRemoveResourcePoliciesCall) Context(ctx context.Context) *RegionDisksRemoveResourcePoliciesCall { +func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksRemoveResourcePoliciesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -125622,53 +144350,53 @@ func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksremoveresourcepoliciesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.removeResourcePolicies" call. +// Do executes the "compute.regionInstanceGroupManagers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -125682,20 +144410,19 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Removes resource policies from a regional disk.", - // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies", - // "httpMethod": "POST", - // "id": "compute.regionDisks.removeResourcePolicies", + // "description": "Updates a managed instance group using the information that you specify in the request. 
This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "httpMethod": "PATCH", + // "id": "compute.regionInstanceGroupManagers.patch", // "parameterOrder": [ // "project", // "region", - // "disk" + // "instanceGroupManager" // ], // "parameters": { - // "disk": { - // "description": "The disk name for this request.", + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -125707,9 +144434,8 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -125719,9 +144445,9 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "request": { - // "$ref": "RegionDisksRemoveResourcePoliciesRequest" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -125734,30 +144460,35 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionDisks.resize": +// method id "compute.regionInstanceGroupManagers.patchPerInstanceConfigs": -type RegionDisksResizeCall struct { - s *Service - project string - region string - disk string - regiondisksresizerequest *RegionDisksResizeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersPatchPerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the specified regional persistent disk. +// PatchPerInstanceConfigs: Inserts or patches per-instance +// configurations for the managed instance group. perInstanceConfig.name +// serves as a key used to distinguish whether to perform insert or +// patch. // -// - disk: Name of the regional persistent disk. -// - project: The project ID for this request. -// - region: Name of the region for this request. 
-func (r *RegionDisksService) Resize(project string, region string, disk string, regiondisksresizerequest *RegionDisksResizeRequest) *RegionDisksResizeCall { - c := &RegionDisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. +func (r *RegionInstanceGroupManagersService) PatchPerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { + c := &RegionInstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.regiondisksresizerequest = regiondisksresizerequest + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerpatchinstanceconfigreq = regioninstancegroupmanagerpatchinstanceconfigreq return c } @@ -125772,7 +144503,7 @@ func (r *RegionDisksService) Resize(project string, region string, disk string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCall { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -125780,7 +144511,7 @@ func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeCall { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -125788,21 +144519,21 @@ func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksResizeCall) Context(ctx context.Context) *RegionDisksResizeCall { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksResizeCall) Header() http.Header { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -125810,14 +144541,14 @@ func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksresizerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerpatchinstanceconfigreq) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -125825,38 +144556,38 @@ func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.resize" call. +// Do executes the "compute.regionInstanceGroupManagers.patchPerInstanceConfigs" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -125870,34 +144601,32 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Resizes the specified regional persistent disk.", - // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/resize", + // "description": "Inserts or patches per-instance configurations for the managed instance group. 
perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", // "httpMethod": "POST", - // "id": "compute.regionDisks.resize", + // "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", // "parameterOrder": [ // "project", // "region", - // "disk" + // "instanceGroupManager" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, // "project": { - // "description": "The project ID for this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -125907,9 +144636,9 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{disk}/resize", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", // "request": { - // "$ref": "RegionDisksResizeRequest" + // "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" // }, // "response": { // "$ref": "Operation" @@ -125922,38 +144651,63 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.regionDisks.setIamPolicy": +// method id "compute.regionInstanceGroupManagers.recreateInstances": -type RegionDisksSetIamPolicyCall struct { - s *Service - project string - region string - resource string - regionsetpolicyrequest *RegionSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// RecreateInstances: Flags the specified VM instances in the managed +// instance group to be immediately recreated. Each instance is +// recreated using the group's current configuration. This operation is +// marked as DONE when the flag is set even if the instances have not +// yet been recreated. You must separately verify the status of each +// instance by checking its currentAction field; for more information, +// see Checking the status of managed instances. If the group is part of +// a backend service that has enabled connection draining, it can take +// up to 60 seconds after the connection draining duration has elapsed +// before the VM instance is removed or deleted. 
You can specify a +// maximum of 1000 instances with this method per request. // +// - instanceGroupManager: Name of the managed instance group. // - project: Project ID for this request. -// - region: The name of the region for this request. -// - resource: Name or id of the resource for this request. -func (r *RegionDisksService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionDisksSetIamPolicyCall { - c := &RegionDisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { + c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.regionsetpolicyrequest = regionsetpolicyrequest + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionDisksSetIamPolicyCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -125961,21 +144715,21 @@ func (c *RegionDisksSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionDisksS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksSetIamPolicyCall) Context(ctx context.Context) *RegionDisksSetIamPolicyCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksSetIamPolicyCall) Header() http.Header { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -125983,14 +144737,14 @@ func (c *RegionDisksSetIamPolicyCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -125998,40 +144752,40 @@ func (c *RegionDisksSetIamPolicyCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RegionDisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -126043,16 +144797,22 @@ func (c *RegionDisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "flatPath": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", + // "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", // "httpMethod": "POST", - // "id": "compute.regionDisks.setIamPolicy", + // "id": "compute.regionInstanceGroupManagers.recreateInstances", // "parameterOrder": [ // "project", // "region", - // "resource" + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -126061,26 +144821,23 @@ func (c *RegionDisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", // "request": { - // "$ref": "RegionSetPolicyRequest" + // "$ref": "RegionInstanceGroupManagersRecreateRequest" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -126090,30 +144847,229 @@ func (c *RegionDisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } -// method id "compute.regionDisks.setLabels": +// method id "compute.regionInstanceGroupManagers.resize": -type RegionDisksSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersResizeCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on the target regional disk. +// Resize: Changes the intended size of the managed instance group. If +// you increase the size, the group creates new instances using the +// current instance template. If you decrease the size, the group +// deletes one or more instances. The resize operation is marked DONE if +// the resize request is successful. The underlying actions take +// additional time. You must separately verify the status of the +// creating or deleting actions with the listmanagedinstances method. If +// the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. +// +// - instanceGroupManager: Name of the managed instance group. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - size: Number of instances that should exist in this instance group +// manager. +func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { + c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). 
+func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances. 
The resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.resize", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager", + // "size" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "size": { + // "description": "Number of instances that should exist in this instance group manager.", + // "format": "int32", + // "location": "query", + // "minimum": "0", + // "required": true, + // "type": "integer" + // } + // }, + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": + +type RegionInstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetInstanceTemplate: Sets the instance template to use when creating +// new instances or recreating instances in this group. Existing +// instances are not affected. // +// - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. -// - region: The region for this request. -// - resource: Name or id of the resource for this request. 
-func (r *RegionDisksService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionDisksSetLabelsCall { - c := &RegionDisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest return c } @@ -126128,7 +145084,7 @@ func (r *RegionDisksService) SetLabels(project string, region string, resource s // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLabelsCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -126136,7 +145092,7 @@ func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetLabelsCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -126144,21 +145100,21 @@ func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetL // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksSetLabelsCall) Context(ctx context.Context) *RegionDisksSetLabelsCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksSetLabelsCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -126166,14 +145122,14 @@ func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -126181,38 +145137,38 @@ func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.setLabels" call. +// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -126226,16 +145182,22 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets the labels on the target regional disk.", - // "flatPath": "projects/{project}/regions/{region}/disks/{resource}/setLabels", + // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. 
Existing instances are not affected.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", // "httpMethod": "POST", - // "id": "compute.regionDisks.setLabels", + // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ // "project", // "region", - // "resource" + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -126244,9 +145206,8 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "region": { - // "description": "The region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -126254,18 +145215,11 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{resource}/setLabels", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", // "request": { - // "$ref": "RegionSetLabelsRequest" + // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" // }, // "response": { // "$ref": "Operation" @@ -126278,38 +145232,55 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.regionDisks.testIamPermissions": +// method id "compute.regionInstanceGroupManagers.setTargetPools": -type RegionDisksTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// SetTargetPools: Modifies the target pools to which all new instances +// in this group are assigned. 
Existing instances in the group are not +// affected. // +// - instanceGroupManager: Name of the managed instance group. // - project: Project ID for this request. -// - region: The name of the region for this request. -// - resource: Name or id of the resource for this request. -func (r *RegionDisksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionDisksTestIamPermissionsCall { - c := &RegionDisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { + c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionDisksTestIamPermissionsCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -126317,21 +145288,21 @@ func (c *RegionDisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *Region // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksTestIamPermissionsCall) Context(ctx context.Context) *RegionDisksTestIamPermissionsCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -126339,14 +145310,14 @@ func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -126354,40 +145325,40 @@ func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -126399,16 +145370,22 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "flatPath": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", + // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", // "httpMethod": "POST", - // "id": "compute.regionDisks.testIamPermissions", + // "id": "compute.regionInstanceGroupManagers.setTargetPools", // "parameterOrder": [ // "project", // "region", - // "resource" + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -126417,59 +145394,61 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionHealthCheckServices.delete": +// method id "compute.regionInstanceGroupManagers.updatePerInstanceConfigs": -type RegionHealthCheckServicesDeleteCall struct { - s *Service - project string - region string - healthCheckService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified regional HealthCheckService. +// UpdatePerInstanceConfigs: Inserts or updates per-instance +// configurations for the managed instance group. perInstanceConfig.name +// serves as a key used to distinguish whether to perform insert or +// patch. // -// - healthCheckService: Name of the HealthCheckService to delete. The -// name must be 1-63 characters long, and comply with RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionHealthCheckServicesService) Delete(project string, region string, healthCheckService string) *RegionHealthCheckServicesDeleteCall { - c := &RegionHealthCheckServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. +func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { + c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheckService = healthCheckService + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerupdateinstanceconfigreq = regioninstancegroupmanagerupdateinstanceconfigreq return c } @@ -126484,7 +145463,7 @@ func (r *RegionHealthCheckServicesService) Delete(project string, region string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *RegionHealthCheckServicesDeleteCall) RequestId(requestId string) *RegionHealthCheckServicesDeleteCall { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -126492,7 +145471,7 @@ func (c *RegionHealthCheckServicesDeleteCall) RequestId(requestId string) *Regio // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthCheckServicesDeleteCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesDeleteCall { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -126500,21 +145479,21 @@ func (c *RegionHealthCheckServicesDeleteCall) Fields(s ...googleapi.Field) *Regi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthCheckServicesDeleteCall) Context(ctx context.Context) *RegionHealthCheckServicesDeleteCall { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthCheckServicesDeleteCall) Header() http.Header { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthCheckServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -126522,48 +145501,53 @@ func (c *RegionHealthCheckServicesDeleteCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerupdateinstanceconfigreq) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheckService": c.healthCheckService, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthCheckServices.delete" call. 
+// Do executes the "compute.regionInstanceGroupManagers.updatePerInstanceConfigs" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -126577,18 +145561,18 @@ func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Deletes the specified regional HealthCheckService.", - // "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - // "httpMethod": "DELETE", - // "id": "compute.regionHealthCheckServices.delete", + // "description": "Inserts or updates per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", // "parameterOrder": [ // "project", // "region", - // "healthCheckService" + // "instanceGroupManager" // ], // "parameters": { - // "healthCheckService": { - // "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. 
It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -126601,9 +145585,8 @@ func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -126613,7 +145596,10 @@ func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "request": { + // "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" + // }, // "response": { // "$ref": "Operation" // }, @@ -126625,37 +145611,36 @@ func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.regionHealthCheckServices.get": +// method id "compute.regionInstanceGroups.get": -type RegionHealthCheckServicesGetCall struct { - s *Service - project string - region string - healthCheckService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsGetCall struct { + s *Service + project string + region string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified regional HealthCheckService resource. +// Get: Returns the specified instance group resource. // -// - healthCheckService: Name of the HealthCheckService to update. The -// name must be 1-63 characters long, and comply with RFC1035. +// - instanceGroup: Name of the instance group resource to return. // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionHealthCheckServicesService) Get(project string, region string, healthCheckService string) *RegionHealthCheckServicesGetCall { - c := &RegionHealthCheckServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { + c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheckService = healthCheckService + c.instanceGroup = instanceGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthCheckServicesGetCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesGetCall { +func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -126665,7 +145650,7 @@ func (c *RegionHealthCheckServicesGetCall) Fields(s ...googleapi.Field) *RegionH // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
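// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): a conditional
// Get on a regional instance group using IfNoneMatch, with googleapi.IsNotModified
// distinguishing "unchanged" from a real error, as the doc comment above describes.
// Project, region, and group names are placeholders; uses this file's imports.
func exampleGetRegionInstanceGroup(ctx context.Context, svc *Service, etag string) (*InstanceGroup, error) {
	ig, err := svc.RegionInstanceGroups.
		Get("my-project", "us-central1", "my-group").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// The copy identified by etag is still current; keep the cached value.
		return nil, nil
	}
	return ig, err
}
// ---------------------------------------------------------------------------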
-func (c *RegionHealthCheckServicesGetCall) IfNoneMatch(entityTag string) *RegionHealthCheckServicesGetCall { +func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -126673,21 +145658,21 @@ func (c *RegionHealthCheckServicesGetCall) IfNoneMatch(entityTag string) *Region // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthCheckServicesGetCall) Context(ctx context.Context) *RegionHealthCheckServicesGetCall { +func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthCheckServicesGetCall) Header() http.Header { +func (c *RegionInstanceGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthCheckServicesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -126700,7 +145685,7 @@ func (c *RegionHealthCheckServicesGetCall) doRequest(alt string) (*http.Response var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -126708,40 +145693,40 @@ func (c *RegionHealthCheckServicesGetCall) doRequest(alt string) (*http.Response } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheckService": c.healthCheckService, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthCheckServices.get" call. -// Exactly one of *HealthCheckService or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HealthCheckService.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionInstanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionHealthCheckServicesGetCall) Do(opts ...googleapi.CallOption) (*HealthCheckService, error) { +func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &HealthCheckService{ + ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -126753,18 +145738,18 @@ func (c *RegionHealthCheckServicesGetCall) Do(opts ...googleapi.CallOption) (*He } return ret, nil // { - // "description": "Returns the specified regional HealthCheckService resource.", - // "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + // "description": "Returns the specified instance group resource.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", // "httpMethod": "GET", - // "id": "compute.regionHealthCheckServices.get", + // "id": "compute.regionInstanceGroups.get", // "parameterOrder": [ // "project", // "region", - // "healthCheckService" + // "instanceGroup" // ], // "parameters": { - // "healthCheckService": { - // "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", + // "instanceGroup": { + // "description": "Name of the instance group resource to return.", // "location": "path", // "required": true, // "type": "string" @@ -126779,14 +145764,13 @@ func (c *RegionHealthCheckServicesGetCall) Do(opts ...googleapi.CallOption) (*He // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "HealthCheckService" + // "$ref": "InstanceGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -126797,90 +145781,163 @@ func (c *RegionHealthCheckServicesGetCall) Do(opts ...googleapi.CallOption) (*He } -// method id "compute.regionHealthCheckServices.insert": +// method id "compute.regionInstanceGroups.list": -type RegionHealthCheckServicesInsertCall struct { - s *Service - project string - region string - healthcheckservice *HealthCheckService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a regional HealthCheckService resource in the -// specified project and region using the data included in the request. +// List: Retrieves the list of instance group resources contained within +// the specified region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. 
-func (r *RegionHealthCheckServicesService) Insert(project string, region string, healthcheckservice *HealthCheckService) *RegionHealthCheckServicesInsertCall { - c := &RegionHealthCheckServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { + c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthcheckservice = healthcheckservice return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionHealthCheckServicesInsertCall) RequestId(requestId string) *RegionHealthCheckServicesInsertCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionInstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthCheckServicesInsertCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesInsertCall { +func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
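// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): a List call that
// exercises the optional parameters documented above (Filter, MaxResults, OrderBy).
// The filter string follows the simple comparison form described in the Filter doc;
// project and region names are placeholders.
func exampleListRegionInstanceGroups(ctx context.Context, svc *Service) (*RegionInstanceGroupList, error) {
	return svc.RegionInstanceGroups.
		List("my-project", "us-central1").
		Filter(`name != example-instance-group`). // simple comparison filter
		MaxResults(100).
		OrderBy("creationTimestamp desc"). // newest resources first
		Context(ctx).
		Do()
}
// ---------------------------------------------------------------------------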
+func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthCheckServicesInsertCall) Context(ctx context.Context) *RegionHealthCheckServicesInsertCall { +func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthCheckServicesInsertCall) Header() http.Header { +func (c *RegionInstanceGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthCheckServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheckservice) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -126892,33 +145949,33 @@ func (c *RegionHealthCheckServicesInsertCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthCheckServices.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionHealthCheckServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroups.list" call. +// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RegionInstanceGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &RegionInstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -126930,15 +145987,38 @@ func (c *RegionHealthCheckServicesInsertCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/healthCheckServices", - // "httpMethod": "POST", - // "id": "compute.regionHealthCheckServices.insert", + // "description": "Retrieves the list of instance group resources contained within the specified region.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroups", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroups.list", // "parameterOrder": [ // "project", // "region" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -126949,69 +146029,98 @@ func (c *RegionHealthCheckServicesInsertCall) Do(opts ...googleapi.CallOption) ( // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/healthCheckServices", - // "request": { - // "$ref": "HealthCheckService" - // }, + // "path": "projects/{project}/regions/{region}/instanceGroups", // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionHealthCheckServices.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. 
+// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionHealthCheckServicesListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.regionInstanceGroups.listInstances": + +type RegionInstanceGroupsListInstancesCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Lists all the HealthCheckService resources that have been -// configured for the specified project in the given region. +// ListInstances: Lists the instances in the specified instance group +// and displays information about the named ports. Depending on the +// specified options, this method can list all instances or only the +// instances that are running. The orderBy query parameter is not +// supported. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionHealthCheckServicesService) List(project string, region string) *RegionHealthCheckServicesListCall { - c := &RegionHealthCheckServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroup: Name of the regional instance group for which we +// want to list the instances. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { + c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.instanceGroup = instanceGroup + c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -127019,8 +146128,18 @@ func (r *RegionHealthCheckServicesService) List(project string, region string) * // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionHealthCheckServicesListCall) Filter(filter string) *RegionHealthCheckServicesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c } @@ -127031,7 +146150,7 @@ func (c *RegionHealthCheckServicesListCall) Filter(filter string) *RegionHealthC // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionHealthCheckServicesListCall) MaxResults(maxResults int64) *RegionHealthCheckServicesListCall { +func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -127045,7 +146164,7 @@ func (c *RegionHealthCheckServicesListCall) MaxResults(maxResults int64) *Region // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. 
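// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): paging through
// all regional instance groups with the generated Pages helper shown earlier for
// RegionInstanceGroupsListCall. Items and Name are assumed fields of the generated
// RegionInstanceGroupList / InstanceGroup types; names are placeholders.
func examplePageThroughRegionInstanceGroups(ctx context.Context, svc *Service) error {
	call := svc.RegionInstanceGroups.List("my-project", "us-central1").MaxResults(50)
	return call.Pages(ctx, func(page *RegionInstanceGroupList) error {
		for _, ig := range page.Items {
			fmt.Println(ig.Name) // process one page of results at a time
		}
		return nil // returning a non-nil error here halts the iteration
	})
}
// ---------------------------------------------------------------------------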
-func (c *RegionHealthCheckServicesListCall) OrderBy(orderBy string) *RegionHealthCheckServicesListCall { +func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -127053,7 +146172,7 @@ func (c *RegionHealthCheckServicesListCall) OrderBy(orderBy string) *RegionHealt // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionHealthCheckServicesListCall) PageToken(pageToken string) *RegionHealthCheckServicesListCall { +func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -127062,7 +146181,7 @@ func (c *RegionHealthCheckServicesListCall) PageToken(pageToken string) *RegionH // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionHealthCheckServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionHealthCheckServicesListCall { +func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -127070,92 +146189,86 @@ func (c *RegionHealthCheckServicesListCall) ReturnPartialSuccess(returnPartialSu // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthCheckServicesListCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesListCall { +func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionHealthCheckServicesListCall) IfNoneMatch(entityTag string) *RegionHealthCheckServicesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthCheckServicesListCall) Context(ctx context.Context) *RegionHealthCheckServicesListCall { +func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
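// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): a listInstances
// request restricted to running instances. InstanceState and PortName are assumed
// fields of RegionInstanceGroupsListInstancesRequest; all names are placeholders.
func exampleListRunningInstances(ctx context.Context, svc *Service) (*RegionInstanceGroupsListInstances, error) {
	req := &RegionInstanceGroupsListInstancesRequest{
		InstanceState: "RUNNING", // or "ALL" to include stopped instances
		PortName:      "http",    // optional: only report this named port
	}
	return svc.RegionInstanceGroups.
		ListInstances("my-project", "us-central1", "my-group", req).
		MaxResults(100).
		Context(ctx).
		Do()
}
// ---------------------------------------------------------------------------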
-func (c *RegionHealthCheckServicesListCall) Header() http.Header { +func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthCheckServicesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthCheckServices.list" call. -// Exactly one of *HealthCheckServicesList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HealthCheckServicesList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*HealthCheckServicesList, error) { +// Do executes the "compute.regionInstanceGroups.listInstances" call. +// Exactly one of *RegionInstanceGroupsListInstances or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupsListInstances.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &HealthCheckServicesList{ + ret := &RegionInstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -127167,20 +146280,27 @@ func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*H } return ret, nil // { - // "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", - // "flatPath": "projects/{project}/regions/{region}/healthCheckServices", - // "httpMethod": "GET", - // "id": "compute.regionHealthCheckServices.list", + // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.listInstances", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroup" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, + // "instanceGroup": { + // "description": "Name of the regional instance group for which we want to list the instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -127209,7 +146329,6 @@ func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*H // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -127219,9 +146338,12 @@ func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*H // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/healthCheckServices", + // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "RegionInstanceGroupsListInstancesRequest" + // }, // "response": { - // "$ref": "HealthCheckServicesList" + // "$ref": "RegionInstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -127235,7 +146357,7 @@ func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*H // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. 
// The provided context supersedes any context provided to the Context method. -func (c *RegionHealthCheckServicesListCall) Pages(ctx context.Context, f func(*HealthCheckServicesList) error) error { +func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -127253,33 +146375,32 @@ func (c *RegionHealthCheckServicesListCall) Pages(ctx context.Context, f func(*H } } -// method id "compute.regionHealthCheckServices.patch": +// method id "compute.regionInstanceGroups.setNamedPorts": -type RegionHealthCheckServicesPatchCall struct { - s *Service - project string - region string - healthCheckService string - healthcheckservice *HealthCheckService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsSetNamedPortsCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified regional HealthCheckService resource -// with the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. +// SetNamedPorts: Sets the named ports for the specified regional +// instance group. // -// - healthCheckService: Name of the HealthCheckService to update. The -// name must be 1-63 characters long, and comply with RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionHealthCheckServicesService) Patch(project string, region string, healthCheckService string, healthcheckservice *HealthCheckService) *RegionHealthCheckServicesPatchCall { - c := &RegionHealthCheckServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instanceGroup: The name of the regional instance group where the +// named ports are updated. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { + c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheckService = healthCheckService - c.healthcheckservice = healthcheckservice + c.instanceGroup = instanceGroup + c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest return c } @@ -127294,7 +146415,7 @@ func (r *RegionHealthCheckServicesService) Patch(project string, region string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionHealthCheckServicesPatchCall) RequestId(requestId string) *RegionHealthCheckServicesPatchCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -127302,7 +146423,7 @@ func (c *RegionHealthCheckServicesPatchCall) RequestId(requestId string) *Region // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthCheckServicesPatchCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesPatchCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -127310,21 +146431,21 @@ func (c *RegionHealthCheckServicesPatchCall) Fields(s ...googleapi.Field) *Regio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthCheckServicesPatchCall) Context(ctx context.Context) *RegionHealthCheckServicesPatchCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthCheckServicesPatchCall) Header() http.Header { +func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthCheckServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -127332,53 +146453,53 @@ func (c *RegionHealthCheckServicesPatchCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheckservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheckService": c.healthCheckService, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthCheckServices.patch" call. +// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
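// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): setting named
// ports on a regional instance group. Fingerprint and NamedPorts are assumed
// fields of RegionInstanceGroupsSetNamedPortsRequest, and NamedPort is assumed
// to carry Name/Port; the fingerprint would normally come from a prior Get.
func exampleSetNamedPorts(ctx context.Context, svc *Service, fingerprint string) (*Operation, error) {
	req := &RegionInstanceGroupsSetNamedPortsRequest{
		Fingerprint: fingerprint, // guards against concurrent modifications
		NamedPorts: []*NamedPort{
			{Name: "http", Port: 8080},
		},
	}
	return svc.RegionInstanceGroups.
		SetNamedPorts("my-project", "us-central1", "my-group", req).
		Context(ctx).
		Do()
}
// ---------------------------------------------------------------------------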
-func (c *RegionHealthCheckServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -127392,18 +146513,18 @@ func (c *RegionHealthCheckServicesPatchCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - // "httpMethod": "PATCH", - // "id": "compute.regionHealthCheckServices.patch", + // "description": "Sets the named ports for the specified regional instance group.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.setNamedPorts", // "parameterOrder": [ // "project", // "region", - // "healthCheckService" + // "instanceGroup" // ], // "parameters": { - // "healthCheckService": { - // "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", + // "instanceGroup": { + // "description": "The name of the regional instance group where the named ports are updated.", // "location": "path", // "required": true, // "type": "string" @@ -127418,7 +146539,6 @@ func (c *RegionHealthCheckServicesPatchCall) Do(opts ...googleapi.CallOption) (* // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -127428,9 +146548,9 @@ func (c *RegionHealthCheckServicesPatchCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", // "request": { - // "$ref": "HealthCheckService" + // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" // }, // "response": { // "$ref": "Operation" @@ -127443,28 +146563,28 @@ func (c *RegionHealthCheckServicesPatchCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionHealthChecks.delete": +// method id "compute.regionInstances.bulkInsert": -type RegionHealthChecksDeleteCall struct { - s *Service - project string - region string - healthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstancesBulkInsertCall struct { + s *Service + project string + region string + bulkinsertinstanceresource *BulkInsertInstanceResource + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HealthCheck resource. 
+// BulkInsert: Creates multiple instances in a given region. Count +// specifies the number of instances to create. // -// - healthCheck: Name of the HealthCheck resource to delete. // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionHealthChecksService) Delete(project string, region string, healthCheck string) *RegionHealthChecksDeleteCall { - c := &RegionHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +func (r *RegionInstancesService) BulkInsert(project string, region string, bulkinsertinstanceresource *BulkInsertInstanceResource) *RegionInstancesBulkInsertCall { + c := &RegionInstancesBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheck = healthCheck + c.bulkinsertinstanceresource = bulkinsertinstanceresource return c } @@ -127479,7 +146599,7 @@ func (r *RegionHealthChecksService) Delete(project string, region string, health // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionHealthChecksDeleteCall) RequestId(requestId string) *RegionHealthChecksDeleteCall { +func (c *RegionInstancesBulkInsertCall) RequestId(requestId string) *RegionInstancesBulkInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -127487,7 +146607,7 @@ func (c *RegionHealthChecksDeleteCall) RequestId(requestId string) *RegionHealth // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksDeleteCall) Fields(s ...googleapi.Field) *RegionHealthChecksDeleteCall { +func (c *RegionInstancesBulkInsertCall) Fields(s ...googleapi.Field) *RegionInstancesBulkInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -127495,21 +146615,21 @@ func (c *RegionHealthChecksDeleteCall) Fields(s ...googleapi.Field) *RegionHealt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksDeleteCall) Context(ctx context.Context) *RegionHealthChecksDeleteCall { +func (c *RegionInstancesBulkInsertCall) Context(ctx context.Context) *RegionInstancesBulkInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionHealthChecksDeleteCall) Header() http.Header { +func (c *RegionInstancesBulkInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstancesBulkInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -127517,49 +146637,230 @@ func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertinstanceresource) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instances/bulkInsert") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheck": c.healthCheck, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.delete" call. +// Do executes the "compute.regionInstances.bulkInsert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates multiple instances in a given region. 
Count specifies the number of instances to create.", + // "flatPath": "projects/{project}/regions/{region}/instances/bulkInsert", + // "httpMethod": "POST", + // "id": "compute.regionInstances.bulkInsert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/instances/bulkInsert", + // "request": { + // "$ref": "BulkInsertInstanceResource" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionNetworkEndpointGroups.delete": + +type RegionNetworkEndpointGroupsDeleteCall struct { + s *Service + project string + region string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified network endpoint group. Note that the +// NEG cannot be deleted if it is configured as a backend of a backend +// service. +// +// - networkEndpointGroup: The name of the network endpoint group to +// delete. It should comply with RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the network endpoint group is +// located. It should comply with RFC1035. +func (r *RegionNetworkEndpointGroupsService) Delete(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsDeleteCall { + c := &RegionNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkEndpointGroup = networkEndpointGroup + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. 
This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionNetworkEndpointGroupsDeleteCall) RequestId(requestId string) *RegionNetworkEndpointGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNetworkEndpointGroupsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNetworkEndpointGroups.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { return nil, err } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, @@ -127572,20 +146873,19 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Deletes the specified HealthCheck resource.", - // "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + // "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", + // "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", // "httpMethod": "DELETE", - // "id": "compute.regionHealthChecks.delete", + // "id": "compute.regionNetworkEndpointGroups.delete", // "parameterOrder": [ // "project", // "region", - // "healthCheck" + // "networkEndpointGroup" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to delete.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -127597,9 +146897,8 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -127609,7 +146908,7 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + // "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", // "response": { // "$ref": "Operation" // }, @@ -127621,37 +146920,39 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.regionHealthChecks.get": +// method id "compute.regionNetworkEndpointGroups.get": -type RegionHealthChecksGetCall struct { - s *Service - project string - region string - healthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionNetworkEndpointGroupsGetCall struct { + s *Service + project string + region string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. +// Get: Returns the specified network endpoint group. Gets a list of +// available network endpoint groups by making a list() request. // -// - healthCheck: Name of the HealthCheck resource to return. -// - project: Project ID for this request. 
-// - region: Name of the region scoping this request. -func (r *RegionHealthChecksService) Get(project string, region string, healthCheck string) *RegionHealthChecksGetCall { - c := &RegionHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkEndpointGroup: The name of the network endpoint group. It +// should comply with RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the network endpoint group is +// located. It should comply with RFC1035. +func (r *RegionNetworkEndpointGroupsService) Get(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsGetCall { + c := &RegionNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheck = healthCheck + c.networkEndpointGroup = networkEndpointGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksGetCall) Fields(s ...googleapi.Field) *RegionHealthChecksGetCall { +func (c *RegionNetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -127661,7 +146962,7 @@ func (c *RegionHealthChecksGetCall) Fields(s ...googleapi.Field) *RegionHealthCh // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionHealthChecksGetCall) IfNoneMatch(entityTag string) *RegionHealthChecksGetCall { +func (c *RegionNetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *RegionNetworkEndpointGroupsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -127669,21 +146970,21 @@ func (c *RegionHealthChecksGetCall) IfNoneMatch(entityTag string) *RegionHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksGetCall) Context(ctx context.Context) *RegionHealthChecksGetCall { +func (c *RegionNetworkEndpointGroupsGetCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksGetCall) Header() http.Header { +func (c *RegionNetworkEndpointGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -127696,7 +146997,7 @@ func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -127704,40 +147005,40 @@ func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheck": c.healthCheck, + "project": c.project, + "region": c.region, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.get" call. -// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheck.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { +// Do executes the "compute.regionNetworkEndpointGroups.get" call. +// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroup.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &HealthCheck{ + ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -127749,20 +147050,19 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", - // "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + // "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", // "httpMethod": "GET", - // "id": "compute.regionHealthChecks.get", + // "id": "compute.regionNetworkEndpointGroups.get", // "parameterOrder": [ // "project", // "region", - // "healthCheck" + // "networkEndpointGroup" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to return.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group. 
It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -127774,16 +147074,15 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + // "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", // "response": { - // "$ref": "HealthCheck" + // "$ref": "NetworkEndpointGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -127794,28 +147093,29 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe } -// method id "compute.regionHealthChecks.insert": +// method id "compute.regionNetworkEndpointGroups.insert": -type RegionHealthChecksInsertCall struct { - s *Service - project string - region string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkEndpointGroupsInsertCall struct { + s *Service + project string + region string + networkendpointgroup *NetworkEndpointGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. +// Insert: Creates a network endpoint group in the specified project +// using the parameters that are included in the request. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionHealthChecksService) Insert(project string, region string, healthcheck *HealthCheck) *RegionHealthChecksInsertCall { - c := &RegionHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - region: The name of the region where you want to create the network +// endpoint group. It should comply with RFC1035. +func (r *RegionNetworkEndpointGroupsService) Insert(project string, region string, networkendpointgroup *NetworkEndpointGroup) *RegionNetworkEndpointGroupsInsertCall { + c := &RegionNetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthcheck = healthcheck + c.networkendpointgroup = networkendpointgroup return c } @@ -127830,7 +147130,7 @@ func (r *RegionHealthChecksService) Insert(project string, region string, health // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionHealthChecksInsertCall) RequestId(requestId string) *RegionHealthChecksInsertCall { +func (c *RegionNetworkEndpointGroupsInsertCall) RequestId(requestId string) *RegionNetworkEndpointGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -127838,7 +147138,7 @@ func (c *RegionHealthChecksInsertCall) RequestId(requestId string) *RegionHealth // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksInsertCall) Fields(s ...googleapi.Field) *RegionHealthChecksInsertCall { +func (c *RegionNetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -127846,21 +147146,21 @@ func (c *RegionHealthChecksInsertCall) Fields(s ...googleapi.Field) *RegionHealt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksInsertCall) Context(ctx context.Context) *RegionHealthChecksInsertCall { +func (c *RegionNetworkEndpointGroupsInsertCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksInsertCall) Header() http.Header { +func (c *RegionNetworkEndpointGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -127868,14 +147168,14 @@ func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -127889,31 +147189,31 @@ func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.insert" call. +// Do executes the "compute.regionNetworkEndpointGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -127927,10 +147227,10 @@ func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/healthChecks", + // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + // "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", // "httpMethod": "POST", - // "id": "compute.regionHealthChecks.insert", + // "id": "compute.regionNetworkEndpointGroups.insert", // "parameterOrder": [ // "project", // "region" @@ -127944,9 +147244,8 @@ func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -127956,9 +147255,9 @@ func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/healthChecks", + // "path": "projects/{project}/regions/{region}/networkEndpointGroups", // "request": { - // "$ref": "HealthCheck" + // "$ref": "NetworkEndpointGroup" // }, // "response": { // "$ref": "Operation" @@ -127971,9 +147270,9 @@ func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.regionHealthChecks.list": +// method id "compute.regionNetworkEndpointGroups.list": -type RegionHealthChecksListCall struct { +type RegionNetworkEndpointGroupsListCall struct { s *Service project string region string @@ -127983,32 +147282,36 @@ type RegionHealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HealthCheck resources available to the -// specified project. +// List: Retrieves the list of regional network endpoint groups +// available to the specified project in the given region. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionHealthChecksService) List(project string, region string) *RegionHealthChecksListCall { - c := &RegionHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - region: The name of the region where the network endpoint group is +// located. It should comply with RFC1035. 
+func (r *RegionNetworkEndpointGroupsService) List(project string, region string) *RegionNetworkEndpointGroupsListCall { + c := &RegionNetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -128016,8 +147319,18 @@ func (r *RegionHealthChecksService) List(project string, region string) *RegionH // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionNetworkEndpointGroupsListCall) Filter(filter string) *RegionNetworkEndpointGroupsListCall { c.urlParams_.Set("filter", filter) return c } @@ -128028,7 +147341,7 @@ func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksLi // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthChecksListCall { +func (c *RegionNetworkEndpointGroupsListCall) MaxResults(maxResults int64) *RegionNetworkEndpointGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -128042,7 +147355,7 @@ func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthC // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecksListCall { +func (c *RegionNetworkEndpointGroupsListCall) OrderBy(orderBy string) *RegionNetworkEndpointGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -128050,7 +147363,7 @@ func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecks // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthChecksListCall { +func (c *RegionNetworkEndpointGroupsListCall) PageToken(pageToken string) *RegionNetworkEndpointGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -128059,7 +147372,7 @@ func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthCh // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionHealthChecksListCall { +func (c *RegionNetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNetworkEndpointGroupsListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -128067,7 +147380,7 @@ func (c *RegionHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess b // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionHealthChecksListCall) Fields(s ...googleapi.Field) *RegionHealthChecksListCall { +func (c *RegionNetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -128077,7 +147390,7 @@ func (c *RegionHealthChecksListCall) Fields(s ...googleapi.Field) *RegionHealthC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionHealthChecksListCall) IfNoneMatch(entityTag string) *RegionHealthChecksListCall { +func (c *RegionNetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *RegionNetworkEndpointGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -128085,21 +147398,21 @@ func (c *RegionHealthChecksListCall) IfNoneMatch(entityTag string) *RegionHealth // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksListCall) Context(ctx context.Context) *RegionHealthChecksListCall { +func (c *RegionNetworkEndpointGroupsListCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksListCall) Header() http.Header { +func (c *RegionNetworkEndpointGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -128112,7 +147425,7 @@ func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -128126,33 +147439,33 @@ func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.list" call. -// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheckList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionNetworkEndpointGroups.list" call. +// Exactly one of *NetworkEndpointGroupList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
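// Illustrative sketch (not part of the vendored source): the Do documentation
// above points at googleapi.IsNotModified for recognizing a 304 triggered by
// IfNoneMatch. A hypothetical helper showing that pattern, with the etag taken
// from an earlier response's ServerResponse header; project and region are
// placeholders.
func exampleConditionalListNEGs(svc *Service, etag string) (*NetworkEndpointGroupList, bool, error) {
	list, err := svc.RegionNetworkEndpointGroups.
		List("my-project", "us-central1").
		IfNoneMatch(etag).
		Do()
	if googleapi.IsNotModified(err) {
		// The cached copy identified by etag is still current.
		return nil, true, nil
	}
	if err != nil {
		return nil, false, err
	}
	return list, false, nil
}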
-func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { +func (c *RegionNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &HealthCheckList{ + ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -128164,17 +147477,17 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh } return ret, nil // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project.", - // "flatPath": "projects/{project}/regions/{region}/healthChecks", + // "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", + // "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", // "httpMethod": "GET", - // "id": "compute.regionHealthChecks.list", + // "id": "compute.regionNetworkEndpointGroups.list", // "parameterOrder": [ // "project", // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -128204,9 +147517,8 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -128216,9 +147528,9 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/healthChecks", + // "path": "projects/{project}/regions/{region}/networkEndpointGroups", // "response": { - // "$ref": "HealthCheckList" + // "$ref": "NetworkEndpointGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -128232,7 +147544,7 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
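// Illustrative sketch (not part of the vendored source): the Pages helper
// documented above drives the pageToken/nextPageToken loop itself and invokes
// the callback once per page. A hypothetical iteration over regional NEGs
// (placeholder project and region) that collects every group name:
func exampleAllRegionalNEGNames(ctx context.Context, svc *Service) ([]string, error) {
	var names []string
	err := svc.RegionNetworkEndpointGroups.
		List("my-project", "us-central1").
		Pages(ctx, func(page *NetworkEndpointGroupList) error {
			for _, neg := range page.Items {
				names = append(names, neg.Name)
			}
			return nil // returning a non-nil error halts the iteration
		})
	return names, err
}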
-func (c *RegionHealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { +func (c *RegionNetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -128250,32 +147562,40 @@ func (c *RegionHealthChecksListCall) Pages(ctx context.Context, f func(*HealthCh } } -// method id "compute.regionHealthChecks.patch": +// method id "compute.regionNetworkFirewallPolicies.addAssociation": -type RegionHealthChecksPatchCall struct { - s *Service - project string - region string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkFirewallPoliciesAddAssociationCall struct { + s *Service + project string + region string + firewallPolicy string + firewallpolicyassociation *FirewallPolicyAssociation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HealthCheck resource in the specified project using -// the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. +// AddAssociation: Inserts an association for the specified network +// firewall policy. // -// - healthCheck: Name of the HealthCheck resource to patch. +// - firewallPolicy: Name of the firewall policy to update. // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionHealthChecksService) Patch(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksPatchCall { - c := &RegionHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionNetworkFirewallPoliciesService) AddAssociation(project string, region string, firewallPolicy string, firewallpolicyassociation *FirewallPolicyAssociation) *RegionNetworkFirewallPoliciesAddAssociationCall { + c := &RegionNetworkFirewallPoliciesAddAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheck = healthCheck - c.healthcheck = healthcheck + c.firewallPolicy = firewallPolicy + c.firewallpolicyassociation = firewallpolicyassociation + return c +} + +// ReplaceExistingAssociation sets the optional parameter +// "replaceExistingAssociation": Indicates whether or not to replace it +// if an association already exists. This is false by default, in which +// case an error will be returned if an association already exists. +func (c *RegionNetworkFirewallPoliciesAddAssociationCall) ReplaceExistingAssociation(replaceExistingAssociation bool) *RegionNetworkFirewallPoliciesAddAssociationCall { + c.urlParams_.Set("replaceExistingAssociation", fmt.Sprint(replaceExistingAssociation)) return c } @@ -128290,7 +147610,7 @@ func (r *RegionHealthChecksService) Patch(project string, region string, healthC // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *RegionHealthChecksPatchCall) RequestId(requestId string) *RegionHealthChecksPatchCall { +func (c *RegionNetworkFirewallPoliciesAddAssociationCall) RequestId(requestId string) *RegionNetworkFirewallPoliciesAddAssociationCall { c.urlParams_.Set("requestId", requestId) return c } @@ -128298,7 +147618,7 @@ func (c *RegionHealthChecksPatchCall) RequestId(requestId string) *RegionHealthC // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksPatchCall) Fields(s ...googleapi.Field) *RegionHealthChecksPatchCall { +func (c *RegionNetworkFirewallPoliciesAddAssociationCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesAddAssociationCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -128306,21 +147626,21 @@ func (c *RegionHealthChecksPatchCall) Fields(s ...googleapi.Field) *RegionHealth // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksPatchCall) Context(ctx context.Context) *RegionHealthChecksPatchCall { +func (c *RegionNetworkFirewallPoliciesAddAssociationCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesAddAssociationCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksPatchCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesAddAssociationCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesAddAssociationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -128328,53 +147648,53 @@ func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyassociation) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/addAssociation") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheck": c.healthCheck, + "project": c.project, + "region": c.region, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.patch" call. +// Do executes the "compute.regionNetworkFirewallPolicies.addAssociation" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -128388,18 +147708,18 @@ func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", - // "httpMethod": "PATCH", - // "id": "compute.regionHealthChecks.patch", + // "description": "Inserts an association for the specified network firewall policy.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/addAssociation", + // "httpMethod": "POST", + // "id": "compute.regionNetworkFirewallPolicies.addAssociation", // "parameterOrder": [ // "project", // "region", - // "healthCheck" + // "firewallPolicy" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to patch.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -128419,15 +147739,20 @@ func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, + // "replaceExistingAssociation": { + // "description": "Indicates whether or not to replace it if an association already exists. This is false by default, in which case an error will be returned if an association already exists.", + // "location": "query", + // "type": "boolean" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + // "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/addAssociation", // "request": { - // "$ref": "HealthCheck" + // "$ref": "FirewallPolicyAssociation" // }, // "response": { // "$ref": "Operation" @@ -128440,31 +147765,48 @@ func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionHealthChecks.update": +// method id "compute.regionNetworkFirewallPolicies.addRule": -type RegionHealthChecksUpdateCall struct { - s *Service - project string - region string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkFirewallPoliciesAddRuleCall struct { + s *Service + project string + region string + firewallPolicy string + firewallpolicyrule *FirewallPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HealthCheck resource in the specified project using -// the data included in the request. +// AddRule: Inserts a rule into a network firewall policy. // -// - healthCheck: Name of the HealthCheck resource to update. +// - firewallPolicy: Name of the firewall policy to update. // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionHealthChecksService) Update(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksUpdateCall { - c := &RegionHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionNetworkFirewallPoliciesService) AddRule(project string, region string, firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *RegionNetworkFirewallPoliciesAddRuleCall { + c := &RegionNetworkFirewallPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheck = healthCheck - c.healthcheck = healthcheck + c.firewallPolicy = firewallPolicy + c.firewallpolicyrule = firewallpolicyrule + return c +} + +// MaxPriority sets the optional parameter "maxPriority": When +// rule.priority is not specified, auto choose a unused priority between +// minPriority and maxPriority>. This field is exclusive with +// rule.priority. +func (c *RegionNetworkFirewallPoliciesAddRuleCall) MaxPriority(maxPriority int64) *RegionNetworkFirewallPoliciesAddRuleCall { + c.urlParams_.Set("maxPriority", fmt.Sprint(maxPriority)) + return c +} + +// MinPriority sets the optional parameter "minPriority": When +// rule.priority is not specified, auto choose a unused priority between +// minPriority and maxPriority>. This field is exclusive with +// rule.priority. +func (c *RegionNetworkFirewallPoliciesAddRuleCall) MinPriority(minPriority int64) *RegionNetworkFirewallPoliciesAddRuleCall { + c.urlParams_.Set("minPriority", fmt.Sprint(minPriority)) return c } @@ -128479,7 +147821,7 @@ func (r *RegionHealthChecksService) Update(project string, region string, health // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
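// A minimal usage sketch of the addAssociation call generated above, assuming
// this is the google.golang.org/api/compute/v1 client; the project, region,
// policy, and network names are placeholders, and the FirewallPolicyAssociation
// field names and the ReplaceExistingAssociation setter (matching the
// replaceExistingAssociation query parameter described above) are taken from
// the compute/v1 schema rather than from this hunk.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService typically authenticates with Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	assoc := &compute.FirewallPolicyAssociation{
		Name:             "example-association",                            // placeholder
		AttachmentTarget: "projects/my-project/global/networks/my-network", // placeholder
	}
	// The call mirrors the generated builder: required path parameters first,
	// then optional query parameters, then Do, which returns an *Operation.
	op, err := svc.RegionNetworkFirewallPolicies.
		AddAssociation("my-project", "us-central1", "my-policy", assoc).
		ReplaceExistingAssociation(false).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("started operation %s (status %s)", op.Name, op.Status)
}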
-func (c *RegionHealthChecksUpdateCall) RequestId(requestId string) *RegionHealthChecksUpdateCall { +func (c *RegionNetworkFirewallPoliciesAddRuleCall) RequestId(requestId string) *RegionNetworkFirewallPoliciesAddRuleCall { c.urlParams_.Set("requestId", requestId) return c } @@ -128487,7 +147829,7 @@ func (c *RegionHealthChecksUpdateCall) RequestId(requestId string) *RegionHealth // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksUpdateCall) Fields(s ...googleapi.Field) *RegionHealthChecksUpdateCall { +func (c *RegionNetworkFirewallPoliciesAddRuleCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesAddRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -128495,21 +147837,21 @@ func (c *RegionHealthChecksUpdateCall) Fields(s ...googleapi.Field) *RegionHealt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksUpdateCall) Context(ctx context.Context) *RegionHealthChecksUpdateCall { +func (c *RegionNetworkFirewallPoliciesAddRuleCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesAddRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksUpdateCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesAddRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -128517,53 +147859,53 @@ func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/addRule") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheck": c.healthCheck, + "project": c.project, + "region": c.region, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.update" call. +// Do executes the "compute.regionNetworkFirewallPolicies.addRule" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -128577,23 +147919,35 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", - // "httpMethod": "PUT", - // "id": "compute.regionHealthChecks.update", + // "description": "Inserts a rule into a network firewall policy.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/addRule", + // "httpMethod": "POST", + // "id": "compute.regionNetworkFirewallPolicies.addRule", // "parameterOrder": [ // "project", // "region", - // "healthCheck" + // "firewallPolicy" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to update.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "maxPriority": { + // "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. This field is exclusive with rule.priority.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "minPriority": { + // "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. 
This field is exclusive with rule.priority.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -128614,9 +147968,9 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", + // "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/addRule", // "request": { - // "$ref": "HealthCheck" + // "$ref": "FirewallPolicyRule" // }, // "response": { // "$ref": "Operation" @@ -128629,43 +147983,28 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.regionInstanceGroupManagers.abandonInstances": +// method id "compute.regionNetworkFirewallPolicies.cloneRules": -type RegionInstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkFirewallPoliciesCloneRulesCall struct { + s *Service + project string + region string + firewallPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Flags the specified instances to be immediately -// removed from the managed instance group. Abandoning an instance does -// not delete the instance, but it does remove the instance from any -// target pools that are applied by the managed instance group. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you abandon. This operation is marked as -// DONE when the action is scheduled even if the instances have not yet -// been removed from the group. You must separately verify the status of -// the abandoning action with the listmanagedinstances method. If the -// group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -// You can specify a maximum of 1000 instances with this method per -// request. +// CloneRules: Copies rules to the specified network firewall policy. // -// - instanceGroupManager: Name of the managed instance group. +// - firewallPolicy: Name of the firewall policy to update. // - project: Project ID for this request. // - region: Name of the region scoping this request. 
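// A sketch of the addRule call above, under the same assumptions and imports
// as the addAssociation example; the FirewallPolicyRule, FirewallPolicyRuleMatcher,
// and Layer4Config field names come from the compute/v1 schema and the values
// are illustrative only.
func addExampleRule(ctx context.Context, svc *compute.Service) error {
	rule := &compute.FirewallPolicyRule{
		Action:    "allow",
		Direction: "INGRESS",
		Match: &compute.FirewallPolicyRuleMatcher{
			SrcIpRanges: []string{"10.0.0.0/8"},
			Layer4Configs: []*compute.FirewallPolicyRuleMatcherLayer4Config{
				{IpProtocol: "tcp", Ports: []string{"443"}},
			},
		},
		// Priority is left unset so the server chooses one between MinPriority
		// and MaxPriority, as described in the method documentation above.
	}
	op, err := svc.RegionNetworkFirewallPolicies.
		AddRule("my-project", "us-central1", "my-policy", rule).
		MinPriority(1000).
		MaxPriority(2000).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("addRule operation: %s", op.Name)
	return nil
}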
-func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { - c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionNetworkFirewallPoliciesService) CloneRules(project string, region string, firewallPolicy string) *RegionNetworkFirewallPoliciesCloneRulesCall { + c := &RegionNetworkFirewallPoliciesCloneRulesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest + c.firewallPolicy = firewallPolicy return c } @@ -128680,15 +148019,22 @@ func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, re // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionNetworkFirewallPoliciesCloneRulesCall) RequestId(requestId string) *RegionNetworkFirewallPoliciesCloneRulesCall { c.urlParams_.Set("requestId", requestId) return c } +// SourceFirewallPolicy sets the optional parameter +// "sourceFirewallPolicy": The firewall policy from which to copy rules. +func (c *RegionNetworkFirewallPoliciesCloneRulesCall) SourceFirewallPolicy(sourceFirewallPolicy string) *RegionNetworkFirewallPoliciesCloneRulesCall { + c.urlParams_.Set("sourceFirewallPolicy", sourceFirewallPolicy) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionNetworkFirewallPoliciesCloneRulesCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesCloneRulesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -128696,21 +148042,21 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionNetworkFirewallPoliciesCloneRulesCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesCloneRulesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
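// A sketch of the cloneRules call above with its sourceFirewallPolicy query
// parameter, again assuming the compute/v1 client from the first example; the
// source policy URL is a placeholder.
func cloneExampleRules(ctx context.Context, svc *compute.Service) error {
	op, err := svc.RegionNetworkFirewallPolicies.
		CloneRules("my-project", "us-central1", "my-policy").
		SourceFirewallPolicy("projects/my-project/regions/us-central1/firewallPolicies/source-policy").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("cloneRules operation: %s", op.Name)
	return nil
}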
-func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesCloneRulesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesCloneRulesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -128718,14 +148064,9 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/cloneRules") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -128733,38 +148074,38 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. +// Do executes the "compute.regionNetworkFirewallPolicies.cloneRules" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNetworkFirewallPoliciesCloneRulesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -128778,19 +148119,20 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. 
This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "description": "Copies rules to the specified network firewall policy.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/cloneRules", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.abandonInstances", + // "id": "compute.regionNetworkFirewallPolicies.cloneRules", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "firewallPolicy" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -128804,6 +148146,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -128811,12 +148154,14 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "sourceFirewallPolicy": { + // "description": "The firewall policy from which to copy rules.", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" - // }, + // "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/cloneRules", // "response": { // "$ref": "Operation" // }, @@ -128828,40 +148173,51 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C } -// method id "compute.regionInstanceGroupManagers.applyUpdatesToInstances": +// method id "compute.regionNetworkFirewallPolicies.delete": -type RegionInstanceGroupManagersApplyUpdatesToInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkFirewallPoliciesDeleteCall struct { + s *Service + project string + region string + firewallPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ApplyUpdatesToInstances: Apply updates to selected instances the -// managed instance group. +// Delete: Deletes the specified network firewall policy. // -// - instanceGroupManager: The name of the managed instance group, -// should conform to RFC1035. +// - firewallPolicy: Name of the firewall policy to delete. // - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. -func (r *RegionInstanceGroupManagersService) ApplyUpdatesToInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { - c := &RegionInstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionNetworkFirewallPoliciesService) Delete(project string, region string, firewallPolicy string) *RegionNetworkFirewallPoliciesDeleteCall { + c := &RegionNetworkFirewallPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersapplyupdatesrequest = regioninstancegroupmanagersapplyupdatesrequest + c.firewallPolicy = firewallPolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. 
The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionNetworkFirewallPoliciesDeleteCall) RequestId(requestId string) *RegionNetworkFirewallPoliciesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *RegionNetworkFirewallPoliciesDeleteCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -128869,21 +148225,21 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...goo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *RegionNetworkFirewallPoliciesDeleteCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -128891,53 +148247,48 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt s } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersapplyupdatesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.applyUpdatesToInstances" call. +// Do executes the "compute.regionNetworkFirewallPolicies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNetworkFirewallPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -128951,19 +148302,20 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog } return ret, nil // { - // "description": "Apply updates to selected instances the managed instance group.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", + // "description": "Deletes the specified network firewall policy.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}", + // "httpMethod": "DELETE", + // "id": "compute.regionNetworkFirewallPolicies.delete", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "firewallPolicy" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group, should conform to RFC1035.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -128975,16 +148327,19 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" - // }, + // "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}", // "response": { // "$ref": "Operation" // }, @@ -128996,137 +148351,122 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog } -// method id "compute.regionInstanceGroupManagers.createInstances": +// method id "compute.regionNetworkFirewallPolicies.get": -type RegionInstanceGroupManagersCreateInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkFirewallPoliciesGetCall struct { + s *Service + project string + region string + firewallPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// CreateInstances: Creates instances with per-instance configs in this -// regional managed instance group. Instances are created using the -// current instance template. The create instances operation is marked -// DONE if the createInstances request is successful. The underlying -// actions take additional time. You must separately verify the status -// of the creating or actions with the listmanagedinstances method. +// Get: Returns the specified network firewall policy. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. +// - firewallPolicy: Name of the firewall policy to get. // - project: Project ID for this request. -// - region: The name of the region where the managed instance group is -// located. It should conform to RFC1035. -func (r *RegionInstanceGroupManagersService) CreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest) *RegionInstanceGroupManagersCreateInstancesCall { - c := &RegionInstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionNetworkFirewallPoliciesService) Get(project string, region string, firewallPolicy string) *RegionNetworkFirewallPoliciesGetCall { + c := &RegionNetworkFirewallPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerscreateinstancesrequest = regioninstancegroupmanagerscreateinstancesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. 
The request ID -// must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersCreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersCreateInstancesCall { - c.urlParams_.Set("requestId", requestId) + c.firewallPolicy = firewallPolicy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersCreateInstancesCall { +func (c *RegionNetworkFirewallPoliciesGetCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionNetworkFirewallPoliciesGetCall) IfNoneMatch(entityTag string) *RegionNetworkFirewallPoliciesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersCreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersCreateInstancesCall { +func (c *RegionNetworkFirewallPoliciesGetCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersCreateInstancesCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerscreateinstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.createInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionNetworkFirewallPolicies.get" call. +// Exactly one of *FirewallPolicy or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *FirewallPolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNetworkFirewallPoliciesGetCall) Do(opts ...googleapi.CallOption) (*FirewallPolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &FirewallPolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -129138,19 +148478,20 @@ func (c *RegionInstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Creates instances with per-instance configs in this regional managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.createInstances", + // "description": "Returns the specified network firewall policy.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}", + // "httpMethod": "GET", + // "id": "compute.regionNetworkFirewallPolicies.get", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "firewallPolicy" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. 
It should conform to RFC1035.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to get.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -129162,151 +148503,694 @@ func (c *RegionInstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.Ca // "type": "string" // }, // "region": { - // "description": "The name of the region where the managed instance group is located. It should conform to RFC1035.", + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}", + // "response": { + // "$ref": "FirewallPolicy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionNetworkFirewallPolicies.getAssociation": + +type RegionNetworkFirewallPoliciesGetAssociationCall struct { + s *Service + project string + region string + firewallPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetAssociation: Gets an association with the specified name. +// +// - firewallPolicy: Name of the firewall policy to which the queried +// association belongs. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionNetworkFirewallPoliciesService) GetAssociation(project string, region string, firewallPolicy string) *RegionNetworkFirewallPoliciesGetAssociationCall { + c := &RegionNetworkFirewallPoliciesGetAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.firewallPolicy = firewallPolicy + return c +} + +// Name sets the optional parameter "name": The name of the association +// to get from the firewall policy. +func (c *RegionNetworkFirewallPoliciesGetAssociationCall) Name(name string) *RegionNetworkFirewallPoliciesGetAssociationCall { + c.urlParams_.Set("name", name) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNetworkFirewallPoliciesGetAssociationCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesGetAssociationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionNetworkFirewallPoliciesGetAssociationCall) IfNoneMatch(entityTag string) *RegionNetworkFirewallPoliciesGetAssociationCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
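// A sketch of the get and delete calls shown above, using the same assumed
// client and placeholder names as the earlier examples. Get returns a
// *FirewallPolicy rather than an *Operation; the Name and Rules fields used
// below come from the FirewallPolicy schema, not from this hunk.
func inspectAndDeletePolicy(ctx context.Context, svc *compute.Service) error {
	policy, err := svc.RegionNetworkFirewallPolicies.
		Get("my-project", "us-central1", "my-policy").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("policy %s has %d rules", policy.Name, len(policy.Rules))

	op, err := svc.RegionNetworkFirewallPolicies.
		Delete("my-project", "us-central1", "my-policy").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("delete operation: %s", op.Name)
	return nil
}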
+func (c *RegionNetworkFirewallPoliciesGetAssociationCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesGetAssociationCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNetworkFirewallPoliciesGetAssociationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNetworkFirewallPoliciesGetAssociationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/getAssociation") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "firewallPolicy": c.firewallPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNetworkFirewallPolicies.getAssociation" call. +// Exactly one of *FirewallPolicyAssociation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *FirewallPolicyAssociation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNetworkFirewallPoliciesGetAssociationCall) Do(opts ...googleapi.CallOption) (*FirewallPolicyAssociation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &FirewallPolicyAssociation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets an association with the specified name.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/getAssociation", + // "httpMethod": "GET", + // "id": "compute.regionNetworkFirewallPolicies.getAssociation", + // "parameterOrder": [ + // "project", + // "region", + // "firewallPolicy" + // ], + // "parameters": { + // "firewallPolicy": { + // "description": "Name of the firewall policy to which the queried association belongs.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "name": { + // "description": "The name of the association to get from the firewall policy.", // "location": "query", // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersCreateInstancesRequest" - // }, + // "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/getAssociation", // "response": { - // "$ref": "Operation" + // "$ref": "FirewallPolicyAssociation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.delete": +// method id "compute.regionNetworkFirewallPolicies.getEffectiveFirewalls": -type RegionInstanceGroupManagersDeleteCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. +// GetEffectiveFirewalls: Returns the effective firewalls on a given +// network. // -// - instanceGroupManager: Name of the managed instance group to delete. +// - network: Network reference. // - project: Project ID for this request. // - region: Name of the region scoping this request. 
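// A sketch of the getAssociation call above with its optional name query
// parameter; same assumptions and placeholders as the earlier examples, and
// the AttachmentTarget field is taken from the FirewallPolicyAssociation schema.
func getExampleAssociation(ctx context.Context, svc *compute.Service) error {
	assoc, err := svc.RegionNetworkFirewallPolicies.
		GetAssociation("my-project", "us-central1", "my-policy").
		Name("example-association").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("association %s targets %s", assoc.Name, assoc.AttachmentTarget)
	return nil
}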
-func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { - c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionNetworkFirewallPoliciesService) GetEffectiveFirewalls(project string, region string, network string) *RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall { + c := &RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("network", network) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteCall { - c.urlParams_.Set("requestId", requestId) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall) IfNoneMatch(entityTag string) *RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/getEffectiveFirewalls") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNetworkFirewallPolicies.getEffectiveFirewalls" call. +// Exactly one of +// *RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse.ServerResp +// onse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the effective firewalls on a given network.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/getEffectiveFirewalls", + // "httpMethod": "GET", + // "id": "compute.regionNetworkFirewallPolicies.getEffectiveFirewalls", + // "parameterOrder": [ + // "project", + // "region", + // "network" + // ], + // "parameters": { + // "network": { + // "description": "Network reference", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/firewallPolicies/getEffectiveFirewalls", + // "response": { + // "$ref": "RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionNetworkFirewallPolicies.getIamPolicy": + +type RegionNetworkFirewallPoliciesGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *RegionNetworkFirewallPoliciesService) GetIamPolicy(project string, region string, resource string) *RegionNetworkFirewallPoliciesGetIamPolicyCall { + c := &RegionNetworkFirewallPoliciesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *RegionNetworkFirewallPoliciesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *RegionNetworkFirewallPoliciesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNetworkFirewallPoliciesGetIamPolicyCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *RegionNetworkFirewallPoliciesGetIamPolicyCall) IfNoneMatch(entityTag string) *RegionNetworkFirewallPoliciesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNetworkFirewallPoliciesGetIamPolicyCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNetworkFirewallPoliciesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNetworkFirewallPoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNetworkFirewallPolicies.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionNetworkFirewallPoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.regionNetworkFirewallPolicies.getIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/firewallPolicies/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionNetworkFirewallPolicies.getRule": + +type RegionNetworkFirewallPoliciesGetRuleCall struct { + s *Service + project string + region string + firewallPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetRule: Gets a rule of the specified priority. +// +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionNetworkFirewallPoliciesService) GetRule(project string, region string, firewallPolicy string) *RegionNetworkFirewallPoliciesGetRuleCall { + c := &RegionNetworkFirewallPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.firewallPolicy = firewallPolicy + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to get from the firewall policy. 
+func (c *RegionNetworkFirewallPoliciesGetRuleCall) Priority(priority int64) *RegionNetworkFirewallPoliciesGetRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionNetworkFirewallPoliciesGetRuleCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesGetRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *RegionNetworkFirewallPoliciesGetRuleCall) IfNoneMatch(entityTag string) *RegionNetworkFirewallPoliciesGetRuleCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionNetworkFirewallPoliciesGetRuleCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesGetRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesGetRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/getRule") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header.
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionNetworkFirewallPolicies.getRule" call. +// Exactly one of *FirewallPolicyRule or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *FirewallPolicyRule.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNetworkFirewallPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*FirewallPolicyRule, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &FirewallPolicyRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -129318,22 +149202,29 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "httpMethod": "DELETE", - // "id": "compute.regionInstanceGroupManagers.delete", + // "description": "Gets a rule of the specified priority.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/getRule", + // "httpMethod": "GET", + // "id": "compute.regionNetworkFirewallPolicies.getRule", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "firewallPolicy" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group to delete.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to which the queried rule belongs.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "priority": { + // "description": "The priority of the rule to get from the firewall policy.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -129344,63 +149235,46 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/getRule", // "response": { - // "$ref": "Operation" + // "$ref": "FirewallPolicyRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.deleteInstances": +// method id "compute.regionNetworkFirewallPolicies.insert": -type RegionInstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkFirewallPoliciesInsertCall struct { + s *Service + project string + region string + firewallpolicy *FirewallPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteInstances: Flags the specified instances in the managed -// instance group to be immediately deleted. The instances are also -// removed from any target pools of which they were a member. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you delete. The deleteInstances operation is -// marked DONE if the deleteInstances request is successful. The -// underlying actions take additional time. You must separately verify -// the status of the deleting action with the listmanagedinstances -// method. If the group is part of a backend service that has enabled -// connection draining, it can take up to 60 seconds after the -// connection draining duration has elapsed before the VM instance is -// removed or deleted. You can specify a maximum of 1000 instances with -// this method per request. +// Insert: Creates a new network firewall policy in the specified +// project and region. // -// - instanceGroupManager: Name of the managed instance group. // - project: Project ID for this request. // - region: Name of the region scoping this request. 
-func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { - c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionNetworkFirewallPoliciesService) Insert(project string, region string, firewallpolicy *FirewallPolicy) *RegionNetworkFirewallPoliciesInsertCall { + c := &RegionNetworkFirewallPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest + c.firewallpolicy = firewallpolicy return c } @@ -129415,7 +149289,7 @@ func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, reg // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionNetworkFirewallPoliciesInsertCall) RequestId(requestId string) *RegionNetworkFirewallPoliciesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -129423,7 +149297,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId str // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionNetworkFirewallPoliciesInsertCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -129431,21 +149305,21 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.F // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionNetworkFirewallPoliciesInsertCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -129453,14 +149327,14 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) ( } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -129468,38 +149342,37 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) ( } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. +// Do executes the "compute.regionNetworkFirewallPolicies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNetworkFirewallPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -129513,22 +149386,15 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. 
This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "description": "Creates a new network firewall policy in the specified project and region.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.deleteInstances", + // "id": "compute.regionNetworkFirewallPolicies.insert", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -129539,6 +149405,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -129548,9 +149415,9 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "path": "projects/{project}/regions/{region}/firewallPolicies", // "request": { - // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" + // "$ref": "FirewallPolicy" // }, // "response": { // "$ref": "Operation" @@ -129563,118 +149430,201 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca } -// method id "compute.regionInstanceGroupManagers.deletePerInstanceConfigs": +// method id "compute.regionNetworkFirewallPolicies.list": -type RegionInstanceGroupManagersDeletePerInstanceConfigsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkFirewallPoliciesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DeletePerInstanceConfigs: Deletes selected per-instance configs for -// the managed instance group. +// List: Lists all the network firewall policies that have been +// configured for the specified project in the given region. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. // - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. 
-func (r *RegionInstanceGroupManagersService) DeletePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { - c := &RegionInstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionNetworkFirewallPoliciesService) List(project string, region string) *RegionNetworkFirewallPoliciesListCall { + c := &RegionNetworkFirewallPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerdeleteinstanceconfigreq = regioninstancegroupmanagerdeleteinstanceconfigreq + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. 
+func (c *RegionNetworkFirewallPoliciesListCall) Filter(filter string) *RegionNetworkFirewallPoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionNetworkFirewallPoliciesListCall) MaxResults(maxResults int64) *RegionNetworkFirewallPoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionNetworkFirewallPoliciesListCall) OrderBy(orderBy string) *RegionNetworkFirewallPoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionNetworkFirewallPoliciesListCall) PageToken(pageToken string) *RegionNetworkFirewallPoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionNetworkFirewallPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNetworkFirewallPoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { +func (c *RegionNetworkFirewallPoliciesListCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *RegionNetworkFirewallPoliciesListCall) IfNoneMatch(entityTag string) *RegionNetworkFirewallPoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled.
-func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { +func (c *RegionNetworkFirewallPoliciesListCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerdeleteinstanceconfigreq) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.deletePerInstanceConfigs" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionNetworkFirewallPolicies.list" call. +// Exactly one of *FirewallPolicyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *FirewallPolicyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNetworkFirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPolicyList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &FirewallPolicyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -129686,20 +149636,36 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...goo } return ret, nil // { - // "description": "Deletes selected per-instance configs for the managed instance group.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.deletePerInstanceConfigs", + // "description": "Lists all the network firewall policies that have been configured for the specified project in the given region.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies", + // "httpMethod": "GET", + // "id": "compute.regionNetworkFirewallPolicies.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -129710,144 +149676,177 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...goo // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", - // "request": { - // "$ref": "RegionInstanceGroupManagerDeleteInstanceConfigReq" - // }, + // "path": "projects/{project}/regions/{region}/firewallPolicies", // "response": { - // "$ref": "Operation" + // "$ref": "FirewallPolicyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *RegionNetworkFirewallPoliciesListCall) Pages(ctx context.Context, f func(*FirewallPolicyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionInstanceGroupManagersGetCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.regionNetworkFirewallPolicies.patch": + +type RegionNetworkFirewallPoliciesPatchCall struct { + s *Service + project string + region string + firewallPolicy string + firewallpolicy *FirewallPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. +// Patch: Patches the specified network firewall policy. // -// - instanceGroupManager: Name of the managed instance group to return. +// - firewallPolicy: Name of the firewall policy to update. // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { - c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionNetworkFirewallPoliciesService) Patch(project string, region string, firewallPolicy string, firewallpolicy *FirewallPolicy) *RegionNetworkFirewallPoliciesPatchCall { + c := &RegionNetworkFirewallPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager + c.firewallPolicy = firewallPolicy + c.firewallpolicy = firewallpolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionNetworkFirewallPoliciesPatchCall) RequestId(requestId string) *RegionNetworkFirewallPoliciesPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { +func (c *RegionNetworkFirewallPoliciesPatchCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { +func (c *RegionNetworkFirewallPoliciesPatchCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +// Do executes the "compute.regionNetworkFirewallPolicies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionNetworkFirewallPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceGroupManager{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -129859,19 +149858,20 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.get", + // "description": "Patches the specified network firewall policy.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}", + // "httpMethod": "PATCH", + // "id": "compute.regionNetworkFirewallPolicies.patch", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "firewallPolicy" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group to return.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -129885,51 +149885,62 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}", + // "request": { + // "$ref": "FirewallPolicy" + // }, // "response": { - // "$ref": "InstanceGroupManager" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionInstanceGroupManagers.insert": +// method id "compute.regionNetworkFirewallPolicies.patchRule": -type RegionInstanceGroupManagersInsertCall struct { - s *Service - project string - region string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkFirewallPoliciesPatchRuleCall struct { + s *Service + project string + region string + firewallPolicy string + firewallpolicyrule *FirewallPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, instances in -// the group are created using the specified instance template. This -// operation is marked as DONE when the group is created even if the -// instances in the group have not yet been created. You must separately -// verify the status of the individual instances with the -// listmanagedinstances method. A regional managed instance group can -// contain up to 2000 instances. +// PatchRule: Patches a rule of the specified priority. // +// - firewallPolicy: Name of the firewall policy to update. // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { - c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionNetworkFirewallPoliciesService) PatchRule(project string, region string, firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *RegionNetworkFirewallPoliciesPatchRuleCall { + c := &RegionNetworkFirewallPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instancegroupmanager = instancegroupmanager + c.firewallPolicy = firewallPolicy + c.firewallpolicyrule = firewallpolicyrule + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to patch. +func (c *RegionNetworkFirewallPoliciesPatchRuleCall) Priority(priority int64) *RegionNetworkFirewallPoliciesPatchRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) return c } @@ -129944,7 +149955,7 @@ func (r *RegionInstanceGroupManagersService) Insert(project string, region strin // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *RegionInstanceGroupManagersInsertCall { +func (c *RegionNetworkFirewallPoliciesPatchRuleCall) RequestId(requestId string) *RegionNetworkFirewallPoliciesPatchRuleCall { c.urlParams_.Set("requestId", requestId) return c } @@ -129952,7 +149963,7 @@ func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { +func (c *RegionNetworkFirewallPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesPatchRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -129960,21 +149971,21 @@ func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { +func (c *RegionNetworkFirewallPoliciesPatchRuleCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesPatchRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesPatchRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -129982,14 +149993,14 @@ func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchRule") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -129997,37 +150008,38 @@ func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.insert" call. +// Do executes the "compute.regionNetworkFirewallPolicies.patchRule" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNetworkFirewallPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -130041,15 +150053,29 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method. A regional managed instance group can contain up to 2000 instances.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers", + // "description": "Patches a rule of the specified priority.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchRule", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.insert", + // "id": "compute.regionNetworkFirewallPolicies.patchRule", // "parameterOrder": [ // "project", - // "region" + // "region", + // "firewallPolicy" // ], // "parameters": { + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "priority": { + // "description": "The priority of the rule to patch.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -130060,6 +150086,7 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -130069,9 +150096,9 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers", + // "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchRule", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "FirewallPolicyRule" // }, // "response": { // "$ref": "Operation" @@ -130084,188 +150111,132 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) } -// method id 
"compute.regionInstanceGroupManagers.list": +// method id "compute.regionNetworkFirewallPolicies.removeAssociation": -type RegionInstanceGroupManagersListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionNetworkFirewallPoliciesRemoveAssociationCall struct { + s *Service + project string + region string + firewallPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of managed instance groups that are -// contained within the specified region. +// RemoveAssociation: Removes an association for the specified network +// firewall policy. // +// - firewallPolicy: Name of the firewall policy to update. // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { - c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionNetworkFirewallPoliciesService) RemoveAssociation(project string, region string, firewallPolicy string) *RegionNetworkFirewallPoliciesRemoveAssociationCall { + c := &RegionNetworkFirewallPoliciesRemoveAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.firewallPolicy = firewallPolicy return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. 
(Default: `500`) -func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("pageToken", pageToken) +// Name sets the optional parameter "name": Name for the association +// that will be removed. +func (c *RegionNetworkFirewallPoliciesRemoveAssociationCall) Name(name string) *RegionNetworkFirewallPoliciesRemoveAssociationCall { + c.urlParams_.Set("name", name) return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *RegionInstanceGroupManagersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionNetworkFirewallPoliciesRemoveAssociationCall) RequestId(requestId string) *RegionNetworkFirewallPoliciesRemoveAssociationCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
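// --- Editor's illustrative sketch; not part of the vendored patch. ---
// Removing a named association from a regional network firewall policy with the
// RemoveAssociation builder defined above (its Do method follows just below).
// Package, imports and the *compute.Service construction are as in the PatchRule
// sketch earlier in this hunk; all names are hypothetical.
//
//	func removeAssociationExample(ctx context.Context, svc *compute.Service) error {
//		op, err := svc.RegionNetworkFirewallPolicies.
//			RemoveAssociation("example-project", "us-central1", "example-policy").
//			Name("example-association"). // optional: which association to remove
//			Context(ctx).
//			Do()
//		if err != nil {
//			return err
//		}
//		log.Printf("removeAssociation returned operation %q", op.Name)
//		return nil
//	}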
-func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { +func (c *RegionNetworkFirewallPoliciesRemoveAssociationCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesRemoveAssociationCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { +func (c *RegionNetworkFirewallPoliciesRemoveAssociationCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesRemoveAssociationCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesRemoveAssociationCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesRemoveAssociationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/removeAssociation") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.list" call. -// Exactly one of *RegionInstanceGroupManagerList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { +// Do executes the "compute.regionNetworkFirewallPolicies.removeAssociation" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionNetworkFirewallPoliciesRemoveAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &RegionInstanceGroupManagerList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -130277,35 +150248,25 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.list", + // "description": "Removes an association for the specified network firewall policy.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/removeAssociation", + // "httpMethod": "POST", + // "id": "compute.regionNetworkFirewallPolicies.removeAssociation", // "parameterOrder": [ // "project", - // "region" + // "region", + // "firewallPolicy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "name": { + // "description": "Name for the association that will be removed.", // "location": "query", // "type": "string" // }, @@ -130319,242 +150280,153 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers", + // "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/removeAssociation", // "response": { - // "$ref": "RegionInstanceGroupManagerList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.listErrors": +// method id "compute.regionNetworkFirewallPolicies.removeRule": -type RegionInstanceGroupManagersListErrorsCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionNetworkFirewallPoliciesRemoveRuleCall struct { + s *Service + project string + region string + firewallPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListErrors: Lists all errors thrown by actions on instances for a -// given regional managed instance group. The filter and orderBy query -// parameters are not supported. +// RemoveRule: Deletes a rule of the specified priority. // -// - instanceGroupManager: The name of the managed instance group. It -// must be a string that meets the requirements in RFC1035, or an -// unsigned long integer: must match regexp pattern: (?:a-z -// (?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}. +// - firewallPolicy: Name of the firewall policy to update. // - project: Project ID for this request. -// - region: Name of the region scoping this request. This should -// conform to RFC1035. -func (r *RegionInstanceGroupManagersService) ListErrors(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListErrorsCall { - c := &RegionInstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionNetworkFirewallPoliciesService) RemoveRule(project string, region string, firewallPolicy string) *RegionNetworkFirewallPoliciesRemoveRuleCall { + c := &RegionNetworkFirewallPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. 
The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionInstanceGroupManagersListErrorsCall) Filter(filter string) *RegionInstanceGroupManagersListErrorsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *RegionInstanceGroupManagersListErrorsCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListErrorsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *RegionInstanceGroupManagersListErrorsCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListErrorsCall { - c.urlParams_.Set("orderBy", orderBy) + c.firewallPolicy = firewallPolicy return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupManagersListErrorsCall) PageToken(pageToken string) *RegionInstanceGroupManagersListErrorsCall { - c.urlParams_.Set("pageToken", pageToken) +// Priority sets the optional parameter "priority": The priority of the +// rule to remove from the firewall policy. 
+func (c *RegionNetworkFirewallPoliciesRemoveRuleCall) Priority(priority int64) *RegionNetworkFirewallPoliciesRemoveRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *RegionInstanceGroupManagersListErrorsCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListErrorsCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionNetworkFirewallPoliciesRemoveRuleCall) RequestId(requestId string) *RegionNetworkFirewallPoliciesRemoveRuleCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListErrorsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListErrorsCall { +func (c *RegionNetworkFirewallPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesRemoveRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersListErrorsCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListErrorsCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListErrorsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListErrorsCall { +func (c *RegionNetworkFirewallPoliciesRemoveRuleCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesRemoveRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
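// --- Editor's illustrative sketch; not part of the vendored patch. ---
// Deleting the rule at a given priority with the RemoveRule builder defined
// above (its Do method follows just below), reusing a *compute.Service built as
// in the PatchRule sketch; values are hypothetical. Supplying RequestId makes a
// retried call idempotent, per the doc comment above.
//
//	func removeRuleExample(ctx context.Context, svc *compute.Service, requestID string) error {
//		op, err := svc.RegionNetworkFirewallPolicies.
//			RemoveRule("example-project", "us-central1", "example-policy").
//			Priority(1000).       // the rule to delete
//			RequestId(requestID). // optional idempotency key (a non-zero UUID)
//			Context(ctx).
//			Do()
//		if err != nil {
//			return err
//		}
//		log.Printf("removeRule returned operation %q", op.Name)
//		return nil
//	}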
-func (c *RegionInstanceGroupManagersListErrorsCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesRemoveRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/removeRule") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "firewallPolicy": c.firewallPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.listErrors" call. -// Exactly one of *RegionInstanceGroupManagersListErrorsResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *RegionInstanceGroupManagersListErrorsResponse.ServerResponse.Header -// or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListErrorsResponse, error) { +// Do executes the "compute.regionNetworkFirewallPolicies.removeRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionNetworkFirewallPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &RegionInstanceGroupManagersListErrorsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -130566,45 +150438,29 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Lists all errors thrown by actions on instances for a given regional managed instance group. The filter and orderBy query parameters are not supported.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.listErrors", + // "description": "Deletes a rule of the specified priority.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/removeRule", + // "httpMethod": "POST", + // "id": "compute.regionNetworkFirewallPolicies.removeRule", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "firewallPolicy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group. 
It must be a string that meets the requirements in RFC1035, or an unsigned long integer: must match regexp pattern: (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}.", + // "firewallPolicy": { + // "description": "Name of the firewall policy to update.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", + // "priority": { + // "description": "The priority of the rule to remove from the firewall policy.", + // "format": "int32", // "location": "query", - // "minimum": "0", // "type": "integer" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -130613,152 +150469,62 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request. This should conform to RFC1035.", + // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", - // "type": "boolean" + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", + // "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/removeRule", // "response": { - // "$ref": "RegionInstanceGroupManagersListErrorsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupManagersListErrorsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListErrorsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.listManagedInstances": - -type RegionInstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// ListManagedInstances: Lists the instances in the managed instance -// group and instances that are scheduled to be created. The list -// includes any current actions that the group has scheduled for its -// instances. The orderBy query parameter is not supported. -// -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { - c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. 
You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} +// method id "compute.regionNetworkFirewallPolicies.setIamPolicy": -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("pageToken", pageToken) - return c +type RegionNetworkFirewallPoliciesSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. 
+// - resource: Name or id of the resource for this request. +func (r *RegionNetworkFirewallPoliciesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionNetworkFirewallPoliciesSetIamPolicyCall { + c := &RegionNetworkFirewallPoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionNetworkFirewallPoliciesSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -130766,21 +150532,21 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...google // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionNetworkFirewallPoliciesSetIamPolicyCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -130788,9 +150554,14 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt stri } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{resource}/setIamPolicy") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -130798,42 +150569,40 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt stri } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. -// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade -// r or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { +// Do executes the "compute.regionNetworkFirewallPolicies.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionNetworkFirewallPoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &RegionInstanceGroupManagersListInstancesResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -130845,45 +150614,16 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{resource}/setIamPolicy", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.listManagedInstances", + // "id": "compute.regionNetworkFirewallPolicies.setIamPolicy", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -130892,153 +150632,67 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "path": "projects/{project}/regions/{region}/firewallPolicies/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, // "response": { - // "$ref": "RegionInstanceGroupManagersListInstancesResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.listPerInstanceConfigs": +// method id "compute.regionNetworkFirewallPolicies.testIamPermissions": -type RegionInstanceGroupManagersListPerInstanceConfigsCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNetworkFirewallPoliciesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListPerInstanceConfigs: Lists all of the per-instance configs defined -// for the managed instance group. The orderBy query parameter is not -// supported. +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. // - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. 
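// --- Editor's illustrative sketch; not part of the vendored patch. ---
// Replacing the IAM policy on a regional network firewall policy via the
// SetIamPolicy builder defined above. The *compute.Service is built as in the
// PatchRule sketch earlier in this hunk; the resource name and the contents of
// the RegionSetPolicyRequest are hypothetical.
//
//	func setIamPolicyExample(ctx context.Context, svc *compute.Service, req *compute.RegionSetPolicyRequest) error {
//		policy, err := svc.RegionNetworkFirewallPolicies.
//			SetIamPolicy("example-project", "us-central1", "example-policy", req).
//			Context(ctx).
//			Do()
//		if err != nil {
//			return err
//		}
//		log.Printf("policy etag after update: %s", policy.Etag)
//		return nil
//	}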
-func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c := &RegionInstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *RegionNetworkFirewallPoliciesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionNetworkFirewallPoliciesTestIamPermissionsCall { + c := &RegionNetworkFirewallPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". 
This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListPerInstanceConfigsCall { +func (c *RegionNetworkFirewallPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -131046,21 +150700,21 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...goog // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListPerInstanceConfigsCall { +func (c *RegionNetworkFirewallPoliciesTestIamPermissionsCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
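// Editor's note (illustrative sketch only, not part of the vendored file): the
// TestIamPermissions doc above describes asking which permissions the caller
// holds on a regional network firewall policy. A minimal sketch, assuming the
// vendored package is the generated compute client (imported here as
// "compute"), that the Service struct exposes a RegionNetworkFirewallPolicies
// field, and that TestPermissionsRequest/TestPermissionsResponse carry a
// Permissions []string field as other Compute IAM types do; the project,
// region, resource and permission names are placeholders:
//
//	ctx := context.Background()
//	svc, err := compute.NewService(ctx) // Application Default Credentials assumed
//	if err != nil {
//		log.Fatal(err)
//	}
//	req := &compute.TestPermissionsRequest{
//		Permissions: []string{"compute.firewallPolicies.get"}, // placeholder permission
//	}
//	resp, err := svc.RegionNetworkFirewallPolicies.
//		TestIamPermissions("my-project", "us-central1", "my-policy", req).
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("granted:", resp.Permissions)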
-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { +func (c *RegionNetworkFirewallPoliciesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNetworkFirewallPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -131068,9 +150722,14 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt st } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -131078,42 +150737,40 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt st } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.listPerInstanceConfigs" call. -// Exactly one of *RegionInstanceGroupManagersListInstanceConfigsResp or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *RegionInstanceGroupManagersListInstanceConfigsResp.ServerResponse.Hea -// der or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstanceConfigsResp, error) { +// Do executes the "compute.regionNetworkFirewallPolicies.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNetworkFirewallPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &RegionInstanceGroupManagersListInstanceConfigsResp{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -131125,45 +150782,16 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl } return ret, nil // { - // "description": "Lists all of the per-instance configs defined for the managed instance group. The orderBy query parameter is not supported.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{resource}/testIamPermissions", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", + // "id": "compute.regionNetworkFirewallPolicies.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -131172,20 +150800,26 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "description": "The name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + // "path": "projects/{project}/regions/{region}/firewallPolicies/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -131196,61 +150830,30 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstanceConfigsResp) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.patch": +// method id "compute.regionNotificationEndpoints.delete": -type RegionInstanceGroupManagersPatchCall struct { +type RegionNotificationEndpointsDeleteCall struct { s *Service project string region string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager + notificationEndpoint string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listmanagedinstances method. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. If you update your group to specify a new -// template or instance configuration, it's possible that your intended -// specification for each VM in the group is different from the current -// state of that VM. To learn how to apply an updated configuration to -// the VMs in a MIG, see Updating instances in a MIG. +// Delete: Deletes the specified NotificationEndpoint in the given +// region // -// - instanceGroupManager: The name of the instance group manager. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { - c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - notificationEndpoint: Name of the NotificationEndpoint resource to +// delete. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionNotificationEndpointsService) Delete(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsDeleteCall { + c := &RegionNotificationEndpointsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.notificationEndpoint = notificationEndpoint return c } @@ -131265,7 +150868,7 @@ func (r *RegionInstanceGroupManagersService) Patch(project string, region string // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
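// Editor's note (illustrative sketch only, not part of the vendored file): the
// RequestId doc above explains how a caller-supplied UUID makes retries
// idempotent. A minimal sketch, assuming the vendored generated compute client
// with a RegionNotificationEndpoints field on the Service struct, and svc/ctx
// built as in the earlier sketch; project, region, endpoint name and the UUID
// value are placeholders:
//
//	reqID := "00112233-4455-6677-8899-aabbccddeeff" // reuse the same ID when retrying
//	op, err := svc.RegionNotificationEndpoints.
//		Delete("my-project", "us-central1", "my-endpoint").
//		RequestId(reqID).
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = op // poll the returned Operation until it reports DONE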
-func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { +func (c *RegionNotificationEndpointsDeleteCall) RequestId(requestId string) *RegionNotificationEndpointsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -131273,7 +150876,7 @@ func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *Regi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { +func (c *RegionNotificationEndpointsDeleteCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -131281,21 +150884,21 @@ func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { +func (c *RegionNotificationEndpointsDeleteCall) Context(ctx context.Context) *RegionNotificationEndpointsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { +func (c *RegionNotificationEndpointsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNotificationEndpointsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -131303,16 +150906,11 @@ func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -131320,36 +150918,36 @@ func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Resp googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "notificationEndpoint": c.notificationEndpoint, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.patch" call. +// Do executes the "compute.regionNotificationEndpoints.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -131363,19 +150961,20 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. 
To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "httpMethod": "PATCH", - // "id": "compute.regionInstanceGroupManagers.patch", + // "description": "Deletes the specified NotificationEndpoint in the given region", + // "flatPath": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + // "httpMethod": "DELETE", + // "id": "compute.regionNotificationEndpoints.delete", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "notificationEndpoint" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "notificationEndpoint": { + // "description": "Name of the NotificationEndpoint resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -131389,6 +150988,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -131398,10 +150998,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", // "response": { // "$ref": "Operation" // }, @@ -131413,96 +151010,85 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.patchPerInstanceConfigs": +// method id "compute.regionNotificationEndpoints.get": -type RegionInstanceGroupManagersPatchPerInstanceConfigsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNotificationEndpointsGetCall struct { + s *Service + project string + region string + notificationEndpoint string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// PatchPerInstanceConfigs: Inserts or patches per-instance configs for -// the managed instance group. perInstanceConfig.name serves as a key -// used to distinguish whether to perform insert or patch. +// Get: Returns the specified NotificationEndpoint resource in the given +// region. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. 
-func (r *RegionInstanceGroupManagersService) PatchPerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { - c := &RegionInstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - notificationEndpoint: Name of the NotificationEndpoint resource to +// return. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionNotificationEndpointsService) Get(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsGetCall { + c := &RegionNotificationEndpointsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerpatchinstanceconfigreq = regioninstancegroupmanagerpatchinstanceconfigreq - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { - c.urlParams_.Set("requestId", requestId) + c.notificationEndpoint = notificationEndpoint return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { +func (c *RegionNotificationEndpointsGetCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionNotificationEndpointsGetCall) IfNoneMatch(entityTag string) *RegionNotificationEndpointsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
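// Editor's note (illustrative sketch only, not part of the vendored file): the
// IfNoneMatch doc above describes ETag-conditional reads, surfaced through
// googleapi.IsNotModified. A minimal sketch, assuming the vendored generated
// compute client and svc/ctx as in the earlier sketches; the ETag value is a
// placeholder captured from a previous read of the same resource:
//
//	etag := "previously-captured-etag" // placeholder
//	_, err := svc.RegionNotificationEndpoints.
//		Get("my-project", "us-central1", "my-endpoint").
//		IfNoneMatch(etag).
//		Context(ctx).
//		Do()
//	if googleapi.IsNotModified(err) {
//		// resource unchanged since the captured ETag; keep using the cached copy
//	} else if err != nil {
//		log.Fatal(err)
//	}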
-func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { +func (c *RegionNotificationEndpointsGetCall) Context(ctx context.Context) *RegionNotificationEndpointsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header { +func (c *RegionNotificationEndpointsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNotificationEndpointsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerpatchinstanceconfigreq) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -131510,38 +151096,38 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt s googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "notificationEndpoint": c.notificationEndpoint, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.patchPerInstanceConfigs" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionNotificationEndpoints.get" call. +// Exactly one of *NotificationEndpoint or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NotificationEndpoint.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (*NotificationEndpoint, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &NotificationEndpoint{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -131553,19 +151139,20 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog } return ret, nil // { - // "description": "Inserts or patches per-instance configs for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", + // "description": "Returns the specified NotificationEndpoint resource in the given region.", + // "flatPath": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + // "httpMethod": "GET", + // "id": "compute.regionNotificationEndpoints.get", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "notificationEndpoint" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "notificationEndpoint": { + // "description": "Name of the NotificationEndpoint resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -131577,66 +151164,48 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", - // "request": { - // "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" - // }, + // "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", // "response": { - // "$ref": "Operation" + // "$ref": "NotificationEndpoint" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.recreateInstances": +// method id "compute.regionNotificationEndpoints.insert": -type RegionInstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNotificationEndpointsInsertCall struct { + s *Service + project string + region string + notificationendpoint *NotificationEndpoint + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RecreateInstances: Flags the specified VM instances in the managed -// instance group to be immediately recreated. Each instance is -// recreated using the group's current configuration. This operation is -// marked as DONE when the flag is set even if the instances have not -// yet been recreated. You must separately verify the status of each -// instance by checking its currentAction field; for more information, -// see Checking the status of managed instances. If the group is part of -// a backend service that has enabled connection draining, it can take -// up to 60 seconds after the connection draining duration has elapsed -// before the VM instance is removed or deleted. You can specify a -// maximum of 1000 instances with this method per request. +// Insert: Create a NotificationEndpoint in the specified project in the +// given region using the parameters that are included in the request. // -// - instanceGroupManager: Name of the managed instance group. // - project: Project ID for this request. // - region: Name of the region scoping this request. 
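// Editor's note (illustrative sketch only, not part of the vendored file): the
// Insert doc above describes creating a NotificationEndpoint from a request
// body. A minimal sketch, assuming the vendored generated compute client and
// svc/ctx as in the earlier sketches; the NotificationEndpoint fields other
// than Name are omitted because they are not shown in this hunk, and Name
// itself is assumed from the standard resource shape:
//
//	ep := &compute.NotificationEndpoint{
//		Name: "my-endpoint", // populate the remaining settings as your deployment requires
//	}
//	op, err := svc.RegionNotificationEndpoints.
//		Insert("my-project", "us-central1", ep).
//		RequestId("a1b2c3d4-0000-4000-8000-000000000000"). // optional idempotency key
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = op // poll the returned Operation until it reports DONE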
-func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { - c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionNotificationEndpointsService) Insert(project string, region string, notificationendpoint *NotificationEndpoint) *RegionNotificationEndpointsInsertCall { + c := &RegionNotificationEndpointsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest + c.notificationendpoint = notificationendpoint return c } @@ -131651,7 +151220,7 @@ func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, r // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *RegionNotificationEndpointsInsertCall) RequestId(requestId string) *RegionNotificationEndpointsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -131659,7 +151228,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId s // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *RegionNotificationEndpointsInsertCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -131667,21 +151236,21 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *RegionNotificationEndpointsInsertCall) Context(ctx context.Context) *RegionNotificationEndpointsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *RegionNotificationEndpointsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNotificationEndpointsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -131689,14 +151258,14 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.notificationendpoint) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -131704,38 +151273,37 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. +// Do executes the "compute.regionNotificationEndpoints.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNotificationEndpointsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -131749,22 +151317,15 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. 
You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "description": "Create a NotificationEndpoint in the specified project in the given region using the parameters that are included in the request.", + // "flatPath": "projects/{project}/regions/{region}/notificationEndpoints", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.recreateInstances", + // "id": "compute.regionNotificationEndpoints.insert", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -131775,6 +151336,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -131784,9 +151346,9 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "path": "projects/{project}/regions/{region}/notificationEndpoints", // "request": { - // "$ref": "RegionInstanceGroupManagersRecreateRequest" + // "$ref": "NotificationEndpoint" // }, // "response": { // "$ref": "Operation" @@ -131799,136 +151361,201 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. } -// method id "compute.regionInstanceGroupManagers.resize": +// method id "compute.regionNotificationEndpoints.list": -type RegionInstanceGroupManagersResizeCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNotificationEndpointsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Resize: Changes the intended size of the managed instance group. If -// you increase the size, the group creates new instances using the -// current instance template. If you decrease the size, the group -// deletes one or more instances. The resize operation is marked DONE if -// the resize request is successful. The underlying actions take -// additional time. You must separately verify the status of the -// creating or deleting actions with the listmanagedinstances method. If -// the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. +// List: Lists the NotificationEndpoints for a project in the given +// region. 
// -// - instanceGroupManager: Name of the managed instance group. // - project: Project ID for this request. // - region: Name of the region scoping this request. -// - size: Number of instances that should exist in this instance group -// manager. -func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { - c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionNotificationEndpointsService) List(project string, region string) *RegionNotificationEndpointsListCall { + c := &RegionNotificationEndpointsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. 
For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionNotificationEndpointsListCall) Filter(filter string) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionNotificationEndpointsListCall) MaxResults(maxResults int64) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionNotificationEndpointsListCall) OrderBy(orderBy string) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionNotificationEndpointsListCall) PageToken(pageToken string) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionNotificationEndpointsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
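// Editor's note (illustrative sketch only, not part of the vendored file): the
// Filter, MaxResults and PageToken docs above describe AIP-160 filtering and
// token-based paging for this list call. A minimal sketch, assuming the
// vendored generated compute client, svc/ctx as in the earlier sketches, and
// that NotificationEndpointList exposes Items and NextPageToken as the
// maxResults doc implies; project, region and the filter value are placeholders:
//
//	call := svc.RegionNotificationEndpoints.
//		List("my-project", "us-central1").
//		Filter(`name != example-instance`). // AIP-160 style expression
//		MaxResults(100)
//	for {
//		page, err := call.Context(ctx).Do()
//		if err != nil {
//			log.Fatal(err)
//		}
//		for _, ep := range page.Items {
//			fmt.Println(ep.Name)
//		}
//		if page.NextPageToken == "" {
//			break // no further pages
//		}
//		call = call.PageToken(page.NextPageToken)
//	}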
-func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { +func (c *RegionNotificationEndpointsListCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionNotificationEndpointsListCall) IfNoneMatch(entityTag string) *RegionNotificationEndpointsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { +func (c *RegionNotificationEndpointsListCall) Context(ctx context.Context) *RegionNotificationEndpointsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { +func (c *RegionNotificationEndpointsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNotificationEndpointsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionNotificationEndpoints.list" call. +// Exactly one of *NotificationEndpointList or error will be non-nil. +// Any non-2xx status code is an error. 
Response headers are in either +// *NotificationEndpointList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) (*NotificationEndpointList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &NotificationEndpointList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -131940,21 +151567,36 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances. The resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.resize", + // "description": "Lists the NotificationEndpoints for a project in the given region.", + // "flatPath": "projects/{project}/regions/{region}/notificationEndpoints", + // "httpMethod": "GET", + // "id": "compute.regionNotificationEndpoints.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager", - // "size" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -131967,160 +151609,293 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "size": { - // "description": "Number of instances that should exist in this instance group manager.", - // "format": "int32", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", - // "minimum": "0", - // "required": true, - // "type": "integer" + // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "projects/{project}/regions/{region}/notificationEndpoints", // "response": { - // "$ref": "Operation" + // "$ref": "NotificationEndpointList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionNotificationEndpointsListCall) Pages(ctx context.Context, f func(*NotificationEndpointList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionInstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionOperations.delete": + +type RegionOperationsDeleteCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Sets the instance template to use when creating -// new instances or recreating instances in this group. Existing -// instances are not affected. +// Delete: Deletes the specified region-specific Operations resource. // -// - instanceGroupManager: The name of the managed instance group. +// - operation: Name of the Operations resource to delete. // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. 
+func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { + c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest + c.operation = operation return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c.urlParams_.Set("requestId", requestId) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionOperationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/operations/{operation}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "operation": c.operation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionOperations.delete" call. +func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Deletes the specified region-specific Operations resource.", + // "flatPath": "projects/{project}/regions/{region}/operations/{operation}", + // "httpMethod": "DELETE", + // "id": "compute.regionOperations.delete", + // "parameterOrder": [ + // "project", + // "region", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/operations/{operation}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionOperations.get": + +type RegionOperationsGetCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves the specified region-specific Operations resource. +// +// - operation: Name of the Operations resource to return. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { + c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { +func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { +func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *RegionOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. +// Do executes the "compute.regionOperations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -132134,19 +151909,20 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap } return ret, nil // { - // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", + // "description": "Retrieves the specified region-specific Operations resource.", + // "flatPath": "projects/{project}/regions/{region}/operations/{operation}", + // "httpMethod": "GET", + // "id": "compute.regionOperations.get", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "operation" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -132158,159 +151934,221 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - // "request": { - // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" - // }, + // "path": "projects/{project}/regions/{region}/operations/{operation}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.setTargetPools": +// method id "compute.regionOperations.list": -type RegionInstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionOperationsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all new instances -// in this group are assigned. Existing instances in the group are not -// affected. +// List: Retrieves a list of Operation resources contained within the +// specified region. // -// - instanceGroupManager: Name of the managed instance group. // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { - c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { + c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal"` `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name.
You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc"`. This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionOperationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request.
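// Editor's note: illustrative sketch only, not part of the generated client.
// The filter grammar documented above joins adjacent parenthesized
// expressions with an implicit AND unless OR is written explicitly. The
// sketch lists only running insert operations in a region; it assumes the
// standard Items slice on OperationList, Operation's operationType and
// status filter fields, and placeholder project/region values.
func exampleListRunningRegionOperations(ctx context.Context, svc *Service) error {
	call := svc.RegionOperations.List("my-project", "us-central1").
		Filter(`(operationType = "insert") (status = "RUNNING")`).
		ReturnPartialSuccess(true)
	// Pages follows nextPageToken automatically, one callback invocation per page.
	return call.Pages(ctx, func(page *OperationList) error {
		for _, op := range page.Items {
			fmt.Println(op.Name, op.Status)
		}
		return nil
	})
}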
-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *RegionOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/operations") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -132322,20 +152160,36 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setTargetPools", + // "description": "Retrieves a list of Operation resources contained within the specified region.", + // "flatPath": "projects/{project}/regions/{region}/operations", + // "httpMethod": "GET", + // "id": "compute.regionOperations.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -132346,83 +152200,92 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - // "request": { - // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" - // }, + // "path": "projects/{project}/regions/{region}/operations", // "response": { - // "$ref": "Operation" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.updatePerInstanceConfigs": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionOperations.wait": + +type RegionOperationsWaitCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdatePerInstanceConfigs: Inserts or updates per-instance configs for -// the managed instance group. perInstanceConfig.name serves as a key -// used to distinguish whether to perform insert or patch. +// Wait: Waits for the specified Operation resource to return as `DONE` +// or for the request to approach the 2 minute deadline, and retrieves +// the specified Operation resource. This method differs from the `GET` +// method in that it waits for no more than the default deadline (2 +// minutes) and then returns the current state of the operation, which +// might be `DONE` or still in progress. This method is called on a +// best-effort basis. Specifically: - In uncommon cases, when the server +// is overloaded, the request might return before the default deadline +// is reached, or might return after zero seconds. - If the default +// deadline is reached, there is no guarantee that the operation is +// actually done when the method returns. Be prepared to retry if the +// operation is not `DONE`. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. +// - operation: Name of the Operations resource to return. // - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. 
-func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { - c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +func (r *RegionOperationsService) Wait(project string, region string, operation string) *RegionOperationsWaitCall { + c := &RegionOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerupdateinstanceconfigreq = regioninstancegroupmanagerupdateinstanceconfigreq - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { - c.urlParams_.Set("requestId", requestId) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { +func (c *RegionOperationsWaitCall) Fields(s ...googleapi.Field) *RegionOperationsWaitCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -132430,21 +152293,21 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...go // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { +func (c *RegionOperationsWaitCall) Context(ctx context.Context) *RegionOperationsWaitCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
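// Editor's note: illustrative sketch only, not part of the generated client.
// As the Wait documentation above stresses, a single Wait call is
// best-effort and bounded by a roughly two-minute server-side deadline, so
// callers should keep calling it until the operation reports DONE. A
// minimal polling sketch with placeholder project/region values; the
// supplied context bounds the overall loop, and inspecting the finished
// operation's error details is left to the caller.
func exampleWaitForRegionOperation(ctx context.Context, svc *Service, opName string) (*Operation, error) {
	for {
		op, err := svc.RegionOperations.Wait("my-project", "us-central1", opName).
			Context(ctx).
			Do()
		if err != nil {
			return nil, err
		}
		if op.Status == "DONE" {
			// Check op.Error (if set) before treating the operation as successful.
			return op, nil
		}
		// Not finished yet: Wait returned early or hit its deadline, so try again.
	}
}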
-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { +func (c *RegionOperationsWaitCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -132452,14 +152315,9 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerupdateinstanceconfigreq) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/operations/{operation}/wait") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -132467,38 +152325,38 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.updatePerInstanceConfigs" call. +// Do executes the "compute.regionOperations.wait" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -132512,19 +152370,20 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo } return ret, nil // { - // "description": "Inserts or updates per-instance configs for the managed instance group. 
perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress. This method is called on a best-effort basis. Specifically: - In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. - If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`. ", + // "flatPath": "projects/{project}/regions/{region}/operations/{operation}/wait", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", + // "id": "compute.regionOperations.wait", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "operation" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -132536,148 +152395,144 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", - // "request": { - // "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" - // }, + // "path": "projects/{project}/regions/{region}/operations/{operation}/wait", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroups.get": +// method id "compute.regionSecurityPolicies.delete": -type RegionInstanceGroupsGetCall struct { - s *Service - project string - region string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionSecurityPoliciesDeleteCall struct { + s *Service + project string + region string + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance group resource. +// Delete: Deletes the specified policy. // -// - instanceGroup: Name of the instance group resource to return. // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { - c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - securityPolicy: Name of the security policy to delete. +func (r *RegionSecurityPoliciesService) Delete(project string, region string, securityPolicy string) *RegionSecurityPoliciesDeleteCall { + c := &RegionSecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup + c.securityPolicy = securityPolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionSecurityPoliciesDeleteCall) RequestId(requestId string) *RegionSecurityPoliciesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
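// Editor's note: illustrative sketch only, not part of the generated client.
// The RequestId setter documented above makes the delete safe to retry:
// the server recognizes a repeated, identical UUID and ignores the
// duplicate request instead of acting twice. The sketch assumes the caller
// supplies a freshly generated, non-zero UUID (from whatever UUID library
// is already in use) and uses placeholder project, region, and policy
// names.
func exampleDeleteRegionSecurityPolicy(ctx context.Context, svc *Service, requestID string) (*Operation, error) {
	return svc.RegionSecurityPolicies.Delete("my-project", "us-central1", "example-policy").
		RequestId(requestID). // reuse the same requestID verbatim on any retry
		Context(ctx).
		Do()
}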
-func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { +func (c *RegionSecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { +func (c *RegionSecurityPoliciesDeleteCall) Context(ctx context.Context) *RegionSecurityPoliciesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsGetCall) Header() http.Header { +func (c *RegionSecurityPoliciesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +// Do executes the "compute.regionSecurityPolicies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &InstanceGroup{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -132689,22 +152544,16 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Returns the specified instance group resource.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.get", + // "description": "Deletes the specified policy.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "httpMethod": "DELETE", + // "id": "compute.regionSecurityPolicies.delete", // "parameterOrder": [ // "project", // "region", - // "instanceGroup" + // "securityPolicy" // ], // "parameters": { - // "instanceGroup": { - // "description": "Name of the instance group resource to return.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -132715,120 +152564,66 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", + // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", // "response": { - // "$ref": "InstanceGroup" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionInstanceGroups.list": +// method id "compute.regionSecurityPolicies.get": -type RegionInstanceGroupsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionSecurityPoliciesGetCall struct { + s *Service + project string + region string + securityPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of instance group resources contained within -// the specified region. +// Get: List all of the ordered rules present in a single specified +// policy. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { - c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - securityPolicy: Name of the security policy to get. +func (r *RegionSecurityPoliciesService) Get(project string, region string, securityPolicy string) *RegionSecurityPoliciesGetCall { + c := &RegionSecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. 
However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *RegionInstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + c.securityPolicy = securityPolicy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { +func (c *RegionSecurityPoliciesGetCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -132838,7 +152633,7 @@ func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInsta // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { +func (c *RegionSecurityPoliciesGetCall) IfNoneMatch(entityTag string) *RegionSecurityPoliciesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -132846,21 +152641,21 @@ func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { +func (c *RegionSecurityPoliciesGetCall) Context(ctx context.Context) *RegionSecurityPoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsListCall) Header() http.Header { +func (c *RegionSecurityPoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -132873,7 +152668,7 @@ func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -132881,39 +152676,40 @@ func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.list" call. -// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RegionInstanceGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionSecurityPolicies.get" call. +// Exactly one of *SecurityPolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SecurityPolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { +func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &RegionInstanceGroupList{ + ret := &SecurityPolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -132925,38 +152721,16 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region } return ret, nil // { - // "description": "Retrieves the list of instance group resources contained within the specified region.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroups", + // "description": "List all of the ordered rules present in a single specified policy.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.list", + // "id": "compute.regionSecurityPolicies.get", // "parameterOrder": [ // "project", - // "region" + // "region", + // "securityPolicy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. 
You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -132967,18 +152741,21 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - // "location": "query", - // "type": "boolean" + // "securityPolicy": { + // "description": "Name of the security policy to get.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroups", + // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", // "response": { - // "$ref": "RegionInstanceGroupList" + // "$ref": "SecurityPolicy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -132989,132 +152766,58 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroups.listInstances": +// method id "compute.regionSecurityPolicies.insert": -type RegionInstanceGroupsListInstancesCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSecurityPoliciesInsertCall struct { + s *Service + project string + region string + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group -// and displays information about the named ports. Depending on the -// specified options, this method can list all instances or only the -// instances that are running. The orderBy query parameter is not -// supported. 
+// Insert: Creates a new policy in the specified project using the data +// included in the request. // -// - instanceGroup: Name of the regional instance group for which we -// want to list the instances. // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { - c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionSecurityPoliciesService) Insert(project string, region string, securitypolicy *SecurityPolicy) *RegionSecurityPoliciesInsertCall { + c := &RegionSecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. 
You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("orderBy", orderBy) + c.securitypolicy = securitypolicy return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("pageToken", pageToken) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionSecurityPoliciesInsertCall) RequestId(requestId string) *RegionSecurityPoliciesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *RegionSecurityPoliciesInsertCall) ValidateOnly(validateOnly bool) *RegionSecurityPoliciesInsertCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { +func (c *RegionSecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -133122,21 +152825,21 @@ func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { +func (c *RegionSecurityPoliciesInsertCall) Context(ctx context.Context) *RegionSecurityPoliciesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { +func (c *RegionSecurityPoliciesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -133144,14 +152847,14 @@ func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -133159,41 +152862,39 @@ func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.listInstances" call. -// Exactly one of *RegionInstanceGroupsListInstances or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupsListInstances.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { +// Do executes the "compute.regionSecurityPolicies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &RegionInstanceGroupsListInstances{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -133205,45 +152906,15 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "description": "Creates a new policy in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.listInstances", + // "id": "compute.regionSecurityPolicies.insert", // "parameterOrder": [ // "project", - // "region", - // "instanceGroup" + // "region" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", - // "location": "query", - // "type": "string" - // }, - // "instanceGroup": { - // "description": "Name of the regional instance group for which we want to list the instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -133254,179 +152925,231 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", // "location": "query", // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "path": "projects/{project}/regions/{region}/securityPolicies", // "request": { - // "$ref": "RegionInstanceGroupsListInstancesRequest" + // "$ref": "SecurityPolicy" // }, // "response": { - // "$ref": "RegionInstanceGroupsListInstances" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroups.setNamedPorts": +// method id "compute.regionSecurityPolicies.list": -type RegionInstanceGroupsSetNamedPortsCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSecurityPoliciesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetNamedPorts: Sets the named ports for the specified regional -// instance group. +// List: List all the policies that have been configured for the +// specified project and region. // -// - instanceGroup: The name of the regional instance group where the -// named ports are updated. // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { - c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionSecurityPoliciesService) List(project string, region string) *RegionSecurityPoliciesListCall { + c := &RegionSecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. 
The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionSecurityPoliciesListCall) Filter(filter string) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionSecurityPoliciesListCall) MaxResults(maxResults int64) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionSecurityPoliciesListCall) OrderBy(orderBy string) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionSecurityPoliciesListCall) PageToken(pageToken string) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionSecurityPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { +func (c *RegionSecurityPoliciesListCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionSecurityPoliciesListCall) IfNoneMatch(entityTag string) *RegionSecurityPoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { +func (c *RegionSecurityPoliciesListCall) Context(ctx context.Context) *RegionSecurityPoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { +func (c *RegionSecurityPoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionSecurityPolicies.list" call. +// Exactly one of *SecurityPolicyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SecurityPolicyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &SecurityPolicyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -133438,20 +153161,36 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the named ports for the specified regional instance group.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.setNamedPorts", + // "description": "List all the policies that have been configured for the specified project and region.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies", + // "httpMethod": "GET", + // "id": "compute.regionSecurityPolicies.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroup" + // "region" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the regional instance group where the named ports are updated.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -133464,52 +153203,78 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - // "request": { - // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" - // }, + // "path": "projects/{project}/regions/{region}/securityPolicies", // "response": { - // "$ref": "Operation" + // "$ref": "SecurityPolicyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstances.bulkInsert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionSecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPolicyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionInstancesBulkInsertCall struct { - s *Service - project string - region string - bulkinsertinstanceresource *BulkInsertInstanceResource - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionSecurityPolicies.patch": + +type RegionSecurityPoliciesPatchCall struct { + s *Service + project string + region string + securityPolicy string + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// BulkInsert: Creates multiple instances in a given region. Count -// specifies the number of instances to create. +// Patch: Patches the specified policy with the data included in the +// request. To clear fields in the rule, leave the fields empty and +// specify them in the updateMask. This cannot be used to be update the +// rules in the policy. 
Please use the per rule methods like addRule, +// patchRule, and removeRule instead. // // - project: Project ID for this request. -// - region: The name of the region for this request. -func (r *RegionInstancesService) BulkInsert(project string, region string, bulkinsertinstanceresource *BulkInsertInstanceResource) *RegionInstancesBulkInsertCall { - c := &RegionInstancesBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +// - securityPolicy: Name of the security policy to update. +func (r *RegionSecurityPoliciesService) Patch(project string, region string, securityPolicy string, securitypolicy *SecurityPolicy) *RegionSecurityPoliciesPatchCall { + c := &RegionSecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.bulkinsertinstanceresource = bulkinsertinstanceresource + c.securityPolicy = securityPolicy + c.securitypolicy = securitypolicy return c } @@ -133524,7 +153289,7 @@ func (r *RegionInstancesService) BulkInsert(project string, region string, bulki // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionInstancesBulkInsertCall) RequestId(requestId string) *RegionInstancesBulkInsertCall { +func (c *RegionSecurityPoliciesPatchCall) RequestId(requestId string) *RegionSecurityPoliciesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -133532,7 +153297,7 @@ func (c *RegionInstancesBulkInsertCall) RequestId(requestId string) *RegionInsta // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstancesBulkInsertCall) Fields(s ...googleapi.Field) *RegionInstancesBulkInsertCall { +func (c *RegionSecurityPoliciesPatchCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -133540,21 +153305,21 @@ func (c *RegionInstancesBulkInsertCall) Fields(s ...googleapi.Field) *RegionInst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstancesBulkInsertCall) Context(ctx context.Context) *RegionInstancesBulkInsertCall { +func (c *RegionSecurityPoliciesPatchCall) Context(ctx context.Context) *RegionSecurityPoliciesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
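// Illustrative sketch (assumption, not part of the vendored file): patching a
// regional security policy with the RegionSecurityPoliciesPatchCall builder
// defined here. It assumes svc is a *compute.Service for this package (for
// example obtained from compute.NewService(ctx)); the project, region, policy
// name and request ID below are placeholder values. Per the method
// description, Patch cannot change the policy's rules, so only top-level
// fields such as the description are touched.
//
//	policy := &compute.SecurityPolicy{Description: "updated description"}
//	op, err := svc.RegionSecurityPolicies.
//		Patch("example-project", "us-central1", "example-policy", policy).
//		RequestId("d2f6a8e4-0b7c-4c1d-9a3e-111111111111"). // optional idempotency key; any valid non-zero UUID
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("patch operation %q is %s", op.Name, op.Status)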
-func (c *RegionInstancesBulkInsertCall) Header() http.Header { +func (c *RegionSecurityPoliciesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstancesBulkInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -133562,52 +153327,53 @@ func (c *RegionInstancesBulkInsertCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertinstanceresource) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instances/bulkInsert") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstances.bulkInsert" call. +// Do executes the "compute.regionSecurityPolicies.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -133621,13 +153387,14 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Creates multiple instances in a given region. Count specifies the number of instances to create.", - // "flatPath": "projects/{project}/regions/{region}/instances/bulkInsert", - // "httpMethod": "POST", - // "id": "compute.regionInstances.bulkInsert", + // "description": "Patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. 
This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "httpMethod": "PATCH", + // "id": "compute.regionSecurityPolicies.patch", // "parameterOrder": [ // "project", - // "region" + // "region", + // "securityPolicy" // ], // "parameters": { // "project": { @@ -133638,7 +153405,7 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -133648,11 +153415,18 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/instances/bulkInsert", + // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", // "request": { - // "$ref": "BulkInsertInstanceResource" + // "$ref": "SecurityPolicy" // }, // "response": { // "$ref": "Operation" @@ -133665,32 +153439,28 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.regionNetworkEndpointGroups.delete": +// method id "compute.regionSslCertificates.delete": -type RegionNetworkEndpointGroupsDeleteCall struct { - s *Service - project string - region string - networkEndpointGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSslCertificatesDeleteCall struct { + s *Service + project string + region string + sslCertificate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified network endpoint group. Note that the -// NEG cannot be deleted if it is configured as a backend of a backend -// service. +// Delete: Deletes the specified SslCertificate resource in the region. // -// - networkEndpointGroup: The name of the network endpoint group to -// delete. It should comply with RFC1035. // - project: Project ID for this request. -// - region: The name of the region where the network endpoint group is -// located. It should comply with RFC1035. 
-func (r *RegionNetworkEndpointGroupsService) Delete(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsDeleteCall { - c := &RegionNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +// - sslCertificate: Name of the SslCertificate resource to delete. +func (r *RegionSslCertificatesService) Delete(project string, region string, sslCertificate string) *RegionSslCertificatesDeleteCall { + c := &RegionSslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.networkEndpointGroup = networkEndpointGroup + c.sslCertificate = sslCertificate return c } @@ -133705,7 +153475,7 @@ func (r *RegionNetworkEndpointGroupsService) Delete(project string, region strin // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionNetworkEndpointGroupsDeleteCall) RequestId(requestId string) *RegionNetworkEndpointGroupsDeleteCall { +func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSslCertificatesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -133713,7 +153483,7 @@ func (c *RegionNetworkEndpointGroupsDeleteCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionNetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsDeleteCall { +func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSslCertificatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -133721,21 +153491,21 @@ func (c *RegionNetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionNetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsDeleteCall { +func (c *RegionSslCertificatesDeleteCall) Context(ctx context.Context) *RegionSslCertificatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionNetworkEndpointGroupsDeleteCall) Header() http.Header { +func (c *RegionSslCertificatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -133745,7 +153515,7 @@ func (c *RegionNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Res var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -133753,38 +153523,38 @@ func (c *RegionNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "region": c.region, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionNetworkEndpointGroups.delete" call. +// Do executes the "compute.regionSslCertificates.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -133798,22 +153568,16 @@ func (c *RegionNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", - // "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + // "description": "Deletes the specified SslCertificate resource in the region.", + // "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", // "httpMethod": "DELETE", - // "id": "compute.regionNetworkEndpointGroups.delete", + // "id": "compute.regionSslCertificates.delete", // "parameterOrder": [ // "project", // "region", - // "networkEndpointGroup" + // "sslCertificate" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -133822,8 +153586,9 @@ func (c *RegionNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -133831,9 +153596,16 @@ func (c *RegionNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + // "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", // "response": { // "$ref": "Operation" // }, @@ -133845,39 +153617,38 @@ func (c *RegionNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionNetworkEndpointGroups.get": +// method id "compute.regionSslCertificates.get": -type RegionNetworkEndpointGroupsGetCall struct { - s *Service - project string - region string - networkEndpointGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionSslCertificatesGetCall struct { + s *Service + project string + region string + sslCertificate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified network endpoint group. Gets a list of -// available network endpoint groups by making a list() request. +// Get: Returns the specified SslCertificate resource in the specified +// region. Get a list of available SSL certificates by making a list() +// request. // -// - networkEndpointGroup: The name of the network endpoint group. It -// should comply with RFC1035. // - project: Project ID for this request. -// - region: The name of the region where the network endpoint group is -// located. It should comply with RFC1035. -func (r *RegionNetworkEndpointGroupsService) Get(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsGetCall { - c := &RegionNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +// - sslCertificate: Name of the SslCertificate resource to return. +func (r *RegionSslCertificatesService) Get(project string, region string, sslCertificate string) *RegionSslCertificatesGetCall { + c := &RegionSslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.networkEndpointGroup = networkEndpointGroup + c.sslCertificate = sslCertificate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
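// Illustrative sketch (assumption, not part of the vendored file): reading a
// single regional SslCertificate with the Get call defined here. svc is
// assumed to be a *compute.Service for this package (for example from
// compute.NewService(ctx)); the project, region and certificate names are
// placeholders.
//
//	cert, err := svc.RegionSslCertificates.
//		Get("example-project", "us-central1", "example-cert").
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("found %s (%s)", cert.Name, cert.SelfLink)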
-func (c *RegionNetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsGetCall { +func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCertificatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -133887,7 +153658,7 @@ func (c *RegionNetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *Regio // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionNetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *RegionNetworkEndpointGroupsGetCall { +func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslCertificatesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -133895,21 +153666,21 @@ func (c *RegionNetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *Regi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionNetworkEndpointGroupsGetCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsGetCall { +func (c *RegionSslCertificatesGetCall) Context(ctx context.Context) *RegionSslCertificatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionNetworkEndpointGroupsGetCall) Header() http.Header { +func (c *RegionSslCertificatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -133922,7 +153693,7 @@ func (c *RegionNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -133930,40 +153701,40 @@ func (c *RegionNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "region": c.region, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionNetworkEndpointGroups.get" call. -// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NetworkEndpointGroup.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionSslCertificates.get" call. +// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *SslCertificate.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { +func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkEndpointGroup{ + ret := &SslCertificate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -133975,22 +153746,16 @@ func (c *RegionNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", - // "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + // "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", + // "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", // "httpMethod": "GET", - // "id": "compute.regionNetworkEndpointGroups.get", + // "id": "compute.regionSslCertificates.get", // "parameterOrder": [ // "project", // "region", - // "networkEndpointGroup" + // "sslCertificate" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -133999,15 +153764,23 @@ func (c *RegionNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "region": { - // "description": "The name of the region where the network endpoint group is located. 
It should comply with RFC1035.", + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + // "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", // "response": { - // "$ref": "NetworkEndpointGroup" + // "$ref": "SslCertificate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -134018,29 +153791,28 @@ func (c *RegionNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionNetworkEndpointGroups.insert": +// method id "compute.regionSslCertificates.insert": -type RegionNetworkEndpointGroupsInsertCall struct { - s *Service - project string - region string - networkendpointgroup *NetworkEndpointGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSslCertificatesInsertCall struct { + s *Service + project string + region string + sslcertificate *SslCertificate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a network endpoint group in the specified project -// using the parameters that are included in the request. +// Insert: Creates a SslCertificate resource in the specified project +// and region using the data included in the request // // - project: Project ID for this request. -// - region: The name of the region where you want to create the network -// endpoint group. It should comply with RFC1035. -func (r *RegionNetworkEndpointGroupsService) Insert(project string, region string, networkendpointgroup *NetworkEndpointGroup) *RegionNetworkEndpointGroupsInsertCall { - c := &RegionNetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionSslCertificatesService) Insert(project string, region string, sslcertificate *SslCertificate) *RegionSslCertificatesInsertCall { + c := &RegionSslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.networkendpointgroup = networkendpointgroup + c.sslcertificate = sslcertificate return c } @@ -134055,7 +153827,7 @@ func (r *RegionNetworkEndpointGroupsService) Insert(project string, region strin // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionNetworkEndpointGroupsInsertCall) RequestId(requestId string) *RegionNetworkEndpointGroupsInsertCall { +func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSslCertificatesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -134063,7 +153835,7 @@ func (c *RegionNetworkEndpointGroupsInsertCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionNetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsInsertCall { +func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSslCertificatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -134071,21 +153843,21 @@ func (c *RegionNetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionNetworkEndpointGroupsInsertCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsInsertCall { +func (c *RegionSslCertificatesInsertCall) Context(ctx context.Context) *RegionSslCertificatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionNetworkEndpointGroupsInsertCall) Header() http.Header { +func (c *RegionSslCertificatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -134093,14 +153865,14 @@ func (c *RegionNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -134114,31 +153886,31 @@ func (c *RegionNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionNetworkEndpointGroups.insert" call. +// Do executes the "compute.regionSslCertificates.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -134152,10 +153924,10 @@ func (c *RegionNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", - // "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", + // "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", + // "flatPath": "projects/{project}/regions/{region}/sslCertificates", // "httpMethod": "POST", - // "id": "compute.regionNetworkEndpointGroups.insert", + // "id": "compute.regionSslCertificates.insert", // "parameterOrder": [ // "project", // "region" @@ -134169,8 +153941,9 @@ func (c *RegionNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", + // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -134180,9 +153953,9 @@ func (c *RegionNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/networkEndpointGroups", + // "path": "projects/{project}/regions/{region}/sslCertificates", // "request": { - // "$ref": "NetworkEndpointGroup" + // "$ref": "SslCertificate" // }, // "response": { // "$ref": "Operation" @@ -134195,9 +153968,9 @@ func (c *RegionNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionNetworkEndpointGroups.list": +// method id "compute.regionSslCertificates.list": -type RegionNetworkEndpointGroupsListCall struct { +type RegionSslCertificatesListCall struct { s *Service project string region string @@ -134207,33 +153980,35 @@ type RegionNetworkEndpointGroupsListCall struct { header_ http.Header } -// List: Retrieves the list of regional network endpoint groups -// available to the specified project in the given region. +// List: Retrieves the list of SslCertificate resources available to the +// specified project in the specified region. // // - project: Project ID for this request. -// - region: The name of the region where the network endpoint group is -// located. It should comply with RFC1035. -func (r *RegionNetworkEndpointGroupsService) List(project string, region string) *RegionNetworkEndpointGroupsListCall { - c := &RegionNetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. 
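// Illustrative sketch (assumption, not part of the vendored file): creating a
// regional SslCertificate with the Insert call defined above and inspecting
// the returned Operation. svc is assumed to be a *compute.Service for this
// package; certPEM and keyPEM are placeholder values holding PEM-encoded
// material, and the project, region and resource names are placeholders too.
//
//	cert := &compute.SslCertificate{
//		Name:        "example-cert",
//		Certificate: string(certPEM), // certificate chain, PEM
//		PrivateKey:  string(keyPEM),  // private key, PEM
//	}
//	op, err := svc.RegionSslCertificates.
//		Insert("example-project", "us-central1", cert).
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("insert operation %q is %s", op.Name, op.Status)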
+func (r *RegionSslCertificatesService) List(project string, region string) *RegionSslCertificatesListCall { + c := &RegionSslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -134241,8 +154016,18 @@ func (r *RegionNetworkEndpointGroupsService) List(project string, region string) // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionNetworkEndpointGroupsListCall) Filter(filter string) *RegionNetworkEndpointGroupsListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertificatesListCall { c.urlParams_.Set("filter", filter) return c } @@ -134253,7 +154038,7 @@ func (c *RegionNetworkEndpointGroupsListCall) Filter(filter string) *RegionNetwo // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionNetworkEndpointGroupsListCall) MaxResults(maxResults int64) *RegionNetworkEndpointGroupsListCall { +func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslCertificatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -134267,7 +154052,7 @@ func (c *RegionNetworkEndpointGroupsListCall) MaxResults(maxResults int64) *Regi // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *RegionNetworkEndpointGroupsListCall) OrderBy(orderBy string) *RegionNetworkEndpointGroupsListCall { +func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertificatesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -134275,7 +154060,7 @@ func (c *RegionNetworkEndpointGroupsListCall) OrderBy(orderBy string) *RegionNet // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionNetworkEndpointGroupsListCall) PageToken(pageToken string) *RegionNetworkEndpointGroupsListCall { +func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCertificatesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -134284,7 +154069,7 @@ func (c *RegionNetworkEndpointGroupsListCall) PageToken(pageToken string) *Regio // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionNetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNetworkEndpointGroupsListCall { +func (c *RegionSslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslCertificatesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -134292,7 +154077,7 @@ func (c *RegionNetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartial // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionNetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsListCall { +func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslCertificatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -134302,7 +154087,7 @@ func (c *RegionNetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *Regi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionNetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *RegionNetworkEndpointGroupsListCall { +func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSslCertificatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -134310,21 +154095,21 @@ func (c *RegionNetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionNetworkEndpointGroupsListCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsListCall { +func (c *RegionSslCertificatesListCall) Context(ctx context.Context) *RegionSslCertificatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionNetworkEndpointGroupsListCall) Header() http.Header { +func (c *RegionSslCertificatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -134337,7 +154122,7 @@ func (c *RegionNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Respo var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -134351,33 +154136,33 @@ func (c *RegionNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionNetworkEndpointGroups.list" call. -// Exactly one of *NetworkEndpointGroupList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +// Do executes the "compute.regionSslCertificates.list" call. +// Exactly one of *SslCertificateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SslCertificateList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
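// Illustrative sketch (assumption, not part of the vendored file): fetching a
// single page of regional SslCertificate resources with the List call and the
// optional Filter, MaxResults and OrderBy parameters documented above. svc is
// assumed to be a *compute.Service for this package; project and region are
// placeholders.
//
//	page, err := svc.RegionSslCertificates.
//		List("example-project", "us-central1").
//		Filter("name != example-cert").
//		MaxResults(50).
//		OrderBy("creationTimestamp desc").
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, cert := range page.Items {
//		log.Println(cert.Name)
//	}
//	// A non-empty page.NextPageToken can be passed to PageToken for the next
//	// request, or use Pages (below) to iterate every page automatically.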
-func (c *RegionNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { +func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkEndpointGroupList{ + ret := &SslCertificateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -134389,17 +154174,17 @@ func (c *RegionNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", - // "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", + // "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", + // "flatPath": "projects/{project}/regions/{region}/sslCertificates", // "httpMethod": "GET", - // "id": "compute.regionNetworkEndpointGroups.list", + // "id": "compute.regionSslCertificates.list", // "parameterOrder": [ // "project", // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -134429,8 +154214,9 @@ func (c *RegionNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "region": { - // "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -134440,9 +154226,9 @@ func (c *RegionNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/networkEndpointGroups", + // "path": "projects/{project}/regions/{region}/sslCertificates", // "response": { - // "$ref": "NetworkEndpointGroupList" + // "$ref": "SslCertificateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -134456,7 +154242,7 @@ func (c *RegionNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
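// Illustrative sketch (assumption, not part of the vendored file): iterating
// every page of regional SslCertificate results with Pages, which performs the
// nextPageToken bookkeeping described above. svc is assumed to be a
// *compute.Service for this package; project and region are placeholders.
//
//	err := svc.RegionSslCertificates.
//		List("example-project", "us-central1").
//		Pages(ctx, func(page *compute.SslCertificateList) error {
//			for _, cert := range page.Items {
//				log.Println(cert.Name)
//			}
//			return nil // returning a non-nil error halts the iteration
//		})
//	if err != nil {
//		log.Fatal(err)
//	}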
-func (c *RegionNetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { +func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -134474,30 +154260,31 @@ func (c *RegionNetworkEndpointGroupsListCall) Pages(ctx context.Context, f func( } } -// method id "compute.regionNotificationEndpoints.delete": +// method id "compute.regionSslPolicies.delete": -type RegionNotificationEndpointsDeleteCall struct { - s *Service - project string - region string - notificationEndpoint string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSslPoliciesDeleteCall struct { + s *Service + project string + region string + sslPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified NotificationEndpoint in the given -// region +// Delete: Deletes the specified SSL policy. The SSL policy resource can +// be deleted only if it is not in use by any TargetHttpsProxy or +// TargetSslProxy resources. // -// - notificationEndpoint: Name of the NotificationEndpoint resource to -// delete. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionNotificationEndpointsService) Delete(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsDeleteCall { - c := &RegionNotificationEndpointsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - sslPolicy: Name of the SSL policy to delete. The name must be 1-63 +// characters long, and comply with RFC1035. +func (r *RegionSslPoliciesService) Delete(project string, region string, sslPolicy string) *RegionSslPoliciesDeleteCall { + c := &RegionSslPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.notificationEndpoint = notificationEndpoint + c.sslPolicy = sslPolicy return c } @@ -134512,7 +154299,7 @@ func (r *RegionNotificationEndpointsService) Delete(project string, region strin // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionNotificationEndpointsDeleteCall) RequestId(requestId string) *RegionNotificationEndpointsDeleteCall { +func (c *RegionSslPoliciesDeleteCall) RequestId(requestId string) *RegionSslPoliciesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -134520,7 +154307,7 @@ func (c *RegionNotificationEndpointsDeleteCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionNotificationEndpointsDeleteCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsDeleteCall { +func (c *RegionSslPoliciesDeleteCall) Fields(s ...googleapi.Field) *RegionSslPoliciesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -134528,21 +154315,21 @@ func (c *RegionNotificationEndpointsDeleteCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionNotificationEndpointsDeleteCall) Context(ctx context.Context) *RegionNotificationEndpointsDeleteCall { +func (c *RegionSslPoliciesDeleteCall) Context(ctx context.Context) *RegionSslPoliciesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionNotificationEndpointsDeleteCall) Header() http.Header { +func (c *RegionSslPoliciesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionNotificationEndpointsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -134552,7 +154339,7 @@ func (c *RegionNotificationEndpointsDeleteCall) doRequest(alt string) (*http.Res var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -134560,38 +154347,38 @@ func (c *RegionNotificationEndpointsDeleteCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "notificationEndpoint": c.notificationEndpoint, + "project": c.project, + "region": c.region, + "sslPolicy": c.sslPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionNotificationEndpoints.delete" call. +// Do executes the "compute.regionSslPolicies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -134605,23 +154392,16 @@ func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the specified NotificationEndpoint in the given region", - // "flatPath": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + // "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", // "httpMethod": "DELETE", - // "id": "compute.regionNotificationEndpoints.delete", + // "id": "compute.regionSslPolicies.delete", // "parameterOrder": [ // "project", // "region", - // "notificationEndpoint" + // "sslPolicy" // ], // "parameters": { - // "notificationEndpoint": { - // "description": "Name of the NotificationEndpoint resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -134640,9 +154420,15 @@ func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "sslPolicy": { + // "description": "Name of the SSL policy to delete. 
The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + // "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", // "response": { // "$ref": "Operation" // }, @@ -134654,38 +154440,38 @@ func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionNotificationEndpoints.get": +// method id "compute.regionSslPolicies.get": -type RegionNotificationEndpointsGetCall struct { - s *Service - project string - region string - notificationEndpoint string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionSslPoliciesGetCall struct { + s *Service + project string + region string + sslPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified NotificationEndpoint resource in the given -// region. +// Get: Lists all of the ordered rules present in a single specified +// policy. // -// - notificationEndpoint: Name of the NotificationEndpoint resource to -// return. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionNotificationEndpointsService) Get(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsGetCall { - c := &RegionNotificationEndpointsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. +func (r *RegionSslPoliciesService) Get(project string, region string, sslPolicy string) *RegionSslPoliciesGetCall { + c := &RegionSslPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.notificationEndpoint = notificationEndpoint + c.sslPolicy = sslPolicy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionNotificationEndpointsGetCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsGetCall { +func (c *RegionSslPoliciesGetCall) Fields(s ...googleapi.Field) *RegionSslPoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -134695,7 +154481,7 @@ func (c *RegionNotificationEndpointsGetCall) Fields(s ...googleapi.Field) *Regio // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionNotificationEndpointsGetCall) IfNoneMatch(entityTag string) *RegionNotificationEndpointsGetCall { +func (c *RegionSslPoliciesGetCall) IfNoneMatch(entityTag string) *RegionSslPoliciesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -134703,21 +154489,21 @@ func (c *RegionNotificationEndpointsGetCall) IfNoneMatch(entityTag string) *Regi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionNotificationEndpointsGetCall) Context(ctx context.Context) *RegionNotificationEndpointsGetCall { +func (c *RegionSslPoliciesGetCall) Context(ctx context.Context) *RegionSslPoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionNotificationEndpointsGetCall) Header() http.Header { +func (c *RegionSslPoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionNotificationEndpointsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -134730,7 +154516,7 @@ func (c *RegionNotificationEndpointsGetCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -134738,40 +154524,40 @@ func (c *RegionNotificationEndpointsGetCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "notificationEndpoint": c.notificationEndpoint, + "project": c.project, + "region": c.region, + "sslPolicy": c.sslPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionNotificationEndpoints.get" call. -// Exactly one of *NotificationEndpoint or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NotificationEndpoint.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (*NotificationEndpoint, error) { +// Do executes the "compute.regionSslPolicies.get" call. +// Exactly one of *SslPolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SslPolicy.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NotificationEndpoint{ + ret := &SslPolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -134783,23 +154569,16 @@ func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns the specified NotificationEndpoint resource in the given region.", - // "flatPath": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + // "description": "Lists all of the ordered rules present in a single specified policy.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", // "httpMethod": "GET", - // "id": "compute.regionNotificationEndpoints.get", + // "id": "compute.regionSslPolicies.get", // "parameterOrder": [ // "project", // "region", - // "notificationEndpoint" + // "sslPolicy" // ], // "parameters": { - // "notificationEndpoint": { - // "description": "Name of the NotificationEndpoint resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -134813,11 +154592,17 @@ func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (* // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "sslPolicy": { + // "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + // "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", // "response": { - // "$ref": "NotificationEndpoint" + // "$ref": "SslPolicy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -134828,28 +154613,28 @@ func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionNotificationEndpoints.insert": +// method id "compute.regionSslPolicies.insert": -type RegionNotificationEndpointsInsertCall struct { - s *Service - project string - region string - notificationendpoint *NotificationEndpoint - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSslPoliciesInsertCall struct { + s *Service + project string + region string + sslpolicy *SslPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Create a NotificationEndpoint in the specified project in the -// given region using the parameters that are included in the request. +// Insert: Creates a new policy in the specified project and region +// using the data included in the request. // // - project: Project ID for this request. // - region: Name of the region scoping this request. 
-func (r *RegionNotificationEndpointsService) Insert(project string, region string, notificationendpoint *NotificationEndpoint) *RegionNotificationEndpointsInsertCall { - c := &RegionNotificationEndpointsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionSslPoliciesService) Insert(project string, region string, sslpolicy *SslPolicy) *RegionSslPoliciesInsertCall { + c := &RegionSslPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.notificationendpoint = notificationendpoint + c.sslpolicy = sslpolicy return c } @@ -134864,7 +154649,7 @@ func (r *RegionNotificationEndpointsService) Insert(project string, region strin // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionNotificationEndpointsInsertCall) RequestId(requestId string) *RegionNotificationEndpointsInsertCall { +func (c *RegionSslPoliciesInsertCall) RequestId(requestId string) *RegionSslPoliciesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -134872,7 +154657,7 @@ func (c *RegionNotificationEndpointsInsertCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionNotificationEndpointsInsertCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsInsertCall { +func (c *RegionSslPoliciesInsertCall) Fields(s ...googleapi.Field) *RegionSslPoliciesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -134880,21 +154665,21 @@ func (c *RegionNotificationEndpointsInsertCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionNotificationEndpointsInsertCall) Context(ctx context.Context) *RegionNotificationEndpointsInsertCall { +func (c *RegionSslPoliciesInsertCall) Context(ctx context.Context) *RegionSslPoliciesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionNotificationEndpointsInsertCall) Header() http.Header { +func (c *RegionSslPoliciesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionNotificationEndpointsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -134902,14 +154687,14 @@ func (c *RegionNotificationEndpointsInsertCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.notificationendpoint) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -134923,31 +154708,31 @@ func (c *RegionNotificationEndpointsInsertCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionNotificationEndpoints.insert" call. +// Do executes the "compute.regionSslPolicies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionNotificationEndpointsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -134961,10 +154746,10 @@ func (c *RegionNotificationEndpointsInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Create a NotificationEndpoint in the specified project in the given region using the parameters that are included in the request.", - // "flatPath": "projects/{project}/regions/{region}/notificationEndpoints", + // "description": "Creates a new policy in the specified project and region using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies", // "httpMethod": "POST", - // "id": "compute.regionNotificationEndpoints.insert", + // "id": "compute.regionSslPolicies.insert", // "parameterOrder": [ // "project", // "region" @@ -134990,9 +154775,9 @@ func (c *RegionNotificationEndpointsInsertCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/notificationEndpoints", + // "path": "projects/{project}/regions/{region}/sslPolicies", // "request": { - // "$ref": "NotificationEndpoint" + // "$ref": "SslPolicy" // }, // "response": { // "$ref": "Operation" @@ -135005,9 +154790,9 @@ func (c *RegionNotificationEndpointsInsertCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionNotificationEndpoints.list": +// method id "compute.regionSslPolicies.list": -type RegionNotificationEndpointsListCall struct { +type RegionSslPoliciesListCall struct { s *Service project string region string @@ -135017,32 +154802,35 @@ type RegionNotificationEndpointsListCall struct { header_ http.Header } -// List: Lists the NotificationEndpoints for a project in the given -// region. +// List: Lists all the SSL policies that have been configured for the +// specified project and region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionNotificationEndpointsService) List(project string, region string) *RegionNotificationEndpointsListCall { - c := &RegionNotificationEndpointsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionSslPoliciesService) List(project string, region string) *RegionSslPoliciesListCall { + c := &RegionSslPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. 
For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -135050,8 +154838,18 @@ func (r *RegionNotificationEndpointsService) List(project string, region string) // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionNotificationEndpointsListCall) Filter(filter string) *RegionNotificationEndpointsListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionSslPoliciesListCall) Filter(filter string) *RegionSslPoliciesListCall { c.urlParams_.Set("filter", filter) return c } @@ -135062,7 +154860,7 @@ func (c *RegionNotificationEndpointsListCall) Filter(filter string) *RegionNotif // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *RegionNotificationEndpointsListCall) MaxResults(maxResults int64) *RegionNotificationEndpointsListCall { +func (c *RegionSslPoliciesListCall) MaxResults(maxResults int64) *RegionSslPoliciesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -135076,7 +154874,7 @@ func (c *RegionNotificationEndpointsListCall) MaxResults(maxResults int64) *Regi // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *RegionNotificationEndpointsListCall) OrderBy(orderBy string) *RegionNotificationEndpointsListCall { +func (c *RegionSslPoliciesListCall) OrderBy(orderBy string) *RegionSslPoliciesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -135084,7 +154882,7 @@ func (c *RegionNotificationEndpointsListCall) OrderBy(orderBy string) *RegionNot // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionNotificationEndpointsListCall) PageToken(pageToken string) *RegionNotificationEndpointsListCall { +func (c *RegionSslPoliciesListCall) PageToken(pageToken string) *RegionSslPoliciesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -135093,7 +154891,7 @@ func (c *RegionNotificationEndpointsListCall) PageToken(pageToken string) *Regio // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionNotificationEndpointsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNotificationEndpointsListCall { +func (c *RegionSslPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslPoliciesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -135101,7 +154899,7 @@ func (c *RegionNotificationEndpointsListCall) ReturnPartialSuccess(returnPartial // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionNotificationEndpointsListCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsListCall { +func (c *RegionSslPoliciesListCall) Fields(s ...googleapi.Field) *RegionSslPoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -135111,7 +154909,7 @@ func (c *RegionNotificationEndpointsListCall) Fields(s ...googleapi.Field) *Regi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionNotificationEndpointsListCall) IfNoneMatch(entityTag string) *RegionNotificationEndpointsListCall { +func (c *RegionSslPoliciesListCall) IfNoneMatch(entityTag string) *RegionSslPoliciesListCall { c.ifNoneMatch_ = entityTag return c } @@ -135119,21 +154917,21 @@ func (c *RegionNotificationEndpointsListCall) IfNoneMatch(entityTag string) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionNotificationEndpointsListCall) Context(ctx context.Context) *RegionNotificationEndpointsListCall { +func (c *RegionSslPoliciesListCall) Context(ctx context.Context) *RegionSslPoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionNotificationEndpointsListCall) Header() http.Header { +func (c *RegionSslPoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionNotificationEndpointsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -135146,7 +154944,7 @@ func (c *RegionNotificationEndpointsListCall) doRequest(alt string) (*http.Respo var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/notificationEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -135160,33 +154958,33 @@ func (c *RegionNotificationEndpointsListCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionNotificationEndpoints.list" call. -// Exactly one of *NotificationEndpointList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *NotificationEndpointList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionSslPolicies.list" call. +// Exactly one of *SslPoliciesList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SslPoliciesList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) (*NotificationEndpointList, error) { +func (c *RegionSslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NotificationEndpointList{ + ret := &SslPoliciesList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -135198,17 +154996,17 @@ func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Lists the NotificationEndpoints for a project in the given region.", - // "flatPath": "projects/{project}/regions/{region}/notificationEndpoints", + // "description": "Lists all the SSL policies that have been configured for the specified project and region.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies", // "httpMethod": "GET", - // "id": "compute.regionNotificationEndpoints.list", + // "id": "compute.regionSslPolicies.list", // "parameterOrder": [ // "project", // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -135250,9 +155048,9 @@ func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) ( // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/notificationEndpoints", + // "path": "projects/{project}/regions/{region}/sslPolicies", // "response": { - // "$ref": "NotificationEndpointList" + // "$ref": "SslPoliciesList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -135266,7 +155064,7 @@ func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) ( // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionNotificationEndpointsListCall) Pages(ctx context.Context, f func(*NotificationEndpointList) error) error { +func (c *RegionSslPoliciesListCall) Pages(ctx context.Context, f func(*SslPoliciesList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -135284,310 +155082,9 @@ func (c *RegionNotificationEndpointsListCall) Pages(ctx context.Context, f func( } } -// method id "compute.regionOperations.delete": - -type RegionOperationsDeleteCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified region-specific Operations resource. -// -// - operation: Name of the Operations resource to delete. -// - project: Project ID for this request. -// - region: Name of the region for this request. 
-func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { - c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.operation = operation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionOperationsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/operations/{operation}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionOperations.delete" call. -func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Deletes the specified region-specific Operations resource.", - // "flatPath": "projects/{project}/regions/{region}/operations/{operation}", - // "httpMethod": "DELETE", - // "id": "compute.regionOperations.delete", - // "parameterOrder": [ - // "project", - // "region", - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/regions/{region}/operations/{operation}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionOperations.get": - -type RegionOperationsGetCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Retrieves the specified region-specific Operations resource. -// -// - operation: Name of the Operations resource to return. -// - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { - c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.operation = operation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionOperationsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/operations/{operation}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionOperations.get" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the specified region-specific Operations resource.", - // "flatPath": "projects/{project}/regions/{region}/operations/{operation}", - // "httpMethod": "GET", - // "id": "compute.regionOperations.get", - // "parameterOrder": [ - // "project", - // "region", - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/regions/{region}/operations/{operation}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // 
"https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionOperations.list": +// method id "compute.regionSslPolicies.listAvailableFeatures": -type RegionOperationsListCall struct { +type RegionSslPoliciesListAvailableFeaturesCall struct { s *Service project string region string @@ -135597,32 +155094,35 @@ type RegionOperationsListCall struct { header_ http.Header } -// List: Retrieves a list of Operation resources contained within the -// specified region. +// ListAvailableFeatures: Lists all features that can be specified in +// the SSL policy when using custom profile. // // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { - c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +func (r *RegionSslPoliciesService) ListAvailableFeatures(project string, region string) *RegionSslPoliciesListAvailableFeaturesCall { + c := &RegionSslPoliciesListAvailableFeaturesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. 
You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -135630,8 +155130,18 @@ func (r *RegionOperationsService) List(project string, region string) *RegionOpe // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionSslPoliciesListAvailableFeaturesCall) Filter(filter string) *RegionSslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("filter", filter) return c } @@ -135642,7 +155152,7 @@ func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCa // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { +func (c *RegionSslPoliciesListAvailableFeaturesCall) MaxResults(maxResults int64) *RegionSslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -135656,7 +155166,7 @@ func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperation // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { +func (c *RegionSslPoliciesListAvailableFeaturesCall) OrderBy(orderBy string) *RegionSslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -135664,7 +155174,7 @@ func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { +func (c *RegionSslPoliciesListAvailableFeaturesCall) PageToken(pageToken string) *RegionSslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -135673,7 +155183,7 @@ func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperations // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. 
-func (c *RegionOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionOperationsListCall { +func (c *RegionSslPoliciesListAvailableFeaturesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -135681,7 +155191,7 @@ func (c *RegionOperationsListCall) ReturnPartialSuccess(returnPartialSuccess boo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { +func (c *RegionSslPoliciesListAvailableFeaturesCall) Fields(s ...googleapi.Field) *RegionSslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -135691,7 +155201,7 @@ func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperation // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { +func (c *RegionSslPoliciesListAvailableFeaturesCall) IfNoneMatch(entityTag string) *RegionSslPoliciesListAvailableFeaturesCall { c.ifNoneMatch_ = entityTag return c } @@ -135699,21 +155209,21 @@ func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperatio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { +func (c *RegionSslPoliciesListAvailableFeaturesCall) Context(ctx context.Context) *RegionSslPoliciesListAvailableFeaturesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionOperationsListCall) Header() http.Header { +func (c *RegionSslPoliciesListAvailableFeaturesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -135726,7 +155236,7 @@ func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -135740,33 +155250,35 @@ func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +// Do executes the "compute.regionSslPolicies.listAvailableFeatures" call. +// Exactly one of *SslPoliciesListAvailableFeaturesResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *SslPoliciesListAvailableFeaturesResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionSslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) (*SslPoliciesListAvailableFeaturesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &OperationList{ + ret := &SslPoliciesListAvailableFeaturesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -135778,17 +155290,17 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified region.", - // "flatPath": "projects/{project}/regions/{region}/operations", + // "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures", // "httpMethod": "GET", - // "id": "compute.regionOperations.list", + // "id": "compute.regionSslPolicies.listAvailableFeatures", // "parameterOrder": [ // "project", // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -135818,7 +155330,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -135830,80 +155342,68 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/operations", + // "path": "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures", // "response": { - // "$ref": "OperationList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } + // "$ref": "SslPoliciesListAvailableFeaturesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + } -// method id "compute.regionOperations.wait": +// method id "compute.regionSslPolicies.patch": -type RegionOperationsWaitCall struct { +type RegionSslPoliciesPatchCall struct { s *Service project string region string - operation string + sslPolicy string + sslpolicy *SslPolicy urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Wait: Waits for the specified Operation resource to return as `DONE` -// or for the request to approach the 2 minute deadline, and retrieves -// the specified Operation resource. This method differs from the `GET` -// method in that it waits for no more than the default deadline (2 -// minutes) and then returns the current state of the operation, which -// might be `DONE` or still in progress. This method is called on a -// best-effort basis. Specifically: - In uncommon cases, when the server -// is overloaded, the request might return before the default deadline -// is reached, or might return after zero seconds. - If the default -// deadline is reached, there is no guarantee that the operation is -// actually done when the method returns. Be prepared to retry if the -// operation is not `DONE`. +// Patch: Patches the specified SSL policy with the data included in the +// request. // -// - operation: Name of the Operations resource to return. -// - project: Project ID for this request. -// - region: Name of the region for this request. 
-func (r *RegionOperationsService) Wait(project string, region string, operation string) *RegionOperationsWaitCall { - c := &RegionOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. +func (r *RegionSslPoliciesService) Patch(project string, region string, sslPolicy string, sslpolicy *SslPolicy) *RegionSslPoliciesPatchCall { + c := &RegionSslPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation + c.sslPolicy = sslPolicy + c.sslpolicy = sslpolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionSslPoliciesPatchCall) RequestId(requestId string) *RegionSslPoliciesPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsWaitCall) Fields(s ...googleapi.Field) *RegionOperationsWaitCall { +func (c *RegionSslPoliciesPatchCall) Fields(s ...googleapi.Field) *RegionSslPoliciesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -135911,21 +155411,21 @@ func (c *RegionOperationsWaitCall) Fields(s ...googleapi.Field) *RegionOperation // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsWaitCall) Context(ctx context.Context) *RegionOperationsWaitCall { +func (c *RegionSslPoliciesPatchCall) Context(ctx context.Context) *RegionSslPoliciesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
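Editorial aside: a sketch of the requestId-based retry deduplication described in the Patch doc comment above, assuming the compute/v1 client from the earlier sketch plus github.com/google/uuid; the policy name and field value are hypothetical.

// patchMinTLS shows an idempotent regionSslPolicies.patch call (illustrative only;
// assumes context, log, compute "google.golang.org/api/compute/v1",
// and uuid "github.com/google/uuid" are imported).
func patchMinTLS(ctx context.Context, svc *compute.Service) error {
	call := svc.RegionSslPolicies.Patch("my-project", "us-central1", "my-ssl-policy",
		&compute.SslPolicy{MinTlsVersion: "TLS_1_2"})
	// Reuse the same UUID if the request is retried so the server can ignore
	// the duplicate instead of applying the patch twice.
	call.RequestId(uuid.New().String())
	op, err := call.Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("patch operation %s is %s", op.Name, op.Status)
	return nil
}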
-func (c *RegionOperationsWaitCall) Header() http.Header { +func (c *RegionSslPoliciesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -135933,11 +155433,16 @@ func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/operations/{operation}/wait") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -135945,36 +155450,36 @@ func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "operation": c.operation, + "sslPolicy": c.sslPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.wait" call. +// Do executes the "compute.regionSslPolicies.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -135988,23 +155493,16 @@ func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress. This method is called on a best-effort basis. 
Specifically: - In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. - If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`. ", - // "flatPath": "projects/{project}/regions/{region}/operations/{operation}/wait", - // "httpMethod": "POST", - // "id": "compute.regionOperations.wait", + // "description": "Patches the specified SSL policy with the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", + // "httpMethod": "PATCH", + // "id": "compute.regionSslPolicies.patch", // "parameterOrder": [ // "project", // "region", - // "operation" + // "sslPolicy" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -136013,48 +155511,61 @@ func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sslPolicy": { + // "description": "Name of the SSL policy to update. 
The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/operations/{operation}/wait", + // "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", + // "request": { + // "$ref": "SslPolicy" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionSslCertificates.delete": +// method id "compute.regionTargetHttpProxies.delete": -type RegionSslCertificatesDeleteCall struct { - s *Service - project string - region string - sslCertificate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpProxiesDeleteCall struct { + s *Service + project string + region string + targetHttpProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified SslCertificate resource in the region. +// Delete: Deletes the specified TargetHttpProxy resource. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -// - sslCertificate: Name of the SslCertificate resource to delete. -func (r *RegionSslCertificatesService) Delete(project string, region string, sslCertificate string) *RegionSslCertificatesDeleteCall { - c := &RegionSslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - targetHttpProxy: Name of the TargetHttpProxy resource to delete. +func (r *RegionTargetHttpProxiesService) Delete(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesDeleteCall { + c := &RegionTargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.sslCertificate = sslCertificate + c.targetHttpProxy = targetHttpProxy return c } @@ -136069,7 +155580,7 @@ func (r *RegionSslCertificatesService) Delete(project string, region string, ssl // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSslCertificatesDeleteCall { +func (c *RegionTargetHttpProxiesDeleteCall) RequestId(requestId string) *RegionTargetHttpProxiesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -136077,7 +155588,7 @@ func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSsl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSslCertificatesDeleteCall { +func (c *RegionTargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -136085,21 +155596,21 @@ func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSs // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionSslCertificatesDeleteCall) Context(ctx context.Context) *RegionSslCertificatesDeleteCall { +func (c *RegionTargetHttpProxiesDeleteCall) Context(ctx context.Context) *RegionTargetHttpProxiesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesDeleteCall) Header() http.Header { +func (c *RegionTargetHttpProxiesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -136109,7 +155620,7 @@ func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -136117,38 +155628,38 @@ func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "sslCertificate": c.sslCertificate, + "project": c.project, + "region": c.region, + "targetHttpProxy": c.targetHttpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.delete" call. +// Do executes the "compute.regionTargetHttpProxies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -136162,14 +155673,14 @@ func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified SslCertificate resource in the region.", - // "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "description": "Deletes the specified TargetHttpProxy resource.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", // "httpMethod": "DELETE", - // "id": "compute.regionSslCertificates.delete", + // "id": "compute.regionTargetHttpProxies.delete", // "parameterOrder": [ // "project", // "region", - // "sslCertificate" + // "targetHttpProxy" // ], // "parameters": { // "project": { @@ -136191,15 +155702,15 @@ func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "location": "query", // "type": "string" // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to delete.", + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", // "response": { // "$ref": "Operation" // }, @@ -136211,38 +155722,38 @@ func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionSslCertificates.get": +// method id "compute.regionTargetHttpProxies.get": -type RegionSslCertificatesGetCall struct { - s *Service - project string - region string - sslCertificate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionTargetHttpProxiesGetCall struct { + s *Service + project string + region string + targetHttpProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified SslCertificate resource in the specified -// region. Get a list of available SSL certificates by making a list() -// request. +// Get: Returns the specified TargetHttpProxy resource in the specified +// region. Gets a list of available target HTTP proxies by making a +// list() request. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -// - sslCertificate: Name of the SslCertificate resource to return. -func (r *RegionSslCertificatesService) Get(project string, region string, sslCertificate string) *RegionSslCertificatesGetCall { - c := &RegionSslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - targetHttpProxy: Name of the TargetHttpProxy resource to return. 
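Editorial aside: a sketch of the IfNoneMatch / googleapi.IsNotModified pattern that the doc comments above describe, assuming an ETag cached from a previous response and the "google.golang.org/api/googleapi" package; all resource names are hypothetical.

// getProxyIfChanged refetches a TargetHttpProxy only when it has changed since
// the cached ETag (illustrative only).
func getProxyIfChanged(ctx context.Context, svc *compute.Service, cachedETag string) (*compute.TargetHttpProxy, error) {
	proxy, err := svc.RegionTargetHttpProxies.Get("my-project", "us-central1", "my-proxy").
		IfNoneMatch(cachedETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged since cachedETag: keep using the cached copy
	}
	return proxy, err
}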
+func (r *RegionTargetHttpProxiesService) Get(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesGetCall { + c := &RegionTargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.sslCertificate = sslCertificate + c.targetHttpProxy = targetHttpProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCertificatesGetCall { +func (c *RegionTargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -136252,7 +155763,7 @@ func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslCertificatesGetCall { +func (c *RegionTargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetHttpProxiesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -136260,21 +155771,21 @@ func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSslCertificatesGetCall) Context(ctx context.Context) *RegionSslCertificatesGetCall { +func (c *RegionTargetHttpProxiesGetCall) Context(ctx context.Context) *RegionTargetHttpProxiesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesGetCall) Header() http.Header { +func (c *RegionTargetHttpProxiesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -136287,7 +155798,7 @@ func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -136295,40 +155806,40 @@ func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "sslCertificate": c.sslCertificate, + "project": c.project, + "region": c.region, + "targetHttpProxy": c.targetHttpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.get" call. -// Exactly one of *SslCertificate or error will be non-nil. 
Any non-2xx +// Do executes the "compute.regionTargetHttpProxies.get" call. +// Exactly one of *TargetHttpProxy or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SslCertificate.ServerResponse.Header or (if a response was returned +// *TargetHttpProxy.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { +func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &SslCertificate{ + ret := &TargetHttpProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -136340,14 +155851,14 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer } return ret, nil // { - // "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", - // "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "description": "Returns the specified TargetHttpProxy resource in the specified region. 
Gets a list of available target HTTP proxies by making a list() request.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", // "httpMethod": "GET", - // "id": "compute.regionSslCertificates.get", + // "id": "compute.regionTargetHttpProxies.get", // "parameterOrder": [ // "project", // "region", - // "sslCertificate" + // "targetHttpProxy" // ], // "parameters": { // "project": { @@ -136364,17 +155875,17 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer // "required": true, // "type": "string" // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to return.", + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", // "response": { - // "$ref": "SslCertificate" + // "$ref": "TargetHttpProxy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -136385,28 +155896,28 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer } -// method id "compute.regionSslCertificates.insert": +// method id "compute.regionTargetHttpProxies.insert": -type RegionSslCertificatesInsertCall struct { - s *Service - project string - region string - sslcertificate *SslCertificate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpProxiesInsertCall struct { + s *Service + project string + region string + targethttpproxy *TargetHttpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a SslCertificate resource in the specified project -// and region using the data included in the request +// Insert: Creates a TargetHttpProxy resource in the specified project +// and region using the data included in the request. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionSslCertificatesService) Insert(project string, region string, sslcertificate *SslCertificate) *RegionSslCertificatesInsertCall { - c := &RegionSslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionTargetHttpProxiesService) Insert(project string, region string, targethttpproxy *TargetHttpProxy) *RegionTargetHttpProxiesInsertCall { + c := &RegionTargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.sslcertificate = sslcertificate + c.targethttpproxy = targethttpproxy return c } @@ -136421,7 +155932,7 @@ func (r *RegionSslCertificatesService) Insert(project string, region string, ssl // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSslCertificatesInsertCall { +func (c *RegionTargetHttpProxiesInsertCall) RequestId(requestId string) *RegionTargetHttpProxiesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -136429,7 +155940,7 @@ func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSsl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSslCertificatesInsertCall { +func (c *RegionTargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -136437,21 +155948,21 @@ func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSs // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSslCertificatesInsertCall) Context(ctx context.Context) *RegionSslCertificatesInsertCall { +func (c *RegionTargetHttpProxiesInsertCall) Context(ctx context.Context) *RegionTargetHttpProxiesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesInsertCall) Header() http.Header { +func (c *RegionTargetHttpProxiesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -136459,14 +155970,14 @@ func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -136480,31 +155991,31 @@ func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.insert" call. +// Do executes the "compute.regionTargetHttpProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -136518,10 +156029,10 @@ func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", - // "flatPath": "projects/{project}/regions/{region}/sslCertificates", + // "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", // "httpMethod": "POST", - // "id": "compute.regionSslCertificates.insert", + // "id": "compute.regionTargetHttpProxies.insert", // "parameterOrder": [ // "project", // "region" @@ -136547,9 +156058,9 @@ func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/sslCertificates", + // "path": "projects/{project}/regions/{region}/targetHttpProxies", // "request": { - // "$ref": "SslCertificate" + // "$ref": "TargetHttpProxy" // }, // "response": { // "$ref": "Operation" @@ -136562,9 +156073,9 @@ func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionSslCertificates.list": +// method id "compute.regionTargetHttpProxies.list": -type RegionSslCertificatesListCall struct { +type RegionTargetHttpProxiesListCall struct { s *Service project string region string @@ -136574,32 +156085,35 @@ type RegionSslCertificatesListCall struct { header_ http.Header } -// List: Retrieves the list of SslCertificate resources available to the -// specified project in the specified region. +// List: Retrieves the list of TargetHttpProxy resources available to +// the specified project in the specified region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionSslCertificatesService) List(project string, region string) *RegionSslCertificatesListCall { - c := &RegionSslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionTargetHttpProxiesService) List(project string, region string) *RegionTargetHttpProxiesListCall { + c := &RegionTargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. 
For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -136607,8 +156121,18 @@ func (r *RegionSslCertificatesService) List(project string, region string) *Regi // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertificatesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionTargetHttpProxiesListCall) Filter(filter string) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("filter", filter) return c } @@ -136619,7 +156143,7 @@ func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertific // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. 
Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -136633,7 +156157,7 @@ func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslC // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -136641,7 +156165,7 @@ func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertif // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpProxiesListCall) PageToken(pageToken string) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -136650,7 +156174,7 @@ func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCe // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionSslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -136658,7 +156182,7 @@ func (c *RegionSslCertificatesListCall) ReturnPartialSuccess(returnPartialSucces // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -136668,7 +156192,7 @@ func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetHttpProxiesListCall { c.ifNoneMatch_ = entityTag return c } @@ -136676,21 +156200,21 @@ func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSsl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionSslCertificatesListCall) Context(ctx context.Context) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpProxiesListCall) Context(ctx context.Context) *RegionTargetHttpProxiesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesListCall) Header() http.Header { +func (c *RegionTargetHttpProxiesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -136703,7 +156227,7 @@ func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -136717,33 +156241,33 @@ func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.list" call. -// Exactly one of *SslCertificateList or error will be non-nil. Any +// Do executes the "compute.regionTargetHttpProxies.list" call. +// Exactly one of *TargetHttpProxyList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *SslCertificateList.ServerResponse.Header or (if a response was +// *TargetHttpProxyList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { +func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &SslCertificateList{ + ret := &TargetHttpProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -136755,17 +156279,17 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe } return ret, nil // { - // "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", - // "flatPath": "projects/{project}/regions/{region}/sslCertificates", + // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", // "httpMethod": "GET", - // "id": "compute.regionSslCertificates.list", + // "id": "compute.regionTargetHttpProxies.list", // "parameterOrder": [ // "project", // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -136807,9 +156331,9 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/sslCertificates", + // "path": "projects/{project}/regions/{region}/targetHttpProxies", // "response": { - // "$ref": "SslCertificateList" + // "$ref": "TargetHttpProxyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -136823,7 +156347,7 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { +func (c *RegionTargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpProxyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -136841,28 +156365,30 @@ func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCe } } -// method id "compute.regionTargetHttpProxies.delete": +// method id "compute.regionTargetHttpProxies.setUrlMap": -type RegionTargetHttpProxiesDeleteCall struct { +type RegionTargetHttpProxiesSetUrlMapCall struct { s *Service project string region string targetHttpProxy string + urlmapreference *UrlMapReference urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified TargetHttpProxy resource. +// SetUrlMap: Changes the URL map for TargetHttpProxy. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -// - targetHttpProxy: Name of the TargetHttpProxy resource to delete. 
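Editorial aside: a sketch of walking every results page with the generated Pages helper shown in this hunk, which follows nextPageToken until it is empty; project and region are hypothetical.

// listAllRegionProxies prints every regional TargetHttpProxy, one page at a time
// (illustrative only).
func listAllRegionProxies(ctx context.Context, svc *compute.Service) error {
	return svc.RegionTargetHttpProxies.List("my-project", "us-central1").
		MaxResults(50). // smaller pages, just to exercise the pager
		Pages(ctx, func(page *compute.TargetHttpProxyList) error {
			for _, p := range page.Items {
				fmt.Println(p.Name)
			}
			return nil // returning a non-nil error stops the iteration
		})
}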
-func (r *RegionTargetHttpProxiesService) Delete(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesDeleteCall { - c := &RegionTargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - targetHttpProxy: Name of the TargetHttpProxy to set a URL map for. +func (r *RegionTargetHttpProxiesService) SetUrlMap(project string, region string, targetHttpProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpProxiesSetUrlMapCall { + c := &RegionTargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.targetHttpProxy = targetHttpProxy + c.urlmapreference = urlmapreference return c } @@ -136877,7 +156403,7 @@ func (r *RegionTargetHttpProxiesService) Delete(project string, region string, t // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionTargetHttpProxiesDeleteCall) RequestId(requestId string) *RegionTargetHttpProxiesDeleteCall { +func (c *RegionTargetHttpProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpProxiesSetUrlMapCall { c.urlParams_.Set("requestId", requestId) return c } @@ -136885,7 +156411,7 @@ func (c *RegionTargetHttpProxiesDeleteCall) RequestId(requestId string) *RegionT // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesDeleteCall { +func (c *RegionTargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesSetUrlMapCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -136893,21 +156419,21 @@ func (c *RegionTargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *Region // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpProxiesDeleteCall) Context(ctx context.Context) *RegionTargetHttpProxiesDeleteCall { +func (c *RegionTargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpProxiesSetUrlMapCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionTargetHttpProxiesDeleteCall) Header() http.Header { +func (c *RegionTargetHttpProxiesSetUrlMapCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -136915,11 +156441,16 @@ func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -136932,31 +156463,31 @@ func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpProxies.delete" call. +// Do executes the "compute.regionTargetHttpProxies.setUrlMap" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -136970,10 +156501,10 @@ func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Deletes the specified TargetHttpProxy resource.", - // "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", - // "httpMethod": "DELETE", - // "id": "compute.regionTargetHttpProxies.delete", + // "description": "Changes the URL map for TargetHttpProxy.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpProxies.setUrlMap", // "parameterOrder": [ // "project", // "region", @@ -137000,14 +156531,17 @@ func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*O // "type": "string" // }, // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy resource to delete.", + // "description": "Name of the TargetHttpProxy to set a URL map for.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + // "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, // "response": { // "$ref": "Operation" // }, @@ -137019,38 +156553,216 @@ func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.regionTargetHttpProxies.get": +// method id "compute.regionTargetHttpsProxies.delete": -type RegionTargetHttpProxiesGetCall struct { - s *Service - project string - region string - targetHttpProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionTargetHttpsProxiesDeleteCall struct { + s *Service + project string + region string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified TargetHttpProxy resource in the specified +// Delete: Deletes the specified TargetHttpsProxy resource. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to delete. +func (r *RegionTargetHttpsProxiesService) Delete(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesDeleteCall { + c := &RegionTargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpsProxy = targetHttpsProxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpsProxiesDeleteCall) RequestId(requestId string) *RegionTargetHttpsProxiesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpsProxiesDeleteCall) Context(ctx context.Context) *RegionTargetHttpsProxiesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpsProxiesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpsProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetHttpsProxy resource.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + // "httpMethod": "DELETE", + // "id": "compute.regionTargetHttpsProxies.delete", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpsProxies.get": + +type RegionTargetHttpsProxiesGetCall struct { + s *Service + project string + region string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified TargetHttpsProxy resource in the specified // region. Gets a list of available target HTTP proxies by making a // list() request. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -// - targetHttpProxy: Name of the TargetHttpProxy resource to return. 
-func (r *RegionTargetHttpProxiesService) Get(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesGetCall { - c := &RegionTargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to return. +func (r *RegionTargetHttpsProxiesService) Get(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesGetCall { + c := &RegionTargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targetHttpProxy = targetHttpProxy + c.targetHttpsProxy = targetHttpsProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesGetCall { +func (c *RegionTargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -137060,7 +156772,7 @@ func (c *RegionTargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *RegionTar // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionTargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetHttpProxiesGetCall { +func (c *RegionTargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetHttpsProxiesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -137068,21 +156780,21 @@ func (c *RegionTargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *RegionTa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpProxiesGetCall) Context(ctx context.Context) *RegionTargetHttpProxiesGetCall { +func (c *RegionTargetHttpsProxiesGetCall) Context(ctx context.Context) *RegionTargetHttpsProxiesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpProxiesGetCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -137095,7 +156807,7 @@ func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -137103,40 +156815,40 @@ func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpProxy": c.targetHttpProxy, + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpProxies.get" call. -// Exactly one of *TargetHttpProxy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *TargetHttpProxy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionTargetHttpsProxies.get" call. +// Exactly one of *TargetHttpsProxy or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetHttpsProxy.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxy, error) { +func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TargetHttpProxy{ + ret := &TargetHttpsProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -137148,14 +156860,14 @@ func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*Targ } return ret, nil // { - // "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", - // "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + // "description": "Returns the specified TargetHttpsProxy resource in the specified region. 
Gets a list of available target HTTP proxies by making a list() request.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", // "httpMethod": "GET", - // "id": "compute.regionTargetHttpProxies.get", + // "id": "compute.regionTargetHttpsProxies.get", // "parameterOrder": [ // "project", // "region", - // "targetHttpProxy" + // "targetHttpsProxy" // ], // "parameters": { // "project": { @@ -137172,17 +156884,17 @@ func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*Targ // "required": true, // "type": "string" // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy resource to return.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", // "response": { - // "$ref": "TargetHttpProxy" + // "$ref": "TargetHttpsProxy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -137193,28 +156905,28 @@ func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*Targ } -// method id "compute.regionTargetHttpProxies.insert": +// method id "compute.regionTargetHttpsProxies.insert": -type RegionTargetHttpProxiesInsertCall struct { - s *Service - project string - region string - targethttpproxy *TargetHttpProxy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpsProxiesInsertCall struct { + s *Service + project string + region string + targethttpsproxy *TargetHttpsProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a TargetHttpProxy resource in the specified project +// Insert: Creates a TargetHttpsProxy resource in the specified project // and region using the data included in the request. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionTargetHttpProxiesService) Insert(project string, region string, targethttpproxy *TargetHttpProxy) *RegionTargetHttpProxiesInsertCall { - c := &RegionTargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionTargetHttpsProxiesService) Insert(project string, region string, targethttpsproxy *TargetHttpsProxy) *RegionTargetHttpsProxiesInsertCall { + c := &RegionTargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targethttpproxy = targethttpproxy + c.targethttpsproxy = targethttpsproxy return c } @@ -137229,7 +156941,7 @@ func (r *RegionTargetHttpProxiesService) Insert(project string, region string, t // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionTargetHttpProxiesInsertCall) RequestId(requestId string) *RegionTargetHttpProxiesInsertCall { +func (c *RegionTargetHttpsProxiesInsertCall) RequestId(requestId string) *RegionTargetHttpsProxiesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -137237,7 +156949,7 @@ func (c *RegionTargetHttpProxiesInsertCall) RequestId(requestId string) *RegionT // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesInsertCall { +func (c *RegionTargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -137245,21 +156957,21 @@ func (c *RegionTargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *Region // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpProxiesInsertCall) Context(ctx context.Context) *RegionTargetHttpProxiesInsertCall { +func (c *RegionTargetHttpsProxiesInsertCall) Context(ctx context.Context) *RegionTargetHttpsProxiesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpProxiesInsertCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -137267,14 +156979,14 @@ func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -137288,31 +157000,31 @@ func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpProxies.insert" call. +// Do executes the "compute.regionTargetHttpsProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -137326,10 +157038,10 @@ func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", + // "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", // "httpMethod": "POST", - // "id": "compute.regionTargetHttpProxies.insert", + // "id": "compute.regionTargetHttpsProxies.insert", // "parameterOrder": [ // "project", // "region" @@ -137355,9 +157067,9 @@ func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*O // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/targetHttpProxies", + // "path": "projects/{project}/regions/{region}/targetHttpsProxies", // "request": { - // "$ref": "TargetHttpProxy" + // "$ref": "TargetHttpsProxy" // }, // "response": { // "$ref": "Operation" @@ -137370,9 +157082,9 @@ func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.regionTargetHttpProxies.list": +// method id "compute.regionTargetHttpsProxies.list": -type RegionTargetHttpProxiesListCall struct { +type RegionTargetHttpsProxiesListCall struct { s *Service project string region string @@ -137382,32 +157094,35 @@ type RegionTargetHttpProxiesListCall struct { header_ http.Header } -// List: Retrieves the list of TargetHttpProxy resources available to +// List: Retrieves the list of TargetHttpsProxy resources available to // the specified project in the specified region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionTargetHttpProxiesService) List(project string, region string) *RegionTargetHttpProxiesListCall { - c := &RegionTargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionTargetHttpsProxiesService) List(project string, region string) *RegionTargetHttpsProxiesListCall { + c := &RegionTargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. 
The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -137415,8 +157130,18 @@ func (r *RegionTargetHttpProxiesService) List(project string, region string) *Re // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionTargetHttpProxiesListCall) Filter(filter string) *RegionTargetHttpProxiesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionTargetHttpsProxiesListCall) Filter(filter string) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("filter", filter) return c } @@ -137427,7 +157152,7 @@ func (c *RegionTargetHttpProxiesListCall) Filter(filter string) *RegionTargetHtt // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *RegionTargetHttpProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpProxiesListCall { +func (c *RegionTargetHttpsProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -137441,7 +157166,7 @@ func (c *RegionTargetHttpProxiesListCall) MaxResults(maxResults int64) *RegionTa // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *RegionTargetHttpProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpProxiesListCall { +func (c *RegionTargetHttpsProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -137449,7 +157174,7 @@ func (c *RegionTargetHttpProxiesListCall) OrderBy(orderBy string) *RegionTargetH // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionTargetHttpProxiesListCall) PageToken(pageToken string) *RegionTargetHttpProxiesListCall { +func (c *RegionTargetHttpsProxiesListCall) PageToken(pageToken string) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -137458,7 +157183,7 @@ func (c *RegionTargetHttpProxiesListCall) PageToken(pageToken string) *RegionTar // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionTargetHttpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetHttpProxiesListCall { +func (c *RegionTargetHttpsProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -137466,7 +157191,7 @@ func (c *RegionTargetHttpProxiesListCall) ReturnPartialSuccess(returnPartialSucc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesListCall { +func (c *RegionTargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -137476,7 +157201,7 @@ func (c *RegionTargetHttpProxiesListCall) Fields(s ...googleapi.Field) *RegionTa // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionTargetHttpProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetHttpProxiesListCall { +func (c *RegionTargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetHttpsProxiesListCall { c.ifNoneMatch_ = entityTag return c } @@ -137484,21 +157209,21 @@ func (c *RegionTargetHttpProxiesListCall) IfNoneMatch(entityTag string) *RegionT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionTargetHttpProxiesListCall) Context(ctx context.Context) *RegionTargetHttpProxiesListCall { +func (c *RegionTargetHttpsProxiesListCall) Context(ctx context.Context) *RegionTargetHttpsProxiesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpProxiesListCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -137511,7 +157236,7 @@ func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -137525,33 +157250,33 @@ func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpProxies.list" call. -// Exactly one of *TargetHttpProxyList or error will be non-nil. Any +// Do executes the "compute.regionTargetHttpsProxies.list" call. +// Exactly one of *TargetHttpsProxyList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TargetHttpProxyList.ServerResponse.Header or (if a response was +// *TargetHttpsProxyList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyList, error) { +func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TargetHttpProxyList{ + ret := &TargetHttpsProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -137563,17 +157288,17 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar } return ret, nil // { - // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", - // "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", + // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", // "httpMethod": "GET", - // "id": "compute.regionTargetHttpProxies.list", + // "id": "compute.regionTargetHttpsProxies.list", // "parameterOrder": [ // "project", // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -137615,9 +157340,9 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/targetHttpProxies", + // "path": "projects/{project}/regions/{region}/targetHttpsProxies", // "response": { - // "$ref": "TargetHttpProxyList" + // "$ref": "TargetHttpsProxyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -137631,7 +157356,7 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionTargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpProxyList) error) error { +func (c *RegionTargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -137649,30 +157374,32 @@ func (c *RegionTargetHttpProxiesListCall) Pages(ctx context.Context, f func(*Tar } } -// method id "compute.regionTargetHttpProxies.setUrlMap": +// method id "compute.regionTargetHttpsProxies.patch": -type RegionTargetHttpProxiesSetUrlMapCall struct { - s *Service - project string - region string - targetHttpProxy string - urlmapreference *UrlMapReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpsProxiesPatchCall struct { + s *Service + project string + region string + targetHttpsProxy string + targethttpsproxy *TargetHttpsProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpProxy. 
+// Patch: Patches the specified regional TargetHttpsProxy resource with +// the data included in the request. This method supports PATCH +// semantics and uses JSON merge patch format and processing rules. // // - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetHttpProxy: Name of the TargetHttpProxy to set a URL map for. -func (r *RegionTargetHttpProxiesService) SetUrlMap(project string, region string, targetHttpProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpProxiesSetUrlMapCall { - c := &RegionTargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to patch. +func (r *RegionTargetHttpsProxiesService) Patch(project string, region string, targetHttpsProxy string, targethttpsproxy *TargetHttpsProxy) *RegionTargetHttpsProxiesPatchCall { + c := &RegionTargetHttpsProxiesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targetHttpProxy = targetHttpProxy - c.urlmapreference = urlmapreference + c.targetHttpsProxy = targetHttpsProxy + c.targethttpsproxy = targethttpsproxy return c } @@ -137687,7 +157414,7 @@ func (r *RegionTargetHttpProxiesService) SetUrlMap(project string, region string // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionTargetHttpProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpProxiesSetUrlMapCall { +func (c *RegionTargetHttpsProxiesPatchCall) RequestId(requestId string) *RegionTargetHttpsProxiesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -137695,7 +157422,7 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) RequestId(requestId string) *Regi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesSetUrlMapCall { +func (c *RegionTargetHttpsProxiesPatchCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -137703,21 +157430,21 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpProxiesSetUrlMapCall { +func (c *RegionTargetHttpsProxiesPatchCall) Context(ctx context.Context) *RegionTargetHttpsProxiesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionTargetHttpProxiesSetUrlMapCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -137725,54 +157452,243 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpProxy": c.targetHttpProxy, + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpProxies.setUrlMap" call. +// Do executes the "compute.regionTargetHttpsProxies.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpsProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified regional TargetHttpsProxy resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + // "httpMethod": "PATCH", + // "id": "compute.regionTargetHttpsProxies.patch", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + // "request": { + // "$ref": "TargetHttpsProxy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpsProxies.setSslCertificates": + +type RegionTargetHttpsProxiesSetSslCertificatesCall struct { + s *Service + project string + region string + targetHttpsProxy string + regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an +// SslCertificates resource for. 
+func (r *RegionTargetHttpsProxiesService) SetSslCertificates(project string, region string, targetHttpsProxy string, regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest) *RegionTargetHttpsProxiesSetSslCertificatesCall { + c := &RegionTargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpsProxy = targetHttpsProxy + c.regiontargethttpsproxiessetsslcertificatesrequest = regiontargethttpsproxiessetsslcertificatesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetSslCertificatesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetSslCertificatesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetSslCertificatesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiontargethttpsproxiessetsslcertificatesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpsProxies.setSslCertificates" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { return nil, err } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, @@ -137785,14 +157701,14 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the URL map for TargetHttpProxy.", - // "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + // "description": "Replaces SslCertificates for TargetHttpsProxy.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", // "httpMethod": "POST", - // "id": "compute.regionTargetHttpProxies.setUrlMap", + // "id": "compute.regionTargetHttpsProxies.setSslCertificates", // "parameterOrder": [ // "project", // "region", - // "targetHttpProxy" + // "targetHttpsProxy" // ], // "parameters": { // "project": { @@ -137814,17 +157730,17 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) // "location": "query", // "type": "string" // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy to set a URL map for.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", // "request": { - // "$ref": "UrlMapReference" + // "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" // }, // "response": { // "$ref": "Operation" @@ -137837,28 +157753,31 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionTargetHttpsProxies.delete": +// method id "compute.regionTargetHttpsProxies.setUrlMap": -type RegionTargetHttpsProxiesDeleteCall struct { +type RegionTargetHttpsProxiesSetUrlMapCall struct { s *Service project string region string targetHttpsProxy string + urlmapreference *UrlMapReference urlParams_ 
gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified TargetHttpsProxy resource. +// SetUrlMap: Changes the URL map for TargetHttpsProxy. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource to delete. -func (r *RegionTargetHttpsProxiesService) Delete(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesDeleteCall { - c := &RegionTargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy to set a URL map +// for. +func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region string, targetHttpsProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpsProxiesSetUrlMapCall { + c := &RegionTargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.targetHttpsProxy = targetHttpsProxy + c.urlmapreference = urlmapreference return c } @@ -137873,7 +157792,7 @@ func (r *RegionTargetHttpsProxiesService) Delete(project string, region string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionTargetHttpsProxiesDeleteCall) RequestId(requestId string) *RegionTargetHttpsProxiesDeleteCall { +func (c *RegionTargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetUrlMapCall { c.urlParams_.Set("requestId", requestId) return c } @@ -137881,7 +157800,7 @@ func (c *RegionTargetHttpsProxiesDeleteCall) RequestId(requestId string) *Region // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesDeleteCall { +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetUrlMapCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -137889,21 +157808,21 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *Regio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpsProxiesDeleteCall) Context(ctx context.Context) *RegionTargetHttpsProxiesDeleteCall { +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetUrlMapCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionTargetHttpsProxiesDeleteCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -137911,11 +157830,16 @@ func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -137928,31 +157852,31 @@ func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpsProxies.delete" call. +// Do executes the "compute.regionTargetHttpsProxies.setUrlMap" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -137966,10 +157890,10 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Deletes the specified TargetHttpsProxy resource.", - // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", - // "httpMethod": "DELETE", - // "id": "compute.regionTargetHttpsProxies.delete", + // "description": "Changes the URL map for TargetHttpsProxy.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpsProxies.setUrlMap", // "parameterOrder": [ // "project", // "region", @@ -137996,14 +157920,17 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to delete.", + // "description": "Name of the TargetHttpsProxy to set a URL map for.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, // "response": { // "$ref": "Operation" // }, @@ -138015,38 +157942,214 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionTargetHttpsProxies.get": +// method id "compute.regionTargetTcpProxies.delete": -type RegionTargetHttpsProxiesGetCall struct { - s *Service - project string - region string - targetHttpsProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionTargetTcpProxiesDeleteCall struct { + s *Service + project string + region string + targetTcpProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified TargetHttpsProxy resource in the specified -// region. Gets a list of available target HTTP proxies by making a -// list() request. +// Delete: Deletes the specified TargetTcpProxy resource. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource to return. -func (r *RegionTargetHttpsProxiesService) Get(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesGetCall { - c := &RegionTargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - targetTcpProxy: Name of the TargetTcpProxy resource to delete. 
+func (r *RegionTargetTcpProxiesService) Delete(project string, region string, targetTcpProxy string) *RegionTargetTcpProxiesDeleteCall { + c := &RegionTargetTcpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targetHttpsProxy = targetHttpsProxy + c.targetTcpProxy = targetTcpProxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionTargetTcpProxiesDeleteCall) RequestId(requestId string) *RegionTargetTcpProxiesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesGetCall { +func (c *RegionTargetTcpProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetTcpProxiesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetTcpProxiesDeleteCall) Context(ctx context.Context) *RegionTargetTcpProxiesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetTcpProxiesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetTcpProxy": c.targetTcpProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetTcpProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetTcpProxy resource.", + // "flatPath": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + // "httpMethod": "DELETE", + // "id": "compute.regionTargetTcpProxies.delete", + // "parameterOrder": [ + // "project", + // "region", + // "targetTcpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetTcpProxy": { + // "description": "Name of the TargetTcpProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetTcpProxies.get": + +type RegionTargetTcpProxiesGetCall struct { + s *Service + project string + region string + targetTcpProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified TargetTcpProxy resource. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
+// - targetTcpProxy: Name of the TargetTcpProxy resource to return. +func (r *RegionTargetTcpProxiesService) Get(project string, region string, targetTcpProxy string) *RegionTargetTcpProxiesGetCall { + c := &RegionTargetTcpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetTcpProxy = targetTcpProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetTcpProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetTcpProxiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -138056,7 +158159,7 @@ func (c *RegionTargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *RegionTa // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionTargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetHttpsProxiesGetCall { +func (c *RegionTargetTcpProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetTcpProxiesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -138064,21 +158167,21 @@ func (c *RegionTargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *RegionT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpsProxiesGetCall) Context(ctx context.Context) *RegionTargetHttpsProxiesGetCall { +func (c *RegionTargetTcpProxiesGetCall) Context(ctx context.Context) *RegionTargetTcpProxiesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpsProxiesGetCall) Header() http.Header { +func (c *RegionTargetTcpProxiesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -138091,7 +158194,7 @@ func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -138099,40 +158202,40 @@ func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, + "region": c.region, + "targetTcpProxy": c.targetTcpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpsProxies.get" call. -// Exactly one of *TargetHttpsProxy or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *TargetHttpsProxy.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionTargetTcpProxies.get" call. +// Exactly one of *TargetTcpProxy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetTcpProxy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxy, error) { +func (c *RegionTargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TargetHttpsProxy{ + ret := &TargetTcpProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -138144,14 +158247,14 @@ func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*Tar } return ret, nil // { - // "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", - // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + // "description": "Returns the specified TargetTcpProxy resource.", + // "flatPath": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", // "httpMethod": "GET", - // "id": "compute.regionTargetHttpsProxies.get", + // "id": "compute.regionTargetTcpProxies.get", // "parameterOrder": [ // "project", // "region", - // "targetHttpsProxy" + // "targetTcpProxy" // ], // "parameters": { // "project": { @@ -138168,17 +158271,17 @@ func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*Tar // "required": true, // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to return.", + // "targetTcpProxy": { + // "description": "Name of the TargetTcpProxy resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + // "path": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", // "response": { - // "$ref": "TargetHttpsProxy" + // "$ref": "TargetTcpProxy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -138189,28 +158292,28 @@ func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*Tar } -// method id "compute.regionTargetHttpsProxies.insert": +// method id "compute.regionTargetTcpProxies.insert": -type RegionTargetHttpsProxiesInsertCall struct { - s *Service - project string - region string - targethttpsproxy *TargetHttpsProxy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetTcpProxiesInsertCall 
struct { + s *Service + project string + region string + targettcpproxy *TargetTcpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a TargetHttpsProxy resource in the specified project +// Insert: Creates a TargetTcpProxy resource in the specified project // and region using the data included in the request. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionTargetHttpsProxiesService) Insert(project string, region string, targethttpsproxy *TargetHttpsProxy) *RegionTargetHttpsProxiesInsertCall { - c := &RegionTargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionTargetTcpProxiesService) Insert(project string, region string, targettcpproxy *TargetTcpProxy) *RegionTargetTcpProxiesInsertCall { + c := &RegionTargetTcpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targethttpsproxy = targethttpsproxy + c.targettcpproxy = targettcpproxy return c } @@ -138225,7 +158328,7 @@ func (r *RegionTargetHttpsProxiesService) Insert(project string, region string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionTargetHttpsProxiesInsertCall) RequestId(requestId string) *RegionTargetHttpsProxiesInsertCall { +func (c *RegionTargetTcpProxiesInsertCall) RequestId(requestId string) *RegionTargetTcpProxiesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -138233,7 +158336,7 @@ func (c *RegionTargetHttpsProxiesInsertCall) RequestId(requestId string) *Region // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesInsertCall { +func (c *RegionTargetTcpProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetTcpProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -138241,21 +158344,21 @@ func (c *RegionTargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *Regio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpsProxiesInsertCall) Context(ctx context.Context) *RegionTargetHttpsProxiesInsertCall { +func (c *RegionTargetTcpProxiesInsertCall) Context(ctx context.Context) *RegionTargetTcpProxiesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionTargetHttpsProxiesInsertCall) Header() http.Header { +func (c *RegionTargetTcpProxiesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -138263,14 +158366,14 @@ func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targettcpproxy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetTcpProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -138284,31 +158387,31 @@ func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpsProxies.insert" call. +// Do executes the "compute.regionTargetTcpProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -138322,10 +158425,10 @@ func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", + // "description": "Creates a TargetTcpProxy resource in the specified project and region using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/targetTcpProxies", // "httpMethod": "POST", - // "id": "compute.regionTargetHttpsProxies.insert", + // "id": "compute.regionTargetTcpProxies.insert", // "parameterOrder": [ // "project", // "region" @@ -138351,9 +158454,9 @@ func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/targetHttpsProxies", + // "path": "projects/{project}/regions/{region}/targetTcpProxies", // "request": { - // "$ref": "TargetHttpsProxy" + // "$ref": "TargetTcpProxy" // }, // "response": { // "$ref": "Operation" @@ -138366,9 +158469,9 @@ func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionTargetHttpsProxies.list": +// method id "compute.regionTargetTcpProxies.list": -type RegionTargetHttpsProxiesListCall struct { +type RegionTargetTcpProxiesListCall struct { s *Service project string region string @@ -138378,32 +158481,35 @@ type RegionTargetHttpsProxiesListCall struct { header_ http.Header } -// List: Retrieves the list of TargetHttpsProxy resources available to -// the specified project in the specified region. +// List: Retrieves a list of TargetTcpProxy resources available to the +// specified project in a given region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionTargetHttpsProxiesService) List(project string, region string) *RegionTargetHttpsProxiesListCall { - c := &RegionTargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionTargetTcpProxiesService) List(project string, region string) *RegionTargetTcpProxiesListCall { + c := &RegionTargetTcpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. 
The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -138411,8 +158517,18 @@ func (r *RegionTargetHttpsProxiesService) List(project string, region string) *R // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` -func (c *RegionTargetHttpsProxiesListCall) Filter(filter string) *RegionTargetHttpsProxiesListCall { +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionTargetTcpProxiesListCall) Filter(filter string) *RegionTargetTcpProxiesListCall { c.urlParams_.Set("filter", filter) return c } @@ -138423,7 +158539,7 @@ func (c *RegionTargetHttpsProxiesListCall) Filter(filter string) *RegionTargetHt // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *RegionTargetHttpsProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpsProxiesListCall { +func (c *RegionTargetTcpProxiesListCall) MaxResults(maxResults int64) *RegionTargetTcpProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -138437,7 +158553,7 @@ func (c *RegionTargetHttpsProxiesListCall) MaxResults(maxResults int64) *RegionT // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *RegionTargetHttpsProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpsProxiesListCall { +func (c *RegionTargetTcpProxiesListCall) OrderBy(orderBy string) *RegionTargetTcpProxiesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -138445,7 +158561,7 @@ func (c *RegionTargetHttpsProxiesListCall) OrderBy(orderBy string) *RegionTarget // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionTargetHttpsProxiesListCall) PageToken(pageToken string) *RegionTargetHttpsProxiesListCall { +func (c *RegionTargetTcpProxiesListCall) PageToken(pageToken string) *RegionTargetTcpProxiesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -138454,7 +158570,7 @@ func (c *RegionTargetHttpsProxiesListCall) PageToken(pageToken string) *RegionTa // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionTargetHttpsProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetHttpsProxiesListCall { +func (c *RegionTargetTcpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetTcpProxiesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -138462,7 +158578,7 @@ func (c *RegionTargetHttpsProxiesListCall) ReturnPartialSuccess(returnPartialSuc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesListCall { +func (c *RegionTargetTcpProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetTcpProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -138472,7 +158588,7 @@ func (c *RegionTargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *RegionT // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionTargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetHttpsProxiesListCall { +func (c *RegionTargetTcpProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetTcpProxiesListCall { c.ifNoneMatch_ = entityTag return c } @@ -138480,21 +158596,21 @@ func (c *RegionTargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *Region // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
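// NOTE (editorial sketch, not part of the vendored patch): the surrounding hunks add
// the generated list call for the new regionTargetTcpProxies collection, including
// Filter, MaxResults, PageToken and Pages. The snippet below sketches how a caller
// might drive it, assuming the usual generated surface (compute.NewService, a
// RegionTargetTcpProxies field on *compute.Service, and Pages-based iteration over
// *compute.TargetTcpProxyList); project, region and the filter value are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}
	call := svc.RegionTargetTcpProxies.List("my-project", "us-central1").
		Filter("name != example-proxy"). // filter syntax per the generated doc comment above
		MaxResults(100)
	// Pages follows nextPageToken automatically and invokes the callback once per page.
	err = call.Pages(ctx, func(page *compute.TargetTcpProxyList) error {
		for _, proxy := range page.Items {
			fmt.Println(proxy.Name, proxy.SelfLink)
		}
		return nil
	})
	if err != nil {
		log.Fatalf("list regionTargetTcpProxies: %v", err)
	}
}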
-func (c *RegionTargetHttpsProxiesListCall) Context(ctx context.Context) *RegionTargetHttpsProxiesListCall { +func (c *RegionTargetTcpProxiesListCall) Context(ctx context.Context) *RegionTargetTcpProxiesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpsProxiesListCall) Header() http.Header { +func (c *RegionTargetTcpProxiesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -138507,7 +158623,7 @@ func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetTcpProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -138521,33 +158637,33 @@ func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpsProxies.list" call. -// Exactly one of *TargetHttpsProxyList or error will be non-nil. Any +// Do executes the "compute.regionTargetTcpProxies.list" call. +// Exactly one of *TargetTcpProxyList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TargetHttpsProxyList.ServerResponse.Header or (if a response was +// *TargetTcpProxyList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyList, error) { +func (c *RegionTargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &TargetHttpsProxyList{ + ret := &TargetTcpProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -138559,17 +158675,17 @@ func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*Ta } return ret, nil // { - // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", - // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", + // "description": "Retrieves a list of TargetTcpProxy resources available to the specified project in a given region.", + // "flatPath": "projects/{project}/regions/{region}/targetTcpProxies", // "httpMethod": "GET", - // "id": "compute.regionTargetHttpsProxies.list", + // "id": "compute.regionTargetTcpProxies.list", // "parameterOrder": [ // "project", // "region" // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -138611,9 +158727,9 @@ func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*Ta // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/targetHttpsProxies", + // "path": "projects/{project}/regions/{region}/targetTcpProxies", // "response": { - // "$ref": "TargetHttpsProxyList" + // "$ref": "TargetTcpProxyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -138627,7 +158743,7 @@ func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*Ta // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionTargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyList) error) error { +func (c *RegionTargetTcpProxiesListCall) Pages(ctx context.Context, f func(*TargetTcpProxyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -138645,384 +158761,6 @@ func (c *RegionTargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*Ta } } -// method id "compute.regionTargetHttpsProxies.setSslCertificates": - -type RegionTargetHttpsProxiesSetSslCertificatesCall struct { - s *Service - project string - region string - targetHttpsProxy string - regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. -// -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an -// SslCertificates resource for. 
-func (r *RegionTargetHttpsProxiesService) SetSslCertificates(project string, region string, targetHttpsProxy string, regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest) *RegionTargetHttpsProxiesSetSslCertificatesCall { - c := &RegionTargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetHttpsProxy = targetHttpsProxy - c.regiontargethttpsproxiessetsslcertificatesrequest = regiontargethttpsproxiessetsslcertificatesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetSslCertificatesCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetSslCertificatesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetSslCertificatesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiontargethttpsproxiessetsslcertificatesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpsProxy": c.targetHttpsProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionTargetHttpsProxies.setSslCertificates" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Replaces SslCertificates for TargetHttpsProxy.", - // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", - // "httpMethod": "POST", - // "id": "compute.regionTargetHttpsProxies.setSslCertificates", - // "parameterOrder": [ - // "project", - // "region", - // "targetHttpsProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", - // "request": { - // "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionTargetHttpsProxies.setUrlMap": - -type RegionTargetHttpsProxiesSetUrlMapCall struct { - s *Service - project string - region string - targetHttpsProxy string - urlmapreference *UrlMapReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetUrlMap: Changes the URL map for TargetHttpsProxy. -// -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy to set a URL map -// for. -func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region string, targetHttpsProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpsProxiesSetUrlMapCall { - c := &RegionTargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetHttpsProxy = targetHttpsProxy - c.urlmapreference = urlmapreference - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionTargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetUrlMapCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetUrlMapCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetUrlMapCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
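As an illustrative aside to the generated call builders above: a minimal sketch of swapping the URL map on a regional TargetHttpsProxy with an idempotent requestId. It assumes the compute/v1 import path, Application Default Credentials, placeholder project/region/proxy names, and the github.com/google/uuid helper; the UrlMap field on UrlMapReference follows the usual generated struct shape and is an assumption, not something shown in this hunk.

package main

import (
	"context"
	"log"

	"github.com/google/uuid"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// NewService uses Application Default Credentials by default.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Reuse one UUID per logical mutation: if the first attempt already
	// completed, a retry carrying the same requestId is ignored server-side.
	requestID := uuid.NewString()

	ref := &compute.UrlMapReference{
		// Full or partial URL of the regional URL map to point the proxy at
		// (placeholder value; field name assumed).
		UrlMap: "projects/my-project/regions/us-central1/urlMaps/my-url-map",
	}

	op, err := svc.RegionTargetHttpsProxies.
		SetUrlMap("my-project", "us-central1", "my-https-proxy", ref).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation %s is %s", op.Name, op.Status)
}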
-func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpsProxy": c.targetHttpsProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionTargetHttpsProxies.setUrlMap" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes the URL map for TargetHttpsProxy.", - // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", - // "httpMethod": "POST", - // "id": "compute.regionTargetHttpsProxies.setUrlMap", - // "parameterOrder": [ - // "project", - // "region", - // "targetHttpsProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy to set a URL map for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", - // "request": { - // "$ref": "UrlMapReference" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - // method id "compute.regionUrlMaps.delete": type RegionUrlMapsDeleteCall struct { @@ -139119,17 +158857,17 @@ func (c *RegionUrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -139296,17 +159034,17 @@ func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMap{ ServerResponse: googleapi.ServerResponse{ @@ -139465,17 +159203,17 @@ func (c *RegionUrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -139558,19 +159296,22 @@ func (r *RegionUrlMapsService) List(project string, region string) *RegionUrlMap } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. 
For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -139578,7 +159319,17 @@ func (r *RegionUrlMapsService) List(project string, region string) *RegionUrlMap // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. 
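The rewritten Filter documentation above covers the simple expression form first. A minimal sketch of applying it to RegionUrlMaps.List, reusing the ctx, svc, and imports from the earlier sketch; the project, region, and URL map name are placeholders.

// listRegionURLMaps lists regional URL maps, excluding one by name with the
// simple (non-regex) filter form described above.
func listRegionURLMaps(ctx context.Context, svc *compute.Service) error {
	list, err := svc.RegionUrlMaps.List("my-project", "us-central1").
		Filter(`name != example-url-map`).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, um := range list.Items {
		log.Println(um.Name)
	}
	return nil
}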
func (c *RegionUrlMapsListCall) Filter(filter string) *RegionUrlMapsListCall { c.urlParams_.Set("filter", filter) return c @@ -139702,17 +159453,17 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapList{ ServerResponse: googleapi.ServerResponse{ @@ -139736,7 +159487,7 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -139917,17 +159668,17 @@ func (c *RegionUrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -140097,17 +159848,17 @@ func (c *RegionUrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -140271,17 +160022,17 @@ func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsVa if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapsValidateResponse{ ServerResponse: googleapi.ServerResponse{ @@ -140449,17 +160200,17 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Region{ ServerResponse: googleapi.ServerResponse{ @@ -140539,19 +160290,22 @@ func (r *RegionsService) List(project string) *RegionsListCall { } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. 
The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -140559,7 +160313,17 @@ func (r *RegionsService) List(project string) *RegionsListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. 
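The same doc comment, repeated here for Regions.List, also describes the legacy eq/ne form, where the literal is an RE2 pattern that must match the entire field. A minimal sketch under the same assumptions as the sketches above:

// listUSRegions uses the legacy eq/ne form: the literal is an RE2 pattern
// and must match the whole field, so "us-.*" matches every us-* region.
func listUSRegions(ctx context.Context, svc *compute.Service) error {
	list, err := svc.Regions.List("my-project").
		Filter(`name eq us-.*`).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, region := range list.Items {
		log.Println(region.Name)
	}
	return nil
}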
func (c *RegionsListCall) Filter(filter string) *RegionsListCall { c.urlParams_.Set("filter", filter) return c @@ -140682,17 +160446,17 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionList{ ServerResponse: googleapi.ServerResponse{ @@ -140715,7 +160479,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -140805,19 +160569,22 @@ func (r *ReservationsService) AggregatedList(project string) *ReservationsAggreg } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. 
For example: ``` @@ -140825,7 +160592,17 @@ func (r *ReservationsService) AggregatedList(project string) *ReservationsAggreg // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *ReservationsAggregatedListCall) Filter(filter string) *ReservationsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -140961,17 +160738,17 @@ func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Rese if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ReservationAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -140994,7 +160771,7 @@ func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Rese // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
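For the aggregated variant documented above, results come back grouped per scope rather than as a flat list. A minimal sketch of walking a Reservations.AggregatedList response, under the same assumptions as the earlier sketches; the Items and Reservations field names follow the usual generated shapes and are assumptions, not shown in this hunk.

// listAllReservations walks an aggregated list; entries are keyed by scope
// (e.g. "zones/us-central1-a"). Items and Reservations are assumed field names.
func listAllReservations(ctx context.Context, svc *compute.Service) error {
	resp, err := svc.Reservations.AggregatedList("my-project").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for scope, scoped := range resp.Items {
		for _, r := range scoped.Reservations {
			log.Println(scope, r.Name)
		}
	}
	return nil
}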
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -141173,17 +160950,17 @@ func (c *ReservationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -141349,17 +161126,17 @@ func (c *ReservationsGetCall) Do(opts ...googleapi.CallOption) (*Reservation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Reservation{ ServerResponse: googleapi.ServerResponse{ @@ -141529,17 +161306,17 @@ func (c *ReservationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -141713,17 +161490,17 @@ func (c *ReservationsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -141806,19 +161583,22 @@ func (r *ReservationsService) List(project string, zone string) *ReservationsLis } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -141826,7 +161606,17 @@ func (r *ReservationsService) List(project string, zone string) *ReservationsLis // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *ReservationsListCall) Filter(filter string) *ReservationsListCall { c.urlParams_.Set("filter", filter) return c @@ -141950,17 +161740,17 @@ func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationLis if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ReservationList{ ServerResponse: googleapi.ServerResponse{ @@ -141984,7 +161774,7 @@ func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationLis // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
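The doc comment above also shows how parenthesized expressions combine (AND by default, with explicit AND/OR allowed). A minimal sketch on the zonal Reservations.List call, same assumptions as before; the zone and the filtered field names are placeholders for illustration.

// listReadyReservations ANDs two parenthesized expressions; the filtered
// field names are placeholders for whatever Reservation fields you need.
func listReadyReservations(ctx context.Context, svc *compute.Service) error {
	list, err := svc.Reservations.List("my-project", "us-central1-a").
		Filter(`(status = "READY") (name != example-reservation)`).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, r := range list.Items {
		log.Println(r.Name, r.Status)
	}
	return nil
}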
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -142174,17 +161964,17 @@ func (c *ReservationsResizeCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -142347,17 +162137,17 @@ func (c *ReservationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -142515,17 +162305,17 @@ func (c *ReservationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -142712,17 +162502,17 @@ func (c *ReservationsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -142820,19 +162610,22 @@ func (r *ResourcePoliciesService) AggregatedList(project string) *ResourcePolici } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -142840,7 +162633,17 @@ func (r *ResourcePoliciesService) AggregatedList(project string) *ResourcePolici // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *ResourcePoliciesAggregatedListCall) Filter(filter string) *ResourcePoliciesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -142976,17 +162779,17 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourcePolicyAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -143009,7 +162812,7 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -143188,17 +162991,17 @@ func (c *ResourcePoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -143364,17 +163167,17 @@ func (c *ResourcePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResourcePol if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourcePolicy{ ServerResponse: googleapi.ServerResponse{ @@ -143544,17 +163347,17 @@ func (c *ResourcePoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -143727,17 +163530,17 @@ func (c *ResourcePoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -143820,19 +163623,22 @@ func (r *ResourcePoliciesService) List(project string, region string) *ResourceP } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -143840,7 +163646,17 @@ func (r *ResourcePoliciesService) List(project string, region string) *ResourceP // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *ResourcePoliciesListCall) Filter(filter string) *ResourcePoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -143964,17 +163780,17 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourcePolicyList{ ServerResponse: googleapi.ServerResponse{ @@ -143998,7 +163814,7 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -144171,17 +163987,17 @@ func (c *ResourcePoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -144339,17 +164155,17 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -144432,19 +164248,22 @@ func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCa } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. 
You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -144452,7 +164271,17 @@ func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCa // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -144588,17 +164417,17 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RouterAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -144621,7 +164450,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -144800,17 +164629,17 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -144977,17 +164806,17 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Router{ ServerResponse: googleapi.ServerResponse{ @@ -145062,10 +164891,10 @@ type RoutersGetNatMappingInfoCall struct { // GetNatMappingInfo: Retrieves runtime Nat mapping information of VM // endpoints. // -// - project: Project ID for this request. -// - region: Name of the region for this request. -// - router: Name of the Router resource to query for Nat Mapping -// information of VM endpoints. +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - router: Name of the Router resource to query for Nat Mapping +// information of VM endpoints. func (r *RoutersService) GetNatMappingInfo(project string, region string, router string) *RoutersGetNatMappingInfoCall { c := &RoutersGetNatMappingInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -145075,19 +164904,22 @@ func (r *RoutersService) GetNatMappingInfo(project string, region string, router } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. 
The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -145095,7 +164927,17 @@ func (r *RoutersService) GetNatMappingInfo(project string, region string, router // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *RoutersGetNatMappingInfoCall) Filter(filter string) *RoutersGetNatMappingInfoCall { c.urlParams_.Set("filter", filter) return c @@ -145220,17 +165062,17 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VmEndpointNatMappingsList{ ServerResponse: googleapi.ServerResponse{ @@ -145255,7 +165097,7 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -145442,17 +165284,17 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RouterStatusResponse{ ServerResponse: googleapi.ServerResponse{ @@ -145620,17 +165462,17 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -145713,19 +165555,22 @@ func (r *RoutersService) List(project string, region string) *RoutersListCall { } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. 
You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -145733,7 +165578,17 @@ func (r *RoutersService) List(project string, region string) *RoutersListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *RoutersListCall) Filter(filter string) *RoutersListCall { c.urlParams_.Set("filter", filter) return c @@ -145857,17 +165712,17 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RouterList{ ServerResponse: googleapi.ServerResponse{ @@ -145891,7 +165746,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -146081,17 +165936,17 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -146255,17 +166110,17 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RoutersPreviewResponse{ ServerResponse: googleapi.ServerResponse{ @@ -146443,17 +166298,17 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -146620,17 +166475,17 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -146785,17 +166640,17 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Route{ ServerResponse: googleapi.ServerResponse{ @@ -146951,17 +166806,17 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -147033,19 +166888,22 @@ func (r *RoutesService) List(project string) *RoutesListCall { } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. 
The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -147053,7 +166911,17 @@ func (r *RoutesService) List(project string) *RoutesListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. 
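A minimal usage sketch of the Filter option documented in the regenerated comment above, assuming an authenticated *compute.Service named svc, a project ID in project, and the package's usual generated accessors (svc.Routes, RouteList.Items, Route.Name, Route.Priority); both filter styles described above go through the same parameter:

// Assumed imports: "fmt" and "google.golang.org/api/compute/v1" (as compute).
func printCustomRoutes(svc *compute.Service, project string) error {
	// AIP-160 style: field, operator, value; adjacent parenthesized
	// expressions are ANDed by default.
	list, err := svc.Routes.List(project).
		Filter(`(name != default-route) (priority >= 1000)`).
		Do()
	if err != nil {
		return err
	}
	for _, r := range list.Items {
		fmt.Println(r.Name, r.Priority)
	}

	// Legacy regular-expression style: eq/ne with an RE2 literal that
	// must match the entire field value.
	_, err = svc.Routes.List(project).Filter(`name ne ".*-test"`).Do()
	return err
}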
func (c *RoutesListCall) Filter(filter string) *RoutesListCall { c.urlParams_.Set("filter", filter) return c @@ -147176,17 +167044,17 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RouteList{ ServerResponse: googleapi.ServerResponse{ @@ -147209,7 +167077,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -147302,6 +167170,13 @@ func (r *SecurityPoliciesService) AddRule(project string, securityPolicy string, return c } +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *SecurityPoliciesAddRuleCall) ValidateOnly(validateOnly bool) *SecurityPoliciesAddRuleCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -147370,17 +167245,17 @@ func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -147416,6 +167291,11 @@ func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/securityPolicies/{securityPolicy}/addRule", @@ -147433,6 +167313,304 @@ func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati } +// method id "compute.securityPolicies.aggregatedList": + +type SecurityPoliciesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all SecurityPolicy resources, +// regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. +func (r *SecurityPoliciesService) AggregatedList(project string) *SecurityPoliciesAggregatedListCall { + c := &SecurityPoliciesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *SecurityPoliciesAggregatedListCall) Filter(filter string) *SecurityPoliciesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *SecurityPoliciesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *SecurityPoliciesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. 
Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *SecurityPoliciesAggregatedListCall) MaxResults(maxResults int64) *SecurityPoliciesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *SecurityPoliciesAggregatedListCall) OrderBy(orderBy string) *SecurityPoliciesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *SecurityPoliciesAggregatedListCall) PageToken(pageToken string) *SecurityPoliciesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *SecurityPoliciesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SecurityPoliciesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesAggregatedListCall) Fields(s ...googleapi.Field) *SecurityPoliciesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SecurityPoliciesAggregatedListCall) IfNoneMatch(entityTag string) *SecurityPoliciesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesAggregatedListCall) Context(ctx context.Context) *SecurityPoliciesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
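For orientation, a sketch of driving the new aggregated-list call with the paging knobs described above (MaxResults, OrderBy, PageToken, ReturnPartialSuccess). It assumes the v1 compute import path, an authenticated *compute.Service, and the usual NextPageToken field on the response; the loop below is essentially what the generated Pages helper shown further down does for you:

// Assumed imports: "context" and "google.golang.org/api/compute/v1" (as compute).
func walkSecurityPolicies(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.SecurityPolicies.AggregatedList(project).
		Filter(`name != default-policy`). // AIP-160 or eq/ne regex form, as documented above
		MaxResults(100).                  // 0 to 500, default 500
		OrderBy("creationTimestamp desc").
		ReturnPartialSuccess(true).
		Context(ctx)
	for {
		page, err := call.Do()
		if err != nil {
			return err
		}
		_ = page // inspect page.Items here (per-scope results)
		if page.NextPageToken == "" {
			return nil
		}
		call.PageToken(page.NextPageToken) // request the next page
	}
}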
+func (c *SecurityPoliciesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/securityPolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.aggregatedList" call. +// Exactly one of *SecurityPoliciesAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *SecurityPoliciesAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SecurityPoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (*SecurityPoliciesAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SecurityPoliciesAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all SecurityPolicy resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/securityPolicies", + // "httpMethod": "GET", + // "id": "compute.securityPolicies.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/securityPolicies", + // "response": { + // "$ref": "SecurityPoliciesAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SecurityPoliciesAggregatedListCall) Pages(ctx context.Context, f func(*SecurityPoliciesAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.securityPolicies.delete": type SecurityPoliciesDeleteCall struct { @@ -147534,17 +167712,17 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -147699,17 +167877,17 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -147774,9 +167952,9 @@ type SecurityPoliciesGetRuleCall struct { // GetRule: Gets a rule at the specified priority. // -// - project: Project ID for this request. -// - securityPolicy: Name of the security policy to which the queried -// rule belongs. +// - project: Project ID for this request. +// - securityPolicy: Name of the security policy to which the queried +// rule belongs. 
func (r *SecurityPoliciesService) GetRule(project string, securityPolicy string) *SecurityPoliciesGetRuleCall { c := &SecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -147867,17 +168045,17 @@ func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Securit if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPolicyRule{ ServerResponse: googleapi.ServerResponse{ @@ -147972,6 +168150,13 @@ func (c *SecurityPoliciesInsertCall) RequestId(requestId string) *SecurityPolici return c } +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *SecurityPoliciesInsertCall) ValidateOnly(validateOnly bool) *SecurityPoliciesInsertCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -148039,17 +168224,17 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -148082,6 +168267,11 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/securityPolicies", @@ -148121,19 +168311,22 @@ func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. 
The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -148141,7 +168334,17 @@ func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. 
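The filter, ordering, and paging options documented above repeat across the generated list calls in this file. A rough sketch of how they might combine on the securityPolicies.list call follows; the import path, project ID, and page size are placeholders, and the OrderBy/MaxResults/Pages helpers are assumed to exist on SecurityPoliciesListCall in the same way they do on the aggregated-list call shown earlier.

// Illustrative only: list security policies other than "example-policy",
// newest first, 50 per page, walking every page.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // illustrative import path
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	const project = "my-project" // placeholder project ID
	call := svc.SecurityPolicies.List(project).
		Filter(`name != example-policy`).
		OrderBy("creationTimestamp desc").
		MaxResults(50)
	err = call.Pages(ctx, func(page *compute.SecurityPolicyList) error {
		for _, policy := range page.Items {
			fmt.Println(policy.Name)
		}
		return nil // returning a non-nil error stops the iteration
	})
	if err != nil {
		log.Fatal(err)
	}
}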
func (c *SecurityPoliciesListCall) Filter(filter string) *SecurityPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -148264,17 +168467,17 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPolicyList{ ServerResponse: googleapi.ServerResponse{ @@ -148297,7 +168500,7 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -148388,19 +168591,22 @@ func (r *SecurityPoliciesService) ListPreconfiguredExpressionSets(project string } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. 
For example: ``` @@ -148408,7 +168614,17 @@ func (r *SecurityPoliciesService) ListPreconfiguredExpressionSets(project string // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Filter(filter string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { c.urlParams_.Set("filter", filter) return c @@ -148534,17 +168750,17 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googlea if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPoliciesListPreconfiguredExpressionSetsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -148567,7 +168783,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googlea // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -148628,9 +168844,10 @@ type SecurityPoliciesPatchCall struct { } // Patch: Patches the specified policy with the data included in the -// request. This cannot be used to be update the rules in the policy. -// Please use the per rule methods like addRule, patchRule, and -// removeRule instead. +// request. To clear fields in the rule, leave the fields empty and +// specify them in the updateMask. This cannot be used to be update the +// rules in the policy. Please use the per rule methods like addRule, +// patchRule, and removeRule instead. // // - project: Project ID for this request. // - securityPolicy: Name of the security policy to update. 
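The new ValidateOnly parameter added in the hunks above is only described as "if true, the request will not be committed". A minimal dry-run sketch follows; the import path, project ID, and policy name are placeholders rather than anything taken from this change.

// Illustrative dry run: ask the API to validate a security policy insert
// without committing it, via the new ValidateOnly query parameter.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // illustrative import path
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	const project = "my-project"                               // placeholder project ID
	policy := &compute.SecurityPolicy{Name: "example-policy"} // placeholder policy
	op, err := svc.SecurityPolicies.Insert(project, policy).
		ValidateOnly(true). // server-side validation only, nothing is created
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("validation failed: %v", err)
	}
	log.Printf("request validated; operation: %s", op.Name)
}

The same ValidateOnly helper is added to SecurityPoliciesPatchRuleCall in the hunks below and is used the same way.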
@@ -148726,17 +168943,17 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -148750,7 +168967,7 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Patches the specified policy with the data included in the request. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", + // "description": "Patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", // "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}", // "httpMethod": "PATCH", // "id": "compute.securityPolicies.patch", @@ -148825,6 +169042,13 @@ func (c *SecurityPoliciesPatchRuleCall) Priority(priority int64) *SecurityPolici return c } +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *SecurityPoliciesPatchRuleCall) ValidateOnly(validateOnly bool) *SecurityPoliciesPatchRuleCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
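Most hunks in this file replace bare *googleapi.Error returns with gensupport.WrapError. A hedged sketch of caller-side handling follows, using the getRule call shown earlier; it assumes the wrapper keeps the original *googleapi.Error reachable through errors.As, and the project and policy names are placeholders.

// Illustrative error handling after the gensupport.WrapError change.
package main

import (
	"context"
	"errors"
	"log"

	compute "google.golang.org/api/compute/v1" // illustrative import path
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	rule, err := svc.SecurityPolicies.GetRule("my-project", "example-policy").
		Context(ctx).
		Do()
	if err != nil {
		var gerr *googleapi.Error
		if errors.As(err, &gerr) { // the wrapped error is assumed to still expose *googleapi.Error
			log.Fatalf("compute API error %d: %s", gerr.Code, gerr.Message)
		}
		log.Fatal(err)
	}
	log.Printf("rule action: %s", rule.Action)
}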
@@ -148893,17 +169117,17 @@ func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -148945,6 +169169,11 @@ func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "projects/{project}/global/securityPolicies/{securityPolicy}/patchRule", @@ -149054,17 +169283,17 @@ func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -149120,6 +169349,162 @@ func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper } +// method id "compute.securityPolicies.setLabels": + +type SecurityPoliciesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a security policy. To learn more about +// labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *SecurityPoliciesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *SecurityPoliciesSetLabelsCall { + c := &SecurityPoliciesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesSetLabelsCall) Fields(s ...googleapi.Field) *SecurityPoliciesSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesSetLabelsCall) Context(ctx context.Context) *SecurityPoliciesSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SecurityPoliciesSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/securityPolicies/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a security policy. 
To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/global/securityPolicies/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.setLabels", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/securityPolicies/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.serviceAttachments.aggregatedList": type ServiceAttachmentsAggregatedListCall struct { @@ -149142,19 +169527,22 @@ func (r *ServiceAttachmentsService) AggregatedList(project string) *ServiceAttac } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. 
+// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -149162,7 +169550,17 @@ func (r *ServiceAttachmentsService) AggregatedList(project string) *ServiceAttac // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *ServiceAttachmentsAggregatedListCall) Filter(filter string) *ServiceAttachmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -149298,17 +169696,17 @@ func (c *ServiceAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -149331,7 +169729,7 @@ func (c *ServiceAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -149419,10 +169817,10 @@ type ServiceAttachmentsDeleteCall struct { // Delete: Deletes the specified ServiceAttachment in the given scope // -// - project: Project ID for this request. -// - region: Name of the region of this request. -// - serviceAttachment: Name of the ServiceAttachment resource to -// delete. +// - project: Project ID for this request. +// - region: Name of the region of this request. +// - serviceAttachment: Name of the ServiceAttachment resource to +// delete. 
func (r *ServiceAttachmentsService) Delete(project string, region string, serviceAttachment string) *ServiceAttachmentsDeleteCall { c := &ServiceAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -149511,17 +169909,17 @@ func (c *ServiceAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -149600,10 +169998,10 @@ type ServiceAttachmentsGetCall struct { // Get: Returns the specified ServiceAttachment resource in the given // scope. // -// - project: Project ID for this request. -// - region: Name of the region of this request. -// - serviceAttachment: Name of the ServiceAttachment resource to -// return. +// - project: Project ID for this request. +// - region: Name of the region of this request. +// - serviceAttachment: Name of the ServiceAttachment resource to +// return. func (r *ServiceAttachmentsService) Get(project string, region string, serviceAttachment string) *ServiceAttachmentsGetCall { c := &ServiceAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -149689,17 +170087,17 @@ func (c *ServiceAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*ServiceAt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceAttachment{ ServerResponse: googleapi.ServerResponse{ @@ -149869,17 +170267,17 @@ func (c *ServiceAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -150053,17 +170451,17 @@ func (c *ServiceAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -150145,19 +170543,22 @@ func (r *ServiceAttachmentsService) List(project string, region string) *Service } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. 
For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -150165,7 +170566,17 @@ func (r *ServiceAttachmentsService) List(project string, region string) *Service // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. 
func (c *ServiceAttachmentsListCall) Filter(filter string) *ServiceAttachmentsListCall { c.urlParams_.Set("filter", filter) return c @@ -150289,17 +170700,17 @@ func (c *ServiceAttachmentsListCall) Do(opts ...googleapi.CallOption) (*ServiceA if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceAttachmentList{ ServerResponse: googleapi.ServerResponse{ @@ -150323,7 +170734,7 @@ func (c *ServiceAttachmentsListCall) Do(opts ...googleapi.CallOption) (*ServiceA // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -150416,12 +170827,12 @@ type ServiceAttachmentsPatchCall struct { // included in the request. This method supports PATCH semantics and // uses JSON merge patch format and processing rules. // -// - project: Project ID for this request. -// - region: The region scoping this request and should conform to -// RFC1035. -// - serviceAttachment: The resource id of the ServiceAttachment to -// patch. It should conform to RFC1035 resource name or be a string -// form on an unsigned long number. +// - project: Project ID for this request. +// - region: The region scoping this request and should conform to +// RFC1035. +// - serviceAttachment: The resource id of the ServiceAttachment to +// patch. It should conform to RFC1035 resource name or be a string +// form on an unsigned long number. 
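The ServiceAttachments Patch call documented above uses JSON merge-patch semantics, so only the fields populated on the request body are changed. A minimal sketch follows; the ConnectionPreference field and value, plus the project, region, and attachment names, are assumptions for illustration rather than details taken from this diff.

// Illustrative merge patch of a service attachment: only the populated
// fields on the request body are updated.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // illustrative import path
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	patch := &compute.ServiceAttachment{
		ConnectionPreference: "ACCEPT_MANUAL", // assumed field and value
	}
	op, err := svc.ServiceAttachments.Patch("my-project", "us-central1", "example-attachment", patch).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("patch operation: %s", op.Name)
}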
func (r *ServiceAttachmentsService) Patch(project string, region string, serviceAttachment string, serviceattachment *ServiceAttachment) *ServiceAttachmentsPatchCall { c := &ServiceAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -150516,17 +170927,17 @@ func (c *ServiceAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -150687,17 +171098,17 @@ func (c *ServiceAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -150855,17 +171266,17 @@ func (c *ServiceAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -151033,17 +171444,17 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -151198,17 +171609,17 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Snapshot{ ServerResponse: googleapi.ServerResponse{ @@ -151366,17 +171777,17 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -151541,17 +171952,17 @@ func (c *SnapshotsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body 
!= nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -151623,19 +172034,22 @@ func (r *SnapshotsService) List(project string) *SnapshotsListCall { } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -151643,7 +172057,17 @@ func (r *SnapshotsService) List(project string) *SnapshotsListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { c.urlParams_.Set("filter", filter) return c @@ -151766,17 +172190,17 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SnapshotList{ ServerResponse: googleapi.ServerResponse{ @@ -151799,7 +172223,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -151961,17 +172385,17 @@ func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -152117,17 +172541,17 @@ func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -152273,17 +172697,17 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -152359,19 +172783,22 @@ func (r *SslCertificatesService) AggregatedList(project string) *SslCertificates } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. 
The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -152379,7 +172806,17 @@ func (r *SslCertificatesService) AggregatedList(project string) *SslCertificates // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. 
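The rewritten filter documentation above now describes two expression styles: AIP-160 comparisons and RE2 regular expressions via the `eq`/`ne` operators. Below is a minimal caller-side sketch of both styles against the generated compute/v1 client; the project ID and resource names are placeholders, and the service is assumed to be built with Application Default Credentials.

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// Assumes Application Default Credentials are available in the environment.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// AIP-160 style comparison: exclude one certificate by name.
	// "my-project" and "example-cert" are placeholder values.
	certs, err := svc.SslCertificates.List("my-project").
		Filter(`name != example-cert`).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range certs.Items {
		fmt.Println(c.Name)
	}

	// RE2 regular-expression style: names that do NOT end in "instance".
	if _, err := svc.SslCertificates.List("my-project").
		Filter(`name ne .*instance`).
		Context(ctx).
		Do(); err != nil {
		log.Fatal(err)
	}
}
```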
func (c *SslCertificatesAggregatedListCall) Filter(filter string) *SslCertificatesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -152515,17 +172952,17 @@ func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*S if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslCertificateAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -152548,7 +172985,7 @@ func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*S // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -152723,17 +173160,17 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -152888,17 +173325,17 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslCertificate{ ServerResponse: googleapi.ServerResponse{ @@ -153054,17 +173491,17 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -153136,19 +173573,22 @@ func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. 
For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -153156,7 +173596,17 @@ func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. 
func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { c.urlParams_.Set("filter", filter) return c @@ -153279,17 +173729,17 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslCertificateList{ ServerResponse: googleapi.ServerResponse{ @@ -153312,7 +173762,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -153381,6 +173831,304 @@ func (c *SslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertific } } +// method id "compute.sslPolicies.aggregatedList": + +type SslPoliciesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all SslPolicy resources, +// regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. +func (r *SslPoliciesService) AggregatedList(project string) *SslPoliciesAggregatedListCall { + c := &SslPoliciesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. 
For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *SslPoliciesAggregatedListCall) Filter(filter string) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *SslPoliciesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *SslPoliciesAggregatedListCall) MaxResults(maxResults int64) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *SslPoliciesAggregatedListCall) OrderBy(orderBy string) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
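The new SslPoliciesAggregatedListCall exposes the usual optional list parameters (Filter, IncludeAllScopes, MaxResults, OrderBy, PageToken, ReturnPartialSuccess). A short sketch of chaining them on one request follows; the helper name, the *compute.Service value and the filter string are assumptions made for illustration.

```go
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listAggregatedSslPolicies is a hypothetical helper used only for this
// sketch. It chains the optional parameters the new aggregated-list call
// exposes; the filter value is a placeholder.
func listAggregatedSslPolicies(ctx context.Context, svc *compute.Service, project string) error {
	page, err := svc.SslPolicies.AggregatedList(project).
		Filter(`name != example-policy`).  // optional AIP-160 filter
		IncludeAllScopes(true).            // every visible scope type
		MaxResults(100).                   // up to 100 results per page
		OrderBy("creationTimestamp desc"). // newest resources first
		ReturnPartialSuccess(true).        // tolerate per-scope failures
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("next page token:", page.NextPageToken)
	return nil
}
```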
+func (c *SslPoliciesAggregatedListCall) PageToken(pageToken string) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *SslPoliciesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslPoliciesAggregatedListCall) Fields(s ...googleapi.Field) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SslPoliciesAggregatedListCall) IfNoneMatch(entityTag string) *SslPoliciesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesAggregatedListCall) Context(ctx context.Context) *SslPoliciesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SslPoliciesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/sslPolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.aggregatedList" call. +// Exactly one of *SslPoliciesAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *SslPoliciesAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslPoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SslPoliciesAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all SslPolicy resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/sslPolicies", + // "httpMethod": "GET", + // "id": "compute.sslPolicies.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/sslPolicies", + // "response": { + // "$ref": "SslPoliciesAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
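Throughout this file, errors from the generated Do methods are now passed through gensupport.WrapError instead of being returned as bare *googleapi.Error values. The sketch below shows one way a caller might inspect such an error; it assumes the wrapped error still unwraps to *googleapi.Error via errors.As, and the helper name is invented for illustration.

```go
package main

import (
	"errors"
	"log"

	"google.golang.org/api/googleapi"
)

// logComputeError is a hypothetical helper used only for this sketch. It
// assumes the error returned by the generated Do methods, now passed
// through gensupport.WrapError, still unwraps to *googleapi.Error via
// errors.As.
func logComputeError(err error) {
	var gerr *googleapi.Error
	if errors.As(err, &gerr) {
		// HTTP status, headers and message of the API error are available here.
		log.Printf("compute API error: HTTP %d: %s", gerr.Code, gerr.Message)
		return
	}
	log.Printf("non-API error: %v", err)
}
```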
+func (c *SslPoliciesAggregatedListCall) Pages(ctx context.Context, f func(*SslPoliciesAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.sslPolicies.delete": type SslPoliciesDeleteCall struct { @@ -153396,9 +174144,9 @@ type SslPoliciesDeleteCall struct { // be deleted only if it is not in use by any TargetHttpsProxy or // TargetSslProxy resources. // -// - project: Project ID for this request. -// - sslPolicy: Name of the SSL policy to delete. The name must be 1-63 -// characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - sslPolicy: Name of the SSL policy to delete. The name must be 1-63 +// characters long, and comply with RFC1035. func (r *SslPoliciesService) Delete(project string, sslPolicy string) *SslPoliciesDeleteCall { c := &SslPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -153485,17 +174233,17 @@ func (c *SslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -153564,9 +174312,9 @@ type SslPoliciesGetCall struct { // Get: Lists all of the ordered rules present in a single specified // policy. // -// - project: Project ID for this request. -// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 -// characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. 
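The generated Pages helper shown above drives the nextPageToken loop on behalf of the caller. A minimal sketch of walking every page of the new aggregated list follows; the helper name and the assumption that page.Items is keyed by scope are made for illustration.

```go
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printSslPolicyScopes is a hypothetical helper used only for this sketch.
// It lets the generated Pages method handle nextPageToken instead of
// paging manually.
func printSslPolicyScopes(ctx context.Context, svc *compute.Service, project string) error {
	return svc.SslPolicies.AggregatedList(project).
		Pages(ctx, func(page *compute.SslPoliciesAggregatedList) error {
			for scope := range page.Items { // keyed by scope, e.g. "global" or "regions/us-central1"
				fmt.Println(scope)
			}
			return nil
		})
}
```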
func (r *SslPoliciesService) Get(project string, sslPolicy string) *SslPoliciesGetCall { c := &SslPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -153650,17 +174398,17 @@ func (c *SslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -153815,17 +174563,17 @@ func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -153897,19 +174645,22 @@ func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. 
You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -153917,7 +174668,17 @@ func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *SslPoliciesListCall) Filter(filter string) *SslPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -154040,17 +174801,17 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslPoliciesList{ ServerResponse: googleapi.ServerResponse{ @@ -154073,7 +174834,7 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -154164,19 +174925,22 @@ func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesL } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. 
For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -154184,7 +174948,17 @@ func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesL // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *SslPoliciesListAvailableFeaturesCall) Filter(filter string) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("filter", filter) return c @@ -154309,17 +175083,17 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslPoliciesListAvailableFeaturesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -154342,7 +175116,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -154405,9 +175179,9 @@ type SslPoliciesPatchCall struct { // Patch: Patches the specified SSL policy with the data included in the // request. // -// - project: Project ID for this request. -// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 -// characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. func (r *SslPoliciesService) Patch(project string, sslPolicy string, sslpolicy *SslPolicy) *SslPoliciesPatchCall { c := &SslPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -154500,17 +175274,17 @@ func (c *SslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -154588,19 +175362,22 @@ func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregat } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. 
+// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -154608,7 +175385,17 @@ func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregat // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *SubnetworksAggregatedListCall) Filter(filter string) *SubnetworksAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -154744,17 +175531,17 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SubnetworkAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -154777,7 +175564,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -154956,17 +175743,17 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -155142,17 +175929,17 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -155322,17 +176109,17 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Subnetwork{ ServerResponse: googleapi.ServerResponse{ @@ -155502,17 +176289,17 @@ func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -155686,17 +176473,17 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -155779,19 +176566,22 @@ func (r *SubnetworksService) List(project string, region string) *SubnetworksLis } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. 
For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -155799,7 +176589,17 @@ func (r *SubnetworksService) List(project string, region string) *SubnetworksLis // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. 
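
A short usage sketch of the filter parameter documented above: the generated Filter setter that follows only records the expression as the "filter" query parameter, and the server evaluates it. This example is not part of the diff; the google.golang.org/api/compute/v1 import path, Application Default Credentials, and the project, region, and subnetwork names are assumptions for illustration.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumes Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// AIP-160 style filter: parenthesized expressions combine with AND unless
	// OR is written explicitly, as the generated doc comment describes.
	call := svc.Subnetworks.List("my-project", "us-central1").
		Filter(`name != example-subnet`)
	// The legacy regex style would instead be, e.g., Filter(`name ne .*-test`).
	list, err := call.Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, sn := range list.Items {
		fmt.Println(sn.Name, sn.IpCidrRange)
	}
}
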
func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall { c.urlParams_.Set("filter", filter) return c @@ -155923,17 +176723,17 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SubnetworkList{ ServerResponse: googleapi.ServerResponse{ @@ -155957,7 +176757,7 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -156055,19 +176855,22 @@ func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCa } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. 
For example: ``` @@ -156075,7 +176878,17 @@ func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCa // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *SubnetworksListUsableCall) Filter(filter string) *SubnetworksListUsableCall { c.urlParams_.Set("filter", filter) return c @@ -156198,17 +177011,17 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UsableSubnetworksAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -156231,7 +177044,7 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -156430,17 +177243,17 @@ func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -156609,17 +177422,17 @@ func (c *SubnetworksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -156794,17 +177607,17 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -156967,17 +177780,17 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -157140,17 +177953,17 @@ func (c *TargetGrpcProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -157305,17 +178118,17 @@ func (c *TargetGrpcProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetGrpc if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetGrpcProxy{ ServerResponse: googleapi.ServerResponse{ @@ -157471,17 +178284,17 @@ func (c *TargetGrpcProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, 
gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -157552,19 +178365,22 @@ func (r *TargetGrpcProxiesService) List(project string) *TargetGrpcProxiesListCa } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -157572,7 +178388,17 @@ func (r *TargetGrpcProxiesService) List(project string) *TargetGrpcProxiesListCa // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *TargetGrpcProxiesListCall) Filter(filter string) *TargetGrpcProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -157695,17 +178521,17 @@ func (c *TargetGrpcProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetGrp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetGrpcProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -157728,7 +178554,7 @@ func (c *TargetGrpcProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetGrp // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -157907,17 +178733,17 @@ func (c *TargetGrpcProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -157997,19 +178823,22 @@ func (r *TargetHttpProxiesService) AggregatedList(project string) *TargetHttpPro } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. 
The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -158017,7 +178846,17 @@ func (r *TargetHttpProxiesService) AggregatedList(project string) *TargetHttpPro // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *TargetHttpProxiesAggregatedListCall) Filter(filter string) *TargetHttpProxiesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -158153,17 +178992,17 @@ func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpProxyAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -158186,7 +179025,7 @@ func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -158361,17 +179200,17 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -158526,17 +179365,17 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpProxy{ ServerResponse: googleapi.ServerResponse{ @@ -158692,17 +179531,17 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -158774,19 +179613,22 @@ func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCa } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. 
The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -158794,7 +179636,17 @@ func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCa // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -158917,17 +179769,17 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -158950,7 +179802,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -159129,17 +179981,17 @@ func (c *TargetHttpProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -159305,17 +180157,17 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -159395,19 +180247,22 @@ func (r *TargetHttpsProxiesService) AggregatedList(project string) *TargetHttpsP } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. 
To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -159415,7 +180270,17 @@ func (r *TargetHttpsProxiesService) AggregatedList(project string) *TargetHttpsP // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *TargetHttpsProxiesAggregatedListCall) Filter(filter string) *TargetHttpsProxiesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -159551,17 +180416,17 @@ func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpsProxyAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -159584,7 +180449,7 @@ func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -159759,17 +180624,17 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -159924,17 +180789,17 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpsProxy{ ServerResponse: googleapi.ServerResponse{ @@ -160090,17 +180955,17 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -160172,19 +181037,22 @@ func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesList } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. 
The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -160192,7 +181060,17 @@ func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesList // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -160315,17 +181193,17 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpsProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -160348,7 +181226,7 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -160527,17 +181405,17 @@ func (c *TargetHttpsProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -160595,6 +181473,183 @@ func (c *TargetHttpsProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operati } +// method id "compute.targetHttpsProxies.setCertificateMap": + +type TargetHttpsProxiesSetCertificateMapCall struct { + s *Service + project string + targetHttpsProxy string + targethttpsproxiessetcertificatemaprequest *TargetHttpsProxiesSetCertificateMapRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetCertificateMap: Changes the Certificate Map for TargetHttpsProxy. +// +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose +// CertificateMap is to be set. The name must be 1-63 characters long, +// and comply with RFC1035. +func (r *TargetHttpsProxiesService) SetCertificateMap(project string, targetHttpsProxy string, targethttpsproxiessetcertificatemaprequest *TargetHttpsProxiesSetCertificateMapRequest) *TargetHttpsProxiesSetCertificateMapCall { + c := &TargetHttpsProxiesSetCertificateMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpsProxy = targetHttpsProxy + c.targethttpsproxiessetcertificatemaprequest = targethttpsproxiessetcertificatemaprequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *TargetHttpsProxiesSetCertificateMapCall) RequestId(requestId string) *TargetHttpsProxiesSetCertificateMapCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesSetCertificateMapCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetCertificateMapCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
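A hedged sketch of invoking the compute.targetHttpsProxies.setCertificateMap method added in this hunk; the project, proxy name, certificate map URL and request ID are placeholders, and the CertificateMap field name on TargetHttpsProxiesSetCertificateMapRequest is an assumption rather than something shown in this diff.

// Sketch only: attach a certificate map to a target HTTPS proxy.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	req := &compute.TargetHttpsProxiesSetCertificateMapRequest{
		// Assumed field name and URL format for the certificate map to attach.
		CertificateMap: "//certificatemanager.googleapis.com/projects/my-project/locations/global/certificateMaps/my-map",
	}
	op, err := svc.TargetHttpsProxies.SetCertificateMap("my-project", "my-https-proxy", req).
		// RequestId makes a retried call idempotent; any valid non-zero UUID works.
		RequestId("3d7e4b1a-9f2c-4c57-8a1e-0f6d2b9c4e21").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation %s status: %s", op.Name, op.Status)
}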
+func (c *TargetHttpsProxiesSetCertificateMapCall) Context(ctx context.Context) *TargetHttpsProxiesSetCertificateMapCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpsProxiesSetCertificateMapCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpsProxiesSetCertificateMapCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxiessetcertificatemaprequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}/setCertificateMap") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetHttpsProxies.setCertificateMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesSetCertificateMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the Certificate Map for TargetHttpsProxy.", + // "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}/setCertificateMap", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.setCertificateMap", + // "parameterOrder": [ + // "project", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource whose CertificateMap is to be set. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}/setCertificateMap", + // "request": { + // "$ref": "TargetHttpsProxiesSetCertificateMapRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.targetHttpsProxies.setQuicOverride": type TargetHttpsProxiesSetQuicOverrideCall struct { @@ -160609,9 +181664,9 @@ type TargetHttpsProxiesSetQuicOverrideCall struct { // SetQuicOverride: Sets the QUIC override policy for TargetHttpsProxy. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set the -// QUIC override policy for. The name should conform to RFC1035. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set the +// QUIC override policy for. The name should conform to RFC1035. func (r *TargetHttpsProxiesService) SetQuicOverride(project string, targetHttpsProxy string, targethttpsproxiessetquicoverriderequest *TargetHttpsProxiesSetQuicOverrideRequest) *TargetHttpsProxiesSetQuicOverrideCall { c := &TargetHttpsProxiesSetQuicOverrideCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -160704,17 +181759,17 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -160785,9 +181840,9 @@ type TargetHttpsProxiesSetSslCertificatesCall struct { // SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an -// SslCertificates resource for. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an +// SslCertificates resource for. 
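The Do methods in these hunks now wrap non-2xx responses with gensupport.WrapError instead of returning a bare *googleapi.Error. A sketch of the caller-side expectation, assuming the wrapped error still unwraps to *googleapi.Error (resource names are placeholders).

// Sketch only: status-code checks written against older versions should keep
// working via errors.As after the switch to gensupport.WrapError.
package main

import (
	"context"
	"errors"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	_, err = svc.TargetHttpsProxies.Get("my-project", "missing-proxy").Context(ctx).Do()
	if err != nil {
		var gerr *googleapi.Error
		// The wrapped error is expected to unwrap to *googleapi.Error.
		if errors.As(err, &gerr) && gerr.Code == http.StatusNotFound {
			log.Println("proxy not found")
			return
		}
		log.Fatal(err)
	}
}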
func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHttpsProxy string, targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest) *TargetHttpsProxiesSetSslCertificatesCall { c := &TargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -160880,17 +181935,17 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -160966,10 +182021,10 @@ type TargetHttpsProxiesSetSslPolicyCall struct { // balancer. They do not affect the connection between the load balancer // and the backends. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose SSL -// policy is to be set. The name must be 1-63 characters long, and -// comply with RFC1035. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose SSL +// policy is to be set. The name must be 1-63 characters long, and +// comply with RFC1035. func (r *TargetHttpsProxiesService) SetSslPolicy(project string, targetHttpsProxy string, sslpolicyreference *SslPolicyReference) *TargetHttpsProxiesSetSslPolicyCall { c := &TargetHttpsProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -161062,17 +182117,17 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -161143,9 +182198,9 @@ type TargetHttpsProxiesSetUrlMapCall struct { // SetUrlMap: Changes the URL map for TargetHttpsProxy. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose URL -// map is to be set. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose URL +// map is to be set. 
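A short sketch of the SetUrlMap call documented above, using a UrlMapReference body and a cancellable context; the project, proxy and URL map values are placeholders.

// Sketch only: point a target HTTPS proxy at a different URL map, with a
// deadline so a hung request is aborted by the context.
package main

import (
	"context"
	"log"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	svc, err := compute.NewService(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	ref := &compute.UrlMapReference{
		UrlMap: "https://www.googleapis.com/compute/v1/projects/my-project/global/urlMaps/my-url-map",
	}
	op, err := svc.TargetHttpsProxies.SetUrlMap("my-project", "my-https-proxy", ref).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation %s: %s", op.Name, op.Status)
}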
func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy string, urlmapreference *UrlMapReference) *TargetHttpsProxiesSetUrlMapCall { c := &TargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -161238,17 +182293,17 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -161327,19 +182382,22 @@ func (r *TargetInstancesService) AggregatedList(project string) *TargetInstances } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -161347,7 +182405,17 @@ func (r *TargetInstancesService) AggregatedList(project string) *TargetInstances // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -161483,17 +182551,17 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetInstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -161516,7 +182584,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -161695,17 +182763,17 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -161872,17 +182940,17 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetInstance{ ServerResponse: googleapi.ServerResponse{ @@ -162050,17 +183118,17 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -162143,19 +183211,22 @@ func (r *TargetInstancesService) List(project string, zone string) *TargetInstan } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. 
The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -162163,7 +183234,17 @@ func (r *TargetInstancesService) List(project string, zone string) *TargetInstan // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall { c.urlParams_.Set("filter", filter) return c @@ -162287,17 +183368,17 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetInstanceList{ ServerResponse: googleapi.ServerResponse{ @@ -162321,7 +183402,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -162509,17 +183590,17 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -162697,17 +183778,17 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -162794,19 +183875,22 @@ func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregat } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. 
To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -162814,7 +183898,17 @@ func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregat // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -162950,17 +184044,17 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetPoolAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -162983,7 +184077,7 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -163162,17 +184256,17 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -163339,17 +184433,17 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetPool{ ServerResponse: googleapi.ServerResponse{ @@ -163424,10 +184518,10 @@ type TargetPoolsGetHealthCall struct { // GetHealth: Gets the most recent health check results for each IP for // the instance that is referenced by the given target pool. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetPool: Name of the TargetPool resource to which the queried -// instance belongs. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to which the queried +// instance belongs. func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall { c := &TargetPoolsGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -163506,17 +184600,17 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetPoolInstanceHealth{ ServerResponse: googleapi.ServerResponse{ @@ -163687,17 +184781,17 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -163780,19 +184874,22 @@ func (r *TargetPoolsService) List(project string, region string) *TargetPoolsLis } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. 
The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -163800,7 +184897,17 @@ func (r *TargetPoolsService) List(project string, region string) *TargetPoolsLis // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. 
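For orientation, the filter parameter documented above accepts both AIP-160-style comparisons and the legacy eq/ne regular-expression form. A minimal sketch of passing each style through the generated client follows; the project and region names are placeholders, and compute.NewService is assumed to pick up Application Default Credentials.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumes Application Default Credentials are available
	if err != nil {
		log.Fatal(err)
	}

	// AIP-160 style: field name, operator (=, !=, >, <, :, ...), value.
	pools, err := svc.TargetPools.List("my-project", "us-central1").
		Filter(`name != example-instance`).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("AIP-160 filter matched", len(pools.Items), "target pools")

	// Legacy regular-expression style: eq / ne with an RE2 literal that must
	// match the entire field, e.g. exclude names ending in "instance".
	pools, err = svc.TargetPools.List("my-project", "us-central1").
		Filter(`name ne .*instance`).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("regex filter matched", len(pools.Items), "target pools")
}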
func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { c.urlParams_.Set("filter", filter) return c @@ -163924,17 +185031,17 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetPoolList{ ServerResponse: googleapi.ServerResponse{ @@ -163958,7 +185065,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -164146,17 +185253,17 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -164237,10 +185344,10 @@ type TargetPoolsRemoveInstanceCall struct { // RemoveInstance: Removes instance URL from a target pool. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetPool: Name of the TargetPool resource to remove instances -// from. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to remove instances +// from. func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -164335,17 +185442,17 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -164426,10 +185533,10 @@ type TargetPoolsSetBackupCall struct { // SetBackup: Changes a backup target pool's configurations. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetPool: Name of the TargetPool resource to set a backup pool -// for. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to set a backup pool +// for. 
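The recurring change in these hunks routes every generated error through gensupport.WrapError. The concrete *googleapi.Error is still reachable from the returned error, so existing status-code checks keep working; a minimal sketch, with placeholder project, region, and pool names:

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	pool, err := svc.TargetPools.Get("my-project", "us-central1", "maybe-missing-pool").Context(ctx).Do()
	if err != nil {
		// errors.As still finds the *googleapi.Error behind the wrapped error,
		// so HTTP status checks written against older versions keep working.
		var apiErr *googleapi.Error
		if errors.As(err, &apiErr) && apiErr.Code == http.StatusNotFound {
			fmt.Println("target pool does not exist")
			return
		}
		// 304 responses (returned when IfNoneMatch matches) are still
		// recognized by googleapi.IsNotModified after the change.
		if googleapi.IsNotModified(err) {
			fmt.Println("resource unchanged since the cached ETag")
			return
		}
		log.Fatal(err)
	}
	fmt.Println("found target pool", pool.Name)
}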
func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -164531,17 +185638,17 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -164714,17 +185821,17 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -164879,17 +185986,17 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetSslProxy{ ServerResponse: googleapi.ServerResponse{ @@ -165045,17 +186152,17 @@ func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -165127,19 +186234,22 @@ func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -165147,7 +186257,17 @@ func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *TargetSslProxiesListCall) Filter(filter string) *TargetSslProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -165270,17 +186390,17 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetSslProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -165303,7 +186423,7 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -165386,9 +186506,9 @@ type TargetSslProxiesSetBackendServiceCall struct { // SetBackendService: Changes the BackendService for TargetSslProxy. // -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose -// BackendService resource is to be set. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// BackendService resource is to be set. func (r *TargetSslProxiesService) SetBackendService(project string, targetSslProxy string, targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest) *TargetSslProxiesSetBackendServiceCall { c := &TargetSslProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -165481,17 +186601,17 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -165549,6 +186669,183 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) } +// method id "compute.targetSslProxies.setCertificateMap": + +type TargetSslProxiesSetCertificateMapCall struct { + s *Service + project string + targetSslProxy string + targetsslproxiessetcertificatemaprequest *TargetSslProxiesSetCertificateMapRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetCertificateMap: Changes the Certificate Map for TargetSslProxy. +// +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// CertificateMap is to be set. The name must be 1-63 characters long, +// and comply with RFC1035. +func (r *TargetSslProxiesService) SetCertificateMap(project string, targetSslProxy string, targetsslproxiessetcertificatemaprequest *TargetSslProxiesSetCertificateMapRequest) *TargetSslProxiesSetCertificateMapCall { + c := &TargetSslProxiesSetCertificateMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetSslProxy = targetSslProxy + c.targetsslproxiessetcertificatemaprequest = targetsslproxiessetcertificatemaprequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). 
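A sketch of how the requestId parameter described above can make retries of the new SetCertificateMap call idempotent. The UUID, project, proxy, and certificate-map names are placeholders, and the CertificateMap field on the request struct is assumed from the API's certificateMap property; real code would also back off between attempts.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// retrySetCertificateMap reuses one request ID across attempts so that an
// attempt which timed out but actually succeeded is not applied twice.
func retrySetCertificateMap(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	req := &compute.TargetSslProxiesSetCertificateMapRequest{
		// Placeholder certificate map resource URL.
		CertificateMap: "//certificatemanager.googleapis.com/projects/my-project/locations/global/certificateMaps/my-map",
	}
	// Placeholder UUID; any valid non-zero UUID generated once per logical
	// request works.
	const requestID = "3f2b6e0a-8c1d-4c6e-9a5e-2d7f0c4b1a90"

	var lastErr error
	for attempt := 0; attempt < 3; attempt++ {
		op, err := svc.TargetSslProxies.
			SetCertificateMap("my-project", "my-ssl-proxy", req).
			RequestId(requestID).
			Context(ctx).
			Do()
		if err == nil {
			return op, nil
		}
		lastErr = err // a real caller would back off before retrying
	}
	return nil, lastErr
}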
+func (c *TargetSslProxiesSetCertificateMapCall) RequestId(requestId string) *TargetSslProxiesSetCertificateMapCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetSslProxiesSetCertificateMapCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetCertificateMapCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetSslProxiesSetCertificateMapCall) Context(ctx context.Context) *TargetSslProxiesSetCertificateMapCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetSslProxiesSetCertificateMapCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetSslProxiesSetCertificateMapCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxiessetcertificatemaprequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/targetSslProxies/{targetSslProxy}/setCertificateMap") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetSslProxy": c.targetSslProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetSslProxies.setCertificateMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetSslProxiesSetCertificateMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the Certificate Map for TargetSslProxy.", + // "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setCertificateMap", + // "httpMethod": "POST", + // "id": "compute.targetSslProxies.setCertificateMap", + // "parameterOrder": [ + // "project", + // "targetSslProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetSslProxy": { + // "description": "Name of the TargetSslProxy resource whose CertificateMap is to be set. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setCertificateMap", + // "request": { + // "$ref": "TargetSslProxiesSetCertificateMapRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.targetSslProxies.setProxyHeader": type TargetSslProxiesSetProxyHeaderCall struct { @@ -165563,9 +186860,9 @@ type TargetSslProxiesSetProxyHeaderCall struct { // SetProxyHeader: Changes the ProxyHeaderType for TargetSslProxy. // -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose -// ProxyHeader is to be set. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// ProxyHeader is to be set. 
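The mutating calls in these hunks (SetCertificateMap, SetProxyHeader, SetSslCertificates, SetSslPolicy) all return an asynchronous Operation. A sketch of waiting for such a global operation to complete, assuming the GlobalOperations.Wait call from the same generated package (it is not part of the hunks shown here) and a placeholder project name:

package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// waitForGlobalOperation polls the operation returned by a global target-proxy
// mutation until it reaches DONE, then surfaces any operation-level error.
func waitForGlobalOperation(svc *compute.Service, project string, op *compute.Operation) error {
	for op.Status != "DONE" {
		var err error
		// Wait blocks for up to roughly two minutes per call and returns the
		// latest state of the operation.
		op, err = svc.GlobalOperations.Wait(project, op.Name).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil && len(op.Error.Errors) > 0 {
		return fmt.Errorf("operation %s failed: %s", op.Name, op.Error.Errors[0].Message)
	}
	return nil
}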
func (r *TargetSslProxiesService) SetProxyHeader(project string, targetSslProxy string, targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest) *TargetSslProxiesSetProxyHeaderCall { c := &TargetSslProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -165658,17 +186955,17 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -165740,9 +187037,9 @@ type TargetSslProxiesSetSslCertificatesCall struct { // SetSslCertificates: Changes SslCertificates for TargetSslProxy. // -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose -// SslCertificate resource is to be set. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// SslCertificate resource is to be set. func (r *TargetSslProxiesService) SetSslCertificates(project string, targetSslProxy string, targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest) *TargetSslProxiesSetSslCertificatesCall { c := &TargetSslProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -165835,17 +187132,17 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -165920,10 +187217,10 @@ type TargetSslProxiesSetSslPolicyCall struct { // connections between clients and the SSL proxy load balancer. They do // not affect the connection between the load balancer and the backends. // -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose SSL -// policy is to be set. The name must be 1-63 characters long, and -// comply with RFC1035. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose SSL +// policy is to be set. The name must be 1-63 characters long, and +// comply with RFC1035. 
func (r *TargetSslProxiesService) SetSslPolicy(project string, targetSslProxy string, sslpolicyreference *SslPolicyReference) *TargetSslProxiesSetSslPolicyCall { c := &TargetSslProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -166016,17 +187313,17 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -166083,6 +187380,304 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op } +// method id "compute.targetTcpProxies.aggregatedList": + +type TargetTcpProxiesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all TargetTcpProxy resources, +// regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. +func (r *TargetTcpProxiesService) AggregatedList(project string) *TargetTcpProxiesAggregatedListCall { + c := &TargetTcpProxiesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *TargetTcpProxiesAggregatedListCall) Filter(filter string) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *TargetTcpProxiesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *TargetTcpProxiesAggregatedListCall) MaxResults(maxResults int64) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *TargetTcpProxiesAggregatedListCall) OrderBy(orderBy string) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *TargetTcpProxiesAggregatedListCall) PageToken(pageToken string) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. 
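A sketch of issuing a single page of the new TargetTcpProxies aggregated list with the optional parameters documented above. All literal values are illustrative, and the Items map plus the TargetTcpProxies field on each scoped list are assumed from the generator's usual naming.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listTargetTcpProxiesEverywhere fetches one aggregated-list page using the
// optional parameters documented above; all literal values are illustrative.
func listTargetTcpProxiesEverywhere(ctx context.Context, svc *compute.Service, project string) error {
	resp, err := svc.TargetTcpProxies.AggregatedList(project).
		Filter(`name ne .*-canary`).       // regex filter: drop proxies whose name ends in -canary
		IncludeAllScopes(false).           // only scopes where this resource type can exist
		MaxResults(100).                   // page size; the default and maximum are 500
		OrderBy("creationTimestamp desc"). // newest resources first
		ReturnPartialSuccess(true).        // tolerate individually unreachable scopes
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for scope, scoped := range resp.Items {
		fmt.Printf("%s: %d target TCP proxies\n", scope, len(scoped.TargetTcpProxies))
	}
	if resp.NextPageToken != "" {
		fmt.Println("more results: pass NextPageToken to PageToken, or use Pages")
	}
	return nil
}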
+func (c *TargetTcpProxiesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetTcpProxiesAggregatedListCall) Fields(s ...googleapi.Field) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetTcpProxiesAggregatedListCall) IfNoneMatch(entityTag string) *TargetTcpProxiesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetTcpProxiesAggregatedListCall) Context(ctx context.Context) *TargetTcpProxiesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetTcpProxiesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetTcpProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/targetTcpProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetTcpProxies.aggregatedList" call. +// Exactly one of *TargetTcpProxyAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *TargetTcpProxyAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetTcpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxyAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TargetTcpProxyAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all TargetTcpProxy resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/targetTcpProxies", + // "httpMethod": "GET", + // "id": "compute.targetTcpProxies.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/targetTcpProxies", + // "response": { + // "$ref": "TargetTcpProxyAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
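The Pages helper documented above and defined just below drives pageToken/nextPageToken automatically. A minimal sketch of walking every page of the aggregated list with it; as in the previous sketch, the Items and TargetTcpProxies field names are assumed.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// countAllTargetTcpProxies walks every page of the aggregated list with the
// generated Pages helper instead of managing page tokens by hand.
func countAllTargetTcpProxies(ctx context.Context, svc *compute.Service, project string) (int, error) {
	total := 0
	err := svc.TargetTcpProxies.AggregatedList(project).
		Pages(ctx, func(page *compute.TargetTcpProxyAggregatedList) error {
			for _, scoped := range page.Items {
				total += len(scoped.TargetTcpProxies)
			}
			return nil // returning a non-nil error here stops the iteration
		})
	return total, err
}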
+func (c *TargetTcpProxiesAggregatedListCall) Pages(ctx context.Context, f func(*TargetTcpProxyAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.targetTcpProxies.delete": type TargetTcpProxiesDeleteCall struct { @@ -166184,17 +187779,17 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -166349,17 +187944,17 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetTcpProxy{ ServerResponse: googleapi.ServerResponse{ @@ -166515,17 +188110,17 @@ func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -166597,19 +188192,22 @@ func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. 
If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -166617,7 +188215,17 @@ func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *TargetTcpProxiesListCall) Filter(filter string) *TargetTcpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -166740,17 +188348,17 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetTcpProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -166773,7 +188381,7 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -166856,9 +188464,9 @@ type TargetTcpProxiesSetBackendServiceCall struct { // SetBackendService: Changes the BackendService for TargetTcpProxy. // -// - project: Project ID for this request. 
-// - targetTcpProxy: Name of the TargetTcpProxy resource whose -// BackendService resource is to be set. +// - project: Project ID for this request. +// - targetTcpProxy: Name of the TargetTcpProxy resource whose +// BackendService resource is to be set. func (r *TargetTcpProxiesService) SetBackendService(project string, targetTcpProxy string, targettcpproxiessetbackendservicerequest *TargetTcpProxiesSetBackendServiceRequest) *TargetTcpProxiesSetBackendServiceCall { c := &TargetTcpProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -166951,17 +188559,17 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -167033,9 +188641,9 @@ type TargetTcpProxiesSetProxyHeaderCall struct { // SetProxyHeader: Changes the ProxyHeaderType for TargetTcpProxy. // -// - project: Project ID for this request. -// - targetTcpProxy: Name of the TargetTcpProxy resource whose -// ProxyHeader is to be set. +// - project: Project ID for this request. +// - targetTcpProxy: Name of the TargetTcpProxy resource whose +// ProxyHeader is to be set. func (r *TargetTcpProxiesService) SetProxyHeader(project string, targetTcpProxy string, targettcpproxiessetproxyheaderrequest *TargetTcpProxiesSetProxyHeaderRequest) *TargetTcpProxiesSetProxyHeaderCall { c := &TargetTcpProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -167128,17 +188736,17 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -167217,19 +188825,22 @@ func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGate } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -167237,7 +188848,17 @@ func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGate // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *TargetVpnGatewaysAggregatedListCall) Filter(filter string) *TargetVpnGatewaysAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -167373,17 +188994,17 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetVpnGatewayAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -167406,7 +189027,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -167585,17 +189206,17 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -167762,17 +189383,17 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetVpnGateway{ ServerResponse: googleapi.ServerResponse{ @@ -167940,17 +189561,17 @@ func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -168033,19 +189654,22 @@ func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVp } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. 
The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -168053,7 +189677,17 @@ func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVp // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *TargetVpnGatewaysListCall) Filter(filter string) *TargetVpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c @@ -168177,17 +189811,17 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetVpnGatewayList{ ServerResponse: googleapi.ServerResponse{ @@ -168211,7 +189845,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -168287,6 +189921,195 @@ func (c *TargetVpnGatewaysListCall) Pages(ctx context.Context, f func(*TargetVpn } } +// method id "compute.targetVpnGateways.setLabels": + +type TargetVpnGatewaysSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a TargetVpnGateway. To learn more about +// labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. 
+func (r *TargetVpnGatewaysService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *TargetVpnGatewaysSetLabelsCall { + c := &TargetVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *TargetVpnGatewaysSetLabelsCall) RequestId(requestId string) *TargetVpnGatewaysSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetVpnGatewaysSetLabelsCall) Context(ctx context.Context) *TargetVpnGatewaysSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetVpnGatewaysSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetVpnGateways.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a TargetVpnGateway. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.targetVpnGateways.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.urlMaps.aggregatedList": type UrlMapsAggregatedListCall struct { @@ -168309,19 +190132,22 @@ func (r *UrlMapsService) AggregatedList(project string) *UrlMapsAggregatedListCa } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -168329,7 +190155,17 @@ func (r *UrlMapsService) AggregatedList(project string) *UrlMapsAggregatedListCa // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *UrlMapsAggregatedListCall) Filter(filter string) *UrlMapsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -168465,17 +190301,17 @@ func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAg if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapsAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -168498,7 +190334,7 @@ func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAg // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -168673,17 +190509,17 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -168838,17 +190674,17 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMap{ ServerResponse: googleapi.ServerResponse{ @@ -169004,17 +190840,17 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -169175,17 +191011,17 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -169265,19 +191101,22 @@ func (r *UrlMapsService) List(project string) *UrlMapsListCall { } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -169285,7 +191124,17 @@ func (r *UrlMapsService) List(project string) *UrlMapsListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall { c.urlParams_.Set("filter", filter) return c @@ -169408,17 +191257,17 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapList{ ServerResponse: googleapi.ServerResponse{ @@ -169441,7 +191290,7 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -169620,17 +191469,17 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -169797,17 +191646,17 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -169959,17 +191808,17 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapsValidateResponse{ ServerResponse: googleapi.ServerResponse{ @@ -170043,19 +191892,22 @@ func (r *VpnGatewaysService) AggregatedList(project string) *VpnGatewaysAggregat } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. 
The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -170063,7 +191915,17 @@ func (r *VpnGatewaysService) AggregatedList(project string) *VpnGatewaysAggregat // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *VpnGatewaysAggregatedListCall) Filter(filter string) *VpnGatewaysAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -170199,17 +192061,17 @@ func (c *VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGa if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnGatewayAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -170232,7 +192094,7 @@ func (c *VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGa // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -170411,17 +192273,17 @@ func (c *VpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -170588,17 +192450,17 @@ func (c *VpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*VpnGateway, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnGateway{ ServerResponse: googleapi.ServerResponse{ @@ -170760,17 +192622,17 @@ func (c *VpnGatewaysGetStatusCall) Do(opts ...googleapi.CallOption) (*VpnGateway if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnGatewaysGetStatusResponse{ ServerResponse: googleapi.ServerResponse{ @@ -170938,17 +192800,17 @@ func (c *VpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -171031,19 +192893,22 @@ func (r *VpnGatewaysService) List(project string, region string) *VpnGatewaysLis } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. 
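As a hedged illustration of the two filter styles this doc comment describes, a caller of the generated client might combine an AIP-160 expression with an `ne` regular-expression filter roughly as follows; the project ID, region, and gateway names are placeholders, and credentials are assumed to come from Application Default Credentials.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// AIP-160 style: exclude one gateway by exact name.
	byName, err := svc.VpnGateways.List("my-project", "us-central1").
		Filter(`name != example-gateway`).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, gw := range byName.Items {
		fmt.Println("kept:", gw.Name)
	}

	// Regular-expression style: `ne` with an RE2 pattern; the pattern must
	// match the entire field, so this drops gateways whose names end in "-test".
	byRegex, err := svc.VpnGateways.List("my-project", "us-central1").
		Filter(`name ne .*-test`).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(byRegex.Items), "gateways kept by the regex filter")
}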
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -171051,7 +192916,17 @@ func (r *VpnGatewaysService) List(project string, region string) *VpnGatewaysLis // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *VpnGatewaysListCall) Filter(filter string) *VpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c @@ -171175,17 +193050,17 @@ func (c *VpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayList, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnGatewayList{ ServerResponse: googleapi.ServerResponse{ @@ -171209,7 +193084,7 @@ func (c *VpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayList, // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -171398,17 +193273,17 @@ func (c *VpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -171571,17 +193446,17 @@ func (c *VpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -171664,19 +193539,22 @@ func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregated } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. 
You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -171684,7 +193562,17 @@ func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregated // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *VpnTunnelsAggregatedListCall) Filter(filter string) *VpnTunnelsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -171820,17 +193708,17 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnTunnelAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -171853,7 +193741,7 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
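For aggregated listings like this one, the generated Pages helper drives the page tokens; a minimal sketch, assuming a placeholder project and an illustrative status filter.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// The callback runs once per page; pagination tokens are handled internally.
	call := svc.VpnTunnels.AggregatedList("my-project").Filter(`status = ESTABLISHED`)
	err = call.Pages(ctx, func(page *compute.VpnTunnelAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, t := range scoped.VpnTunnels {
				fmt.Printf("%s: %s (%s)\n", scope, t.Name, t.Status)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}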
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -172032,17 +193920,17 @@ func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -172209,17 +194097,17 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnTunnel{ ServerResponse: googleapi.ServerResponse{ @@ -172387,17 +194275,17 @@ func (c *VpnTunnelsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -172480,19 +194368,22 @@ func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListC } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. 
The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -172500,7 +194391,17 @@ func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListC // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *VpnTunnelsListCall) Filter(filter string) *VpnTunnelsListCall { c.urlParams_.Set("filter", filter) return c @@ -172624,17 +194525,17 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnTunnelList{ ServerResponse: googleapi.ServerResponse{ @@ -172658,7 +194559,7 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -172734,6 +194635,195 @@ func (c *VpnTunnelsListCall) Pages(ctx context.Context, f func(*VpnTunnelList) e } } +// method id "compute.vpnTunnels.setLabels": + +type VpnTunnelsSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a VpnTunnel. To learn more about +// labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. 
+func (r *VpnTunnelsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *VpnTunnelsSetLabelsCall { + c := &VpnTunnelsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *VpnTunnelsSetLabelsCall) RequestId(requestId string) *VpnTunnelsSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnTunnelsSetLabelsCall) Fields(s ...googleapi.Field) *VpnTunnelsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnTunnelsSetLabelsCall) Context(ctx context.Context) *VpnTunnelsSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnTunnelsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnTunnelsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnTunnels.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnTunnelsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a VpnTunnel. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.vpnTunnels.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.zoneOperations.delete": type ZoneOperationsDeleteCall struct { @@ -172818,7 +194908,7 @@ func (c *ZoneOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -172966,17 +195056,17 @@ func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -173060,19 +195150,22 @@ func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperation } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. 
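A sketch of calling the compute.vpnTunnels.setLabels method generated above; the project, region, tunnel name, labels, and label fingerprint are placeholders (the fingerprint must match the value the API currently reports for the resource), and the fixed request ID stands in for a UUID a real caller would generate once per mutation and reuse on retries.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	req := &compute.RegionSetLabelsRequest{
		// Labels are replaced wholesale; the fingerprint guards against
		// overwriting a concurrent change and comes from reading the resource.
		LabelFingerprint: "placeholder-fingerprint",
		Labels:           map[string]string{"env": "prod", "team": "networking"},
	}

	op, err := svc.VpnTunnels.SetLabels("my-project", "us-central1", "my-tunnel", req).
		RequestId("3e0c7a4f-6f1d-4c9a-9b52-0d6a3f6f2e11"). // reused on retries so the server deduplicates
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, op.Status)
}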
The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -173080,7 +195173,17 @@ func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperation // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *ZoneOperationsListCall) Filter(filter string) *ZoneOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -173204,17 +195307,17 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ @@ -173238,7 +195341,7 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, @@ -173413,17 +195516,17 @@ func (c *ZoneOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -173582,17 +195685,17 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Zone{ ServerResponse: googleapi.ServerResponse{ @@ -173665,19 +195768,22 @@ func (r *ZonesService) List(project string) *ZonesListCall { } // Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, an operator, and the value that you want to use for -// filtering. The value must be a string, a number, or a boolean. The -// operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`. For -// example, if you are filtering Compute Engine instances, you can -// exclude instances named `example-instance` by specifying `name != -// example-instance`. The `:` operator can be used with string fields to -// match substrings. For non-string fields it is equivalent to the `=` -// operator. The `:*` comparison can be used to test whether a key has -// been defined. For example, to find all objects with `owner` label -// use: ``` labels.owner:* ``` You can also filter nested fields. For -// example, you could specify `scheduling.automaticRestart = false` to -// include instances only if they are not scheduled for automatic +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic // restarts. You can use filtering on nested fields to filter based on // resource labels. 
To filter on multiple expressions, provide each // separate expression within parentheses. For example: ``` @@ -173685,7 +195791,17 @@ func (r *ZonesService) List(project string) *ZonesListCall { // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` // (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. func (c *ZonesListCall) Filter(filter string) *ZonesListCall { c.urlParams_.Set("filter", filter) return c @@ -173808,17 +195924,17 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ZoneList{ ServerResponse: googleapi.ServerResponse{ @@ -173841,7 +195957,7 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", // "location": "query", // "type": "string" // }, diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 2d3e00edc9f2..65b125abd2cf 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -79,6 +79,9 @@ type Error struct { Header http.Header Errors []ErrorItem + // err is typically a wrapped apierror.APIError, see + // google-api-go-client/internal/gensupport/error.go. + err error } // ErrorItem is a detailed error code & message from the Google API frontend. @@ -122,6 +125,15 @@ func (e *Error) Error() string { return buf.String() } +// Wrap allows an existing Error to wrap another error. See also [Error.Unwrap]. 
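Because the Do methods now return errors through gensupport.WrapError and *googleapi.Error gains Wrap/Unwrap in this hunk, existing error handling continues to work with errors.As; a sketch (placeholder project and zone) that inspects a failed call both as a *googleapi.Error and, where the wrapping applies, as an *apierror.APIError.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"net/http"

	"github.com/googleapis/gax-go/v2/apierror"
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	_, err = svc.Zones.Get("my-project", "no-such-zone").Do()
	if err == nil {
		fmt.Println("unexpectedly found the zone")
		return
	}

	// The transport-level view is unchanged: errors.As still finds the
	// *googleapi.Error even though it is now returned wrapped.
	var gerr *googleapi.Error
	if errors.As(err, &gerr) && gerr.Code == http.StatusNotFound {
		fmt.Println("zone does not exist:", gerr.Message)
	}

	// The same error chain can also surface an *apierror.APIError; Reason
	// may be empty for plain HTTP errors without structured details.
	var aerr *apierror.APIError
	if errors.As(err, &aerr) {
		fmt.Println("error reason:", aerr.Reason())
	}
}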
+func (e *Error) Wrap(err error) { + e.err = err +} + +func (e *Error) Unwrap() error { + return e.err +} + type errorReply struct { Error *Error `json:"error"` } @@ -382,11 +394,11 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { // For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, // you could request just those fields like this: // -// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// svc.Events.List().Fields("nextPageToken", "items/id").Do() // // or if you were also interested in each Item's "Updated" field, you can combine them like this: // -// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() // // Another way to find field names is through the Google API explorer: // https://developers.google.com/apis-explorer/#p/ diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go index 61720ec2ea13..f5d826c2a19d 100644 --- a/vendor/google.golang.org/api/googleapi/transport/apikey.go +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -7,7 +7,7 @@ // // This package is DEPRECATED. Users should instead use, // -// service, err := NewService(..., option.WithAPIKey(...)) +// service, err := NewService(..., option.WithAPIKey(...)) package transport import ( diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json new file mode 100644 index 000000000000..0c82b86a1008 --- /dev/null +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json @@ -0,0 +1,372 @@ +{ + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account." + } + } + } + }, + "basePath": "", + "baseUrl": "https://iamcredentials.googleapis.com/", + "batchPath": "batch", + "canonicalName": "IAM Credentials", + "description": "Creates short-lived credentials for impersonating IAM service accounts. To enable this API, you must enable the IAM API (iam.googleapis.com). 
", + "discoveryVersion": "v1", + "documentationLink": "https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials", + "fullyEncodeReservedExpansion": true, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "id": "iamcredentials:v1", + "kind": "discovery#restDescription", + "mtlsRootUrl": "https://iamcredentials.mtls.googleapis.com/", + "name": "iamcredentials", + "ownerDomain": "google.com", + "ownerName": "Google", + "parameters": { + "$.xgafv": { + "description": "V1 error format.", + "enum": [ + "1", + "2" + ], + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "location": "query", + "type": "string" + }, + "alt": { + "default": "json", + "description": "Data format for response.", + "enum": [ + "json", + "media", + "proto" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "type": "string" + }, + "callback": { + "description": "JSONP", + "location": "query", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", + "type": "string" + }, + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query", + "type": "string" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "location": "query", + "type": "string" + } + }, + "protocol": "rest", + "resources": { + "projects": { + "resources": { + "serviceAccounts": { + "methods": { + "generateAccessToken": { + "description": "Generates an OAuth 2.0 access token for a service account.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateAccessToken", + "httpMethod": "POST", + "id": "iamcredentials.projects.serviceAccounts.generateAccessToken", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:generateAccessToken", + "request": { + "$ref": "GenerateAccessTokenRequest" + }, + "response": { + "$ref": "GenerateAccessTokenResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "generateIdToken": { + "description": "Generates an OpenID Connect ID token for a service account.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateIdToken", + "httpMethod": "POST", + "id": "iamcredentials.projects.serviceAccounts.generateIdToken", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:generateIdToken", + "request": { + "$ref": "GenerateIdTokenRequest" + }, + "response": { + "$ref": "GenerateIdTokenResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "signBlob": { + "description": "Signs a blob using a service account's system-managed private key.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", + "httpMethod": "POST", + "id": "iamcredentials.projects.serviceAccounts.signBlob", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:signBlob", + "request": { + "$ref": "SignBlobRequest" + }, + "response": { + "$ref": "SignBlobResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "signJwt": { + "description": "Signs a JWT using a service account's system-managed private key.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", + "httpMethod": "POST", + "id": "iamcredentials.projects.serviceAccounts.signJwt", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:signJwt", + "request": { + "$ref": "SignJwtRequest" + }, + "response": { + "$ref": "SignJwtResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + }, + "revision": "20211203", + "rootUrl": "https://iamcredentials.googleapis.com/", + "schemas": { + "GenerateAccessTokenRequest": { + "id": "GenerateAccessTokenRequest", + "properties": { + "delegates": { + "description": "The sequence of service accounts in a delegation chain. This field is required for [delegated requests](https://cloud.google.com/iam/help/credentials/delegated-request). For [direct requests](https://cloud.google.com/iam/help/credentials/direct-request), which are more common, do not specify this field. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "items": { + "type": "string" + }, + "type": "array" + }, + "lifetime": { + "description": "The desired lifetime duration of the access token in seconds. By default, the maximum allowed value is 1 hour. To set a lifetime of up to 12 hours, you can add the service account as an allowed value in an Organization Policy that enforces the `constraints/iam.allowServiceAccountCredentialLifetimeExtension` constraint. See detailed instructions at https://cloud.google.com/iam/help/credentials/lifetime If a value is not specified, the token's lifetime will be set to a default value of 1 hour.", + "format": "google-duration", + "type": "string" + }, + "scope": { + "description": "Required. Code to identify the scopes to be included in the OAuth 2.0 access token. See https://developers.google.com/identity/protocols/googlescopes for more information. At least one value required.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GenerateAccessTokenResponse": { + "id": "GenerateAccessTokenResponse", + "properties": { + "accessToken": { + "description": "The OAuth 2.0 access token.", + "type": "string" + }, + "expireTime": { + "description": "Token expiration time. The expiration time is always set.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "GenerateIdTokenRequest": { + "id": "GenerateIdTokenRequest", + "properties": { + "audience": { + "description": "Required. The audience for the token, such as the API or account that this token grants access to.", + "type": "string" + }, + "delegates": { + "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", + "items": { + "type": "string" + }, + "type": "array" + }, + "includeEmail": { + "description": "Include the service account email in the token. If set to `true`, the token will contain `email` and `email_verified` claims.", + "type": "boolean" + } + }, + "type": "object" + }, + "GenerateIdTokenResponse": { + "id": "GenerateIdTokenResponse", + "properties": { + "token": { + "description": "The OpenId Connect ID token.", + "type": "string" + } + }, + "type": "object" + }, + "SignBlobRequest": { + "id": "SignBlobRequest", + "properties": { + "delegates": { + "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "items": { + "type": "string" + }, + "type": "array" + }, + "payload": { + "description": "Required. The bytes to sign.", + "format": "byte", + "type": "string" + } + }, + "type": "object" + }, + "SignBlobResponse": { + "id": "SignBlobResponse", + "properties": { + "keyId": { + "description": "The ID of the key used to sign the blob. The key used for signing will remain valid for at least 12 hours after the blob is signed. To verify the signature, you can retrieve the public key in several formats from the following endpoints: - RSA public key wrapped in an X.509 v3 certificate: `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL}` - Raw key in JSON format: `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` - JSON Web Key (JWK): `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}`", + "type": "string" + }, + "signedBlob": { + "description": "The signature for the blob. Does not include the original blob. After the key pair referenced by the `key_id` response field expires, Google no longer exposes the public key that can be used to verify the blob. As a result, the receiver can no longer verify the signature.", + "format": "byte", + "type": "string" + } + }, + "type": "object" + }, + "SignJwtRequest": { + "id": "SignJwtRequest", + "properties": { + "delegates": { + "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "items": { + "type": "string" + }, + "type": "array" + }, + "payload": { + "description": "Required. The JWT payload to sign. Must be a serialized JSON object that contains a JWT Claims Set. 
For example: `{\"sub\": \"user@example.com\", \"iat\": 313435}` If the JWT Claims Set contains an expiration time (`exp`) claim, it must be an integer timestamp that is not in the past and no more than 12 hours in the future.", + "type": "string" + } + }, + "type": "object" + }, + "SignJwtResponse": { + "id": "SignJwtResponse", + "properties": { + "keyId": { + "description": "The ID of the key used to sign the JWT. The key used for signing will remain valid for at least 12 hours after the JWT is signed. To verify the signature, you can retrieve the public key in several formats from the following endpoints: - RSA public key wrapped in an X.509 v3 certificate: `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL}` - Raw key in JSON format: `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` - JSON Web Key (JWK): `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}`", + "type": "string" + }, + "signedJwt": { + "description": "The signed JWT. Contains the automatically generated header; the client-supplied payload; and the signature, which is generated using the key referenced by the `kid` field in the header. After the key pair referenced by the `key_id` response field expires, Google no longer exposes the public key that can be used to verify the JWT. As a result, the receiver can no longer verify the signature.", + "type": "string" + } + }, + "type": "object" + } + }, + "servicePath": "", + "title": "IAM Service Account Credentials API", + "version": "v1", + "version_module": true +} \ No newline at end of file diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go new file mode 100644 index 000000000000..63e5ef64c0dc --- /dev/null +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -0,0 +1,1094 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated file. DO NOT EDIT. + +// Package iamcredentials provides access to the IAM Service Account Credentials API. +// +// For product documentation, see: https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials +// +// # Creating a client +// +// Usage example: +// +// import "google.golang.org/api/iamcredentials/v1" +// ... +// ctx := context.Background() +// iamcredentialsService, err := iamcredentials.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// # Other authentication options +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. 
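+//
+// # Using the service
+//
+// A hedged sketch (the service account email below is illustrative): generated
+// calls chain a resource getter, a method, and Do:
+//
+//	name := "projects/-/serviceAccounts/sa@my-project.iam.gserviceaccount.com"
+//	resp, err := iamcredentialsService.Projects.ServiceAccounts.GenerateAccessToken(name,
+//		&iamcredentials.GenerateAccessTokenRequest{
+//			Scope: []string{"https://www.googleapis.com/auth/cloud-platform"},
+//		}).Do()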
+package iamcredentials // import "google.golang.org/api/iamcredentials/v1" + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + googleapi "google.golang.org/api/googleapi" + internal "google.golang.org/api/internal" + gensupport "google.golang.org/api/internal/gensupport" + option "google.golang.org/api/option" + internaloption "google.golang.org/api/option/internaloption" + htransport "google.golang.org/api/transport/http" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = internaloption.WithDefaultEndpoint + +const apiId = "iamcredentials:v1" +const apiName = "iamcredentials" +const apiVersion = "v1" +const basePath = "https://iamcredentials.googleapis.com/" +const mtlsBasePath = "https://iamcredentials.mtls.googleapis.com/" + +// OAuth2 scopes used by this API. +const ( + // See, edit, configure, and delete your Google Cloud data and see the + // email address for your Google Account. + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" +) + +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := internaloption.WithDefaultScopes( + "https://www.googleapis.com/auth/cloud-platform", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
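+//
+// For example (a sketch; httpClient is an assumed, caller-supplied *http.Client):
+//
+//	svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(httpClient))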
+func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.ServiceAccounts = NewProjectsServiceAccountsService(s) + return rs +} + +type ProjectsService struct { + s *Service + + ServiceAccounts *ProjectsServiceAccountsService +} + +func NewProjectsServiceAccountsService(s *Service) *ProjectsServiceAccountsService { + rs := &ProjectsServiceAccountsService{s: s} + return rs +} + +type ProjectsServiceAccountsService struct { + s *Service +} + +type GenerateAccessTokenRequest struct { + // Delegates: The sequence of service accounts in a delegation chain. + // This field is required for delegated requests + // (https://cloud.google.com/iam/help/credentials/delegated-request). + // For direct requests + // (https://cloud.google.com/iam/help/credentials/direct-request), which + // are more common, do not specify this field. Each service account must + // be granted the `roles/iam.serviceAccountTokenCreator` role on its + // next service account in the chain. The last service account in the + // chain must be granted the `roles/iam.serviceAccountTokenCreator` role + // on the service account that is specified in the `name` field of the + // request. The delegates must have the following format: + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` + // wildcard character is required; replacing it with a project ID is + // invalid. + Delegates []string `json:"delegates,omitempty"` + + // Lifetime: The desired lifetime duration of the access token in + // seconds. By default, the maximum allowed value is 1 hour. To set a + // lifetime of up to 12 hours, you can add the service account as an + // allowed value in an Organization Policy that enforces the + // `constraints/iam.allowServiceAccountCredentialLifetimeExtension` + // constraint. See detailed instructions at + // https://cloud.google.com/iam/help/credentials/lifetime If a value is + // not specified, the token's lifetime will be set to a default value of + // 1 hour. + Lifetime string `json:"lifetime,omitempty"` + + // Scope: Required. Code to identify the scopes to be included in the + // OAuth 2.0 access token. See + // https://developers.google.com/identity/protocols/googlescopes for + // more information. At least one value required. + Scope []string `json:"scope,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Delegates") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Delegates") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GenerateAccessTokenRequest) MarshalJSON() ([]byte, error) { + type NoMethod GenerateAccessTokenRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GenerateAccessTokenResponse struct { + // AccessToken: The OAuth 2.0 access token. + AccessToken string `json:"accessToken,omitempty"` + + // ExpireTime: Token expiration time. The expiration time is always set. + ExpireTime string `json:"expireTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AccessToken") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccessToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GenerateAccessTokenResponse) MarshalJSON() ([]byte, error) { + type NoMethod GenerateAccessTokenResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GenerateIdTokenRequest struct { + // Audience: Required. The audience for the token, such as the API or + // account that this token grants access to. + Audience string `json:"audience,omitempty"` + + // Delegates: The sequence of service accounts in a delegation chain. + // Each service account must be granted the + // `roles/iam.serviceAccountTokenCreator` role on its next service + // account in the chain. The last service account in the chain must be + // granted the `roles/iam.serviceAccountTokenCreator` role on the + // service account that is specified in the `name` field of the request. + // The delegates must have the following format: + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` + // wildcard character is required; replacing it with a project ID is + // invalid. + Delegates []string `json:"delegates,omitempty"` + + // IncludeEmail: Include the service account email in the token. If set + // to `true`, the token will contain `email` and `email_verified` + // claims. + IncludeEmail bool `json:"includeEmail,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Audience") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
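+ //
+ // A hedged sketch: to send IncludeEmail even when it is false, list the
+ // field name here:
+ //
+ //	req := &GenerateIdTokenRequest{
+ //		Audience:        "https://example.com",
+ //		ForceSendFields: []string{"IncludeEmail"},
+ //	}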
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Audience") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GenerateIdTokenRequest) MarshalJSON() ([]byte, error) { + type NoMethod GenerateIdTokenRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GenerateIdTokenResponse struct { + // Token: The OpenId Connect ID token. + Token string `json:"token,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Token") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Token") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { + type NoMethod GenerateIdTokenResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SignBlobRequest struct { + // Delegates: The sequence of service accounts in a delegation chain. + // Each service account must be granted the + // `roles/iam.serviceAccountTokenCreator` role on its next service + // account in the chain. The last service account in the chain must be + // granted the `roles/iam.serviceAccountTokenCreator` role on the + // service account that is specified in the `name` field of the request. + // The delegates must have the following format: + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` + // wildcard character is required; replacing it with a project ID is + // invalid. + Delegates []string `json:"delegates,omitempty"` + + // Payload: Required. The bytes to sign. + Payload string `json:"payload,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Delegates") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Delegates") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SignBlobRequest) MarshalJSON() ([]byte, error) { + type NoMethod SignBlobRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SignBlobResponse struct { + // KeyId: The ID of the key used to sign the blob. The key used for + // signing will remain valid for at least 12 hours after the blob is + // signed. To verify the signature, you can retrieve the public key in + // several formats from the following endpoints: - RSA public key + // wrapped in an X.509 v3 certificate: + // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT + // _EMAIL}` - Raw key in JSON format: + // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_ + // EMAIL}` - JSON Web Key (JWK): + // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_ + // EMAIL}` + KeyId string `json:"keyId,omitempty"` + + // SignedBlob: The signature for the blob. Does not include the original + // blob. After the key pair referenced by the `key_id` response field + // expires, Google no longer exposes the public key that can be used to + // verify the blob. As a result, the receiver can no longer verify the + // signature. + SignedBlob string `json:"signedBlob,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "KeyId") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "KeyId") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SignBlobResponse) MarshalJSON() ([]byte, error) { + type NoMethod SignBlobResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SignJwtRequest struct { + // Delegates: The sequence of service accounts in a delegation chain. + // Each service account must be granted the + // `roles/iam.serviceAccountTokenCreator` role on its next service + // account in the chain. The last service account in the chain must be + // granted the `roles/iam.serviceAccountTokenCreator` role on the + // service account that is specified in the `name` field of the request. + // The delegates must have the following format: + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` + // wildcard character is required; replacing it with a project ID is + // invalid. + Delegates []string `json:"delegates,omitempty"` + + // Payload: Required. The JWT payload to sign. 
Must be a serialized JSON + // object that contains a JWT Claims Set. For example: `{"sub": + // "user@example.com", "iat": 313435}` If the JWT Claims Set contains an + // expiration time (`exp`) claim, it must be an integer timestamp that + // is not in the past and no more than 12 hours in the future. + Payload string `json:"payload,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Delegates") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Delegates") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SignJwtRequest) MarshalJSON() ([]byte, error) { + type NoMethod SignJwtRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SignJwtResponse struct { + // KeyId: The ID of the key used to sign the JWT. The key used for + // signing will remain valid for at least 12 hours after the JWT is + // signed. To verify the signature, you can retrieve the public key in + // several formats from the following endpoints: - RSA public key + // wrapped in an X.509 v3 certificate: + // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT + // _EMAIL}` - Raw key in JSON format: + // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_ + // EMAIL}` - JSON Web Key (JWK): + // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_ + // EMAIL}` + KeyId string `json:"keyId,omitempty"` + + // SignedJwt: The signed JWT. Contains the automatically generated + // header; the client-supplied payload; and the signature, which is + // generated using the key referenced by the `kid` field in the header. + // After the key pair referenced by the `key_id` response field expires, + // Google no longer exposes the public key that can be used to verify + // the JWT. As a result, the receiver can no longer verify the + // signature. + SignedJwt string `json:"signedJwt,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "KeyId") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "KeyId") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SignJwtResponse) MarshalJSON() ([]byte, error) { + type NoMethod SignJwtResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "iamcredentials.projects.serviceAccounts.generateAccessToken": + +type ProjectsServiceAccountsGenerateAccessTokenCall struct { + s *Service + name string + generateaccesstokenrequest *GenerateAccessTokenRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GenerateAccessToken: Generates an OAuth 2.0 access token for a +// service account. +// +// - name: The resource name of the service account for which the +// credentials are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` +// wildcard character is required; replacing it with a project ID is +// invalid. +func (r *ProjectsServiceAccountsService) GenerateAccessToken(name string, generateaccesstokenrequest *GenerateAccessTokenRequest) *ProjectsServiceAccountsGenerateAccessTokenCall { + c := &ProjectsServiceAccountsGenerateAccessTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.generateaccesstokenrequest = generateaccesstokenrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGenerateAccessTokenCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Context(ctx context.Context) *ProjectsServiceAccountsGenerateAccessTokenCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateaccesstokenrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:generateAccessToken") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iamcredentials.projects.serviceAccounts.generateAccessToken" call. +// Exactly one of *GenerateAccessTokenResponse or error will be non-nil. 
+// Any non-2xx status code is an error. Response headers are in either +// *GenerateAccessTokenResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Do(opts ...googleapi.CallOption) (*GenerateAccessTokenResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GenerateAccessTokenResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Generates an OAuth 2.0 access token for a service account.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateAccessToken", + // "httpMethod": "POST", + // "id": "iamcredentials.projects.serviceAccounts.generateAccessToken", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:generateAccessToken", + // "request": { + // "$ref": "GenerateAccessTokenRequest" + // }, + // "response": { + // "$ref": "GenerateAccessTokenResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iamcredentials.projects.serviceAccounts.generateIdToken": + +type ProjectsServiceAccountsGenerateIdTokenCall struct { + s *Service + name string + generateidtokenrequest *GenerateIdTokenRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GenerateIdToken: Generates an OpenID Connect ID token for a service +// account. +// +// - name: The resource name of the service account for which the +// credentials are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` +// wildcard character is required; replacing it with a project ID is +// invalid. +func (r *ProjectsServiceAccountsService) GenerateIdToken(name string, generateidtokenrequest *GenerateIdTokenRequest) *ProjectsServiceAccountsGenerateIdTokenCall { + c := &ProjectsServiceAccountsGenerateIdTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.generateidtokenrequest = generateidtokenrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
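+//
+// For example (a sketch), to return only the token field of
+// GenerateIdTokenResponse:
+//
+//	resp, err := c.Fields("token").Do()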
+func (c *ProjectsServiceAccountsGenerateIdTokenCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGenerateIdTokenCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountsGenerateIdTokenCall) Context(ctx context.Context) *ProjectsServiceAccountsGenerateIdTokenCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountsGenerateIdTokenCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateidtokenrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:generateIdToken") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iamcredentials.projects.serviceAccounts.generateIdToken" call. +// Exactly one of *GenerateIdTokenResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GenerateIdTokenResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsGenerateIdTokenCall) Do(opts ...googleapi.CallOption) (*GenerateIdTokenResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GenerateIdTokenResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Generates an OpenID Connect ID token for a service account.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateIdToken", + // "httpMethod": "POST", + // "id": "iamcredentials.projects.serviceAccounts.generateIdToken", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. 
The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:generateIdToken", + // "request": { + // "$ref": "GenerateIdTokenRequest" + // }, + // "response": { + // "$ref": "GenerateIdTokenResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iamcredentials.projects.serviceAccounts.signBlob": + +type ProjectsServiceAccountsSignBlobCall struct { + s *Service + name string + signblobrequest *SignBlobRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SignBlob: Signs a blob using a service account's system-managed +// private key. +// +// - name: The resource name of the service account for which the +// credentials are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` +// wildcard character is required; replacing it with a project ID is +// invalid. +func (r *ProjectsServiceAccountsService) SignBlob(name string, signblobrequest *SignBlobRequest) *ProjectsServiceAccountsSignBlobCall { + c := &ProjectsServiceAccountsSignBlobCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.signblobrequest = signblobrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServiceAccountsSignBlobCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsSignBlobCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountsSignBlobCall) Context(ctx context.Context) *ProjectsServiceAccountsSignBlobCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signblobrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:signBlob") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iamcredentials.projects.serviceAccounts.signBlob" call. 
+// Exactly one of *SignBlobResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SignBlobResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) (*SignBlobResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SignBlobResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Signs a blob using a service account's system-managed private key.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", + // "httpMethod": "POST", + // "id": "iamcredentials.projects.serviceAccounts.signBlob", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:signBlob", + // "request": { + // "$ref": "SignBlobRequest" + // }, + // "response": { + // "$ref": "SignBlobResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iamcredentials.projects.serviceAccounts.signJwt": + +type ProjectsServiceAccountsSignJwtCall struct { + s *Service + name string + signjwtrequest *SignJwtRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SignJwt: Signs a JWT using a service account's system-managed private +// key. +// +// - name: The resource name of the service account for which the +// credentials are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` +// wildcard character is required; replacing it with a project ID is +// invalid. +func (r *ProjectsServiceAccountsService) SignJwt(name string, signjwtrequest *SignJwtRequest) *ProjectsServiceAccountsSignJwtCall { + c := &ProjectsServiceAccountsSignJwtCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.signjwtrequest = signjwtrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
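+//
+// For example (a sketch), to keep only the signed JWT in the response:
+//
+//	resp, err := c.Fields("signedJwt").Do()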
+func (c *ProjectsServiceAccountsSignJwtCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsSignJwtCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountsSignJwtCall) Context(ctx context.Context) *ProjectsServiceAccountsSignJwtCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signjwtrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:signJwt") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iamcredentials.projects.serviceAccounts.signJwt" call. +// Exactly one of *SignJwtResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SignJwtResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (*SignJwtResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SignJwtResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Signs a JWT using a service account's system-managed private key.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", + // "httpMethod": "POST", + // "id": "iamcredentials.projects.serviceAccounts.signJwt", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:signJwt", + // "request": { + // "$ref": "SignJwtRequest" + // }, + // "response": { + // "$ref": "SignJwtResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/impersonate/doc.go b/vendor/google.golang.org/api/impersonate/doc.go new file mode 100644 index 000000000000..155ef70e15a0 --- /dev/null +++ b/vendor/google.golang.org/api/impersonate/doc.go @@ -0,0 +1,32 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package impersonate is used to impersonate Google Credentials. +// +// # Required IAM roles +// +// In order to impersonate a service account the base service account must have +// the Service Account Token Creator role, roles/iam.serviceAccountTokenCreator, +// on the service account being impersonated. See +// https://cloud.google.com/iam/docs/understanding-service-accounts. +// +// Optionally, delegates can be used during impersonation if the base service +// account lacks the token creator role on the target. When using delegates, +// each service account must be granted roles/iam.serviceAccountTokenCreator +// on the next service account in the delgation chain. +// +// For example, if a base service account of SA1 is trying to impersonate target +// service account SA2 while using delegate service accounts DSA1 and DSA2, +// the following must be true: +// +// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on +// DSA1. +// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. +// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. +// +// If the base credential is an authorized user and not a service account, or if +// the option WithQuotaProject is set, the target service account must have a +// role that grants the serviceusage.services.use permission such as +// roles/serviceusage.serviceUsageConsumer. +package impersonate diff --git a/vendor/google.golang.org/api/impersonate/idtoken.go b/vendor/google.golang.org/api/impersonate/idtoken.go new file mode 100644 index 000000000000..a2defff15185 --- /dev/null +++ b/vendor/google.golang.org/api/impersonate/idtoken.go @@ -0,0 +1,129 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "golang.org/x/oauth2" + "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" +) + +// IDTokenConfig for generating an impersonated ID token. +type IDTokenConfig struct { + // Audience is the `aud` field for the token, such as an API endpoint the + // token will grant access to. Required. + Audience string + // TargetPrincipal is the email address of the service account to + // impersonate. Required. + TargetPrincipal string + // IncludeEmail includes the service account's email in the token. The + // resulting token will include both an `email` and `email_verified` + // claim. + IncludeEmail bool + // Delegates are the service account email addresses in a delegation chain. 
+ // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. + Delegates []string +} + +// IDTokenSource creates an impersonated TokenSource that returns ID tokens +// configured with the provided config and using credentials loaded from +// Application Default Credentials as the base credentials. The tokens provided +// by the source are valid for one hour and are automatically refreshed. +func IDTokenSource(ctx context.Context, config IDTokenConfig, opts ...option.ClientOption) (oauth2.TokenSource, error) { + if config.Audience == "" { + return nil, fmt.Errorf("impersonate: an audience must be provided") + } + if config.TargetPrincipal == "" { + return nil, fmt.Errorf("impersonate: a target service account must be provided") + } + + clientOpts := append(defaultClientOptions(), opts...) + client, _, err := htransport.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + its := impersonatedIDTokenSource{ + client: client, + targetPrincipal: config.TargetPrincipal, + audience: config.Audience, + includeEmail: config.IncludeEmail, + } + for _, v := range config.Delegates { + its.delegates = append(its.delegates, formatIAMServiceAccountName(v)) + } + return oauth2.ReuseTokenSource(nil, its), nil +} + +type generateIDTokenRequest struct { + Audience string `json:"audience"` + IncludeEmail bool `json:"includeEmail"` + Delegates []string `json:"delegates,omitempty"` +} + +type generateIDTokenResponse struct { + Token string `json:"token"` +} + +type impersonatedIDTokenSource struct { + client *http.Client + + targetPrincipal string + audience string + includeEmail bool + delegates []string +} + +func (i impersonatedIDTokenSource) Token() (*oauth2.Token, error) { + now := time.Now() + genIDTokenReq := generateIDTokenRequest{ + Audience: i.audience, + IncludeEmail: i.includeEmail, + Delegates: i.delegates, + } + bodyBytes, err := json.Marshal(genIDTokenReq) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to marshal request: %v", err) + } + + url := fmt.Sprintf("%s/v1/%s:generateIdToken", iamCredentailsEndpoint, formatIAMServiceAccountName(i.targetPrincipal)) + req, err := http.NewRequest("POST", url, bytes.NewReader(bodyBytes)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %v", err) + } + req.Header.Set("Content-Type", "application/json") + resp, err := i.client.Do(req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate ID token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var generateIDTokenResp generateIDTokenResponse + if err := json.Unmarshal(body, &generateIDTokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) + } + return &oauth2.Token{ + AccessToken: generateIDTokenResp.Token, + // Generated ID tokens are good for one hour. + Expiry: now.Add(1 * time.Hour), + }, nil +} diff --git a/vendor/google.golang.org/api/impersonate/impersonate.go b/vendor/google.golang.org/api/impersonate/impersonate.go new file mode 100644 index 000000000000..52c32589b723 --- /dev/null +++ b/vendor/google.golang.org/api/impersonate/impersonate.go @@ -0,0 +1,184 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "golang.org/x/oauth2" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + htransport "google.golang.org/api/transport/http" +) + +var ( + iamCredentailsEndpoint = "https://iamcredentials.googleapis.com" + oauth2Endpoint = "https://oauth2.googleapis.com" +) + +// CredentialsConfig for generating impersonated credentials. +type CredentialsConfig struct { + // TargetPrincipal is the email address of the service account to + // impersonate. Required. + TargetPrincipal string + // Scopes that the impersonated credential should have. Required. + Scopes []string + // Delegates are the service account email addresses in a delegation chain. + // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. + Delegates []string + // Lifetime is the amount of time until the impersonated token expires. If + // unset the token's lifetime will be one hour and be automatically + // refreshed. If set the token may have a max lifetime of one hour and will + // not be refreshed. Service accounts that have been added to an org policy + // with constraints/iam.allowServiceAccountCredentialLifetimeExtension may + // request a token lifetime of up to 12 hours. Optional. + Lifetime time.Duration + // Subject is the sub field of a JWT. This field should only be set if you + // wish to impersonate as a user. This feature is useful when using domain + // wide delegation. Optional. + Subject string +} + +// defaultClientOptions ensures the base credentials will work with the IAM +// Credentials API if no scope or audience is set by the user. +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultAudience("https://iamcredentials.googleapis.com/"), + internaloption.WithDefaultScopes("https://www.googleapis.com/auth/cloud-platform"), + } +} + +// CredentialsTokenSource returns an impersonated CredentialsTokenSource configured with the provided +// config and using credentials loaded from Application Default Credentials as +// the base credentials. +func CredentialsTokenSource(ctx context.Context, config CredentialsConfig, opts ...option.ClientOption) (oauth2.TokenSource, error) { + if config.TargetPrincipal == "" { + return nil, fmt.Errorf("impersonate: a target service account must be provided") + } + if len(config.Scopes) == 0 { + return nil, fmt.Errorf("impersonate: scopes must be provided") + } + if config.Lifetime.Hours() > 12 { + return nil, fmt.Errorf("impersonate: max lifetime is 12 hours") + } + + var isStaticToken bool + // Default to the longest acceptable value of one hour as the token will + // be refreshed automatically if not set. + lifetime := 3600 * time.Second + if config.Lifetime != 0 { + lifetime = config.Lifetime + // Don't auto-refresh token if a lifetime is configured. + isStaticToken = true + } + + clientOpts := append(defaultClientOptions(), opts...) + client, _, err := htransport.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + // If a subject is specified a different auth-flow is initiated to + // impersonate as the provided subject (user). 
+ if config.Subject != "" { + return user(ctx, config, client, lifetime, isStaticToken) + } + + its := impersonatedTokenSource{ + client: client, + targetPrincipal: config.TargetPrincipal, + lifetime: fmt.Sprintf("%.fs", lifetime.Seconds()), + } + for _, v := range config.Delegates { + its.delegates = append(its.delegates, formatIAMServiceAccountName(v)) + } + its.scopes = make([]string, len(config.Scopes)) + copy(its.scopes, config.Scopes) + + if isStaticToken { + tok, err := its.Token() + if err != nil { + return nil, err + } + return oauth2.StaticTokenSource(tok), nil + } + return oauth2.ReuseTokenSource(nil, its), nil +} + +func formatIAMServiceAccountName(name string) string { + return fmt.Sprintf("projects/-/serviceAccounts/%s", name) +} + +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type generateAccessTokenResp struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +type impersonatedTokenSource struct { + client *http.Client + + targetPrincipal string + lifetime string + scopes []string + delegates []string +} + +// Token returns an impersonated Token. +func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { + reqBody := generateAccessTokenReq{ + Delegates: i.delegates, + Lifetime: i.lifetime, + Scope: i.scopes, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to marshal request: %v", err) + } + url := fmt.Sprintf("%s/v1/%s:generateAccessToken", iamCredentailsEndpoint, formatIAMServiceAccountName(i.targetPrincipal)) + req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %v", err) + } + req.Header.Set("Content-Type", "application/json") + + resp, err := i.client.Do(req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var accessTokenResp generateAccessTokenResp + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to parse expiry: %v", err) + } + return &oauth2.Token{ + AccessToken: accessTokenResp.AccessToken, + Expiry: expiry, + }, nil +} diff --git a/vendor/google.golang.org/api/impersonate/user.go b/vendor/google.golang.org/api/impersonate/user.go new file mode 100644 index 000000000000..059deab71177 --- /dev/null +++ b/vendor/google.golang.org/api/impersonate/user.go @@ -0,0 +1,169 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2" +) + +func user(ctx context.Context, c CredentialsConfig, client *http.Client, lifetime time.Duration, isStaticToken bool) (oauth2.TokenSource, error) { + u := userTokenSource{ + client: client, + targetPrincipal: c.TargetPrincipal, + subject: c.Subject, + lifetime: lifetime, + } + u.delegates = make([]string, len(c.Delegates)) + for i, v := range c.Delegates { + u.delegates[i] = formatIAMServiceAccountName(v) + } + u.scopes = make([]string, len(c.Scopes)) + copy(u.scopes, c.Scopes) + if isStaticToken { + tok, err := u.Token() + if err != nil { + return nil, err + } + return oauth2.StaticTokenSource(tok), nil + } + return oauth2.ReuseTokenSource(nil, u), nil +} + +type claimSet struct { + Iss string `json:"iss"` + Scope string `json:"scope,omitempty"` + Sub string `json:"sub,omitempty"` + Aud string `json:"aud"` + Iat int64 `json:"iat"` + Exp int64 `json:"exp"` +} + +type signJWTRequest struct { + Payload string `json:"payload"` + Delegates []string `json:"delegates,omitempty"` +} + +type signJWTResponse struct { + // KeyID is the key used to sign the JWT. + KeyID string `json:"keyId"` + // SignedJwt contains the automatically generated header; the + // client-supplied payload; and the signature, which is generated using + // the key referenced by the `kid` field in the header. + SignedJWT string `json:"signedJwt"` +} + +type exchangeTokenResponse struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` +} + +type userTokenSource struct { + client *http.Client + + targetPrincipal string + subject string + scopes []string + lifetime time.Duration + delegates []string +} + +func (u userTokenSource) Token() (*oauth2.Token, error) { + signedJWT, err := u.signJWT() + if err != nil { + return nil, err + } + return u.exchangeToken(signedJWT) +} + +func (u userTokenSource) signJWT() (string, error) { + now := time.Now() + exp := now.Add(u.lifetime) + claims := claimSet{ + Iss: u.targetPrincipal, + Scope: strings.Join(u.scopes, " "), + Sub: u.subject, + Aud: fmt.Sprintf("%s/token", oauth2Endpoint), + Iat: now.Unix(), + Exp: exp.Unix(), + } + payloadBytes, err := json.Marshal(claims) + if err != nil { + return "", fmt.Errorf("impersonate: unable to marshal claims: %v", err) + } + signJWTReq := signJWTRequest{ + Payload: string(payloadBytes), + Delegates: u.delegates, + } + + bodyBytes, err := json.Marshal(signJWTReq) + if err != nil { + return "", fmt.Errorf("impersonate: unable to marshal request: %v", err) + } + reqURL := fmt.Sprintf("%s/v1/%s:signJwt", iamCredentailsEndpoint, formatIAMServiceAccountName(u.targetPrincipal)) + req, err := http.NewRequest("POST", reqURL, bytes.NewReader(bodyBytes)) + if err != nil { + return "", fmt.Errorf("impersonate: unable to create request: %v", err) + } + req.Header.Set("Content-Type", "application/json") + rawResp, err := u.client.Do(req) + if err != nil { + return "", fmt.Errorf("impersonate: unable to sign JWT: %v", err) + } + body, err := ioutil.ReadAll(io.LimitReader(rawResp.Body, 1<<20)) + if err != nil { + return "", fmt.Errorf("impersonate: unable to read body: %v", err) + } + if c := rawResp.StatusCode; c < 200 || c > 299 { + return "", fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var signJWTResp signJWTResponse + if err := json.Unmarshal(body, &signJWTResp); err != nil { + return "", 
fmt.Errorf("impersonate: unable to parse response: %v", err) + } + return signJWTResp.SignedJWT, nil +} + +func (u userTokenSource) exchangeToken(signedJWT string) (*oauth2.Token, error) { + now := time.Now() + v := url.Values{} + v.Set("grant_type", "assertion") + v.Set("assertion_type", "http://oauth.net/grant_type/jwt/1.0/bearer") + v.Set("assertion", signedJWT) + rawResp, err := u.client.PostForm(fmt.Sprintf("%s/token", oauth2Endpoint), v) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to exchange token: %v", err) + } + body, err := ioutil.ReadAll(io.LimitReader(rawResp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to read body: %v", err) + } + if c := rawResp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var tokenResp exchangeTokenResponse + if err := json.Unmarshal(body, &tokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) + } + + return &oauth2.Token{ + AccessToken: tokenResp.AccessToken, + TokenType: tokenResp.TokenType, + Expiry: now.Add(time.Second * time.Duration(tokenResp.ExpiresIn)), + }, nil +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index b067a179b7a8..32d52413b30c 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -70,11 +70,12 @@ const ( // // - A self-signed JWT flow will be executed if the following conditions are // met: -// (1) At least one of the following is true: -// (a) No scope is provided -// (b) Scope for self-signed JWT flow is enabled -// (c) Audiences are explicitly provided by users -// (2) No service account impersontation +// +// (1) At least one of the following is true: +// (a) No scope is provided +// (b) Scope for self-signed JWT flow is enabled +// (c) Audiences are explicitly provided by users +// (2) No service account impersontation // // - Otherwise, executes standard OAuth 2.0 flow // More details: google.aip.dev/auth/4111 diff --git a/vendor/google.golang.org/api/internal/gensupport/error.go b/vendor/google.golang.org/api/internal/gensupport/error.go new file mode 100644 index 000000000000..886c6532b153 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/error.go @@ -0,0 +1,24 @@ +// Copyright 2022 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "errors" + + "github.com/googleapis/gax-go/v2/apierror" + "google.golang.org/api/googleapi" +) + +// WrapError creates an [apierror.APIError] from err, wraps it in err, and +// returns err. If err is not a [googleapi.Error] (or a +// [google.golang.org/grpc/status.Status]), it returns err without modification. +func WrapError(err error) error { + var herr *googleapi.Error + apiError, ok := apierror.ParseError(err, false) + if ok && errors.As(err, &herr) { + herr.Wrap(apiError) + } + return err +} diff --git a/vendor/google.golang.org/api/internal/gensupport/json.go b/vendor/google.golang.org/api/internal/gensupport/json.go index c01e32189f44..1b770464110c 100644 --- a/vendor/google.golang.org/api/internal/gensupport/json.go +++ b/vendor/google.golang.org/api/internal/gensupport/json.go @@ -13,9 +13,10 @@ import ( // MarshalJSON returns a JSON encoding of schema containing only selected fields. 
// A field is selected if any of the following is true: -// * it has a non-empty value -// * its field name is present in forceSendFields and it is not a nil pointer or nil interface -// * its field name is present in nullFields. +// - it has a non-empty value +// - its field name is present in forceSendFields and it is not a nil pointer or nil interface +// - its field name is present in nullFields. +// // The JSON key for each selected field is taken from the field's json: struct tag. func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { if len(forceSendFields) == 0 && len(nullFields) == 0 { diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go index d14a22470c19..8356e7f27b07 100644 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -17,92 +17,10 @@ import ( "sync" "time" + gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/googleapi" ) -const sniffBuffSize = 512 - -func newContentSniffer(r io.Reader) *contentSniffer { - return &contentSniffer{r: r} -} - -// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. -type contentSniffer struct { - r io.Reader - start []byte // buffer for the sniffed bytes. - err error // set to any error encountered while reading bytes to be sniffed. - - ctype string // set on first sniff. - sniffed bool // set to true on first sniff. -} - -func (cs *contentSniffer) Read(p []byte) (n int, err error) { - // Ensure that the content type is sniffed before any data is consumed from Reader. - _, _ = cs.ContentType() - - if len(cs.start) > 0 { - n := copy(p, cs.start) - cs.start = cs.start[n:] - return n, nil - } - - // We may have read some bytes into start while sniffing, even if the read ended in an error. - // We should first return those bytes, then the error. - if cs.err != nil { - return 0, cs.err - } - - // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. - return cs.r.Read(p) -} - -// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. -func (cs *contentSniffer) ContentType() (string, bool) { - if cs.sniffed { - return cs.ctype, cs.ctype != "" - } - cs.sniffed = true - // If ReadAll hits EOF, it returns err==nil. - cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) - - // Don't try to detect the content type based on possibly incomplete data. - if cs.err != nil { - return "", false - } - - cs.ctype = http.DetectContentType(cs.start) - return cs.ctype, true -} - -// DetermineContentType determines the content type of the supplied reader. -// If the content type is already known, it can be specified via ctype. -// Otherwise, the content of media will be sniffed to determine the content type. -// If media implements googleapi.ContentTyper (deprecated), this will be used -// instead of sniffing the content. -// After calling DetectContentType the caller must not perform further reads on -// media, but rather read from the Reader that is returned. -func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { - // Note: callers could avoid calling DetectContentType if ctype != "", - // but doing the check inside this function reduces the amount of - // generated code. 
- if ctype != "" { - return media, ctype - } - - // For backwards compatibility, allow clients to set content - // type by providing a ContentTyper for media. - if typer, ok := media.(googleapi.ContentTyper); ok { - return media, typer.ContentType() - } - - sniffer := newContentSniffer(media) - if ctype, ok := sniffer.ContentType(); ok { - return sniffer, ctype - } - // If content type could not be sniffed, reads from sniffer will eventually fail with an error. - return sniffer, "" -} - type typeReader struct { io.Reader typ string @@ -234,7 +152,10 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { mi := &MediaInfo{} opts := googleapi.ProcessMediaOptions(options) if !opts.ForceEmptyContentType { - r, mi.mType = DetermineContentType(r, opts.ContentType) + mi.mType = opts.ContentType + if mi.mType == "" { + r, mi.mType = gax.DetermineContentType(r) + } } mi.chunkRetryDeadline = opts.ChunkRetryDeadline mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) @@ -245,7 +166,11 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { // call. It returns a MediaInfo using the given reader, size and media type. func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { rdr := ReaderAtToReader(r, size) - rdr, mType := DetermineContentType(rdr, mediaType) + mType := mediaType + if mType == "" { + rdr, mType = gax.DetermineContentType(rdr) + } + return &MediaInfo{ size: size, mType: mType, @@ -289,13 +214,12 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB // be retried because the data is stored in the MediaBuffer. media, _, _, _ = mi.buffer.Chunk() } + toCleanup := []io.Closer{} if media != nil { fb := readerFunc(body) fm := readerFunc(media) combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) - toCleanup := []io.Closer{ - combined, - } + toCleanup = append(toCleanup, combined) if fb != nil && fm != nil { getBody = func() (io.ReadCloser, error) { rb := ioutil.NopCloser(fb()) @@ -309,18 +233,30 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB return r, nil } } - cleanup = func() { - for _, closer := range toCleanup { - _ = closer.Close() - } - - } reqHeaders.Set("Content-Type", ctype) body = combined } if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { + // This happens when initiating a resumable upload session. + // The initial request contains a JSON body rather than media. + // It can be retried with a getBody function that re-creates the request body. + fb := readerFunc(body) + if fb != nil { + getBody = func() (io.ReadCloser, error) { + rb := ioutil.NopCloser(fb()) + toCleanup = append(toCleanup, rb) + return rb, nil + } + } reqHeaders.Set("X-Upload-Content-Type", mi.mType) } + // Ensure that any bodies created in getBody are cleaned up. 
+ cleanup = func() { + for _, closer := range toCleanup { + _ = closer.Close() + } + + } return body, getBody, cleanup } diff --git a/vendor/google.golang.org/api/internal/gensupport/params.go b/vendor/google.golang.org/api/internal/gensupport/params.go index 6703721ffde5..1a30d2ca2524 100644 --- a/vendor/google.golang.org/api/internal/gensupport/params.go +++ b/vendor/google.golang.org/api/internal/gensupport/params.go @@ -37,7 +37,7 @@ func (u URLParams) SetMulti(key string, values []string) { u[key] = values } -// Encode encodes the values into ``URL encoded'' form +// Encode encodes the values into “URL encoded” form // ("bar=baz&foo=quux") sorted by key. func (u URLParams) Encode() string { return url.Values(u).Encode() diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index 0eae147fa92f..0c659188dda1 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -10,8 +10,12 @@ import ( "fmt" "io" "net/http" + "strings" "sync" "time" + + "github.com/google/uuid" + "google.golang.org/api/internal" ) // ResumableUpload is used by the generated APIs to provide resumable uploads. @@ -38,6 +42,11 @@ type ResumableUpload struct { // ChunkRetryDeadline configures the per-chunk deadline after which no further // retries should happen. ChunkRetryDeadline time.Duration + + // Track current request invocation ID and attempt count for retry metric + // headers. + invocationID string + attempts int } // Progress returns the number of bytes uploaded at this point. @@ -72,6 +81,10 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) + baseXGoogHeader := "gl-go/" + GoVersion() + " gdcl/" + internal.Version + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", rx.invocationID, rx.attempts) + req.Header.Set("X-Goog-Api-Client", strings.Join([]string{baseXGoogHeader, invocationHeader}, " ")) + // Google's upload endpoint uses status code 308 for a // different purpose than the "308 Permanent Redirect" // since-standardized in RFC 7238. Because of the conflict in @@ -178,9 +191,11 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err for { var pause time.Duration - // Each chunk gets its own initialized-at-zero backoff. + // Each chunk gets its own initialized-at-zero backoff and invocation ID. bo := rx.Retry.backoff() quitAfter := time.After(retryDeadline) + rx.attempts = 1 + rx.invocationID = uuid.New().String() // Retry loop for a single chunk. 
for { @@ -223,6 +238,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err break } + rx.attempts++ pause = bo.Pause() if resp != nil && resp.Body != nil { resp.Body.Close() diff --git a/vendor/google.golang.org/api/internal/gensupport/retry.go b/vendor/google.golang.org/api/internal/gensupport/retry.go index 873dab36334c..20b57d925f17 100644 --- a/vendor/google.golang.org/api/internal/gensupport/retry.go +++ b/vendor/google.golang.org/api/internal/gensupport/retry.go @@ -5,7 +5,10 @@ package gensupport import ( + "errors" "io" + "net" + "strings" "time" "github.com/googleapis/gax-go/v2" @@ -65,6 +68,14 @@ func shouldRetry(status int, err error) bool { return true } } + var opErr *net.OpError + if errors.As(err, &opErr) { + if strings.Contains(opErr.Error(), "use of closed network connection") { + // TODO: check against net.ErrClosed (go 1.16+) instead of string + return true + } + } + // If Go 1.13 error unwrapping is available, use this to examine wrapped // errors. if err, ok := err.(interface{ Unwrap() error }); ok { diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index dd50cc20a58c..dd24139b3666 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -8,12 +8,36 @@ import ( "context" "encoding/json" "errors" + "fmt" "net/http" + "strings" "time" + "github.com/google/uuid" "github.com/googleapis/gax-go/v2" ) +// Use this error type to return an error which allows introspection of both +// the context error and the error from the service. +type wrappedCallErr struct { + ctxErr error + wrappedErr error +} + +func (e wrappedCallErr) Error() string { + return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr) +} + +func (e wrappedCallErr) Unwrap() error { + return e.wrappedErr +} + +// Is allows errors.Is to match the error from the call as well as context +// sentinel errors. +func (e wrappedCallErr) Is(target error) bool { + return errors.Is(e.ctxErr, target) || errors.Is(e.wrappedErr, target) +} + // SendRequest sends a single HTTP request using the given client. // If ctx is non-nil, it calls all hooks, then sends the request with // req.WithContext, then calls any functions returned by the hooks in @@ -71,6 +95,9 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r var resp *http.Response var err error + attempts := 1 + invocationID := uuid.New().String() + baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") // Loop to retry the request, up to the context deadline. var pause time.Duration @@ -90,12 +117,12 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r for { select { case <-ctx.Done(): - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err == nil { - err = ctx.Err() + // If we got an error and the context has been canceled, return an error acknowledging + // both the context cancelation and the service error. + if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} } - return resp, err + return resp, ctx.Err() case <-time.After(pause): } @@ -104,11 +131,14 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r // select is satisfied at the same time, Go will choose one arbitrarily. // That can cause an operation to go through even if the context was // canceled before. 
- if err == nil { - err = ctx.Err() + if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} } - return resp, err + return resp, ctx.Err() } + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") + req.Header.Set("X-Goog-Api-Client", xGoogHeader) resp, err = client.Do(req.WithContext(ctx)) @@ -123,6 +153,7 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r if req.GetBody == nil || !errorFunc(status, err) { break } + attempts++ var errBody error req.Body, errBody = req.GetBody() if errBody != nil { diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 24d6116a1db8..e96a33164539 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.70.0" +const Version = "0.104.0" diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go deleted file mode 100644 index 2e3d2dedbbf2..000000000000 --- a/vendor/google.golang.org/api/option/credentials_go19.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 -// +build go1.9 - -package option - -import ( - "golang.org/x/oauth2/google" - "google.golang.org/api/internal" -) - -type withCreds google.Credentials - -func (w *withCreds) Apply(o *internal.DialSettings) { - o.Credentials = (*google.Credentials)(w) -} - -// WithCredentials returns a ClientOption that authenticates API calls. -func WithCredentials(creds *google.Credentials) ClientOption { - return (*withCreds)(creds) -} diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go deleted file mode 100644 index 728a62f1ae6a..000000000000 --- a/vendor/google.golang.org/api/option/credentials_notgo19.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2018 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 -// +build !go1.9 - -package option - -import ( - "golang.org/x/oauth2/google" - "google.golang.org/api/internal" -) - -type withCreds google.DefaultCredentials - -func (w *withCreds) Apply(o *internal.DialSettings) { - o.Credentials = (*google.DefaultCredentials)(w) -} - -func WithCredentials(creds *google.DefaultCredentials) ClientOption { - return (*withCreds)(creds) -} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index 9ff697e0b8e8..b2085a1949ab 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -10,6 +10,7 @@ import ( "net/http" "golang.org/x/oauth2" + "golang.org/x/oauth2/google" "google.golang.org/api/internal" "google.golang.org/api/internal/impersonate" "google.golang.org/grpc" @@ -81,6 +82,9 @@ func (w withEndpoint) Apply(o *internal.DialSettings) { // WithScopes returns a ClientOption that overrides the default OAuth2 scopes // to be used for a service. +// +// If both WithScopes and WithTokenSource are used, scope settings from the +// token source will be used instead. 
func WithScopes(scope ...string) ClientOption { return withScopes(scope) } @@ -92,7 +96,9 @@ func (w withScopes) Apply(o *internal.DialSettings) { copy(o.Scopes, w) } -// WithUserAgent returns a ClientOption that sets the User-Agent. +// WithUserAgent returns a ClientOption that sets the User-Agent. This option +// is incompatible with the [WithHTTPClient] option. If you wish to provide a +// custom client you will need to add this header via RoundTripper middleware. func WithUserAgent(ua string) ClientOption { return withUA(ua) } @@ -144,8 +150,6 @@ func (w withGRPCDialOption) Apply(o *internal.DialSettings) { // WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC // connections that requests will be balanced between. -// -// This is an EXPERIMENTAL API and may be changed or removed in the future. func WithGRPCConnectionPool(size int) ClientOption { return withGRPCConnectionPool(size) } @@ -288,10 +292,10 @@ func (w withClientCertSource) Apply(o *internal.DialSettings) { // service account SA2 while using delegate service accounts DSA1 and DSA2, // the following must be true: // -// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on -// DSA1. -// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. -// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. +// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on +// DSA1. +// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. +// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. // // The resulting impersonated credential will either have the default scopes of // the client being instantiating or the scopes from WithScopes if provided. @@ -306,9 +310,9 @@ func (w withClientCertSource) Apply(o *internal.DialSettings) { // // This is an EXPERIMENTAL API and may be changed or removed in the future. // -// This option has been replaced by `impersonate` package: +// Deprecated: This option has been replaced by `impersonate` package: // `google.golang.org/api/impersonate`. Please use the `impersonate` package -// instead. +// instead with the WithTokenSource option. func ImpersonateCredentials(target string, delegates ...string) ClientOption { return impersonateServiceAccount{ target: target, @@ -328,3 +332,14 @@ func (i impersonateServiceAccount) Apply(o *internal.DialSettings) { o.ImpersonationConfig.Delegates = make([]string, len(i.delegates)) copy(o.ImpersonationConfig.Delegates, i.delegates) } + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.Credentials)(w) +} + +// WithCredentials returns a ClientOption that authenticates API calls. 
+func WithCredentials(creds *google.Credentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 0493a62d15bf..bc1fca74391e 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"3137353238303035303136333633393332333233\"", + "etag": "\"3134363638303431303535363634343235383633\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -115,11 +115,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -153,11 +148,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -187,11 +177,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -224,11 +209,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -265,11 +245,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -309,11 +284,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -362,11 +332,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", "location": "query", @@ -419,11 +384,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -463,11 +423,6 @@ "minimum": "1", "type": "integer" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -550,11 +505,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request.", "location": "query", @@ -619,11 +569,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request.", "location": "query", @@ -664,11 +609,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -764,11 +704,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -801,11 +736,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -846,11 +776,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -948,11 +873,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", "location": "query", @@ -1017,11 +937,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1055,11 +970,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1089,11 +999,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1138,11 +1043,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1179,11 +1079,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1223,11 +1118,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1271,11 +1161,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1310,11 +1195,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1347,11 +1227,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", "location": "query", @@ -1385,11 +1260,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1446,11 +1316,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1497,11 +1362,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1544,11 +1404,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1594,11 +1449,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1648,11 +1498,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1705,11 +1550,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1791,11 +1631,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", "location": "query", @@ -1925,11 +1760,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "sourceBucket": { "description": "Name of the bucket in which to find the source object.", "location": "path", @@ -2018,11 +1848,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2100,11 +1925,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2152,11 +1972,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2277,11 +2092,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2362,11 +2172,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "startOffset": { "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", "location": "query", @@ -2481,11 +2286,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request, for Requester Pays buckets.", "location": "query", @@ -2620,11 +2420,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "rewriteToken": { "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", "location": "query", @@ -2694,11 +2489,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2753,11 +2543,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2861,11 +2646,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2944,11 +2724,6 @@ "location": "query", "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "startOffset": { "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", "location": "query", @@ -3207,11 +2982,6 @@ "required": true, "type": "string" }, - "provisionalUserProject": { - "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - "location": "query", - "type": "string" - }, "userProject": { "description": "The project to be billed for this request.", "location": "query", @@ -3235,7 +3005,7 @@ } } }, - "revision": "20220210", + "revision": "20220705", "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { @@ -3424,7 +3194,7 @@ "type": "string" }, "type": { - "description": "Type of the action. Currently, only Delete and SetStorageClass are supported.", + "description": "Type of the action. Currently, only Delete, SetStorageClass, and AbortIncompleteMultipartUpload are supported.", "type": "string" } }, @@ -3466,6 +3236,13 @@ "description": "A regular expression that satisfies the RE2 syntax. This condition is satisfied when the name of the object matches the RE2 pattern. Note: This feature is currently in the \"Early Access\" launch stage and is only available to a whitelisted set of users; that means that this feature may be changed in backward-incompatible ways and that it is not guaranteed to be released.", "type": "string" }, + "matchesPrefix": { + "description": "List of object name prefixes. This condition will be satisfied when at least one of the prefixes exactly matches the beginning of the object name.", + "items": { + "type": "string" + }, + "type": "array" + }, "matchesStorageClass": { "description": "Objects having any of the storage classes specified by this condition will be matched. 
Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.", "items": { @@ -3473,6 +3250,13 @@ }, "type": "array" }, + "matchesSuffix": { + "description": "List of object name suffixes. This condition will be satisfied when at least one of the suffixes exactly matches the end of the object name.", + "items": { + "type": "string" + }, + "type": "array" + }, "noncurrentTimeBefore": { "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when the noncurrent time on an object is before this date in UTC. This condition is relevant only for versioned objects.", "format": "date", @@ -4211,7 +3995,7 @@ "type": "string" }, "updated": { - "description": "The modification time of the object metadata in RFC 3339 format.", + "description": "The modification time of the object metadata in RFC 3339 format. Set initially to object creation time and then updated whenever any metadata of the object changes. This includes changes made by a requester, such as modifying custom metadata, as well as changes made by Cloud Storage on behalf of a requester, such as changing the storage class based on an Object Lifecycle Configuration.", "format": "date-time", "type": "string" } diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 5aa182bf6922..4613ebdfa72a 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -10,35 +10,35 @@ // // For product documentation, see: https://developers.google.com/storage/docs/json_api/ // -// Creating a client +// # Creating a client // // Usage example: // -// import "google.golang.org/api/storage/v1" -// ... -// ctx := context.Background() -// storageService, err := storage.NewService(ctx) +// import "google.golang.org/api/storage/v1" +// ... +// ctx := context.Background() +// storageService, err := storage.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // -// Other authentication options +// # Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // -// storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope)) +// storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // -// storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza...")) +// storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) 
+// storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package storage // import "google.golang.org/api/storage/v1" @@ -105,7 +105,7 @@ const ( // NewService creates a new Service. func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { - scopesOption := option.WithScopes( + scopesOption := internaloption.WithDefaultScopes( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only", "https://www.googleapis.com/auth/devstorage.full_control", @@ -790,8 +790,8 @@ type BucketLifecycleRuleAction struct { // action is SetStorageClass. StorageClass string `json:"storageClass,omitempty"` - // Type: Type of the action. Currently, only Delete and SetStorageClass - // are supported. + // Type: Type of the action. Currently, only Delete, SetStorageClass, + // and AbortIncompleteMultipartUpload are supported. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "StorageClass") to @@ -822,7 +822,7 @@ func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { type BucketLifecycleRuleCondition struct { // Age: Age of an object (in days). This condition is satisfied when an // object reaches the specified age. - Age int64 `json:"age,omitempty"` + Age *int64 `json:"age,omitempty"` // CreatedBefore: A date in RFC 3339 format with only the date part (for // instance, "2013-01-15"). This condition is satisfied when an object @@ -861,12 +861,22 @@ type BucketLifecycleRuleCondition struct { // ways and that it is not guaranteed to be released. MatchesPattern string `json:"matchesPattern,omitempty"` + // MatchesPrefix: List of object name prefixes. This condition will be + // satisfied when at least one of the prefixes exactly matches the + // beginning of the object name. + MatchesPrefix []string `json:"matchesPrefix,omitempty"` + // MatchesStorageClass: Objects having any of the storage classes // specified by this condition will be matched. Values include // MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and // DURABLE_REDUCED_AVAILABILITY. MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` + // MatchesSuffix: List of object name suffixes. This condition will be + // satisfied when at least one of the suffixes exactly matches the end + // of the object name. + MatchesSuffix []string `json:"matchesSuffix,omitempty"` + // NoncurrentTimeBefore: A date in RFC 3339 format with only the date // part (for instance, "2013-01-15"). This condition is satisfied when // the noncurrent time on an object is before this date in UTC. This @@ -1879,7 +1889,12 @@ type Object struct { TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"` // Updated: The modification time of the object metadata in RFC 3339 - // format. + // format. Set initially to object creation time and then updated + // whenever any metadata of the object changes. This includes changes + // made by a requester, such as modifying custom metadata, as well as + // changes made by Cloud Storage on behalf of a requester, such as + // changing the storage class based on an Object Lifecycle + // Configuration. 
Updated string `json:"updated,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2469,10 +2484,10 @@ type BucketAccessControlsDeleteCall struct { // Delete: Permanently deletes the ACL entry for the specified entity on // the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -2480,14 +2495,6 @@ func (r *BucketAccessControlsService) Delete(bucket string, entity string) *Buck return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketAccessControlsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsDeleteCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { @@ -2553,7 +2560,7 @@ func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -2577,11 +2584,6 @@ func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -2612,10 +2614,10 @@ type BucketAccessControlsGetCall struct { // Get: Returns the ACL entry for the specified entity on the specified // bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -2623,14 +2625,6 @@ func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketA return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. 
-func (c *BucketAccessControlsGetCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsGetCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall { @@ -2714,17 +2708,17 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -2758,11 +2752,6 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -2802,14 +2791,6 @@ func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketAccessControlsInsertCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsInsertCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { @@ -2884,17 +2865,17 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -2921,11 +2902,6 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", @@ -2967,14 +2943,6 @@ func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsL return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketAccessControlsListCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsListCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall { @@ -3057,17 +3025,17 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControls{ ServerResponse: googleapi.ServerResponse{ @@ -3094,11 +3062,6 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -3131,10 +3094,10 @@ type BucketAccessControlsPatchCall struct { // Patch: Patches an ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -3143,14 +3106,6 @@ func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucket return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketAccessControlsPatchCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsPatchCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { @@ -3226,17 +3181,17 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -3270,11 +3225,6 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -3310,10 +3260,10 @@ type BucketAccessControlsUpdateCall struct { // Update: Updates an ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -3322,14 +3272,6 @@ func (r *BucketAccessControlsService) Update(bucket string, entity string, bucke return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketAccessControlsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsUpdateCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { @@ -3405,17 +3347,17 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -3449,11 +3391,6 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -3510,14 +3447,6 @@ func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *BucketsDeleteCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { @@ -3582,7 +3511,7 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -3611,11 +3540,6 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -3674,21 +3598,14 @@ func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64 // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { c.urlParams_.Set("projection", projection) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketsGetCall) ProvisionalUserProject(provisionalUserProject string) *BucketsGetCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. 
Required for Requester Pays buckets. func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { @@ -3771,17 +3688,17 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -3833,11 +3750,6 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -3889,14 +3801,6 @@ func (c *BucketsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequested return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketsGetIamPolicyCall) ProvisionalUserProject(provisionalUserProject string) *BucketsGetIamPolicyCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall { @@ -3979,17 +3883,17 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -4023,11 +3927,6 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "minimum": "1", // "type": "integer" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -4070,14 +3969,22 @@ func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsert // predefined set of access controls to this bucket. // // Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and +// +// "authenticatedRead" - Project team owners get OWNER access, and +// // allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// // their roles. 
-// "publicRead" - Project team owners get OWNER access, and allUsers +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// // get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and +// +// "publicReadWrite" - Project team owners get OWNER access, and +// // allUsers get WRITER access. func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -4089,16 +3996,26 @@ func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCa // object access controls to this bucket. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) @@ -4111,21 +4028,14 @@ func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAc // full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { c.urlParams_.Set("projection", projection) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketsInsertCall) ProvisionalUserProject(provisionalUserProject string) *BucketsInsertCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. 
func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { @@ -4197,17 +4107,17 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -4287,11 +4197,6 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request.", // "location": "query", @@ -4360,21 +4265,14 @@ func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsListCall) Projection(projection string) *BucketsListCall { c.urlParams_.Set("projection", projection) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketsListCall) ProvisionalUserProject(provisionalUserProject string) *BucketsListCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { @@ -4454,17 +4352,17 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Buckets{ ServerResponse: googleapi.ServerResponse{ @@ -4522,11 +4420,6 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request.", // "location": "query", @@ -4581,9 +4474,9 @@ type BucketsLockRetentionPolicyCall struct { // LockRetentionPolicy: Locks retention policy on a bucket. // -// - bucket: Name of a bucket. -// - ifMetagenerationMatch: Makes the operation conditional on whether -// bucket's current metageneration matches the given value. +// - bucket: Name of a bucket. +// - ifMetagenerationMatch: Makes the operation conditional on whether +// bucket's current metageneration matches the given value. 
func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall { c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -4591,14 +4484,6 @@ func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatc return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketsLockRetentionPolicyCall) ProvisionalUserProject(provisionalUserProject string) *BucketsLockRetentionPolicyCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall { @@ -4668,17 +4553,17 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -4713,11 +4598,6 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -4782,14 +4662,22 @@ func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int // predefined set of access controls to this bucket. // // Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and +// +// "authenticatedRead" - Project team owners get OWNER access, and +// // allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// // their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// // get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and +// +// "publicReadWrite" - Project team owners get OWNER access, and +// // allUsers get WRITER access. func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -4801,16 +4689,26 @@ func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall // object access controls to this bucket. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. 
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) @@ -4821,21 +4719,14 @@ func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl // properties to return. Defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { c.urlParams_.Set("projection", projection) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketsPatchCall) ProvisionalUserProject(provisionalUserProject string) *BucketsPatchCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { @@ -4910,17 +4801,17 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -5012,11 +4903,6 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -5059,14 +4945,6 @@ func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSet return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. 
-func (c *BucketsSetIamPolicyCall) ProvisionalUserProject(provisionalUserProject string) *BucketsSetIamPolicyCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { @@ -5141,17 +5019,17 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -5178,11 +5056,6 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -5227,14 +5100,6 @@ func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketsTestIamPermissionsCall) ProvisionalUserProject(provisionalUserProject string) *BucketsTestIamPermissionsCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall { @@ -5317,17 +5182,17 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5362,11 +5227,6 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -5433,14 +5293,22 @@ func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // predefined set of access controls to this bucket. 
// // Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and +// +// "authenticatedRead" - Project team owners get OWNER access, and +// // allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// // their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// // get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and +// +// "publicReadWrite" - Project team owners get OWNER access, and +// // allUsers get WRITER access. func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -5452,16 +5320,26 @@ func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCa // object access controls to this bucket. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) @@ -5472,21 +5350,14 @@ func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAc // properties to return. Defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { c.urlParams_.Set("projection", projection) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *BucketsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *BucketsUpdateCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { @@ -5561,17 +5432,17 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ @@ -5663,11 +5534,6 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -5765,7 +5631,7 @@ func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -5802,10 +5668,10 @@ type DefaultObjectAccessControlsDeleteCall struct { // Delete: Permanently deletes the default object ACL entry for the // specified entity on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -5813,14 +5679,6 @@ func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *DefaultObjectAccessControlsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsDeleteCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { @@ -5886,7 +5744,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -5910,11 +5768,6 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -5945,10 +5798,10 @@ type DefaultObjectAccessControlsGetCall struct { // Get: Returns the default object ACL entry for the specified entity on // the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -5956,14 +5809,6 @@ func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) * return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *DefaultObjectAccessControlsGetCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsGetCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall { @@ -6047,17 +5892,17 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6091,11 +5936,6 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", @@ -6136,14 +5976,6 @@ func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccessc return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *DefaultObjectAccessControlsInsertCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsInsertCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { @@ -6218,17 +6050,17 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6255,11 +6087,6 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -6318,14 +6145,6 @@ func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagen return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *DefaultObjectAccessControlsListCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsListCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall { @@ -6408,17 +6227,17 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControls{ ServerResponse: googleapi.ServerResponse{ @@ -6457,11 +6276,6 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -6494,10 +6308,10 @@ type DefaultObjectAccessControlsPatchCall struct { // Patch: Patches a default object ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -6506,14 +6320,6 @@ func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *DefaultObjectAccessControlsPatchCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsPatchCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { @@ -6589,17 +6395,17 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6633,11 +6439,6 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -6673,10 +6474,10 @@ type DefaultObjectAccessControlsUpdateCall struct { // Update: Updates a default object ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -6685,14 +6486,6 @@ func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *DefaultObjectAccessControlsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsUpdateCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { @@ -6768,17 +6561,17 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -6812,11 +6605,6 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -6860,14 +6648,6 @@ func (r *NotificationsService) Delete(bucket string, notification string) *Notif return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *NotificationsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *NotificationsDeleteCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { @@ -6933,7 +6713,7 @@ func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -6957,11 +6737,6 @@ func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -7001,14 +6776,6 @@ func (r *NotificationsService) Get(bucket string, notification string) *Notifica return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *NotificationsGetCall) ProvisionalUserProject(provisionalUserProject string) *NotificationsGetCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { @@ -7092,17 +6859,17 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Notification{ ServerResponse: googleapi.ServerResponse{ @@ -7136,11 +6903,6 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -7183,14 +6945,6 @@ func (r *NotificationsService) Insert(bucket string, notification *Notification) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *NotificationsInsertCall) ProvisionalUserProject(provisionalUserProject string) *NotificationsInsertCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { @@ -7265,17 +7019,17 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Notification{ ServerResponse: googleapi.ServerResponse{ @@ -7302,11 +7056,6 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -7350,14 +7099,6 @@ func (r *NotificationsService) List(bucket string) *NotificationsListCall { return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *NotificationsListCall) ProvisionalUserProject(provisionalUserProject string) *NotificationsListCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall { @@ -7440,17 +7181,17 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Notifications{ ServerResponse: googleapi.ServerResponse{ @@ -7477,11 +7218,6 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -7518,12 +7254,12 @@ type ObjectAccessControlsDeleteCall struct { // Delete: Permanently deletes the ACL entry for the specified entity on // the specified object. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7540,14 +7276,6 @@ func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAcc return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectAccessControlsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsDeleteCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { @@ -7614,7 +7342,7 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -7651,11 +7379,6 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -7687,12 +7410,12 @@ type ObjectAccessControlsGetCall struct { // Get: Returns the ACL entry for the specified entity on the specified // object. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7709,14 +7432,6 @@ func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccess return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectAccessControlsGetCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsGetCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall { @@ -7801,17 +7516,17 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -7858,11 +7573,6 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -7895,9 +7605,9 @@ type ObjectAccessControlsInsertCall struct { // Insert: Creates a new ACL entry on the specified object. // -// - bucket: Name of a bucket. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7914,14 +7624,6 @@ func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAcc return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectAccessControlsInsertCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsInsertCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { @@ -7997,17 +7699,17 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -8047,11 +7749,6 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -8087,9 +7784,9 @@ type ObjectAccessControlsListCall struct { // List: Retrieves ACL entries on the specified object. // -// - bucket: Name of a bucket. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8105,14 +7802,6 @@ func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAcces return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectAccessControlsListCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsListCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { @@ -8196,17 +7885,17 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControls{ ServerResponse: googleapi.ServerResponse{ @@ -8246,11 +7935,6 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", @@ -8284,12 +7968,12 @@ type ObjectAccessControlsPatchCall struct { // Patch: Patches an ACL entry on the specified object. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8307,14 +7991,6 @@ func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAcce return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectAccessControlsPatchCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsPatchCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { @@ -8391,17 +8067,17 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -8448,11 +8124,6 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -8489,12 +8160,12 @@ type ObjectAccessControlsUpdateCall struct { // Update: Updates an ACL entry on the specified object. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. 
Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8512,14 +8183,6 @@ func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAcc return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectAccessControlsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsUpdateCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { @@ -8596,17 +8259,17 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ @@ -8653,11 +8316,6 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -8694,11 +8352,11 @@ type ObjectsComposeCall struct { // Compose: Concatenates a list of existing objects into a new object in // the same bucket. // -// - destinationBucket: Name of the bucket containing the source -// objects. The destination object is stored in this bucket. -// - destinationObject: Name of the new object. For information about -// how to URL encode object names to be path safe, see Encoding URI -// Path Parts. +// - destinationBucket: Name of the bucket containing the source +// objects. The destination object is stored in this bucket. +// - destinationObject: Name of the new object. For information about +// how to URL encode object names to be path safe, see Encoding URI +// Path Parts. func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.destinationBucket = destinationBucket @@ -8712,16 +8370,26 @@ func (r *ObjectsService) Compose(destinationBucket string, destinationObject str // to the destination object. 
// // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) @@ -8748,21 +8416,15 @@ func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) // KmsKeyName sets the optional parameter "kmsKeyName": Resource name of // the Cloud KMS key, of the form // projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object +// +// that will be used to encrypt the object. Overrides the object +// // metadata's kms_key_name value, if any. func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { c.urlParams_.Set("kmsKeyName", kmsKeyName) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectsComposeCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsComposeCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { @@ -8838,17 +8500,17 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -8920,11 +8582,6 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -8964,18 +8621,18 @@ type ObjectsCopyCall struct { // Copy: Copies a source object to a destination object. 
Optionally // overrides metadata. // -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any.For information about how to URL encode object names to be path -// safe, see Encoding URI Path Parts. -// - destinationObject: Name of the new object. Required when the object -// metadata is not otherwise provided. Overrides the object metadata's -// name value, if any. -// - sourceBucket: Name of the bucket in which to find the source -// object. -// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts. +// - destinationBucket: Name of the bucket in which to store the new +// object. Overrides the provided object metadata's bucket value, if +// any.For information about how to URL encode object names to be path +// safe, see Encoding URI Path Parts. +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's +// name value, if any. +// - sourceBucket: Name of the bucket in which to find the source +// object. +// - sourceObject: Name of the source object. For information about how +// to URL encode object names to be path safe, see Encoding URI Path +// Parts. func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -8990,7 +8647,9 @@ func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinat // "destinationKmsKeyName": Resource name of the Cloud KMS key, of the // form // projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object +// +// that will be used to encrypt the object. Overrides the object +// // metadata's kms_key_name value, if any. func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsCopyCall { c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) @@ -9002,16 +8661,26 @@ func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *O // to the destination object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. 
func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) @@ -9096,21 +8765,14 @@ func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationN // specifies the acl property, when it defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { c.urlParams_.Set("projection", projection) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectsCopyCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsCopyCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // SourceGeneration sets the optional parameter "sourceGeneration": If // present, selects a specific revision of the source object (as opposed // to the latest version, the default). @@ -9196,17 +8858,17 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -9329,11 +8991,6 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "sourceBucket": { // "description": "Name of the bucket in which to find the source object.", // "location": "path", @@ -9389,9 +9046,9 @@ type ObjectsDeleteCall struct { // if versioning is not enabled for the bucket, or if the generation // parameter is used. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9443,14 +9100,6 @@ func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsDeleteCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { @@ -9516,7 +9165,7 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -9570,11 +9219,6 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -9605,9 +9249,9 @@ type ObjectsGetCall struct { // Get: Retrieves an object or its metadata. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9663,21 +9307,14 @@ func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64 // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { c.urlParams_.Set("projection", projection) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectsGetCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsGetCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { @@ -9758,7 +9395,7 @@ func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, } if err := googleapi.CheckMediaResponse(res); err != nil { res.Body.Close() - return nil, err + return nil, gensupport.WrapError(err) } return res, nil } @@ -9777,17 +9414,17 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -9864,11 +9501,6 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -9906,9 +9538,9 @@ type ObjectsGetIamPolicyCall struct { // GetIamPolicy: Returns an IAM policy for the specified object. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9924,14 +9556,6 @@ func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPol return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectsGetIamPolicyCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsGetIamPolicyCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall { @@ -10015,17 +9639,17 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -10065,11 +9689,6 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -10106,8 +9725,8 @@ type ObjectsInsertCall struct { // Insert: Stores a new object and metadata. // -// - bucket: Name of the bucket in which to store the new object. -// Overrides the provided object metadata's bucket value, if any. +// - bucket: Name of the bucket in which to store the new object. +// Overrides the provided object metadata's bucket value, if any. func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -10165,7 +9784,9 @@ func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // KmsKeyName sets the optional parameter "kmsKeyName": Resource name of // the Cloud KMS key, of the form // projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object +// +// that will be used to encrypt the object. Overrides the object +// // metadata's kms_key_name value, if any. func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { c.urlParams_.Set("kmsKeyName", kmsKeyName) @@ -10185,16 +9806,26 @@ func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { // predefined set of access controls to this object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. 
func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -10206,21 +9837,14 @@ func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCa // specifies the acl property, when it defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { c.urlParams_.Set("projection", projection) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectsInsertCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsInsertCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall { @@ -10377,17 +10001,17 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) if rx != nil { @@ -10404,7 +10028,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { } defer res.Body.Close() if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } } ret := &Object{ @@ -10520,11 +10144,6 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -10626,21 +10245,14 @@ func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { c.urlParams_.Set("projection", projection) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectsListCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsListCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // StartOffset sets the optional parameter "startOffset": Filter results // to objects whose names are lexicographically equal to or after // startOffset. 
If endOffset is also set, the objects listed will have @@ -10740,17 +10352,17 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Objects{ ServerResponse: googleapi.ServerResponse{ @@ -10823,11 +10435,6 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "startOffset": { // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", // "location": "query", @@ -10895,9 +10502,9 @@ type ObjectsPatchCall struct { // Patch: Patches an object's metadata. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -10954,16 +10561,26 @@ func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int // predefined set of access controls to this object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -10974,21 +10591,14 @@ func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall // properties to return. Defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. 
func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { c.urlParams_.Set("projection", projection) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectsPatchCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsPatchCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request, for Requester Pays buckets. func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { @@ -11064,17 +10674,17 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -11172,11 +10782,6 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request, for Requester Pays buckets.", // "location": "query", @@ -11215,18 +10820,18 @@ type ObjectsRewriteCall struct { // Rewrite: Rewrites a source object to a destination object. Optionally // overrides metadata. // -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any. -// - destinationObject: Name of the new object. Required when the object -// metadata is not otherwise provided. Overrides the object metadata's -// name value, if any. For information about how to URL encode object -// names to be path safe, see Encoding URI Path Parts. -// - sourceBucket: Name of the bucket in which to find the source -// object. -// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts. +// - destinationBucket: Name of the bucket in which to store the new +// object. Overrides the provided object metadata's bucket value, if +// any. +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's +// name value, if any. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts. +// - sourceBucket: Name of the bucket in which to find the source +// object. +// - sourceObject: Name of the source object. For information about how +// to URL encode object names to be path safe, see Encoding URI Path +// Parts. 
func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -11241,7 +10846,9 @@ func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, desti // "destinationKmsKeyName": Resource name of the Cloud KMS key, of the // form // projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object +// +// that will be used to encrypt the object. Overrides the object +// // metadata's kms_key_name value, if any. func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) @@ -11253,16 +10860,26 @@ func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) // to the destination object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) @@ -11360,21 +10977,14 @@ func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall i // specifies the acl property, when it defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { c.urlParams_.Set("projection", projection) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectsRewriteCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsRewriteCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // RewriteToken sets the optional parameter "rewriteToken": Include this // field (from the previous rewrite response) on each rewrite request // after the first one, until the rewrite response 'done' flag is true. 
@@ -11471,17 +11081,17 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RewriteResponse{ ServerResponse: googleapi.ServerResponse{ @@ -11610,11 +11220,6 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "rewriteToken": { // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", // "location": "query", @@ -11674,9 +11279,9 @@ type ObjectsSetIamPolicyCall struct { // SetIamPolicy: Updates an IAM policy for the specified object. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11693,14 +11298,6 @@ func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPol return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectsSetIamPolicyCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsSetIamPolicyCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { @@ -11776,17 +11373,17 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -11826,11 +11423,6 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -11868,10 +11460,10 @@ type ObjectsTestIamPermissionsCall struct { // TestIamPermissions: Tests a set of permissions on the given object to // see which, if any, are held by the caller. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -// - permissions: Permissions to test. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. +// - permissions: Permissions to test. func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11888,14 +11480,6 @@ func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTes return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectsTestIamPermissionsCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsTestIamPermissionsCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { @@ -11979,17 +11563,17 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -12037,11 +11621,6 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -12077,9 +11656,9 @@ type ObjectsUpdateCall struct { // Update: Updates an object's metadata. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -12136,16 +11715,26 @@ func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // predefined set of access controls to this object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -12156,21 +11745,14 @@ func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCa // properties to return. Defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. 
func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { c.urlParams_.Set("projection", projection) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsUpdateCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { @@ -12246,17 +11828,17 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -12354,11 +11936,6 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -12459,21 +12036,14 @@ func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { c.urlParams_.Set("projection", projection) return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ObjectsWatchAllCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsWatchAllCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // StartOffset sets the optional parameter "startOffset": Filter results // to objects whose names are lexicographically equal to or after // startOffset. 
If endOffset is also set, the objects listed will have @@ -12565,17 +12135,17 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Channel{ ServerResponse: googleapi.ServerResponse{ @@ -12648,11 +12218,6 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) // "location": "query", // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "startOffset": { // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", // "location": "query", @@ -12779,17 +12344,17 @@ func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKey{ ServerResponse: googleapi.ServerResponse{ @@ -12928,7 +12493,7 @@ func (c *ProjectsHmacKeysDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -12982,9 +12547,9 @@ type ProjectsHmacKeysGetCall struct { // Get: Retrieves an HMAC key's metadata // -// - accessId: Name of the HMAC key. -// - projectId: Project ID owning the service account of the requested -// key. +// - accessId: Name of the HMAC key. +// - projectId: Project ID owning the service account of the requested +// key. 
func (r *ProjectsHmacKeysService) Get(projectId string, accessId string) *ProjectsHmacKeysGetCall { c := &ProjectsHmacKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -13075,17 +12640,17 @@ func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMeta if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKeyMetadata{ ServerResponse: googleapi.ServerResponse{ @@ -13276,17 +12841,17 @@ func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKeysMetadata{ ServerResponse: googleapi.ServerResponse{ @@ -13392,9 +12957,9 @@ type ProjectsHmacKeysUpdateCall struct { // Update: Updates the state of an HMAC key. See the HMAC Key resource // descriptor for valid states. // -// - accessId: Name of the HMAC key being updated. -// - projectId: Project ID owning the service account of the updated -// key. +// - accessId: Name of the HMAC key being updated. +// - projectId: Project ID owning the service account of the updated +// key. func (r *ProjectsHmacKeysService) Update(projectId string, accessId string, hmackeymetadata *HmacKeyMetadata) *ProjectsHmacKeysUpdateCall { c := &ProjectsHmacKeysUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -13478,17 +13043,17 @@ func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyM if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HmacKeyMetadata{ ServerResponse: googleapi.ServerResponse{ @@ -13564,14 +13129,6 @@ func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAc return c } -// ProvisionalUserProject sets the optional parameter -// "provisionalUserProject": The project to be billed for this request -// if the target bucket is requester-pays bucket. -func (c *ProjectsServiceAccountGetCall) ProvisionalUserProject(provisionalUserProject string) *ProjectsServiceAccountGetCall { - c.urlParams_.Set("provisionalUserProject", provisionalUserProject) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. 
func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall { @@ -13654,17 +13211,17 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceAccount{ ServerResponse: googleapi.ServerResponse{ @@ -13691,11 +13248,6 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi // "required": true, // "type": "string" // }, - // "provisionalUserProject": { - // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", - // "location": "query", - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request.", // "location": "query", diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/transport/cert/default_cert.go index 04aefec0afa5..21d0251531ce 100644 --- a/vendor/google.golang.org/api/transport/cert/default_cert.go +++ b/vendor/google.golang.org/api/transport/cert/default_cert.go @@ -14,32 +14,19 @@ package cert import ( "crypto/tls" - "crypto/x509" - "encoding/json" "errors" - "fmt" - "io/ioutil" - "os" - "os/exec" - "os/user" - "path/filepath" "sync" - "time" -) - -const ( - metadataPath = ".secureConnect" - metadataFile = "context_aware_metadata.json" ) // defaultCertData holds all the variables pertaining to // the default certficate source created by DefaultSource. +// +// A singleton model is used to allow the source to be reused +// by the transport layer. type defaultCertData struct { - once sync.Once - source Source - err error - cachedCertMutex sync.Mutex - cachedCert *tls.Certificate + once sync.Once + source Source + err error } var ( @@ -49,93 +36,23 @@ var ( // Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) -// DefaultSource returns a certificate source that execs the command specified -// in the file at ~/.secureConnect/context_aware_metadata.json +// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable. +var errSourceUnavailable = errors.New("certificate source is unavailable") + +// DefaultSource returns a certificate source using the preferred EnterpriseCertificateProxySource. +// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource. // -// If that file does not exist, a nil source is returned. +// If neither source is available (due to missing configurations), a nil Source and a nil Error are +// returned to indicate that a default certificate source is unavailable. 
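
The fallback behavior described above can be exercised directly. Below is a minimal sketch, assuming the returned Source (when non-nil) is wired into crypto/tls as the package comment suggests; the http.Client wiring and its use are placeholders for illustration only, and end developers would normally configure clients through google.golang.org/api/option rather than calling this package.

// Illustrative sketch only; not part of the vendored file.
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"google.golang.org/api/transport/cert"
)

func main() {
	// DefaultSource prefers the enterprise certificate proxy and falls back
	// to Secure Connect; it returns (nil, nil) when neither is configured.
	src, err := cert.DefaultSource()
	if err != nil {
		log.Fatal(err)
	}
	cfg := &tls.Config{}
	if src != nil {
		// Present the device certificate during the mTLS handshake.
		cfg.GetClientCertificate = src
	}
	client := &http.Client{Transport: &http.Transport{TLSClientConfig: cfg}}
	_ = client // use client against an mTLS-enabled endpoint
}
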
func DefaultSource() (Source, error) { defaultCert.once.Do(func() { - defaultCert.source, defaultCert.err = newSecureConnectSource() + defaultCert.source, defaultCert.err = NewEnterpriseCertificateProxySource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = NewSecureConnectSource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = nil, nil + } + } }) return defaultCert.source, defaultCert.err } - -type secureConnectSource struct { - metadata secureConnectMetadata -} - -type secureConnectMetadata struct { - Cmd []string `json:"cert_provider_command"` -} - -// newSecureConnectSource creates a secureConnectSource by reading the well-known file. -func newSecureConnectSource() (Source, error) { - user, err := user.Current() - if err != nil { - // Ignore. - return nil, nil - } - filename := filepath.Join(user.HomeDir, metadataPath, metadataFile) - file, err := ioutil.ReadFile(filename) - if os.IsNotExist(err) { - // Ignore. - return nil, nil - } - if err != nil { - return nil, err - } - - var metadata secureConnectMetadata - if err := json.Unmarshal(file, &metadata); err != nil { - return nil, fmt.Errorf("cert: could not parse JSON in %q: %v", filename, err) - } - if err := validateMetadata(metadata); err != nil { - return nil, fmt.Errorf("cert: invalid config in %q: %v", filename, err) - } - return (&secureConnectSource{ - metadata: metadata, - }).getClientCertificate, nil -} - -func validateMetadata(metadata secureConnectMetadata) error { - if len(metadata.Cmd) == 0 { - return errors.New("empty cert_provider_command") - } - return nil -} - -func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { - defaultCert.cachedCertMutex.Lock() - defer defaultCert.cachedCertMutex.Unlock() - if defaultCert.cachedCert != nil && !isCertificateExpired(defaultCert.cachedCert) { - return defaultCert.cachedCert, nil - } - // Expand OS environment variables in the cert provider command such as "$HOME". - for i := 0; i < len(s.metadata.Cmd); i++ { - s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) - } - command := s.metadata.Cmd - data, err := exec.Command(command[0], command[1:]...).Output() - if err != nil { - // TODO(cbro): read stderr for error message? Might contain sensitive info. - return nil, err - } - cert, err := tls.X509KeyPair(data, data) - if err != nil { - return nil, err - } - defaultCert.cachedCert = &cert - return &cert, nil -} - -// isCertificateExpired returns true if the given cert is expired or invalid. -func isCertificateExpired(cert *tls.Certificate) bool { - if len(cert.Certificate) == 0 { - return true - } - parsed, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - return true - } - return time.Now().After(parsed.NotAfter) -} diff --git a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go b/vendor/google.golang.org/api/transport/cert/enterprise_cert.go new file mode 100644 index 000000000000..eaa52e07c086 --- /dev/null +++ b/vendor/google.golang.org/api/transport/cert/enterprise_cert.go @@ -0,0 +1,56 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. 
+// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "errors" + "os" + + "github.com/googleapis/enterprise-certificate-proxy/client" +) + +type ecpSource struct { + key *client.Key +} + +// NewEnterpriseCertificateProxySource creates a certificate source +// using the Enterprise Certificate Proxy client, which delegates +// certifcate related operations to an OS-specific "signer binary" +// that communicates with the native keystore (ex. keychain on MacOS). +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate issuer and the location of the signer binary. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. +func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) { + key, err := client.Cred(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Enterprise Certificate Proxy is not supported. + return nil, errSourceUnavailable + } + return nil, err + } + + return (&ecpSource{ + key: key, + }).getClientCertificate, nil +} + +func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var cert tls.Certificate + cert.PrivateKey = s.key + cert.Certificate = s.key.CertificateChain() + return &cert, nil +} diff --git a/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go b/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go new file mode 100644 index 000000000000..5913cab80172 --- /dev/null +++ b/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go @@ -0,0 +1,123 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" + "time" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +type secureConnectSource struct { + metadata secureConnectMetadata + + // Cache the cert to avoid executing helper command repeatedly. + cachedCertMutex sync.Mutex + cachedCert *tls.Certificate +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// NewSecureConnectSource creates a certificate source using +// the Secure Connect Helper and its associated metadata file. +// +// The configFilePath points to the location of the context aware metadata file. +// If configFilePath is empty, use the default context aware metadata location. +func NewSecureConnectSource(configFilePath string) (Source, error) { + if configFilePath == "" { + user, err := user.Current() + if err != nil { + // Error locating the default config means Secure Connect is not supported. 
+ return nil, errSourceUnavailable + } + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) + } + + file, err := ioutil.ReadFile(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Secure Connect is not supported. + return nil, errSourceUnavailable + } + return nil, err + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + s.cachedCertMutex.Lock() + defer s.cachedCertMutex.Unlock() + if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) { + return s.cachedCert, nil + } + // Expand OS environment variables in the cert provider command such as "$HOME". + for i := 0; i < len(s.metadata.Cmd); i++ { + s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) + } + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + s.cachedCert = &cert + return &cert, nil +} + +// isCertificateExpired returns true if the given cert is expired or invalid. +func isCertificateExpired(cert *tls.Certificate) bool { + if len(cert.Certificate) == 0 { + return true + } + parsed, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return true + } + return time.Now().After(parsed.NotAfter) +} diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go new file mode 100644 index 000000000000..652b8eba51d3 --- /dev/null +++ b/vendor/google.golang.org/api/transport/dial.go @@ -0,0 +1,48 @@ +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package transport + +import ( + "context" + "net/http" + + "golang.org/x/oauth2/google" + "google.golang.org/grpc" + + "google.golang.org/api/internal" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + htransport "google.golang.org/api/transport/http" +) + +// NewHTTPClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. +func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + return htransport.NewClient(ctx, opts...) +} + +// DialGRPC returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func DialGRPC(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.Dial(ctx, opts...) +} + +// DialGRPCInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. 
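
A minimal sketch of calling the transport helpers defined in this new file, assuming typical client options such as option.WithScopes and option.WithEndpoint (which come from google.golang.org/api/option and are not introduced by this patch) and default application credentials; the Pub/Sub endpoint string is only a placeholder.

// Illustrative sketch only; not part of the vendored file.
package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	"google.golang.org/api/transport"
)

func main() {
	ctx := context.Background()

	// HTTP client plus the resolved service endpoint.
	httpClient, endpoint, err := transport.NewHTTPClient(ctx,
		option.WithScopes("https://www.googleapis.com/auth/cloud-platform"))
	if err != nil {
		log.Fatal(err)
	}
	_ = httpClient
	_ = endpoint

	// Authenticated gRPC connection to a Google Cloud service.
	conn, err := transport.DialGRPC(ctx,
		option.WithEndpoint("pubsub.googleapis.com:443"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
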
+func DialGRPCInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.DialInsecure(ctx, opts...) +} + +// Creds constructs a google.Credentials from the information in the options, +// or obtains the default credentials in the same way as google.FindDefaultCredentials. +func Creds(ctx context.Context, opts ...option.ClientOption) (*google.Credentials, error) { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internal.Creds(ctx, &ds) +} diff --git a/vendor/google.golang.org/api/transport/doc.go b/vendor/google.golang.org/api/transport/doc.go new file mode 100644 index 000000000000..7143abee4581 --- /dev/null +++ b/vendor/google.golang.org/api/transport/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport provides utility methods for creating authenticated +// transports to Google's HTTP and gRPC APIs. It is intended to be used in +// conjunction with google.golang.org/api/option. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package transport diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index d25497868cab..efcc8e6c641f 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" grpcgoogle "google.golang.org/grpc/credentials/google" + grpcinsecure "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/oauth" // Install grpclb, which is required for direct path. @@ -126,10 +127,26 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if err != nil { return nil, err } - var grpcOpts []grpc.DialOption + + var transportCreds credentials.TransportCredentials if insecure { - grpcOpts = []grpc.DialOption{grpc.WithInsecure()} - } else if !o.NoAuth { + transportCreds = grpcinsecure.NewCredentials() + } else { + transportCreds = credentials.NewTLS(&tls.Config{ + GetClientCertificate: clientCertSource, + }) + } + + // Initialize gRPC dial options with transport-level security options. + grpcOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(transportCreds), + } + + // Authentication can only be sent when communicating over a secure connection. + // + // TODO: Should we be more lenient in the future and allow sending credentials + // when dialing an insecure connection? + if !o.NoAuth && !insecure { if o.APIKey != "" { log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") } @@ -142,21 +159,29 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C o.QuotaProject = internal.QuotaProjectFromCreds(creds) } + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(grpcTokenSource{ + TokenSource: oauth.TokenSource{creds.TokenSource}, + quotaProject: o.QuotaProject, + requestReason: o.RequestReason, + }), + ) + // Attempt Direct Path: if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() { + // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. 
grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{oauth.TokenSource{creds.TokenSource}}))} if timeoutDialerOption != nil { grpcOpts = append(grpcOpts, timeoutDialerOption) } // Check if google-c2p resolver is enabled for DirectPath - // TODO(mohanli): remove grpc version guard once google-api-go-client is able to depends on the latest grpc - if grpc.Version >= "1.42" && strings.EqualFold(os.Getenv(enableDirectPathXds), "true") { + if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") { // google-c2p resolver target must not have a port number if addr, _, err := net.SplitHostPort(endpoint); err == nil { - endpoint = "google-c2p-experimental:///" + addr + endpoint = "google-c2p:///" + addr } else { - endpoint = "google-c2p-experimental:///" + endpoint + endpoint = "google-c2p:///" + endpoint } } else { if !strings.HasPrefix(endpoint, "dns:///") { @@ -170,18 +195,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)) } // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. - } else { - tlsConfig := &tls.Config{ - GetClientCertificate: clientCertSource, - } - grpcOpts = []grpc.DialOption{ - grpc.WithPerRPCCredentials(grpcTokenSource{ - TokenSource: oauth.TokenSource{creds.TokenSource}, - quotaProject: o.QuotaProject, - requestReason: o.RequestReason, - }), - grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), - } } } diff --git a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go index 4bf9e8217239..507cd3ec63ab 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go +++ b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go @@ -12,7 +12,6 @@ import ( "net" "syscall" - "golang.org/x/sys/unix" "google.golang.org/grpc" ) @@ -20,6 +19,9 @@ const ( // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By // default is 20 seconds. tcpUserTimeoutMilliseconds = 20000 + + // Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT. + tcpUserTimeoutOp = 0x12 ) func init() { @@ -33,7 +35,7 @@ func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { var syscallErr error controlErr := c.Control(func(fd uintptr) { syscallErr = syscall.SetsockoptInt( - int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, tcpUserTimeoutMilliseconds) + int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds) }) if syscallErr != nil { return syscallErr diff --git a/vendor/google.golang.org/api/transport/http/default_transport_go113.go b/vendor/google.golang.org/api/transport/http/default_transport_go113.go deleted file mode 100644 index 69b2280cddcf..000000000000 --- a/vendor/google.golang.org/api/transport/http/default_transport_go113.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package http - -import "net/http" - -// clonedTransport returns the given RoundTripper as a cloned *http.Transport. -// It returns nil if the RoundTripper can't be cloned or coerced to -// *http.Transport. 
-func clonedTransport(rt http.RoundTripper) *http.Transport { - t, ok := rt.(*http.Transport) - if !ok { - return nil - } - return t.Clone() -} diff --git a/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go b/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go deleted file mode 100644 index 90ee1bd6185c..000000000000 --- a/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 -// +build !go1.13 - -package http - -import "net/http" - -// clonedTransport returns the given RoundTripper as a cloned *http.Transport. -// For versions of Go <1.13, this is not supported, so return nil. -func clonedTransport(rt http.RoundTripper) *http.Transport { - return nil -} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 179534a0c7c5..cab709f0c052 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -210,3 +210,14 @@ func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) ht Propagation: &propagation.HTTPFormat{}, } } + +// clonedTransport returns the given RoundTripper as a cloned *http.Transport. +// It returns nil if the RoundTripper can't be cloned or coerced to +// *http.Transport. +func clonedTransport(rt http.RoundTripper) *http.Transport { + t, ok := rt.(*http.Transport) + if !ok { + return nil + } + return t.Clone() +} diff --git a/vendor/google.golang.org/api/transport/internal/dca/dca.go b/vendor/google.golang.org/api/transport/internal/dca/dca.go index 071586e94463..78004f0475f8 100644 --- a/vendor/google.golang.org/api/transport/internal/dca/dca.go +++ b/vendor/google.golang.org/api/transport/internal/dca/dca.go @@ -6,17 +6,17 @@ // Authentication according to https://google.aip.dev/auth/4114 // // The overall logic for DCA is as follows: -// 1. If both endpoint override and client certificate are specified, use them as is. -// 2. If user does not specify client certificate, we will attempt to use default -// client certificate. -// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if -// client certificate is available and defaultEndpoint otherwise. +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If user does not specify client certificate, we will attempt to use default +// client certificate. +// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +// client certificate is available and defaultEndpoint otherwise. // // Implications of the above logic: -// 1. If the user specifies a non-mTLS endpoint override but client certificate is -// available, we will pass along the cert anyway and let the server decide what to do. -// 2. If the user specifies an mTLS endpoint override but client certificate is not -// available, we will not fail-fast, but let backend throw error when connecting. +// 1. If the user specifies a non-mTLS endpoint override but client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. If the user specifies an mTLS endpoint override but client certificate is not +// available, we will not fail-fast, but let backend throw error when connecting. 
// // We would like to avoid introducing client-side logic that parses whether the // endpoint override is an mTLS url, since the url pattern may change at anytime. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 66fdb650f4ba..ec7c602ecf99 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -15,17 +15,20 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.18.1 // source: google/api/client.proto package annotations import ( reflect "reflect" + sync "sync" + api "google.golang.org/genproto/googleapis/api" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" descriptorpb "google.golang.org/protobuf/types/descriptorpb" + durationpb "google.golang.org/protobuf/types/known/durationpb" ) const ( @@ -35,6 +38,1047 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// The organization for which the client libraries are being published. +// Affects the url where generated docs are published, etc. +type ClientLibraryOrganization int32 + +const ( + // Not useful. + ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED ClientLibraryOrganization = 0 + // Google Cloud Platform Org. + ClientLibraryOrganization_CLOUD ClientLibraryOrganization = 1 + // Ads (Advertising) Org. + ClientLibraryOrganization_ADS ClientLibraryOrganization = 2 + // Photos Org. + ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3 + // Street View Org. + ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4 +) + +// Enum value maps for ClientLibraryOrganization. +var ( + ClientLibraryOrganization_name = map[int32]string{ + 0: "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", + 1: "CLOUD", + 2: "ADS", + 3: "PHOTOS", + 4: "STREET_VIEW", + } + ClientLibraryOrganization_value = map[string]int32{ + "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0, + "CLOUD": 1, + "ADS": 2, + "PHOTOS": 3, + "STREET_VIEW": 4, + } +) + +func (x ClientLibraryOrganization) Enum() *ClientLibraryOrganization { + p := new(ClientLibraryOrganization) + *p = x + return p +} + +func (x ClientLibraryOrganization) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryOrganization) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[0].Descriptor() +} + +func (ClientLibraryOrganization) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[0] +} + +func (x ClientLibraryOrganization) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryOrganization.Descriptor instead. +func (ClientLibraryOrganization) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// To where should client libraries be published? +type ClientLibraryDestination int32 + +const ( + // Client libraries will neither be generated nor published to package + // managers. + ClientLibraryDestination_CLIENT_LIBRARY_DESTINATION_UNSPECIFIED ClientLibraryDestination = 0 + // Generate the client library in a repo under github.com/googleapis, + // but don't publish it to package managers. 
+ ClientLibraryDestination_GITHUB ClientLibraryDestination = 10 + // Publish the library to package managers like nuget.org and npmjs.com. + ClientLibraryDestination_PACKAGE_MANAGER ClientLibraryDestination = 20 +) + +// Enum value maps for ClientLibraryDestination. +var ( + ClientLibraryDestination_name = map[int32]string{ + 0: "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", + 10: "GITHUB", + 20: "PACKAGE_MANAGER", + } + ClientLibraryDestination_value = map[string]int32{ + "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED": 0, + "GITHUB": 10, + "PACKAGE_MANAGER": 20, + } +) + +func (x ClientLibraryDestination) Enum() *ClientLibraryDestination { + p := new(ClientLibraryDestination) + *p = x + return p +} + +func (x ClientLibraryDestination) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryDestination) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[1].Descriptor() +} + +func (ClientLibraryDestination) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[1] +} + +func (x ClientLibraryDestination) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryDestination.Descriptor instead. +func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +// Required information for every language. +type CommonLanguageSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Link to automatically generated reference documentation. Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` + // The destination where API teams want this client library to be published. + Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` +} + +func (x *CommonLanguageSettings) Reset() { + *x = CommonLanguageSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonLanguageSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonLanguageSettings) ProtoMessage() {} + +func (x *CommonLanguageSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonLanguageSettings.ProtoReflect.Descriptor instead. +func (*CommonLanguageSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +func (x *CommonLanguageSettings) GetReferenceDocsUri() string { + if x != nil { + return x.ReferenceDocsUri + } + return "" +} + +func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { + if x != nil { + return x.Destinations + } + return nil +} + +// Details about how and where to publish client libraries. 
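
A minimal sketch constructing the CommonLanguageSettings message above, assuming the google.golang.org/genproto/googleapis/api/annotations import path from this file's diff header; the reference docs URI is the example given in the field comment.

// Illustrative sketch only; not part of the generated file.
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	common := &annotations.CommonLanguageSettings{
		ReferenceDocsUri: "https://cloud.google.com/nodejs/docs/reference/asset/latest",
		Destinations: []annotations.ClientLibraryDestination{
			annotations.ClientLibraryDestination_GITHUB,
			annotations.ClientLibraryDestination_PACKAGE_MANAGER,
		},
	}
	fmt.Println(common.GetDestinations())
}
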
+type ClientLibrarySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Version of the API to apply these settings to. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Launch stage of this version of the API. + LaunchStage api.LaunchStage `protobuf:"varint,2,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // When using transport=rest, the client request will encode enums as + // numbers rather than strings. + RestNumericEnums bool `protobuf:"varint,3,opt,name=rest_numeric_enums,json=restNumericEnums,proto3" json:"rest_numeric_enums,omitempty"` + // Settings for legacy Java features, supported in the Service YAML. + JavaSettings *JavaSettings `protobuf:"bytes,21,opt,name=java_settings,json=javaSettings,proto3" json:"java_settings,omitempty"` + // Settings for C++ client libraries. + CppSettings *CppSettings `protobuf:"bytes,22,opt,name=cpp_settings,json=cppSettings,proto3" json:"cpp_settings,omitempty"` + // Settings for PHP client libraries. + PhpSettings *PhpSettings `protobuf:"bytes,23,opt,name=php_settings,json=phpSettings,proto3" json:"php_settings,omitempty"` + // Settings for Python client libraries. + PythonSettings *PythonSettings `protobuf:"bytes,24,opt,name=python_settings,json=pythonSettings,proto3" json:"python_settings,omitempty"` + // Settings for Node client libraries. + NodeSettings *NodeSettings `protobuf:"bytes,25,opt,name=node_settings,json=nodeSettings,proto3" json:"node_settings,omitempty"` + // Settings for .NET client libraries. + DotnetSettings *DotnetSettings `protobuf:"bytes,26,opt,name=dotnet_settings,json=dotnetSettings,proto3" json:"dotnet_settings,omitempty"` + // Settings for Ruby client libraries. + RubySettings *RubySettings `protobuf:"bytes,27,opt,name=ruby_settings,json=rubySettings,proto3" json:"ruby_settings,omitempty"` + // Settings for Go client libraries. + GoSettings *GoSettings `protobuf:"bytes,28,opt,name=go_settings,json=goSettings,proto3" json:"go_settings,omitempty"` +} + +func (x *ClientLibrarySettings) Reset() { + *x = ClientLibrarySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientLibrarySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientLibrarySettings) ProtoMessage() {} + +func (x *ClientLibrarySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientLibrarySettings.ProtoReflect.Descriptor instead. 
+func (*ClientLibrarySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientLibrarySettings) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ClientLibrarySettings) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *ClientLibrarySettings) GetRestNumericEnums() bool { + if x != nil { + return x.RestNumericEnums + } + return false +} + +func (x *ClientLibrarySettings) GetJavaSettings() *JavaSettings { + if x != nil { + return x.JavaSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetCppSettings() *CppSettings { + if x != nil { + return x.CppSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPhpSettings() *PhpSettings { + if x != nil { + return x.PhpSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPythonSettings() *PythonSettings { + if x != nil { + return x.PythonSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetNodeSettings() *NodeSettings { + if x != nil { + return x.NodeSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetDotnetSettings() *DotnetSettings { + if x != nil { + return x.DotnetSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetRubySettings() *RubySettings { + if x != nil { + return x.RubySettings + } + return nil +} + +func (x *ClientLibrarySettings) GetGoSettings() *GoSettings { + if x != nil { + return x.GoSettings + } + return nil +} + +// This message configures the settings for publishing [Google Cloud Client +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) +// generated from the service config. +type Publishing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of API method settings, e.g. the behavior for methods that use the + // long-running operation pattern. + MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"` + // Link to a place that API users can report issues. Example: + // https://issuetracker.google.com/issues/new?component=190865&template=1161103 + NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"` + // Link to product home page. Example: + // https://cloud.google.com/asset-inventory/docs/overview + DocumentationUri string `protobuf:"bytes,102,opt,name=documentation_uri,json=documentationUri,proto3" json:"documentation_uri,omitempty"` + // Used as a tracking tag when collecting data about the APIs developer + // relations artifacts like docs, packages delivered to package managers, + // etc. Example: "speech". + ApiShortName string `protobuf:"bytes,103,opt,name=api_short_name,json=apiShortName,proto3" json:"api_short_name,omitempty"` + // GitHub label to apply to issues and pull requests opened for this API. + GithubLabel string `protobuf:"bytes,104,opt,name=github_label,json=githubLabel,proto3" json:"github_label,omitempty"` + // GitHub teams to be added to CODEOWNERS in the directory in GitHub + // containing source code for the client libraries for this API. + CodeownerGithubTeams []string `protobuf:"bytes,105,rep,name=codeowner_github_teams,json=codeownerGithubTeams,proto3" json:"codeowner_github_teams,omitempty"` + // A prefix used in sample code when demarking regions to be included in + // documentation. 
+ DocTagPrefix string `protobuf:"bytes,106,opt,name=doc_tag_prefix,json=docTagPrefix,proto3" json:"doc_tag_prefix,omitempty"` + // For whom the client library is being published. + Organization ClientLibraryOrganization `protobuf:"varint,107,opt,name=organization,proto3,enum=google.api.ClientLibraryOrganization" json:"organization,omitempty"` + // Client library settings. If the same version string appears multiple + // times in this list, then the last one wins. Settings from earlier + // settings with the same version string are discarded. + LibrarySettings []*ClientLibrarySettings `protobuf:"bytes,109,rep,name=library_settings,json=librarySettings,proto3" json:"library_settings,omitempty"` +} + +func (x *Publishing) Reset() { + *x = Publishing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Publishing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Publishing) ProtoMessage() {} + +func (x *Publishing) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Publishing.ProtoReflect.Descriptor instead. +func (*Publishing) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{2} +} + +func (x *Publishing) GetMethodSettings() []*MethodSettings { + if x != nil { + return x.MethodSettings + } + return nil +} + +func (x *Publishing) GetNewIssueUri() string { + if x != nil { + return x.NewIssueUri + } + return "" +} + +func (x *Publishing) GetDocumentationUri() string { + if x != nil { + return x.DocumentationUri + } + return "" +} + +func (x *Publishing) GetApiShortName() string { + if x != nil { + return x.ApiShortName + } + return "" +} + +func (x *Publishing) GetGithubLabel() string { + if x != nil { + return x.GithubLabel + } + return "" +} + +func (x *Publishing) GetCodeownerGithubTeams() []string { + if x != nil { + return x.CodeownerGithubTeams + } + return nil +} + +func (x *Publishing) GetDocTagPrefix() string { + if x != nil { + return x.DocTagPrefix + } + return "" +} + +func (x *Publishing) GetOrganization() ClientLibraryOrganization { + if x != nil { + return x.Organization + } + return ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED +} + +func (x *Publishing) GetLibrarySettings() []*ClientLibrarySettings { + if x != nil { + return x.LibrarySettings + } + return nil +} + +// Settings for Java client libraries. +type JavaSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The package name to use in Java. Clobbers the java_package option + // set in the protobuf. This should be used **only** by APIs + // who have already set the language_settings.java.package_name" field + // in gapic.yaml. API teams should use the protobuf java_package option + // where possible. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // library_package: com.google.cloud.pubsub.v1 + LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"` + // Configure the Java class name to use instead of the service's for its + // corresponding generated GAPIC client. 
Keys are fully-qualified + // service names as they appear in the protobuf (including the full + // the language_settings.java.interface_names" field in gapic.yaml. API + // teams should otherwise use the service name as it appears in the + // protobuf. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // service_class_names: + // - google.pubsub.v1.Publisher: TopicAdmin + // - google.pubsub.v1.Subscriber: SubscriptionAdmin + ServiceClassNames map[string]string `protobuf:"bytes,2,rep,name=service_class_names,json=serviceClassNames,proto3" json:"service_class_names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,3,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *JavaSettings) Reset() { + *x = JavaSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JavaSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JavaSettings) ProtoMessage() {} + +func (x *JavaSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JavaSettings.ProtoReflect.Descriptor instead. +func (*JavaSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{3} +} + +func (x *JavaSettings) GetLibraryPackage() string { + if x != nil { + return x.LibraryPackage + } + return "" +} + +func (x *JavaSettings) GetServiceClassNames() map[string]string { + if x != nil { + return x.ServiceClassNames + } + return nil +} + +func (x *JavaSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for C++ client libraries. +type CppSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *CppSettings) Reset() { + *x = CppSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CppSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CppSettings) ProtoMessage() {} + +func (x *CppSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CppSettings.ProtoReflect.Descriptor instead. +func (*CppSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{4} +} + +func (x *CppSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Php client libraries. +type PhpSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PhpSettings) Reset() { + *x = PhpSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PhpSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhpSettings) ProtoMessage() {} + +func (x *PhpSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhpSettings.ProtoReflect.Descriptor instead. +func (*PhpSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{5} +} + +func (x *PhpSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Python client libraries. +type PythonSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PythonSettings) Reset() { + *x = PythonSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings) ProtoMessage() {} + +func (x *PythonSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings.ProtoReflect.Descriptor instead. +func (*PythonSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6} +} + +func (x *PythonSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Node client libraries. +type NodeSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *NodeSettings) Reset() { + *x = NodeSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeSettings) ProtoMessage() {} + +func (x *NodeSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeSettings.ProtoReflect.Descriptor instead. 
+func (*NodeSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{7} +} + +func (x *NodeSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Dotnet client libraries. +type DotnetSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *DotnetSettings) Reset() { + *x = DotnetSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DotnetSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DotnetSettings) ProtoMessage() {} + +func (x *DotnetSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DotnetSettings.ProtoReflect.Descriptor instead. +func (*DotnetSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{8} +} + +func (x *DotnetSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Ruby client libraries. +type RubySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *RubySettings) Reset() { + *x = RubySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RubySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RubySettings) ProtoMessage() {} + +func (x *RubySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RubySettings.ProtoReflect.Descriptor instead. +func (*RubySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{9} +} + +func (x *RubySettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Go client libraries. +type GoSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *GoSettings) Reset() { + *x = GoSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoSettings) ProtoMessage() {} + +func (x *GoSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoSettings.ProtoReflect.Descriptor instead. +func (*GoSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{10} +} + +func (x *GoSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Describes the generator configuration for a method. +type MethodSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of the method, for which the options below apply. + // This is used to find the method to apply the options. + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Describes settings to use for long-running operations when generating + // API methods for RPCs. Complements RPCs that use the annotations in + // google/longrunning/operations.proto. + // + // Example of a YAML configuration:: + // + // publishing: + // method_behavior: + // - selector: CreateAdDomain + // long_running: + // initial_poll_delay: + // seconds: 60 # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: + // seconds: 360 # 6 minutes + // total_poll_timeout: + // seconds: 54000 # 90 minutes + LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` +} + +func (x *MethodSettings) Reset() { + *x = MethodSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings) ProtoMessage() {} + +func (x *MethodSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings.ProtoReflect.Descriptor instead. +func (*MethodSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11} +} + +func (x *MethodSettings) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning { + if x != nil { + return x.LongRunning + } + return nil +} + +// Describes settings to use when generating API methods that use the +// long-running operation pattern. +// All default values below are from those used in the client library +// generators (e.g. 
+// [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). +type MethodSettings_LongRunning struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Initial delay after which the first poll request will be made. + // Default value: 5 seconds. + InitialPollDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=initial_poll_delay,json=initialPollDelay,proto3" json:"initial_poll_delay,omitempty"` + // Multiplier to gradually increase delay between subsequent polls until it + // reaches max_poll_delay. + // Default value: 1.5. + PollDelayMultiplier float32 `protobuf:"fixed32,2,opt,name=poll_delay_multiplier,json=pollDelayMultiplier,proto3" json:"poll_delay_multiplier,omitempty"` + // Maximum time between two subsequent poll requests. + // Default value: 45 seconds. + MaxPollDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=max_poll_delay,json=maxPollDelay,proto3" json:"max_poll_delay,omitempty"` + // Total polling timeout. + // Default value: 5 minutes. + TotalPollTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=total_poll_timeout,json=totalPollTimeout,proto3" json:"total_poll_timeout,omitempty"` +} + +func (x *MethodSettings_LongRunning) Reset() { + *x = MethodSettings_LongRunning{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings_LongRunning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings_LongRunning) ProtoMessage() {} + +func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings_LongRunning.ProtoReflect.Descriptor instead. 
+func (*MethodSettings_LongRunning) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *MethodSettings_LongRunning) GetInitialPollDelay() *durationpb.Duration { + if x != nil { + return x.InitialPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetPollDelayMultiplier() float32 { + if x != nil { + return x.PollDelayMultiplier + } + return 0 +} + +func (x *MethodSettings_LongRunning) GetMaxPollDelay() *durationpb.Duration { + if x != nil { + return x.MaxPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetTotalPollTimeout() *durationpb.Duration { + if x != nil { + return x.TotalPollTimeout + } + return nil +} + var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.MethodOptions)(nil), @@ -78,26 +1122,26 @@ var ( // // For example, the proto RPC and annotation: // - // rpc CreateSubscription(CreateSubscriptionRequest) - // returns (Subscription) { - // option (google.api.method_signature) = "name,topic"; - // } + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } // // Would add the following Java overload (in addition to the method accepting // the request object): // - // public final Subscription createSubscription(String name, String topic) + // public final Subscription createSubscription(String name, String topic) // // The following backwards-compatibility guidelines apply: // - // * Adding this annotation to an unannotated method is backwards + // - Adding this annotation to an unannotated method is backwards // compatible. - // * Adding this annotation to a method which already has existing + // - Adding this annotation to a method which already has existing // method signature annotations is backwards compatible if and only if // the new method signature annotation is last in the sequence. - // * Modifying or removing an existing method signature annotation is + // - Modifying or removing an existing method signature annotation is // a breaking change. - // * Re-ordering existing method signature annotations is a breaking + // - Re-ordering existing method signature annotations is a breaking // change. // // repeated string method_signature = 1051; @@ -111,10 +1155,10 @@ var ( // // Example: // - // service Foo { - // option (google.api.default_host) = "foo.googleapi.com"; - // ... - // } + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } // // optional string default_host = 1049; E_DefaultHost = &file_google_api_client_proto_extTypes[1] @@ -122,22 +1166,22 @@ var ( // // Example: // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform"; - // ... - // } + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } // // If there is more than one scope, use a comma-separated string: // // Example: // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform," - // "https://www.googleapis.com/auth/monitoring"; - // ... - // } + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... 
+ // } // // optional string oauth_scopes = 1050; E_OauthScopes = &file_google_api_client_proto_extTypes[2] @@ -148,44 +1192,278 @@ var File_google_api_client_proto protoreflect.FileDescriptor var file_google_api_client_proto_rawDesc = []byte{ 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, - 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, - 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, - 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, - 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 
0x65, 0x5f, 0x64, + 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x73, 0x55, 0x72, 0x69, 0x12, + 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, + 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x64, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, + 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, + 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, + 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, + 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 
0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, + 0xe0, 0x03, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, + 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, + 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, + 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, + 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, + 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, + 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, + 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, + 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, + 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, + 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 
0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, + 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, + 0x4c, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, + 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, + 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, + 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a, 0x94, 0x02, + 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, + 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, + 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, + 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 
0x6c, 0x61, 0x79, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, + 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, + 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x2a, 0x79, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, + 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, + 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, + 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x2a, + 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, + 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, + 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, + 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, + 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, + 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, + 0x09, 
0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, + 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_api_client_proto_rawDescOnce sync.Once + file_google_api_client_proto_rawDescData = file_google_api_client_proto_rawDesc +) + +func file_google_api_client_proto_rawDescGZIP() []byte { + file_google_api_client_proto_rawDescOnce.Do(func() { + file_google_api_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_client_proto_rawDescData) + }) + return file_google_api_client_proto_rawDescData } +var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 14) var file_google_api_client_proto_goTypes = []interface{}{ - (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*MethodSettings_LongRunning)(nil), // 15: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 16: google.api.LaunchStage + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 18: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 19: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ - 0, // 0: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 1, // 1: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 1, // 2: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 0, // [0:3] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination + 16, // 1: 
google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 21: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 15, // 22: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 17, // 23: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 17, // 24: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 17, // 25: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 18, // 26: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 19, // 27: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 19, // 28: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 29, // [29:29] is the sub-list for method output_type + 29, // [29:29] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 26, // [26:29] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -193,18 +1471,178 @@ func file_google_api_client_proto_init() { if File_google_api_client_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_google_api_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonLanguageSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientLibrarySettings); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Publishing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JavaSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CppSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PhpSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DotnetSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RubySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings_LongRunning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, + NumEnums: 2, + NumMessages: 14, NumExtensions: 3, NumServices: 0, }, GoTypes: file_google_api_client_proto_goTypes, DependencyIndexes: file_google_api_client_proto_depIdxs, + EnumInfos: file_google_api_client_proto_enumTypes, + MessageInfos: file_google_api_client_proto_msgTypes, ExtensionInfos: file_google_api_client_proto_extTypes, }.Build() 
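// Descriptive note on the generated registration step above: protoimpl.TypeBuilder
// is the standard protoc-gen-go mechanism that wires this file's raw descriptor
// bytes together with its Go type list (goTypes), dependency index table (depIdxs),
// and the enum/message/extension infos, registers them with the protobuf runtime,
// and returns the built file descriptor, which the generated code stores in
// File_google_api_client_proto immediately below.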
File_google_api_client_proto = out.File diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 4f34ab73cba1..6f11b7c500f8 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -127,19 +127,19 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // Example: // -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } // // This enables an HTTP REST to gRPC mapping as below: // @@ -151,21 +151,21 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // automatically become HTTP query parameters if there is no HTTP request body. // For example: // -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -// } +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } // // This enables a HTTP JSON to RPC mapping as below: // @@ -186,18 +186,18 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // specifies the mapping. Consider a REST update method on the // message resource collection: // -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } // // The following HTTP JSON to RPC mapping is enabled, where the // representation of the JSON in the request body is determined by @@ -213,19 +213,18 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // request body. 
This enables the following alternative definition of // the update method: // -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } // // The following HTTP JSON to RPC mapping is enabled: // @@ -243,20 +242,20 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // It is possible to define multiple HTTP methods for one RPC by using // the `additional_bindings` option. Example: // -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } // // This enables the following two alternative HTTP JSON to RPC mappings: // @@ -268,15 +267,15 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // ## Rules for HTTP mapping // -// 1. Leaf request fields (recursive expansion nested messages in the request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP -// request body. -// - All other fields are passed via the URL query parameters, and the -// parameter name is the field path in the request message. A repeated -// field can be represented as multiple query parameters under the same -// name. +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. // 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields // are passed via URL path and HTTP request body. // 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all @@ -284,12 +283,12 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // ### Path template syntax // -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." 
IDENT } ; -// Verb = ":" LITERAL ; +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; // // The syntax `*` matches a single URL path segment. The syntax `**` matches // zero or more URL path segments, which must be the last part of the URL path @@ -338,11 +337,11 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // Example: // -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} // // ## Special notes // diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 6515668d34f2..13ea54b2940f 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -157,45 +157,45 @@ func (ResourceDescriptor_Style) EnumDescriptor() ([]byte, []int) { // // Example: // -// message Topic { -// // Indicates this message defines a resource schema. -// // Declares the resource type in the format of {service}/{kind}. -// // For Kubernetes resources, the format is {api group}/{kind}. -// option (google.api.resource) = { -// type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" -// }; -// } +// message Topic { +// // Indicates this message defines a resource schema. +// // Declares the resource type in the format of {service}/{kind}. +// // For Kubernetes resources, the format is {api group}/{kind}. +// option (google.api.resource) = { +// type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// }; +// } // // The ResourceDescriptor Yaml config will look like: // -// resources: -// - type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" +// resources: +// - type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" // // Sometimes, resources have multiple patterns, typically because they can // live under multiple parents. 
// // Example: // -// message LogEntry { -// option (google.api.resource) = { -// type: "logging.googleapis.com/LogEntry" -// pattern: "projects/{project}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" -// }; -// } +// message LogEntry { +// option (google.api.resource) = { +// type: "logging.googleapis.com/LogEntry" +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +// }; +// } // // The ResourceDescriptor Yaml config will look like: // -// resources: -// - type: 'logging.googleapis.com/LogEntry' -// pattern: "projects/{project}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" +// resources: +// - type: 'logging.googleapis.com/LogEntry' +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" type ResourceDescriptor struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go index dd45cf6e6c1a..6707a7b1c1d2 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -44,71 +44,71 @@ const ( // // Message Definition: // -// message Request { -// // The name of the Table -// // Values can be of the following formats: -// // - `projects//tables/` -// // - `projects//instances//tables/
` -// // - `region//zones//tables/
` -// string table_name = 1; -// -// // This value specifies routing for replication. -// // It can be in the following formats: -// // - `profiles/` -// // - a legacy `profile_id` that can be any string -// string app_profile_id = 2; -// } +// message Request { +// // The name of the Table +// // Values can be of the following formats: +// // - `projects//tables/
` +// // - `projects//instances//tables/
` +// // - `region//zones//tables/
` +// string table_name = 1; +// +// // This value specifies routing for replication. +// // It can be in the following formats: +// // - `profiles/` +// // - a legacy `profile_id` that can be any string +// string app_profile_id = 2; +// } // // Example message: // -// { -// table_name: projects/proj_foo/instances/instance_bar/table/table_baz, -// app_profile_id: profiles/prof_qux -// } +// { +// table_name: projects/proj_foo/instances/instance_bar/table/table_baz, +// app_profile_id: profiles/prof_qux +// } // // The routing header consists of one or multiple key-value pairs. Every key // and value must be percent-encoded, and joined together in the format of // `key1=value1&key2=value2`. // In the examples below I am skipping the percent-encoding for readablity. // -// Example 1 +// # Example 1 // // Extracting a field from the request to put into the routing header // unchanged, with the key equal to the field name. // // annotation: // -// option (google.api.routing) = { -// // Take the `app_profile_id`. -// routing_parameters { -// field: "app_profile_id" -// } -// }; +// option (google.api.routing) = { +// // Take the `app_profile_id`. +// routing_parameters { +// field: "app_profile_id" +// } +// }; // // result: // -// x-goog-request-params: app_profile_id=profiles/prof_qux +// x-goog-request-params: app_profile_id=profiles/prof_qux // -// Example 2 +// # Example 2 // // Extracting a field from the request to put into the routing header // unchanged, with the key different from the field name. // // annotation: // -// option (google.api.routing) = { -// // Take the `app_profile_id`, but name it `routing_id` in the header. -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// }; +// option (google.api.routing) = { +// // Take the `app_profile_id`, but name it `routing_id` in the header. +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; // // result: // -// x-goog-request-params: routing_id=profiles/prof_qux +// x-goog-request-params: routing_id=profiles/prof_qux // -// Example 3 +// # Example 3 // // Extracting a field from the request to put into the routing // header, while matching a path template syntax on the field's value. @@ -116,91 +116,91 @@ const ( // NB: it is more useful to send nothing than to send garbage for the purpose // of dynamic routing, since garbage pollutes cache. Thus the matching. // -// Sub-example 3a +// # Sub-example 3a // // The field matches the template. // // annotation: // -// option (google.api.routing) = { -// // Take the `table_name`, if it's well-formed (with project-based -// // syntax). -// routing_parameters { -// field: "table_name" -// path_template: "{table_name=projects/*/instances/*/**}" -// } -// }; +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed (with project-based +// // syntax). +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=projects/*/instances/*/**}" +// } +// }; // // result: // -// x-goog-request-params: -// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +// x-goog-request-params: +// table_name=projects/proj_foo/instances/instance_bar/table/table_baz // -// Sub-example 3b +// # Sub-example 3b // // The field does not match the template. // // annotation: // -// option (google.api.routing) = { -// // Take the `table_name`, if it's well-formed (with region-based -// // syntax). 
-// routing_parameters { -// field: "table_name" -// path_template: "{table_name=regions/*/zones/*/**}" -// } -// }; +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed (with region-based +// // syntax). +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=regions/*/zones/*/**}" +// } +// }; // // result: // -// +// // -// Sub-example 3c +// # Sub-example 3c // // Multiple alternative conflictingly named path templates are // specified. The one that matches is used to construct the header. // // annotation: // -// option (google.api.routing) = { -// // Take the `table_name`, if it's well-formed, whether -// // using the region- or projects-based syntax. +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed, whether +// // using the region- or projects-based syntax. // -// routing_parameters { -// field: "table_name" -// path_template: "{table_name=regions/*/zones/*/**}" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{table_name=projects/*/instances/*/**}" -// } -// }; +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=regions/*/zones/*/**}" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=projects/*/instances/*/**}" +// } +// }; // // result: // -// x-goog-request-params: -// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +// x-goog-request-params: +// table_name=projects/proj_foo/instances/instance_bar/table/table_baz // -// Example 4 +// # Example 4 // // Extracting a single routing header key-value pair by matching a // template syntax on (a part of) a single request field. // // annotation: // -// option (google.api.routing) = { -// // Take just the project id from the `table_name` field. -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// }; +// option (google.api.routing) = { +// // Take just the project id from the `table_name` field. +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// }; // // result: // -// x-goog-request-params: routing_id=projects/proj_foo +// x-goog-request-params: routing_id=projects/proj_foo // -// Example 5 +// # Example 5 // // Extracting a single routing header key-value pair by matching // several conflictingly named path templates on (parts of) a single request @@ -208,87 +208,87 @@ const ( // // annotation: // -// option (google.api.routing) = { -// // If the `table_name` does not have instances information, -// // take just the project id for routing. -// // Otherwise take project + instance. -// -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*/instances/*}/**" -// } -// }; +// option (google.api.routing) = { +// // If the `table_name` does not have instances information, +// // take just the project id for routing. +// // Otherwise take project + instance. 
+// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*/instances/*}/**" +// } +// }; // // result: // -// x-goog-request-params: -// routing_id=projects/proj_foo/instances/instance_bar +// x-goog-request-params: +// routing_id=projects/proj_foo/instances/instance_bar // -// Example 6 +// # Example 6 // // Extracting multiple routing header key-value pairs by matching // several non-conflicting path templates on (parts of) a single request field. // -// Sub-example 6a +// # Sub-example 6a // // Make the templates strict, so that if the `table_name` does not // have an instance information, nothing is sent. // // annotation: // -// option (google.api.routing) = { -// // The routing code needs two keys instead of one composite -// // but works only for the tables with the "project-instance" name -// // syntax. -// -// routing_parameters { -// field: "table_name" -// path_template: "{project_id=projects/*}/instances/*/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "projects/*/{instance_id=instances/*}/**" -// } -// }; +// option (google.api.routing) = { +// // The routing code needs two keys instead of one composite +// // but works only for the tables with the "project-instance" name +// // syntax. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/instances/*/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; // // result: // -// x-goog-request-params: -// project_id=projects/proj_foo&instance_id=instances/instance_bar +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar // -// Sub-example 6b +// # Sub-example 6b // // Make the templates loose, so that if the `table_name` does not // have an instance information, just the project id part is sent. // // annotation: // -// option (google.api.routing) = { -// // The routing code wants two keys instead of one composite -// // but will work with just the `project_id` for tables without -// // an instance in the `table_name`. -// -// routing_parameters { -// field: "table_name" -// path_template: "{project_id=projects/*}/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "projects/*/{instance_id=instances/*}/**" -// } -// }; +// option (google.api.routing) = { +// // The routing code wants two keys instead of one composite +// // but will work with just the `project_id` for tables without +// // an instance in the `table_name`. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; // // result (is the same as 6a for our example message because it has the instance // information): // -// x-goog-request-params: -// project_id=projects/proj_foo&instance_id=instances/instance_bar +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar // -// Example 7 +// # Example 7 // // Extracting multiple routing header key-value pairs by matching // several path templates on multiple request fields. 
@@ -301,26 +301,26 @@ const ( // // annotation: // -// option (google.api.routing) = { -// // The routing needs both `project_id` and `routing_id` -// // (from the `app_profile_id` field) for routing. +// option (google.api.routing) = { +// // The routing needs both `project_id` and `routing_id` +// // (from the `app_profile_id` field) for routing. // -// routing_parameters { -// field: "table_name" -// path_template: "{project_id=projects/*}/**" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// }; +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; // // result: // -// x-goog-request-params: -// project_id=projects/proj_foo&routing_id=profiles/prof_qux +// x-goog-request-params: +// project_id=projects/proj_foo&routing_id=profiles/prof_qux // -// Example 8 +// # Example 8 // // Extracting a single routing header key-value pair by matching // several conflictingly named path templates on several request fields. The @@ -328,73 +328,73 @@ const ( // // annotation: // -// option (google.api.routing) = { -// // The `routing_id` can be a project id or a region id depending on -// // the table name format, but only if the `app_profile_id` is not set. -// // If `app_profile_id` is set it should be used instead. -// -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=regions/*}/**" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// }; +// option (google.api.routing) = { +// // The `routing_id` can be a project id or a region id depending on +// // the table name format, but only if the `app_profile_id` is not set. +// // If `app_profile_id` is set it should be used instead. +// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=regions/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; // // result: // -// x-goog-request-params: routing_id=profiles/prof_qux +// x-goog-request-params: routing_id=profiles/prof_qux // -// Example 9 +// # Example 9 // // Bringing it all together. // // annotation: // -// option (google.api.routing) = { -// // For routing both `table_location` and a `routing_id` are needed. -// // -// // table_location can be either an instance id or a region+zone id. -// // -// // For `routing_id`, take the value of `app_profile_id` -// // - If it's in the format `profiles/`, send -// // just the `` part. -// // - If it's any other literal, send it as is. -// // If the `app_profile_id` is empty, and the `table_name` starts with -// // the project_id, send that instead. 
-// -// routing_parameters { -// field: "table_name" -// path_template: "projects/*/{table_location=instances/*}/tables/*" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{table_location=regions/*/zones/*}/tables/*" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "profiles/{routing_id=*}" -// } -// }; +// option (google.api.routing) = { +// // For routing both `table_location` and a `routing_id` are needed. +// // +// // table_location can be either an instance id or a region+zone id. +// // +// // For `routing_id`, take the value of `app_profile_id` +// // - If it's in the format `profiles/`, send +// // just the `` part. +// // - If it's any other literal, send it as is. +// // If the `app_profile_id` is empty, and the `table_name` starts with +// // the project_id, send that instead. +// +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{table_location=instances/*}/tables/*" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{table_location=regions/*/zones/*}/tables/*" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "profiles/{routing_id=*}" +// } +// }; // // result: // -// x-goog-request-params: -// table_location=instances/instance_bar&routing_id=prof_qux +// x-goog-request-params: +// table_location=instances/instance_bar&routing_id=prof_qux type RoutingRule struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go new file mode 100644 index 000000000000..710753137732 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -0,0 +1,203 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.18.1 +// source: google/api/launch_stage.proto + +package api + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The launch stage as defined by [Google Cloud Platform +// Launch Stages](https://cloud.google.com/terms/launch-stages). 
+type LaunchStage int32 + +const ( + // Do not use this default value. + LaunchStage_LAUNCH_STAGE_UNSPECIFIED LaunchStage = 0 + // The feature is not yet implemented. Users can not use it. + LaunchStage_UNIMPLEMENTED LaunchStage = 6 + // Prelaunch features are hidden from users and are only visible internally. + LaunchStage_PRELAUNCH LaunchStage = 7 + // Early Access features are limited to a closed group of testers. To use + // these features, you must sign up in advance and sign a Trusted Tester + // agreement (which includes confidentiality provisions). These features may + // be unstable, changed in backward-incompatible ways, and are not + // guaranteed to be released. + LaunchStage_EARLY_ACCESS LaunchStage = 1 + // Alpha is a limited availability test for releases before they are cleared + // for widespread use. By Alpha, all significant design issues are resolved + // and we are in the process of verifying functionality. Alpha customers + // need to apply for access, agree to applicable terms, and have their + // projects allowlisted. Alpha releases don't have to be feature complete, + // no SLAs are provided, and there are no technical support obligations, but + // they will be far enough along that customers can actually use them in + // test environments or for limited-use tests -- just like they would in + // normal production cases. + LaunchStage_ALPHA LaunchStage = 2 + // Beta is the point at which we are ready to open a release for any + // customer to use. There are no SLA or technical support obligations in a + // Beta release. Products will be complete from a feature perspective, but + // may have some open outstanding issues. Beta releases are suitable for + // limited production use cases. + LaunchStage_BETA LaunchStage = 3 + // GA features are open to all developers and are considered stable and + // fully qualified for production use. + LaunchStage_GA LaunchStage = 4 + // Deprecated features are scheduled to be shut down and removed. For more + // information, see the "Deprecation Policy" section of our [Terms of + // Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. + LaunchStage_DEPRECATED LaunchStage = 5 +) + +// Enum value maps for LaunchStage. +var ( + LaunchStage_name = map[int32]string{ + 0: "LAUNCH_STAGE_UNSPECIFIED", + 6: "UNIMPLEMENTED", + 7: "PRELAUNCH", + 1: "EARLY_ACCESS", + 2: "ALPHA", + 3: "BETA", + 4: "GA", + 5: "DEPRECATED", + } + LaunchStage_value = map[string]int32{ + "LAUNCH_STAGE_UNSPECIFIED": 0, + "UNIMPLEMENTED": 6, + "PRELAUNCH": 7, + "EARLY_ACCESS": 1, + "ALPHA": 2, + "BETA": 3, + "GA": 4, + "DEPRECATED": 5, + } +) + +func (x LaunchStage) Enum() *LaunchStage { + p := new(LaunchStage) + *p = x + return p +} + +func (x LaunchStage) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LaunchStage) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_launch_stage_proto_enumTypes[0].Descriptor() +} + +func (LaunchStage) Type() protoreflect.EnumType { + return &file_google_api_launch_stage_proto_enumTypes[0] +} + +func (x LaunchStage) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LaunchStage.Descriptor instead. 
+func (LaunchStage) EnumDescriptor() ([]byte, []int) { + return file_google_api_launch_stage_proto_rawDescGZIP(), []int{0} +} + +var File_google_api_launch_stage_proto protoreflect.FileDescriptor + +var file_google_api_launch_stage_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, + 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2a, 0x8c, 0x01, 0x0a, 0x0b, + 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x4c, + 0x41, 0x55, 0x4e, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, + 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x50, 0x52, 0x45, 0x4c, 0x41, 0x55, 0x4e, 0x43, 0x48, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x45, + 0x41, 0x52, 0x4c, 0x59, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, + 0x05, 0x41, 0x4c, 0x50, 0x48, 0x41, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x45, 0x54, 0x41, + 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x41, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, + 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x05, 0x42, 0x5a, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x10, 0x4c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0xa2, + 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_launch_stage_proto_rawDescOnce sync.Once + file_google_api_launch_stage_proto_rawDescData = file_google_api_launch_stage_proto_rawDesc +) + +func file_google_api_launch_stage_proto_rawDescGZIP() []byte { + file_google_api_launch_stage_proto_rawDescOnce.Do(func() { + file_google_api_launch_stage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_launch_stage_proto_rawDescData) + }) + return file_google_api_launch_stage_proto_rawDescData +} + +var file_google_api_launch_stage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_launch_stage_proto_goTypes = []interface{}{ + (LaunchStage)(0), // 0: google.api.LaunchStage +} +var file_google_api_launch_stage_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_launch_stage_proto_init() } +func file_google_api_launch_stage_proto_init() { + if File_google_api_launch_stage_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_launch_stage_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_launch_stage_proto_goTypes, + DependencyIndexes: 
file_google_api_launch_stage_proto_depIdxs, + EnumInfos: file_google_api_launch_stage_proto_enumTypes, + }.Build() + File_google_api_launch_stage_proto = out.File + file_google_api_launch_stage_proto_rawDesc = nil + file_google_api_launch_stage_proto_goTypes = nil + file_google_api_launch_stage_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go index 50b4e5e8f5ef..15ad78adccdd 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.13.0 +// protoc v3.12.2 // source: google/bigtable/admin/v2/bigtable_instance_admin.proto package admin @@ -45,6 +45,67 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type CreateClusterMetadata_TableProgress_State int32 + +const ( + CreateClusterMetadata_TableProgress_STATE_UNSPECIFIED CreateClusterMetadata_TableProgress_State = 0 + // The table has not yet begun copying to the new cluster. + CreateClusterMetadata_TableProgress_PENDING CreateClusterMetadata_TableProgress_State = 1 + // The table is actively being copied to the new cluster. + CreateClusterMetadata_TableProgress_COPYING CreateClusterMetadata_TableProgress_State = 2 + // The table has been fully copied to the new cluster. + CreateClusterMetadata_TableProgress_COMPLETED CreateClusterMetadata_TableProgress_State = 3 + // The table was deleted before it finished copying to the new cluster. + // Note that tables deleted after completion will stay marked as + // COMPLETED, not CANCELLED. + CreateClusterMetadata_TableProgress_CANCELLED CreateClusterMetadata_TableProgress_State = 4 +) + +// Enum value maps for CreateClusterMetadata_TableProgress_State. +var ( + CreateClusterMetadata_TableProgress_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "PENDING", + 2: "COPYING", + 3: "COMPLETED", + 4: "CANCELLED", + } + CreateClusterMetadata_TableProgress_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "PENDING": 1, + "COPYING": 2, + "COMPLETED": 3, + "CANCELLED": 4, + } +) + +func (x CreateClusterMetadata_TableProgress_State) Enum() *CreateClusterMetadata_TableProgress_State { + p := new(CreateClusterMetadata_TableProgress_State) + *p = x + return p +} + +func (x CreateClusterMetadata_TableProgress_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CreateClusterMetadata_TableProgress_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_bigtable_admin_v2_bigtable_instance_admin_proto_enumTypes[0].Descriptor() +} + +func (CreateClusterMetadata_TableProgress_State) Type() protoreflect.EnumType { + return &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_enumTypes[0] +} + +func (x CreateClusterMetadata_TableProgress_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CreateClusterMetadata_TableProgress_State.Descriptor instead. 
+func (CreateClusterMetadata_TableProgress_State) EnumDescriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDescGZIP(), []int{13, 0, 0} +} + // Request message for BigtableInstanceAdmin.CreateInstance. type CreateInstanceRequest struct { state protoimpl.MessageState @@ -869,6 +930,15 @@ type CreateClusterMetadata struct { RequestTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` // The time at which the operation failed or was completed successfully. FinishTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"` + // Keys: the full `name` of each table that existed in the instance when + // CreateCluster was first called, i.e. + // `projects//instances//tables/
`. Any table added + // to the instance by a later API call will be created in the new cluster by + // that API call, not this one. + // + // Values: information on how much of a table's data has been copied to the + // newly-created cluster so far. + Tables map[string]*CreateClusterMetadata_TableProgress `protobuf:"bytes,4,rep,name=tables,proto3" json:"tables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *CreateClusterMetadata) Reset() { @@ -924,6 +994,13 @@ func (x *CreateClusterMetadata) GetFinishTime() *timestamppb.Timestamp { return nil } +func (x *CreateClusterMetadata) GetTables() map[string]*CreateClusterMetadata_TableProgress { + if x != nil { + return x.Tables + } + return nil +} + // The metadata for the Operation returned by UpdateCluster. type UpdateClusterMetadata struct { state protoimpl.MessageState @@ -1565,6 +1642,238 @@ func (*UpdateAppProfileMetadata) Descriptor() ([]byte, []int) { return file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDescGZIP(), []int{23} } +// Request message for BigtableInstanceAdmin.ListHotTablets. +type ListHotTabletsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The cluster name to list hot tablets. + // Value is in the following form: + // `projects/{project}/instances/{instance}/clusters/{cluster}`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // The start time to list hot tablets. The hot tablets in the response will + // have start times between the requested start time and end time. Start time + // defaults to Now if it is unset, and end time defaults to Now - 24 hours if + // it is unset. The start time should be less than the end time, and the + // maximum allowed time range between start time and end time is 48 hours. + // Start time and end time should have values between Now and Now - 14 days. + StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // The end time to list hot tablets. + EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // Maximum number of results per page. + // + // A page_size that is empty or zero lets the server choose the number of + // items to return. A page_size which is strictly positive will return at most + // that many items. A negative page_size will cause an error. + // + // Following the first request, subsequent paginated calls do not need a + // page_size field. If a page_size is set in subsequent calls, it must match + // the page_size given in the first request. + PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // The value of `next_page_token` returned by a previous call. 
+ PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListHotTabletsRequest) Reset() { + *x = ListHotTabletsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListHotTabletsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListHotTabletsRequest) ProtoMessage() {} + +func (x *ListHotTabletsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListHotTabletsRequest.ProtoReflect.Descriptor instead. +func (*ListHotTabletsRequest) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDescGZIP(), []int{24} +} + +func (x *ListHotTabletsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListHotTabletsRequest) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +func (x *ListHotTabletsRequest) GetEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EndTime + } + return nil +} + +func (x *ListHotTabletsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListHotTabletsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// Response message for BigtableInstanceAdmin.ListHotTablets. +type ListHotTabletsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of hot tablets in the tables of the requested cluster that fall + // within the requested time range. Hot tablets are ordered by node cpu usage + // percent. If there are multiple hot tablets that correspond to the same + // tablet within a 15-minute interval, only the hot tablet with the highest + // node cpu usage will be included in the response. + HotTablets []*HotTablet `protobuf:"bytes,1,rep,name=hot_tablets,json=hotTablets,proto3" json:"hot_tablets,omitempty"` + // Set if not all hot tablets could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListHotTabletsResponse) Reset() { + *x = ListHotTabletsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListHotTabletsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListHotTabletsResponse) ProtoMessage() {} + +func (x *ListHotTabletsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListHotTabletsResponse.ProtoReflect.Descriptor instead. +func (*ListHotTabletsResponse) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDescGZIP(), []int{25} +} + +func (x *ListHotTabletsResponse) GetHotTablets() []*HotTablet { + if x != nil { + return x.HotTablets + } + return nil +} + +func (x *ListHotTabletsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Progress info for copying a table's data to the new cluster. +type CreateClusterMetadata_TableProgress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Estimate of the size of the table to be copied. + EstimatedSizeBytes int64 `protobuf:"varint,2,opt,name=estimated_size_bytes,json=estimatedSizeBytes,proto3" json:"estimated_size_bytes,omitempty"` + // Estimate of the number of bytes copied so far for this table. + // This will eventually reach 'estimated_size_bytes' unless the table copy + // is CANCELLED. + EstimatedCopiedBytes int64 `protobuf:"varint,3,opt,name=estimated_copied_bytes,json=estimatedCopiedBytes,proto3" json:"estimated_copied_bytes,omitempty"` + State CreateClusterMetadata_TableProgress_State `protobuf:"varint,4,opt,name=state,proto3,enum=google.bigtable.admin.v2.CreateClusterMetadata_TableProgress_State" json:"state,omitempty"` +} + +func (x *CreateClusterMetadata_TableProgress) Reset() { + *x = CreateClusterMetadata_TableProgress{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateClusterMetadata_TableProgress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateClusterMetadata_TableProgress) ProtoMessage() {} + +func (x *CreateClusterMetadata_TableProgress) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateClusterMetadata_TableProgress.ProtoReflect.Descriptor instead. 
+func (*CreateClusterMetadata_TableProgress) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDescGZIP(), []int{13, 0} +} + +func (x *CreateClusterMetadata_TableProgress) GetEstimatedSizeBytes() int64 { + if x != nil { + return x.EstimatedSizeBytes + } + return 0 +} + +func (x *CreateClusterMetadata_TableProgress) GetEstimatedCopiedBytes() int64 { + if x != nil { + return x.EstimatedCopiedBytes + } + return 0 +} + +func (x *CreateClusterMetadata_TableProgress) GetState() CreateClusterMetadata_TableProgress_State { + if x != nil { + return x.State + } + return CreateClusterMetadata_TableProgress_STATE_UNSPECIFIED +} + var File_google_bigtable_admin_v2_bigtable_instance_admin_proto protoreflect.FileDescriptor var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte{ @@ -1733,8 +2042,8 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0a, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xee, - 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x6d, 0x70, 0x52, 0x0a, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xea, + 0x05, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x59, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, @@ -1748,387 +2057,456 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x53, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x1a, 0xaa, 0x02, 0x0a, 0x0d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, + 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x64, 0x53, + 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x65, 0x73, 0x74, 0x69, + 0x6d, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 
0x14, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, + 0x74, 0x65, 0x64, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x59, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x43, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x56, 0x0a, 0x05, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, + 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x50, 0x59, 0x49, 0x4e, + 0x47, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, + 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, + 0x04, 0x1a, 0x78, 0x0a, 0x0b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x53, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe1, 0x01, 0x0a, 0x15, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4c, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, + 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x22, - 0xe1, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4c, 0x0a, 0x10, 
0x6f, 0x72, 0x69, - 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, - 0x69, 0x6d, 0x65, 0x22, 0xfc, 0x01, 0x0a, 0x1c, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x3d, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x60, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x22, 0xa1, 0x01, 0x0a, 0x1b, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x40, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, - 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x80, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x45, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x0e, 0x61, 0x70, 0x70, - 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x49, 0x64, 0x12, 0x4a, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0xfc, 0x01, 0x0a, 0x1c, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x3d, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x3b, 0x0a, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0a, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x10, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa1, + 0x01, 0x0a, 0x1b, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, + 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, + 0x73, 0x6b, 0x22, 0x80, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, + 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, + 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, + 0x12, 0x4a, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0a, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x27, 0x0a, 0x0f, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, + 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x5b, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2f, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x29, 0x0a, 0x27, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0xb5, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0c, + 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, + 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x0b, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x29, 0x0a, + 0x10, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd0, 0x01, 0x0a, 0x17, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, - 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x5b, 0x0a, 0x14, 0x47, 0x65, 0x74, - 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x43, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x2f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x29, 0x0a, 0x27, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x41, - 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x45, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, + 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 
0x4d, 0x61, + 0x73, 0x6b, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x77, 0x61, 0x72, + 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x67, 0x6e, + 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x17, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x29, 0x0a, 0x27, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8b, 0x02, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x48, + 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xb5, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, - 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x47, 0x0a, 0x0c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x86, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6f, 0x74, + 0x54, 0x61, 
0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x44, 0x0a, 0x0b, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0a, 0x68, 0x6f, 0x74, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xcb, 0x21, + 0x0a, 0x15, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xda, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x78, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x26, 0x22, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x24, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0xca, + 0x41, 0x22, 0x0a, 0x08, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x91, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, + 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xa4, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, + 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x23, 0x12, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, + 0x86, 0x01, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x0b, 0x61, 0x70, - 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, - 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x66, 0x61, 0x69, - 0x6c, 0x65, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd0, 0x01, 0x0a, - 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, - 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, - 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, - 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x22, - 0x8c, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2f, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x29, 0x0a, 0x27, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 
0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x2c, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, - 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x1a, - 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x32, 0x89, 0x20, 0x0a, 0x15, 0x42, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xda, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x78, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x22, - 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x24, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0xca, 0x41, 0x22, 0x0a, - 0x08, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0x91, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, - 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x2f, 0x76, 0x32, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xa4, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 
0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, - 0x12, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x86, 0x01, 0x0a, - 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x1a, - 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0x12, 0xe8, 0x01, 0x0a, 0x15, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, - 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, - 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, - 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x78, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x32, 0x2a, - 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x6e, 0x61, + 0x32, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x26, 0x1a, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0x12, 0xe8, 0x01, 0x0a, 0x15, 0x50, 0x61, 0x72, + 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, + 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x78, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x36, 0x32, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x08, 0x69, + 0x6e, 0x73, 
0x74, 0x61, 0x6e, 0x63, 0x65, 0xda, 0x41, 0x14, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xca, 0x41, + 0x22, 0x0a, 0x08, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x2a, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x08, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0xda, 0x41, 0x14, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2c, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xca, 0x41, 0x22, 0x0a, 0x08, - 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x30, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x23, 0x2a, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xdc, - 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0xdc, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, + 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 
0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x7c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x2c, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0xda, 0x41, 0x19, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0xca, 0x41, 0x20, + 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x99, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, + 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xac, 0x01, 0x0a, + 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xad, 0x01, 0x0a, 0x0d, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x7c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 
0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0xda, 0x41, - 0x19, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0xca, 0x41, 0x20, 0x0a, 0x07, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x99, 0x01, - 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x3b, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, - 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xac, 0x01, 0x0a, 0x0c, 0x4c, 0x69, - 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0xda, - 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xad, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x1d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, - 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5a, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x31, 0x1a, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x5a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x1a, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 
0x65, + 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0xca, 0x41, 0x20, 0x0a, 0x07, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xf4, 0x01, 0x0a, 0x14, + 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x3f, 0x32, 0x34, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0xda, 0x41, 0x13, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xca, 0x41, 0x27, 0x0a, 0x07, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x94, 0x01, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3b, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2e, 0x2a, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0xca, 0x41, 0x20, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x12, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xf4, 0x01, 0x0a, 0x14, 0x50, 0x61, 0x72, - 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, - 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, - 0x65, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3f, - 0x32, 0x34, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0xda, - 0x41, 0x13, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xca, 0x41, 0x27, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x12, 0x1c, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x94, 0x01, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x2e, 0x2a, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0xda, - 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xd5, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x31, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, - 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, + 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xd5, 0x01, 0x0a, 0x10, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x22, 0x68, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x22, 0x2f, 0x2f, 0x76, - 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, - 0x7d, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x3a, 0x0b, 0x61, - 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0xda, 0x41, 0x21, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x69, 0x64, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0xa5, - 0x01, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 
0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x41, - 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x3e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x12, 0x2f, - 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, - 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, - 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb8, 0x01, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x41, - 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x40, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x12, 0x2f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x70, 0x70, - 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0xfa, 0x01, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, + 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x68, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x22, + 0x2f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, + 0x3a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0xda, 0x41, 0x21, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x12, 0xa5, 0x01, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, + 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x3e, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x31, 0x12, 0x2f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb8, 0x01, 0x0a, 0x0f, 0x4c, 0x69, + 0x73, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, + 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, + 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x40, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x12, 0x2f, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0xfa, 0x01, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, + 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x93, 0x01, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x4a, 0x32, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x70, 0x70, 0x5f, 0x70, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x2a, + 0x7d, 0x3a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0xda, 0x41, + 0x17, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2c, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xca, 0x41, 0x26, 0x0a, 0x0a, 0x41, 0x70, 0x70, + 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, + 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x9d, 0x01, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 
- 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x93, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x4a, 0x32, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, - 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x0b, - 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0xda, 0x41, 0x17, 0x61, 0x70, - 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xca, 0x41, 0x26, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x12, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x9d, - 0x01, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3e, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x2a, 0x2f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x93, - 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, - 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x48, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x9a, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, - 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 
0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x22, 0x4f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, + 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x22, 0x3e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x2a, 0x2f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x70, 0x70, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x93, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x48, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x9a, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x22, 0x4f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, + 0x01, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0xc5, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, + 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, + 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x5a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3d, 0x22, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 
0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, - 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, - 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x12, 0xc5, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, - 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, - 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5a, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x3d, 0x22, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, - 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, - 0x01, 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x9a, 0x03, 0xca, 0x41, 0x1c, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xf7, 0x02, 0x68, - 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, - 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, + 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xbf, 0x01, 0x0a, + 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 
0x65, 0x74, 0x73, 0x12, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, + 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x4a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x68, 0x6f, 0x74, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x1a, 0x9a, + 0x03, 0xca, 0x41, 0x1c, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0xd2, 0x41, 0xf7, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x68, - 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, - 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, - 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xe2, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x1a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 
0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, - 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, - 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, - 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, + 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, + 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xe2, 0x01, 0x0a, 0x1c, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x1a, 0x42, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, 0x64, + 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, + 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, + 0x74, 
0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2143,121 +2521,136 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDescGZIP() [ return file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDescData } -var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 25) +var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 29) var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_goTypes = []interface{}{ - (*CreateInstanceRequest)(nil), // 0: google.bigtable.admin.v2.CreateInstanceRequest - (*GetInstanceRequest)(nil), // 1: google.bigtable.admin.v2.GetInstanceRequest - (*ListInstancesRequest)(nil), // 2: google.bigtable.admin.v2.ListInstancesRequest - (*ListInstancesResponse)(nil), // 3: google.bigtable.admin.v2.ListInstancesResponse - (*PartialUpdateInstanceRequest)(nil), // 4: google.bigtable.admin.v2.PartialUpdateInstanceRequest - (*DeleteInstanceRequest)(nil), // 5: google.bigtable.admin.v2.DeleteInstanceRequest - (*CreateClusterRequest)(nil), // 6: google.bigtable.admin.v2.CreateClusterRequest - (*GetClusterRequest)(nil), // 7: google.bigtable.admin.v2.GetClusterRequest - (*ListClustersRequest)(nil), // 8: google.bigtable.admin.v2.ListClustersRequest - (*ListClustersResponse)(nil), // 9: google.bigtable.admin.v2.ListClustersResponse - (*DeleteClusterRequest)(nil), // 10: google.bigtable.admin.v2.DeleteClusterRequest - (*CreateInstanceMetadata)(nil), // 11: google.bigtable.admin.v2.CreateInstanceMetadata - (*UpdateInstanceMetadata)(nil), // 12: google.bigtable.admin.v2.UpdateInstanceMetadata - (*CreateClusterMetadata)(nil), // 13: google.bigtable.admin.v2.CreateClusterMetadata - (*UpdateClusterMetadata)(nil), // 14: google.bigtable.admin.v2.UpdateClusterMetadata - (*PartialUpdateClusterMetadata)(nil), // 15: google.bigtable.admin.v2.PartialUpdateClusterMetadata - (*PartialUpdateClusterRequest)(nil), // 16: google.bigtable.admin.v2.PartialUpdateClusterRequest - (*CreateAppProfileRequest)(nil), // 17: google.bigtable.admin.v2.CreateAppProfileRequest - (*GetAppProfileRequest)(nil), // 18: google.bigtable.admin.v2.GetAppProfileRequest - (*ListAppProfilesRequest)(nil), // 19: google.bigtable.admin.v2.ListAppProfilesRequest - (*ListAppProfilesResponse)(nil), // 20: google.bigtable.admin.v2.ListAppProfilesResponse - (*UpdateAppProfileRequest)(nil), // 21: google.bigtable.admin.v2.UpdateAppProfileRequest - (*DeleteAppProfileRequest)(nil), // 22: google.bigtable.admin.v2.DeleteAppProfileRequest - (*UpdateAppProfileMetadata)(nil), // 23: google.bigtable.admin.v2.UpdateAppProfileMetadata - nil, // 24: google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry - (*Instance)(nil), // 25: google.bigtable.admin.v2.Instance - (*fieldmaskpb.FieldMask)(nil), // 26: google.protobuf.FieldMask - (*Cluster)(nil), // 27: google.bigtable.admin.v2.Cluster - (*timestamppb.Timestamp)(nil), // 28: google.protobuf.Timestamp - (*AppProfile)(nil), // 29: google.bigtable.admin.v2.AppProfile - (*v1.GetIamPolicyRequest)(nil), // 30: google.iam.v1.GetIamPolicyRequest - (*v1.SetIamPolicyRequest)(nil), // 31: google.iam.v1.SetIamPolicyRequest - (*v1.TestIamPermissionsRequest)(nil), // 32: google.iam.v1.TestIamPermissionsRequest - (*longrunning.Operation)(nil), // 33: 
google.longrunning.Operation - (*emptypb.Empty)(nil), // 34: google.protobuf.Empty - (*v1.Policy)(nil), // 35: google.iam.v1.Policy - (*v1.TestIamPermissionsResponse)(nil), // 36: google.iam.v1.TestIamPermissionsResponse + (CreateClusterMetadata_TableProgress_State)(0), // 0: google.bigtable.admin.v2.CreateClusterMetadata.TableProgress.State + (*CreateInstanceRequest)(nil), // 1: google.bigtable.admin.v2.CreateInstanceRequest + (*GetInstanceRequest)(nil), // 2: google.bigtable.admin.v2.GetInstanceRequest + (*ListInstancesRequest)(nil), // 3: google.bigtable.admin.v2.ListInstancesRequest + (*ListInstancesResponse)(nil), // 4: google.bigtable.admin.v2.ListInstancesResponse + (*PartialUpdateInstanceRequest)(nil), // 5: google.bigtable.admin.v2.PartialUpdateInstanceRequest + (*DeleteInstanceRequest)(nil), // 6: google.bigtable.admin.v2.DeleteInstanceRequest + (*CreateClusterRequest)(nil), // 7: google.bigtable.admin.v2.CreateClusterRequest + (*GetClusterRequest)(nil), // 8: google.bigtable.admin.v2.GetClusterRequest + (*ListClustersRequest)(nil), // 9: google.bigtable.admin.v2.ListClustersRequest + (*ListClustersResponse)(nil), // 10: google.bigtable.admin.v2.ListClustersResponse + (*DeleteClusterRequest)(nil), // 11: google.bigtable.admin.v2.DeleteClusterRequest + (*CreateInstanceMetadata)(nil), // 12: google.bigtable.admin.v2.CreateInstanceMetadata + (*UpdateInstanceMetadata)(nil), // 13: google.bigtable.admin.v2.UpdateInstanceMetadata + (*CreateClusterMetadata)(nil), // 14: google.bigtable.admin.v2.CreateClusterMetadata + (*UpdateClusterMetadata)(nil), // 15: google.bigtable.admin.v2.UpdateClusterMetadata + (*PartialUpdateClusterMetadata)(nil), // 16: google.bigtable.admin.v2.PartialUpdateClusterMetadata + (*PartialUpdateClusterRequest)(nil), // 17: google.bigtable.admin.v2.PartialUpdateClusterRequest + (*CreateAppProfileRequest)(nil), // 18: google.bigtable.admin.v2.CreateAppProfileRequest + (*GetAppProfileRequest)(nil), // 19: google.bigtable.admin.v2.GetAppProfileRequest + (*ListAppProfilesRequest)(nil), // 20: google.bigtable.admin.v2.ListAppProfilesRequest + (*ListAppProfilesResponse)(nil), // 21: google.bigtable.admin.v2.ListAppProfilesResponse + (*UpdateAppProfileRequest)(nil), // 22: google.bigtable.admin.v2.UpdateAppProfileRequest + (*DeleteAppProfileRequest)(nil), // 23: google.bigtable.admin.v2.DeleteAppProfileRequest + (*UpdateAppProfileMetadata)(nil), // 24: google.bigtable.admin.v2.UpdateAppProfileMetadata + (*ListHotTabletsRequest)(nil), // 25: google.bigtable.admin.v2.ListHotTabletsRequest + (*ListHotTabletsResponse)(nil), // 26: google.bigtable.admin.v2.ListHotTabletsResponse + nil, // 27: google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry + (*CreateClusterMetadata_TableProgress)(nil), // 28: google.bigtable.admin.v2.CreateClusterMetadata.TableProgress + nil, // 29: google.bigtable.admin.v2.CreateClusterMetadata.TablesEntry + (*Instance)(nil), // 30: google.bigtable.admin.v2.Instance + (*fieldmaskpb.FieldMask)(nil), // 31: google.protobuf.FieldMask + (*Cluster)(nil), // 32: google.bigtable.admin.v2.Cluster + (*timestamppb.Timestamp)(nil), // 33: google.protobuf.Timestamp + (*AppProfile)(nil), // 34: google.bigtable.admin.v2.AppProfile + (*HotTablet)(nil), // 35: google.bigtable.admin.v2.HotTablet + (*v1.GetIamPolicyRequest)(nil), // 36: google.iam.v1.GetIamPolicyRequest + (*v1.SetIamPolicyRequest)(nil), // 37: google.iam.v1.SetIamPolicyRequest + (*v1.TestIamPermissionsRequest)(nil), // 38: google.iam.v1.TestIamPermissionsRequest + (*longrunning.Operation)(nil), // 
39: google.longrunning.Operation + (*emptypb.Empty)(nil), // 40: google.protobuf.Empty + (*v1.Policy)(nil), // 41: google.iam.v1.Policy + (*v1.TestIamPermissionsResponse)(nil), // 42: google.iam.v1.TestIamPermissionsResponse } var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_depIdxs = []int32{ - 25, // 0: google.bigtable.admin.v2.CreateInstanceRequest.instance:type_name -> google.bigtable.admin.v2.Instance - 24, // 1: google.bigtable.admin.v2.CreateInstanceRequest.clusters:type_name -> google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry - 25, // 2: google.bigtable.admin.v2.ListInstancesResponse.instances:type_name -> google.bigtable.admin.v2.Instance - 25, // 3: google.bigtable.admin.v2.PartialUpdateInstanceRequest.instance:type_name -> google.bigtable.admin.v2.Instance - 26, // 4: google.bigtable.admin.v2.PartialUpdateInstanceRequest.update_mask:type_name -> google.protobuf.FieldMask - 27, // 5: google.bigtable.admin.v2.CreateClusterRequest.cluster:type_name -> google.bigtable.admin.v2.Cluster - 27, // 6: google.bigtable.admin.v2.ListClustersResponse.clusters:type_name -> google.bigtable.admin.v2.Cluster - 0, // 7: google.bigtable.admin.v2.CreateInstanceMetadata.original_request:type_name -> google.bigtable.admin.v2.CreateInstanceRequest - 28, // 8: google.bigtable.admin.v2.CreateInstanceMetadata.request_time:type_name -> google.protobuf.Timestamp - 28, // 9: google.bigtable.admin.v2.CreateInstanceMetadata.finish_time:type_name -> google.protobuf.Timestamp - 4, // 10: google.bigtable.admin.v2.UpdateInstanceMetadata.original_request:type_name -> google.bigtable.admin.v2.PartialUpdateInstanceRequest - 28, // 11: google.bigtable.admin.v2.UpdateInstanceMetadata.request_time:type_name -> google.protobuf.Timestamp - 28, // 12: google.bigtable.admin.v2.UpdateInstanceMetadata.finish_time:type_name -> google.protobuf.Timestamp - 6, // 13: google.bigtable.admin.v2.CreateClusterMetadata.original_request:type_name -> google.bigtable.admin.v2.CreateClusterRequest - 28, // 14: google.bigtable.admin.v2.CreateClusterMetadata.request_time:type_name -> google.protobuf.Timestamp - 28, // 15: google.bigtable.admin.v2.CreateClusterMetadata.finish_time:type_name -> google.protobuf.Timestamp - 27, // 16: google.bigtable.admin.v2.UpdateClusterMetadata.original_request:type_name -> google.bigtable.admin.v2.Cluster - 28, // 17: google.bigtable.admin.v2.UpdateClusterMetadata.request_time:type_name -> google.protobuf.Timestamp - 28, // 18: google.bigtable.admin.v2.UpdateClusterMetadata.finish_time:type_name -> google.protobuf.Timestamp - 28, // 19: google.bigtable.admin.v2.PartialUpdateClusterMetadata.request_time:type_name -> google.protobuf.Timestamp - 28, // 20: google.bigtable.admin.v2.PartialUpdateClusterMetadata.finish_time:type_name -> google.protobuf.Timestamp - 16, // 21: google.bigtable.admin.v2.PartialUpdateClusterMetadata.original_request:type_name -> google.bigtable.admin.v2.PartialUpdateClusterRequest - 27, // 22: google.bigtable.admin.v2.PartialUpdateClusterRequest.cluster:type_name -> google.bigtable.admin.v2.Cluster - 26, // 23: google.bigtable.admin.v2.PartialUpdateClusterRequest.update_mask:type_name -> google.protobuf.FieldMask - 29, // 24: google.bigtable.admin.v2.CreateAppProfileRequest.app_profile:type_name -> google.bigtable.admin.v2.AppProfile - 29, // 25: google.bigtable.admin.v2.ListAppProfilesResponse.app_profiles:type_name -> google.bigtable.admin.v2.AppProfile - 29, // 26: google.bigtable.admin.v2.UpdateAppProfileRequest.app_profile:type_name -> 
google.bigtable.admin.v2.AppProfile - 26, // 27: google.bigtable.admin.v2.UpdateAppProfileRequest.update_mask:type_name -> google.protobuf.FieldMask - 27, // 28: google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value:type_name -> google.bigtable.admin.v2.Cluster - 0, // 29: google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance:input_type -> google.bigtable.admin.v2.CreateInstanceRequest - 1, // 30: google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance:input_type -> google.bigtable.admin.v2.GetInstanceRequest - 2, // 31: google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances:input_type -> google.bigtable.admin.v2.ListInstancesRequest - 25, // 32: google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance:input_type -> google.bigtable.admin.v2.Instance - 4, // 33: google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance:input_type -> google.bigtable.admin.v2.PartialUpdateInstanceRequest - 5, // 34: google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance:input_type -> google.bigtable.admin.v2.DeleteInstanceRequest - 6, // 35: google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster:input_type -> google.bigtable.admin.v2.CreateClusterRequest - 7, // 36: google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster:input_type -> google.bigtable.admin.v2.GetClusterRequest - 8, // 37: google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters:input_type -> google.bigtable.admin.v2.ListClustersRequest - 27, // 38: google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster:input_type -> google.bigtable.admin.v2.Cluster - 16, // 39: google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateCluster:input_type -> google.bigtable.admin.v2.PartialUpdateClusterRequest - 10, // 40: google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster:input_type -> google.bigtable.admin.v2.DeleteClusterRequest - 17, // 41: google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile:input_type -> google.bigtable.admin.v2.CreateAppProfileRequest - 18, // 42: google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile:input_type -> google.bigtable.admin.v2.GetAppProfileRequest - 19, // 43: google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles:input_type -> google.bigtable.admin.v2.ListAppProfilesRequest - 21, // 44: google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile:input_type -> google.bigtable.admin.v2.UpdateAppProfileRequest - 22, // 45: google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile:input_type -> google.bigtable.admin.v2.DeleteAppProfileRequest - 30, // 46: google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 31, // 47: google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 32, // 48: google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 33, // 49: google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance:output_type -> google.longrunning.Operation - 25, // 50: google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance:output_type -> google.bigtable.admin.v2.Instance - 3, // 51: google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances:output_type -> google.bigtable.admin.v2.ListInstancesResponse - 25, // 52: google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance:output_type -> google.bigtable.admin.v2.Instance - 33, // 53: google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance:output_type -> 
google.longrunning.Operation - 34, // 54: google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance:output_type -> google.protobuf.Empty - 33, // 55: google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster:output_type -> google.longrunning.Operation - 27, // 56: google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster:output_type -> google.bigtable.admin.v2.Cluster - 9, // 57: google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters:output_type -> google.bigtable.admin.v2.ListClustersResponse - 33, // 58: google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster:output_type -> google.longrunning.Operation - 33, // 59: google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateCluster:output_type -> google.longrunning.Operation - 34, // 60: google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster:output_type -> google.protobuf.Empty - 29, // 61: google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile:output_type -> google.bigtable.admin.v2.AppProfile - 29, // 62: google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile:output_type -> google.bigtable.admin.v2.AppProfile - 20, // 63: google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles:output_type -> google.bigtable.admin.v2.ListAppProfilesResponse - 33, // 64: google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile:output_type -> google.longrunning.Operation - 34, // 65: google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile:output_type -> google.protobuf.Empty - 35, // 66: google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy:output_type -> google.iam.v1.Policy - 35, // 67: google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy:output_type -> google.iam.v1.Policy - 36, // 68: google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 49, // [49:69] is the sub-list for method output_type - 29, // [29:49] is the sub-list for method input_type - 29, // [29:29] is the sub-list for extension type_name - 29, // [29:29] is the sub-list for extension extendee - 0, // [0:29] is the sub-list for field type_name + 30, // 0: google.bigtable.admin.v2.CreateInstanceRequest.instance:type_name -> google.bigtable.admin.v2.Instance + 27, // 1: google.bigtable.admin.v2.CreateInstanceRequest.clusters:type_name -> google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry + 30, // 2: google.bigtable.admin.v2.ListInstancesResponse.instances:type_name -> google.bigtable.admin.v2.Instance + 30, // 3: google.bigtable.admin.v2.PartialUpdateInstanceRequest.instance:type_name -> google.bigtable.admin.v2.Instance + 31, // 4: google.bigtable.admin.v2.PartialUpdateInstanceRequest.update_mask:type_name -> google.protobuf.FieldMask + 32, // 5: google.bigtable.admin.v2.CreateClusterRequest.cluster:type_name -> google.bigtable.admin.v2.Cluster + 32, // 6: google.bigtable.admin.v2.ListClustersResponse.clusters:type_name -> google.bigtable.admin.v2.Cluster + 1, // 7: google.bigtable.admin.v2.CreateInstanceMetadata.original_request:type_name -> google.bigtable.admin.v2.CreateInstanceRequest + 33, // 8: google.bigtable.admin.v2.CreateInstanceMetadata.request_time:type_name -> google.protobuf.Timestamp + 33, // 9: google.bigtable.admin.v2.CreateInstanceMetadata.finish_time:type_name -> google.protobuf.Timestamp + 5, // 10: google.bigtable.admin.v2.UpdateInstanceMetadata.original_request:type_name -> google.bigtable.admin.v2.PartialUpdateInstanceRequest + 33, // 11: google.bigtable.admin.v2.UpdateInstanceMetadata.request_time:type_name -> 
google.protobuf.Timestamp + 33, // 12: google.bigtable.admin.v2.UpdateInstanceMetadata.finish_time:type_name -> google.protobuf.Timestamp + 7, // 13: google.bigtable.admin.v2.CreateClusterMetadata.original_request:type_name -> google.bigtable.admin.v2.CreateClusterRequest + 33, // 14: google.bigtable.admin.v2.CreateClusterMetadata.request_time:type_name -> google.protobuf.Timestamp + 33, // 15: google.bigtable.admin.v2.CreateClusterMetadata.finish_time:type_name -> google.protobuf.Timestamp + 29, // 16: google.bigtable.admin.v2.CreateClusterMetadata.tables:type_name -> google.bigtable.admin.v2.CreateClusterMetadata.TablesEntry + 32, // 17: google.bigtable.admin.v2.UpdateClusterMetadata.original_request:type_name -> google.bigtable.admin.v2.Cluster + 33, // 18: google.bigtable.admin.v2.UpdateClusterMetadata.request_time:type_name -> google.protobuf.Timestamp + 33, // 19: google.bigtable.admin.v2.UpdateClusterMetadata.finish_time:type_name -> google.protobuf.Timestamp + 33, // 20: google.bigtable.admin.v2.PartialUpdateClusterMetadata.request_time:type_name -> google.protobuf.Timestamp + 33, // 21: google.bigtable.admin.v2.PartialUpdateClusterMetadata.finish_time:type_name -> google.protobuf.Timestamp + 17, // 22: google.bigtable.admin.v2.PartialUpdateClusterMetadata.original_request:type_name -> google.bigtable.admin.v2.PartialUpdateClusterRequest + 32, // 23: google.bigtable.admin.v2.PartialUpdateClusterRequest.cluster:type_name -> google.bigtable.admin.v2.Cluster + 31, // 24: google.bigtable.admin.v2.PartialUpdateClusterRequest.update_mask:type_name -> google.protobuf.FieldMask + 34, // 25: google.bigtable.admin.v2.CreateAppProfileRequest.app_profile:type_name -> google.bigtable.admin.v2.AppProfile + 34, // 26: google.bigtable.admin.v2.ListAppProfilesResponse.app_profiles:type_name -> google.bigtable.admin.v2.AppProfile + 34, // 27: google.bigtable.admin.v2.UpdateAppProfileRequest.app_profile:type_name -> google.bigtable.admin.v2.AppProfile + 31, // 28: google.bigtable.admin.v2.UpdateAppProfileRequest.update_mask:type_name -> google.protobuf.FieldMask + 33, // 29: google.bigtable.admin.v2.ListHotTabletsRequest.start_time:type_name -> google.protobuf.Timestamp + 33, // 30: google.bigtable.admin.v2.ListHotTabletsRequest.end_time:type_name -> google.protobuf.Timestamp + 35, // 31: google.bigtable.admin.v2.ListHotTabletsResponse.hot_tablets:type_name -> google.bigtable.admin.v2.HotTablet + 32, // 32: google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value:type_name -> google.bigtable.admin.v2.Cluster + 0, // 33: google.bigtable.admin.v2.CreateClusterMetadata.TableProgress.state:type_name -> google.bigtable.admin.v2.CreateClusterMetadata.TableProgress.State + 28, // 34: google.bigtable.admin.v2.CreateClusterMetadata.TablesEntry.value:type_name -> google.bigtable.admin.v2.CreateClusterMetadata.TableProgress + 1, // 35: google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance:input_type -> google.bigtable.admin.v2.CreateInstanceRequest + 2, // 36: google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance:input_type -> google.bigtable.admin.v2.GetInstanceRequest + 3, // 37: google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances:input_type -> google.bigtable.admin.v2.ListInstancesRequest + 30, // 38: google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance:input_type -> google.bigtable.admin.v2.Instance + 5, // 39: google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance:input_type -> google.bigtable.admin.v2.PartialUpdateInstanceRequest + 6, // 40: 
google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance:input_type -> google.bigtable.admin.v2.DeleteInstanceRequest + 7, // 41: google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster:input_type -> google.bigtable.admin.v2.CreateClusterRequest + 8, // 42: google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster:input_type -> google.bigtable.admin.v2.GetClusterRequest + 9, // 43: google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters:input_type -> google.bigtable.admin.v2.ListClustersRequest + 32, // 44: google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster:input_type -> google.bigtable.admin.v2.Cluster + 17, // 45: google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateCluster:input_type -> google.bigtable.admin.v2.PartialUpdateClusterRequest + 11, // 46: google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster:input_type -> google.bigtable.admin.v2.DeleteClusterRequest + 18, // 47: google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile:input_type -> google.bigtable.admin.v2.CreateAppProfileRequest + 19, // 48: google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile:input_type -> google.bigtable.admin.v2.GetAppProfileRequest + 20, // 49: google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles:input_type -> google.bigtable.admin.v2.ListAppProfilesRequest + 22, // 50: google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile:input_type -> google.bigtable.admin.v2.UpdateAppProfileRequest + 23, // 51: google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile:input_type -> google.bigtable.admin.v2.DeleteAppProfileRequest + 36, // 52: google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 37, // 53: google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 38, // 54: google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 25, // 55: google.bigtable.admin.v2.BigtableInstanceAdmin.ListHotTablets:input_type -> google.bigtable.admin.v2.ListHotTabletsRequest + 39, // 56: google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance:output_type -> google.longrunning.Operation + 30, // 57: google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance:output_type -> google.bigtable.admin.v2.Instance + 4, // 58: google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances:output_type -> google.bigtable.admin.v2.ListInstancesResponse + 30, // 59: google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance:output_type -> google.bigtable.admin.v2.Instance + 39, // 60: google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance:output_type -> google.longrunning.Operation + 40, // 61: google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance:output_type -> google.protobuf.Empty + 39, // 62: google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster:output_type -> google.longrunning.Operation + 32, // 63: google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster:output_type -> google.bigtable.admin.v2.Cluster + 10, // 64: google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters:output_type -> google.bigtable.admin.v2.ListClustersResponse + 39, // 65: google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster:output_type -> google.longrunning.Operation + 39, // 66: google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateCluster:output_type -> google.longrunning.Operation + 40, // 67: 
google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster:output_type -> google.protobuf.Empty + 34, // 68: google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile:output_type -> google.bigtable.admin.v2.AppProfile + 34, // 69: google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile:output_type -> google.bigtable.admin.v2.AppProfile + 21, // 70: google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles:output_type -> google.bigtable.admin.v2.ListAppProfilesResponse + 39, // 71: google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile:output_type -> google.longrunning.Operation + 40, // 72: google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile:output_type -> google.protobuf.Empty + 41, // 73: google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy:output_type -> google.iam.v1.Policy + 41, // 74: google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy:output_type -> google.iam.v1.Policy + 42, // 75: google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 26, // 76: google.bigtable.admin.v2.BigtableInstanceAdmin.ListHotTablets:output_type -> google.bigtable.admin.v2.ListHotTabletsResponse + 56, // [56:77] is the sub-list for method output_type + 35, // [35:56] is the sub-list for method input_type + 35, // [35:35] is the sub-list for extension type_name + 35, // [35:35] is the sub-list for extension extendee + 0, // [0:35] is the sub-list for field type_name } func init() { file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() } @@ -2555,19 +2948,56 @@ func file_google_bigtable_admin_v2_bigtable_instance_admin_proto_init() { return nil } } + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListHotTabletsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListHotTabletsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateClusterMetadata_TableProgress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc, - NumEnums: 0, - NumMessages: 25, + NumEnums: 1, + NumMessages: 29, NumExtensions: 0, NumServices: 1, }, GoTypes: file_google_bigtable_admin_v2_bigtable_instance_admin_proto_goTypes, DependencyIndexes: file_google_bigtable_admin_v2_bigtable_instance_admin_proto_depIdxs, + EnumInfos: file_google_bigtable_admin_v2_bigtable_instance_admin_proto_enumTypes, MessageInfos: file_google_bigtable_admin_v2_bigtable_instance_admin_proto_msgTypes, }.Build() File_google_bigtable_admin_v2_bigtable_instance_admin_proto = out.File @@ -2660,6 +3090,9 @@ type BigtableInstanceAdminClient interface { SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) // Returns permissions that the caller has on 
the specified instance resource. TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) + // Lists hot tablets in a cluster, within the time range provided. Hot + // tablets are ordered based on CPU usage. + ListHotTablets(ctx context.Context, in *ListHotTabletsRequest, opts ...grpc.CallOption) (*ListHotTabletsResponse, error) } type bigtableInstanceAdminClient struct { @@ -2850,6 +3283,15 @@ func (c *bigtableInstanceAdminClient) TestIamPermissions(ctx context.Context, in return out, nil } +func (c *bigtableInstanceAdminClient) ListHotTablets(ctx context.Context, in *ListHotTabletsRequest, opts ...grpc.CallOption) (*ListHotTabletsResponse, error) { + out := new(ListHotTabletsResponse) + err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // BigtableInstanceAdminServer is the server API for BigtableInstanceAdmin service. type BigtableInstanceAdminServer interface { // Create an instance within a project. @@ -2924,6 +3366,9 @@ type BigtableInstanceAdminServer interface { SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) // Returns permissions that the caller has on the specified instance resource. TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) + // Lists hot tablets in a cluster, within the time range provided. Hot + // tablets are ordered based on CPU usage. + ListHotTablets(context.Context, *ListHotTabletsRequest) (*ListHotTabletsResponse, error) } // UnimplementedBigtableInstanceAdminServer can be embedded to have forward compatible implementations. @@ -2990,6 +3435,9 @@ func (*UnimplementedBigtableInstanceAdminServer) SetIamPolicy(context.Context, * func (*UnimplementedBigtableInstanceAdminServer) TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") } +func (*UnimplementedBigtableInstanceAdminServer) ListHotTablets(context.Context, *ListHotTabletsRequest) (*ListHotTabletsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListHotTablets not implemented") +} func RegisterBigtableInstanceAdminServer(s *grpc.Server, srv BigtableInstanceAdminServer) { s.RegisterService(&_BigtableInstanceAdmin_serviceDesc, srv) @@ -3355,6 +3803,24 @@ func _BigtableInstanceAdmin_TestIamPermissions_Handler(srv interface{}, ctx cont return interceptor(ctx, in, info, handler) } +func _BigtableInstanceAdmin_ListHotTablets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListHotTabletsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableInstanceAdminServer).ListHotTablets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableInstanceAdminServer).ListHotTablets(ctx, req.(*ListHotTabletsRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _BigtableInstanceAdmin_serviceDesc = grpc.ServiceDesc{ ServiceName: "google.bigtable.admin.v2.BigtableInstanceAdmin", HandlerType: (*BigtableInstanceAdminServer)(nil), 
@@ -3439,6 +3905,10 @@ var _BigtableInstanceAdmin_serviceDesc = grpc.ServiceDesc{ MethodName: "TestIamPermissions", Handler: _BigtableInstanceAdmin_TestIamPermissions_Handler, }, + { + MethodName: "ListHotTablets", + Handler: _BigtableInstanceAdmin_ListHotTablets_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "google/bigtable/admin/v2/bigtable_instance_admin.proto", diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go index b2d4d51a4f2f..4170de70fddf 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.13.0 +// protoc v3.12.2 // source: google/bigtable/admin/v2/bigtable_table_admin.proto package admin @@ -65,6 +65,7 @@ type RestoreTableRequest struct { // Required. The source from which to restore. // // Types that are assignable to Source: + // // *RestoreTableRequest_Backup Source isRestoreTableRequest_Source `protobuf_oneof:"source"` } @@ -156,6 +157,7 @@ type RestoreTableMetadata struct { // `source` in [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. // // Types that are assignable to SourceInfo: + // // *RestoreTableMetadata_BackupInfo SourceInfo isRestoreTableMetadata_SourceInfo `protobuf_oneof:"source_info"` // If exists, the name of the long-running operation that will be used to @@ -341,15 +343,15 @@ type CreateTableRequest struct { // // Example: // - // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` - // `"other", "zz"]` - // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` - // * Key assignment: - // - Tablet 1 `[, apple) => {"a"}.` - // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - // - Tablet 4 `[customer_2, other) => {"customer_2"}.` - // - Tablet 5 `[other, ) => {"other", "zz"}.` + // - Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` + // `"other", "zz"]` + // - initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` + // - Key assignment: + // - Tablet 1 `[, apple) => {"a"}.` + // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` + // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` + // - Tablet 4 `[customer_2, other) => {"customer_2"}.` + // - Tablet 5 `[other, ) => {"other", "zz"}.` InitialSplits []*CreateTableRequest_Split `protobuf:"bytes,4,rep,name=initial_splits,json=initialSplits,proto3" json:"initial_splits,omitempty"` } @@ -505,6 +507,7 @@ type DropRowRangeRequest struct { // Delete all rows or by prefix. // // Types that are assignable to Target: + // // *DropRowRangeRequest_RowKeyPrefix // *DropRowRangeRequest_DeleteAllDataFromTable Target isDropRowRangeRequest_Target `protobuf_oneof:"target"` @@ -799,6 +802,142 @@ func (x *GetTableRequest) GetView() Table_View { return Table_VIEW_UNSPECIFIED } +// The request for +// [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. 
+type UpdateTableRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The table to update. + // The table's `name` field is used to identify the table to update. + Table *Table `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` + // Required. The list of fields to update. + // A mask specifying which fields (e.g. `deletion_protection`) in the `table` + // field should be updated. This mask is relative to the `table` field, not to + // the request message. The wildcard (*) path is currently not supported. + // Currently UpdateTable is only supported for the following field: + // - `deletion_protection` + // + // If `column_families` is set in `update_mask`, it will return an + // UNIMPLEMENTED error. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateTableRequest) Reset() { + *x = UpdateTableRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateTableRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateTableRequest) ProtoMessage() {} + +func (x *UpdateTableRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateTableRequest.ProtoReflect.Descriptor instead. +func (*UpdateTableRequest) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{9} +} + +func (x *UpdateTableRequest) GetTable() *Table { + if x != nil { + return x.Table + } + return nil +} + +func (x *UpdateTableRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Metadata type for the operation returned by +// [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. +type UpdateTableMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the table being updated. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The time at which this operation started. + StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // If set, the time at which this operation finished or was canceled. 
+ EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` +} + +func (x *UpdateTableMetadata) Reset() { + *x = UpdateTableMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateTableMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateTableMetadata) ProtoMessage() {} + +func (x *UpdateTableMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateTableMetadata.ProtoReflect.Descriptor instead. +func (*UpdateTableMetadata) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{10} +} + +func (x *UpdateTableMetadata) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateTableMetadata) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +func (x *UpdateTableMetadata) GetEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EndTime + } + return nil +} + // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] type DeleteTableRequest struct { @@ -815,7 +954,7 @@ type DeleteTableRequest struct { func (x *DeleteTableRequest) Reset() { *x = DeleteTableRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -828,7 +967,7 @@ func (x *DeleteTableRequest) String() string { func (*DeleteTableRequest) ProtoMessage() {} func (x *DeleteTableRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -841,7 +980,7 @@ func (x *DeleteTableRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteTableRequest.ProtoReflect.Descriptor instead. func (*DeleteTableRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{9} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{11} } func (x *DeleteTableRequest) GetName() string { @@ -851,6 +990,126 @@ func (x *DeleteTableRequest) GetName() string { return "" } +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] +type UndeleteTableRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The unique name of the table to be restored. + // Values are of the form + // `projects/{project}/instances/{instance}/tables/{table}`. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *UndeleteTableRequest) Reset() { + *x = UndeleteTableRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UndeleteTableRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UndeleteTableRequest) ProtoMessage() {} + +func (x *UndeleteTableRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UndeleteTableRequest.ProtoReflect.Descriptor instead. +func (*UndeleteTableRequest) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{12} +} + +func (x *UndeleteTableRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Metadata type for the operation returned by +// [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable]. +type UndeleteTableMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the table being restored. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The time at which this operation started. + StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // If set, the time at which this operation finished or was cancelled. + EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` +} + +func (x *UndeleteTableMetadata) Reset() { + *x = UndeleteTableMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UndeleteTableMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UndeleteTableMetadata) ProtoMessage() {} + +func (x *UndeleteTableMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UndeleteTableMetadata.ProtoReflect.Descriptor instead. 
+func (*UndeleteTableMetadata) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{13} +} + +func (x *UndeleteTableMetadata) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UndeleteTableMetadata) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +func (x *UndeleteTableMetadata) GetEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EndTime + } + return nil +} + // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] type ModifyColumnFamiliesRequest struct { @@ -872,7 +1131,7 @@ type ModifyColumnFamiliesRequest struct { func (x *ModifyColumnFamiliesRequest) Reset() { *x = ModifyColumnFamiliesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -885,7 +1144,7 @@ func (x *ModifyColumnFamiliesRequest) String() string { func (*ModifyColumnFamiliesRequest) ProtoMessage() {} func (x *ModifyColumnFamiliesRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -898,7 +1157,7 @@ func (x *ModifyColumnFamiliesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ModifyColumnFamiliesRequest.ProtoReflect.Descriptor instead. func (*ModifyColumnFamiliesRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{10} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{14} } func (x *ModifyColumnFamiliesRequest) GetName() string { @@ -931,7 +1190,7 @@ type GenerateConsistencyTokenRequest struct { func (x *GenerateConsistencyTokenRequest) Reset() { *x = GenerateConsistencyTokenRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -944,7 +1203,7 @@ func (x *GenerateConsistencyTokenRequest) String() string { func (*GenerateConsistencyTokenRequest) ProtoMessage() {} func (x *GenerateConsistencyTokenRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -957,7 +1216,7 @@ func (x *GenerateConsistencyTokenRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GenerateConsistencyTokenRequest.ProtoReflect.Descriptor instead. 
func (*GenerateConsistencyTokenRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{11} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{15} } func (x *GenerateConsistencyTokenRequest) GetName() string { @@ -981,7 +1240,7 @@ type GenerateConsistencyTokenResponse struct { func (x *GenerateConsistencyTokenResponse) Reset() { *x = GenerateConsistencyTokenResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -994,7 +1253,7 @@ func (x *GenerateConsistencyTokenResponse) String() string { func (*GenerateConsistencyTokenResponse) ProtoMessage() {} func (x *GenerateConsistencyTokenResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1007,7 +1266,7 @@ func (x *GenerateConsistencyTokenResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GenerateConsistencyTokenResponse.ProtoReflect.Descriptor instead. func (*GenerateConsistencyTokenResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{12} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{16} } func (x *GenerateConsistencyTokenResponse) GetConsistencyToken() string { @@ -1035,7 +1294,7 @@ type CheckConsistencyRequest struct { func (x *CheckConsistencyRequest) Reset() { *x = CheckConsistencyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1048,7 +1307,7 @@ func (x *CheckConsistencyRequest) String() string { func (*CheckConsistencyRequest) ProtoMessage() {} func (x *CheckConsistencyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1061,7 +1320,7 @@ func (x *CheckConsistencyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CheckConsistencyRequest.ProtoReflect.Descriptor instead. 
func (*CheckConsistencyRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{13} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{17} } func (x *CheckConsistencyRequest) GetName() string { @@ -1093,7 +1352,7 @@ type CheckConsistencyResponse struct { func (x *CheckConsistencyResponse) Reset() { *x = CheckConsistencyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1106,7 +1365,7 @@ func (x *CheckConsistencyResponse) String() string { func (*CheckConsistencyResponse) ProtoMessage() {} func (x *CheckConsistencyResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1119,7 +1378,7 @@ func (x *CheckConsistencyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CheckConsistencyResponse.ProtoReflect.Descriptor instead. func (*CheckConsistencyResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{14} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{18} } func (x *CheckConsistencyResponse) GetConsistent() bool { @@ -1166,7 +1425,7 @@ type SnapshotTableRequest struct { func (x *SnapshotTableRequest) Reset() { *x = SnapshotTableRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1179,7 +1438,7 @@ func (x *SnapshotTableRequest) String() string { func (*SnapshotTableRequest) ProtoMessage() {} func (x *SnapshotTableRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1192,7 +1451,7 @@ func (x *SnapshotTableRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SnapshotTableRequest.ProtoReflect.Descriptor instead. 
func (*SnapshotTableRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{15} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{19} } func (x *SnapshotTableRequest) GetName() string { @@ -1251,7 +1510,7 @@ type GetSnapshotRequest struct { func (x *GetSnapshotRequest) Reset() { *x = GetSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1264,7 +1523,7 @@ func (x *GetSnapshotRequest) String() string { func (*GetSnapshotRequest) ProtoMessage() {} func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1277,7 +1536,7 @@ func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSnapshotRequest.ProtoReflect.Descriptor instead. func (*GetSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{16} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{20} } func (x *GetSnapshotRequest) GetName() string { @@ -1315,7 +1574,7 @@ type ListSnapshotsRequest struct { func (x *ListSnapshotsRequest) Reset() { *x = ListSnapshotsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1328,7 +1587,7 @@ func (x *ListSnapshotsRequest) String() string { func (*ListSnapshotsRequest) ProtoMessage() {} func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1341,7 +1600,7 @@ func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSnapshotsRequest.ProtoReflect.Descriptor instead. 
func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{17} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{21} } func (x *ListSnapshotsRequest) GetParent() string { @@ -1388,7 +1647,7 @@ type ListSnapshotsResponse struct { func (x *ListSnapshotsResponse) Reset() { *x = ListSnapshotsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1401,7 +1660,7 @@ func (x *ListSnapshotsResponse) String() string { func (*ListSnapshotsResponse) ProtoMessage() {} func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1414,7 +1673,7 @@ func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSnapshotsResponse.ProtoReflect.Descriptor instead. func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{18} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{22} } func (x *ListSnapshotsResponse) GetSnapshots() []*Snapshot { @@ -1452,7 +1711,7 @@ type DeleteSnapshotRequest struct { func (x *DeleteSnapshotRequest) Reset() { *x = DeleteSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1465,7 +1724,7 @@ func (x *DeleteSnapshotRequest) String() string { func (*DeleteSnapshotRequest) ProtoMessage() {} func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1478,7 +1737,7 @@ func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteSnapshotRequest.ProtoReflect.Descriptor instead. 
func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{19} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{23} } func (x *DeleteSnapshotRequest) GetName() string { @@ -1510,7 +1769,7 @@ type SnapshotTableMetadata struct { func (x *SnapshotTableMetadata) Reset() { *x = SnapshotTableMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1523,7 +1782,7 @@ func (x *SnapshotTableMetadata) String() string { func (*SnapshotTableMetadata) ProtoMessage() {} func (x *SnapshotTableMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1536,7 +1795,7 @@ func (x *SnapshotTableMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use SnapshotTableMetadata.ProtoReflect.Descriptor instead. func (*SnapshotTableMetadata) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{20} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{24} } func (x *SnapshotTableMetadata) GetOriginalRequest() *SnapshotTableRequest { @@ -1583,7 +1842,7 @@ type CreateTableFromSnapshotMetadata struct { func (x *CreateTableFromSnapshotMetadata) Reset() { *x = CreateTableFromSnapshotMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1596,7 +1855,7 @@ func (x *CreateTableFromSnapshotMetadata) String() string { func (*CreateTableFromSnapshotMetadata) ProtoMessage() {} func (x *CreateTableFromSnapshotMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1609,7 +1868,7 @@ func (x *CreateTableFromSnapshotMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateTableFromSnapshotMetadata.ProtoReflect.Descriptor instead. 
func (*CreateTableFromSnapshotMetadata) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{21} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{25} } func (x *CreateTableFromSnapshotMetadata) GetOriginalRequest() *CreateTableFromSnapshotRequest { @@ -1657,7 +1916,7 @@ type CreateBackupRequest struct { func (x *CreateBackupRequest) Reset() { *x = CreateBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1670,7 +1929,7 @@ func (x *CreateBackupRequest) String() string { func (*CreateBackupRequest) ProtoMessage() {} func (x *CreateBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1683,7 +1942,7 @@ func (x *CreateBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateBackupRequest.ProtoReflect.Descriptor instead. func (*CreateBackupRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{22} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{26} } func (x *CreateBackupRequest) GetParent() string { @@ -1727,7 +1986,7 @@ type CreateBackupMetadata struct { func (x *CreateBackupMetadata) Reset() { *x = CreateBackupMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1740,7 +1999,7 @@ func (x *CreateBackupMetadata) String() string { func (*CreateBackupMetadata) ProtoMessage() {} func (x *CreateBackupMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1753,7 +2012,7 @@ func (x *CreateBackupMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateBackupMetadata.ProtoReflect.Descriptor instead. func (*CreateBackupMetadata) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{23} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{27} } func (x *CreateBackupMetadata) GetName() string { @@ -1793,7 +2052,7 @@ type UpdateBackupRequest struct { // Required. The backup to update. `backup.name`, and the fields to be updated // as specified by `update_mask` are required. Other fields are ignored. // Update is only supported for the following fields: - // * `backup.expire_time`. + // - `backup.expire_time`. Backup *Backup `protobuf:"bytes,1,opt,name=backup,proto3" json:"backup,omitempty"` // Required. A mask specifying which fields (e.g. `expire_time`) in the // Backup resource should be updated. 
This mask is relative to the Backup @@ -1806,7 +2065,7 @@ type UpdateBackupRequest struct { func (x *UpdateBackupRequest) Reset() { *x = UpdateBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1819,7 +2078,7 @@ func (x *UpdateBackupRequest) String() string { func (*UpdateBackupRequest) ProtoMessage() {} func (x *UpdateBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1832,7 +2091,7 @@ func (x *UpdateBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateBackupRequest.ProtoReflect.Descriptor instead. func (*UpdateBackupRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{24} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{28} } func (x *UpdateBackupRequest) GetBackup() *Backup { @@ -1864,7 +2123,7 @@ type GetBackupRequest struct { func (x *GetBackupRequest) Reset() { *x = GetBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1877,7 +2136,7 @@ func (x *GetBackupRequest) String() string { func (*GetBackupRequest) ProtoMessage() {} func (x *GetBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1890,7 +2149,7 @@ func (x *GetBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBackupRequest.ProtoReflect.Descriptor instead. 
func (*GetBackupRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{25} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{29} } func (x *GetBackupRequest) GetName() string { @@ -1915,7 +2174,7 @@ type DeleteBackupRequest struct { func (x *DeleteBackupRequest) Reset() { *x = DeleteBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1928,7 +2187,7 @@ func (x *DeleteBackupRequest) String() string { func (*DeleteBackupRequest) ProtoMessage() {} func (x *DeleteBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1941,7 +2200,7 @@ func (x *DeleteBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteBackupRequest.ProtoReflect.Descriptor instead. func (*DeleteBackupRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{26} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{30} } func (x *DeleteBackupRequest) GetName() string { @@ -1970,13 +2229,13 @@ type ListBackupsRequest struct { // roughly synonymous with equality. Filter rules are case insensitive. // // The fields eligible for filtering are: - // * `name` - // * `source_table` - // * `state` - // * `start_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - // * `end_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - // * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - // * `size_bytes` + // - `name` + // - `source_table` + // - `state` + // - `start_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // - `end_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // - `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // - `size_bytes` // // To filter on multiple expressions, provide each separate expression within // parentheses. By default, each expression is an AND expression. However, @@ -1984,29 +2243,29 @@ type ListBackupsRequest struct { // // Some examples of using filters are: // - // * `name:"exact"` --> The backup's name is the string "exact". - // * `name:howl` --> The backup's name contains the string "howl". - // * `source_table:prod` - // --> The source_table's name contains the string "prod". - // * `state:CREATING` --> The backup is pending creation. - // * `state:READY` --> The backup is fully created and ready for use. - // * `(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")` - // --> The backup name contains the string "howl" and start_time - // of the backup is before 2018-03-28T14:50:00Z. - // * `size_bytes > 10000000000` --> The backup's size is greater than 10GB + // - `name:"exact"` --> The backup's name is the string "exact". + // - `name:howl` --> The backup's name contains the string "howl". + // - `source_table:prod` + // --> The source_table's name contains the string "prod". + // - `state:CREATING` --> The backup is pending creation. 
+ // - `state:READY` --> The backup is fully created and ready for use. + // - `(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")` + // --> The backup name contains the string "howl" and start_time + // of the backup is before 2018-03-28T14:50:00Z. + // - `size_bytes > 10000000000` --> The backup's size is greater than 10GB Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // An expression for specifying the sort order of the results of the request. // The string value should specify one or more fields in [Backup][google.bigtable.admin.v2.Backup]. The full // syntax is described at https://aip.dev/132#ordering. // // Fields supported are: - // * name - // * source_table - // * expire_time - // * start_time - // * end_time - // * size_bytes - // * state + // - name + // - source_table + // - expire_time + // - start_time + // - end_time + // - size_bytes + // - state // // For example, "start_time". The default sorting order is ascending. // To specify descending order for the field, a suffix " desc" should @@ -2029,7 +2288,7 @@ type ListBackupsRequest struct { func (x *ListBackupsRequest) Reset() { *x = ListBackupsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2042,7 +2301,7 @@ func (x *ListBackupsRequest) String() string { func (*ListBackupsRequest) ProtoMessage() {} func (x *ListBackupsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2055,7 +2314,7 @@ func (x *ListBackupsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListBackupsRequest.ProtoReflect.Descriptor instead. func (*ListBackupsRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{27} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{31} } func (x *ListBackupsRequest) GetParent() string { @@ -2110,7 +2369,7 @@ type ListBackupsResponse struct { func (x *ListBackupsResponse) Reset() { *x = ListBackupsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2123,7 +2382,7 @@ func (x *ListBackupsResponse) String() string { func (*ListBackupsResponse) ProtoMessage() {} func (x *ListBackupsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2136,7 +2395,7 @@ func (x *ListBackupsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListBackupsResponse.ProtoReflect.Descriptor instead. 
func (*ListBackupsResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{28} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{32} } func (x *ListBackupsResponse) GetBackups() []*Backup { @@ -2166,7 +2425,7 @@ type CreateTableRequest_Split struct { func (x *CreateTableRequest_Split) Reset() { *x = CreateTableRequest_Split{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2179,7 +2438,7 @@ func (x *CreateTableRequest_Split) String() string { func (*CreateTableRequest_Split) ProtoMessage() {} func (x *CreateTableRequest_Split) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2210,9 +2469,10 @@ type ModifyColumnFamiliesRequest_Modification struct { // The ID of the column family to be modified. Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Column familiy modifications. + // Column family modifications. // // Types that are assignable to Mod: + // // *ModifyColumnFamiliesRequest_Modification_Create // *ModifyColumnFamiliesRequest_Modification_Update // *ModifyColumnFamiliesRequest_Modification_Drop @@ -2222,7 +2482,7 @@ type ModifyColumnFamiliesRequest_Modification struct { func (x *ModifyColumnFamiliesRequest_Modification) Reset() { *x = ModifyColumnFamiliesRequest_Modification{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2235,7 +2495,7 @@ func (x *ModifyColumnFamiliesRequest_Modification) String() string { func (*ModifyColumnFamiliesRequest_Modification) ProtoMessage() {} func (x *ModifyColumnFamiliesRequest_Modification) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2248,7 +2508,7 @@ func (x *ModifyColumnFamiliesRequest_Modification) ProtoReflect() protoreflect.M // Deprecated: Use ModifyColumnFamiliesRequest_Modification.ProtoReflect.Descriptor instead. 
func (*ModifyColumnFamiliesRequest_Modification) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{10, 0} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{14, 0} } func (x *ModifyColumnFamiliesRequest_Modification) GetId() string { @@ -2468,114 +2728,165 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x22, 0x54, 0x0a, 0x12, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x22, 0x8e, 0x03, 0x0a, 0x1b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x6d, 0x0a, 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, - 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x1a, 0xbf, 0x01, 0x0a, 0x0c, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x40, 0x0a, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x48, 
0x00, 0x52, 0x06, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x04, 0x64, 0x72, 0x6f, 0x70, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x64, 0x72, 0x6f, 0x70, 0x42, 0x05, 0x0a, 0x03, - 0x6d, 0x6f, 0x64, 0x22, 0x61, 0x0a, 0x1f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, - 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x4f, 0x0a, 0x20, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, - 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x3a, 0x0a, 0x18, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, - 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x74, 0x22, 0x93, 0x02, 0x0a, 0x14, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, - 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x26, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, + 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x22, 0x92, 0x01, 0x0a, + 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 
0x73, 0x74, 0x12, 0x3a, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, + 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, + 0x6b, 0x22, 0x9b, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, + 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, + 0x54, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x56, 0x0a, 0x14, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x73, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x22, 0x57, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, + 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x9d, 0x01, + 0x0a, 0x15, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x8e, 0x03, + 0x0a, 0x1b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, + 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0x98, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x26, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, - 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x81, 0x01, 0x0a, 0x15, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x26, 
0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, - 0x5a, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6d, 0x0a, + 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, + 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x6f, 0x64, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x6d, + 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xbf, 0x01, 0x0a, + 0x0c, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x40, 0x0a, + 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, + 0x40, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x48, 0x00, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x12, 0x14, 0x0a, 0x04, 0x64, 0x72, 0x6f, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x00, 0x52, 0x04, 0x64, 0x72, 0x6f, 0x70, 0x42, 0x05, 0x0a, 0x03, 0x6d, 0x6f, 0x64, 0x22, 0x61, + 0x0a, 0x1f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x22, 0x4f, 0x0a, 0x20, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, + 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x12, 0x3e, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, + 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x10, + 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x3a, 0x0a, 0x18, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x93, 0x02, 0x0a, + 0x14, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x0a, 0x24, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, + 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x57, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xee, 0x01, 0x0a, 0x15, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x59, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, - 0x6c, 0x5f, 
0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x98, 0x01, 0x0a, 0x14, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x0a, 0x24, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, + 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x40, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5a, 0x0a, 0x15, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xee, 0x01, 0x0a, 0x15, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x59, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x66, 0x69, + 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x66, 0x69, 0x6e, + 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x82, 0x02, 0x0a, 0x1f, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x63, 0x0a, 0x10, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, @@ -2584,401 +2895,410 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x3b, 0x0a, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x0a, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x82, 0x02, 0x0a, - 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x63, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, - 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x26, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, - 0x20, 0x0a, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, - 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x22, 0xbf, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, - 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, - 0x6d, 0x65, 0x22, 0x96, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x53, 0x0a, 0x10, 
0x47, - 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2b, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0x56, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, - 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, - 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, - 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x79, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, + 0x52, 0x0a, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xbc, 0x01, 0x0a, + 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x0a, 0x24, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x06, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x07, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, - 0x98, 0x25, 0x0a, 0x12, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xab, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x4d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x22, 0x2a, 0x2f, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0xbf, 0x01, 0x0a, 0x14, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x96, 0x01, + 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 
0x53, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, + 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x56, 0x0a, 0x13, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x26, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, + 0x72, 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, + 0x72, 0x42, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x79, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xb2, 0x28, 0x0a, 0x12, 0x42, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x64, 0x6d, 0x69, + 0x6e, 0x12, 0xab, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x22, 0x4d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x22, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x15, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, + 0x8a, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, + 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x38, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, + 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x95, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x22, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x15, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x12, 0x8a, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, - 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x95, 0x01, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x42, 0x22, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, + 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0xda, + 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, + 0x64, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0xca, 0x41, 0x28, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1f, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa4, 0x01, 0x0a, + 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, + 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x39, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0xca, 0x41, 0x28, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x12, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, - 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0xa4, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0xda, - 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x08, 0x47, 0x65, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 
0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8e, 0x01, 0x0a, - 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, 0x2a, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xcf, 0x01, - 0x0a, 0x14, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, - 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, - 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x5f, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x22, 0x3f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, + 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xce, 0x01, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, + 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x39, 0x32, 0x30, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x05, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0xda, 0x41, 0x11, 
0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xca, 0x41, 0x1c, 0x0a, 0x05, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x12, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x8e, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x39, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, - 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x12, 0x6e, 0x61, 0x6d, - 0x65, 0x2c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x99, 0x01, 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x72, 0x6f, 0x70, - 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x42, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x22, - 0x37, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x72, 0x6f, 0x70, - 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xe8, 0x01, 0x0a, 0x18, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc6, 0x01, 0x0a, 0x0d, 0x55, 0x6e, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x66, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x38, 0x22, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 
0x7d, 0x3a, 0x75, + 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0xca, 0x41, 0x1e, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x15, 0x55, 0x6e, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0xcf, 0x01, 0x0a, 0x14, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x22, 0x3f, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, + 0x41, 0x12, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x99, 0x01, 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x42, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x3c, 0x22, 0x37, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, + 0x3a, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x3a, 0x01, 0x2a, + 0x12, 0xe8, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, + 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x39, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 
0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x55, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x48, 0x22, 0x43, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x3a, 0x01, 0x2a, 0xda, - 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xda, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x31, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, - 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x16, 0x6e, 0x61, 0x6d, - 0x65, 0x2c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x12, 0xea, 0x01, 0x0a, 0x0d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x55, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x48, 0x22, 0x43, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xda, 0x01, 0x0a, 0x10, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 
0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, + 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, + 0x41, 0x16, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x63, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0xea, 0x01, 0x0a, 0x0d, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x89, 0x01, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x38, 0x22, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x24, 0x6e, 0x61, 0x6d, + 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0xca, 0x41, 0x21, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x15, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa8, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, - 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x89, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x22, 0x33, 0x2f, - 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, + 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 
0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0xbb, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x49, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, 0x38, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x24, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x2c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, - 0x2c, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0xca, 0x41, 0x21, 0x0a, - 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x15, 0x53, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0xa8, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, 0x38, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xbb, 0x01, 0x0a, 0x0d, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x22, 0x49, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, - 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xa2, 0x01, 0x0a, 0x0e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x2a, 0x38, 0x2f, - 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xa2, + 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x3a, 0x2a, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0xe0, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, + 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x81, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x36, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xe0, - 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, - 0x2d, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, - 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x81, 0x01, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x3a, 0x06, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0xca, 0x41, 0x1e, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x14, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0xa0, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, - 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x45, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc3, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, 0x17, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x2c, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0xca, 0x41, 0x1e, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x12, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa0, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x61, 0x63, 0x6b, 0x75, 
0x70, 0x22, 0x62, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x32, 0x3d, - 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x06, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, 0x12, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2c, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x9c, 0x01, 0x0a, 0x0c, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x2a, 0x36, 0x2f, 0x76, 0x32, 0x2f, + 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x22, 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, - 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb3, 0x01, 0x0a, 0x0b, 0x4c, 0x69, - 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc3, 0x01, 0x0a, 0x0c, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, - 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0xda, 0x41, 0x06, 0x70, 
0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, - 0xbb, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x62, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x47, 0x32, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, 0x12, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, + 0x9c, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, - 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5d, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x01, 0x2a, 0xca, 0x41, 0x1d, - 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xec, 0x01, - 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, - 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa0, 0x01, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x2a, + 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 
0x73, 0x2f, + 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb3, + 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0xbb, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, + 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x5d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, + 0x01, 0x2a, 0xca, 0x41, 0x1d, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x14, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0xec, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa0, + 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 
0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0xf3, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa7, 0x01, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, 0x7b, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xa4, 0x02, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, + 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, + 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9a, 0x01, 0x22, 0x41, 0x2f, + 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, + 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x3a, 0x01, 0x2a, 0x5a, 0x52, 0x22, 0x4d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, - 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xf3, 0x01, 0x0a, - 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, - 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, - 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa7, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, - 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, - 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, - 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0xa4, 0x02, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, - 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, - 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, - 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9a, 0x01, 0x22, 0x41, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, - 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x5a, 0x52, - 0x22, 0x4d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, - 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, - 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, - 0x01, 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xde, 0x02, 0xca, 0x41, 0x1c, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 
0xd2, 0x41, 0xbb, 0x02, 0x68, - 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, - 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xde, + 0x02, 0xca, 0x41, 0x1c, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0xd2, 0x41, 0xbb, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, - 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, - 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, - 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, - 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xdf, 0x01, 0x0a, 0x1c, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x17, 0x42, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 
0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, - 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, + 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, + 0xdf, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x42, 0x17, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, + 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2993,7 +3313,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP() []by return 
file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescData } -var file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 31) +var file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 35) var file_google_bigtable_admin_v2_bigtable_table_admin_proto_goTypes = []interface{}{ (*RestoreTableRequest)(nil), // 0: google.bigtable.admin.v2.RestoreTableRequest (*RestoreTableMetadata)(nil), // 1: google.bigtable.admin.v2.RestoreTableMetadata @@ -3004,123 +3324,137 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_goTypes = []interfa (*ListTablesRequest)(nil), // 6: google.bigtable.admin.v2.ListTablesRequest (*ListTablesResponse)(nil), // 7: google.bigtable.admin.v2.ListTablesResponse (*GetTableRequest)(nil), // 8: google.bigtable.admin.v2.GetTableRequest - (*DeleteTableRequest)(nil), // 9: google.bigtable.admin.v2.DeleteTableRequest - (*ModifyColumnFamiliesRequest)(nil), // 10: google.bigtable.admin.v2.ModifyColumnFamiliesRequest - (*GenerateConsistencyTokenRequest)(nil), // 11: google.bigtable.admin.v2.GenerateConsistencyTokenRequest - (*GenerateConsistencyTokenResponse)(nil), // 12: google.bigtable.admin.v2.GenerateConsistencyTokenResponse - (*CheckConsistencyRequest)(nil), // 13: google.bigtable.admin.v2.CheckConsistencyRequest - (*CheckConsistencyResponse)(nil), // 14: google.bigtable.admin.v2.CheckConsistencyResponse - (*SnapshotTableRequest)(nil), // 15: google.bigtable.admin.v2.SnapshotTableRequest - (*GetSnapshotRequest)(nil), // 16: google.bigtable.admin.v2.GetSnapshotRequest - (*ListSnapshotsRequest)(nil), // 17: google.bigtable.admin.v2.ListSnapshotsRequest - (*ListSnapshotsResponse)(nil), // 18: google.bigtable.admin.v2.ListSnapshotsResponse - (*DeleteSnapshotRequest)(nil), // 19: google.bigtable.admin.v2.DeleteSnapshotRequest - (*SnapshotTableMetadata)(nil), // 20: google.bigtable.admin.v2.SnapshotTableMetadata - (*CreateTableFromSnapshotMetadata)(nil), // 21: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata - (*CreateBackupRequest)(nil), // 22: google.bigtable.admin.v2.CreateBackupRequest - (*CreateBackupMetadata)(nil), // 23: google.bigtable.admin.v2.CreateBackupMetadata - (*UpdateBackupRequest)(nil), // 24: google.bigtable.admin.v2.UpdateBackupRequest - (*GetBackupRequest)(nil), // 25: google.bigtable.admin.v2.GetBackupRequest - (*DeleteBackupRequest)(nil), // 26: google.bigtable.admin.v2.DeleteBackupRequest - (*ListBackupsRequest)(nil), // 27: google.bigtable.admin.v2.ListBackupsRequest - (*ListBackupsResponse)(nil), // 28: google.bigtable.admin.v2.ListBackupsResponse - (*CreateTableRequest_Split)(nil), // 29: google.bigtable.admin.v2.CreateTableRequest.Split - (*ModifyColumnFamiliesRequest_Modification)(nil), // 30: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification - (RestoreSourceType)(0), // 31: google.bigtable.admin.v2.RestoreSourceType - (*BackupInfo)(nil), // 32: google.bigtable.admin.v2.BackupInfo - (*OperationProgress)(nil), // 33: google.bigtable.admin.v2.OperationProgress - (*Table)(nil), // 34: google.bigtable.admin.v2.Table - (Table_View)(0), // 35: google.bigtable.admin.v2.Table.View - (*durationpb.Duration)(nil), // 36: google.protobuf.Duration - (*Snapshot)(nil), // 37: google.bigtable.admin.v2.Snapshot - (*timestamppb.Timestamp)(nil), // 38: google.protobuf.Timestamp - (*Backup)(nil), // 39: google.bigtable.admin.v2.Backup + (*UpdateTableRequest)(nil), // 9: google.bigtable.admin.v2.UpdateTableRequest + (*UpdateTableMetadata)(nil), // 
10: google.bigtable.admin.v2.UpdateTableMetadata + (*DeleteTableRequest)(nil), // 11: google.bigtable.admin.v2.DeleteTableRequest + (*UndeleteTableRequest)(nil), // 12: google.bigtable.admin.v2.UndeleteTableRequest + (*UndeleteTableMetadata)(nil), // 13: google.bigtable.admin.v2.UndeleteTableMetadata + (*ModifyColumnFamiliesRequest)(nil), // 14: google.bigtable.admin.v2.ModifyColumnFamiliesRequest + (*GenerateConsistencyTokenRequest)(nil), // 15: google.bigtable.admin.v2.GenerateConsistencyTokenRequest + (*GenerateConsistencyTokenResponse)(nil), // 16: google.bigtable.admin.v2.GenerateConsistencyTokenResponse + (*CheckConsistencyRequest)(nil), // 17: google.bigtable.admin.v2.CheckConsistencyRequest + (*CheckConsistencyResponse)(nil), // 18: google.bigtable.admin.v2.CheckConsistencyResponse + (*SnapshotTableRequest)(nil), // 19: google.bigtable.admin.v2.SnapshotTableRequest + (*GetSnapshotRequest)(nil), // 20: google.bigtable.admin.v2.GetSnapshotRequest + (*ListSnapshotsRequest)(nil), // 21: google.bigtable.admin.v2.ListSnapshotsRequest + (*ListSnapshotsResponse)(nil), // 22: google.bigtable.admin.v2.ListSnapshotsResponse + (*DeleteSnapshotRequest)(nil), // 23: google.bigtable.admin.v2.DeleteSnapshotRequest + (*SnapshotTableMetadata)(nil), // 24: google.bigtable.admin.v2.SnapshotTableMetadata + (*CreateTableFromSnapshotMetadata)(nil), // 25: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata + (*CreateBackupRequest)(nil), // 26: google.bigtable.admin.v2.CreateBackupRequest + (*CreateBackupMetadata)(nil), // 27: google.bigtable.admin.v2.CreateBackupMetadata + (*UpdateBackupRequest)(nil), // 28: google.bigtable.admin.v2.UpdateBackupRequest + (*GetBackupRequest)(nil), // 29: google.bigtable.admin.v2.GetBackupRequest + (*DeleteBackupRequest)(nil), // 30: google.bigtable.admin.v2.DeleteBackupRequest + (*ListBackupsRequest)(nil), // 31: google.bigtable.admin.v2.ListBackupsRequest + (*ListBackupsResponse)(nil), // 32: google.bigtable.admin.v2.ListBackupsResponse + (*CreateTableRequest_Split)(nil), // 33: google.bigtable.admin.v2.CreateTableRequest.Split + (*ModifyColumnFamiliesRequest_Modification)(nil), // 34: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification + (RestoreSourceType)(0), // 35: google.bigtable.admin.v2.RestoreSourceType + (*BackupInfo)(nil), // 36: google.bigtable.admin.v2.BackupInfo + (*OperationProgress)(nil), // 37: google.bigtable.admin.v2.OperationProgress + (*Table)(nil), // 38: google.bigtable.admin.v2.Table + (Table_View)(0), // 39: google.bigtable.admin.v2.Table.View (*fieldmaskpb.FieldMask)(nil), // 40: google.protobuf.FieldMask - (*ColumnFamily)(nil), // 41: google.bigtable.admin.v2.ColumnFamily - (*v1.GetIamPolicyRequest)(nil), // 42: google.iam.v1.GetIamPolicyRequest - (*v1.SetIamPolicyRequest)(nil), // 43: google.iam.v1.SetIamPolicyRequest - (*v1.TestIamPermissionsRequest)(nil), // 44: google.iam.v1.TestIamPermissionsRequest - (*longrunning.Operation)(nil), // 45: google.longrunning.Operation - (*emptypb.Empty)(nil), // 46: google.protobuf.Empty - (*v1.Policy)(nil), // 47: google.iam.v1.Policy - (*v1.TestIamPermissionsResponse)(nil), // 48: google.iam.v1.TestIamPermissionsResponse + (*timestamppb.Timestamp)(nil), // 41: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 42: google.protobuf.Duration + (*Snapshot)(nil), // 43: google.bigtable.admin.v2.Snapshot + (*Backup)(nil), // 44: google.bigtable.admin.v2.Backup + (*ColumnFamily)(nil), // 45: google.bigtable.admin.v2.ColumnFamily + (*v1.GetIamPolicyRequest)(nil), // 46: 
google.iam.v1.GetIamPolicyRequest + (*v1.SetIamPolicyRequest)(nil), // 47: google.iam.v1.SetIamPolicyRequest + (*v1.TestIamPermissionsRequest)(nil), // 48: google.iam.v1.TestIamPermissionsRequest + (*longrunning.Operation)(nil), // 49: google.longrunning.Operation + (*emptypb.Empty)(nil), // 50: google.protobuf.Empty + (*v1.Policy)(nil), // 51: google.iam.v1.Policy + (*v1.TestIamPermissionsResponse)(nil), // 52: google.iam.v1.TestIamPermissionsResponse } var file_google_bigtable_admin_v2_bigtable_table_admin_proto_depIdxs = []int32{ - 31, // 0: google.bigtable.admin.v2.RestoreTableMetadata.source_type:type_name -> google.bigtable.admin.v2.RestoreSourceType - 32, // 1: google.bigtable.admin.v2.RestoreTableMetadata.backup_info:type_name -> google.bigtable.admin.v2.BackupInfo - 33, // 2: google.bigtable.admin.v2.RestoreTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress - 33, // 3: google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress - 34, // 4: google.bigtable.admin.v2.CreateTableRequest.table:type_name -> google.bigtable.admin.v2.Table - 29, // 5: google.bigtable.admin.v2.CreateTableRequest.initial_splits:type_name -> google.bigtable.admin.v2.CreateTableRequest.Split - 35, // 6: google.bigtable.admin.v2.ListTablesRequest.view:type_name -> google.bigtable.admin.v2.Table.View - 34, // 7: google.bigtable.admin.v2.ListTablesResponse.tables:type_name -> google.bigtable.admin.v2.Table - 35, // 8: google.bigtable.admin.v2.GetTableRequest.view:type_name -> google.bigtable.admin.v2.Table.View - 30, // 9: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications:type_name -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification - 36, // 10: google.bigtable.admin.v2.SnapshotTableRequest.ttl:type_name -> google.protobuf.Duration - 37, // 11: google.bigtable.admin.v2.ListSnapshotsResponse.snapshots:type_name -> google.bigtable.admin.v2.Snapshot - 15, // 12: google.bigtable.admin.v2.SnapshotTableMetadata.original_request:type_name -> google.bigtable.admin.v2.SnapshotTableRequest - 38, // 13: google.bigtable.admin.v2.SnapshotTableMetadata.request_time:type_name -> google.protobuf.Timestamp - 38, // 14: google.bigtable.admin.v2.SnapshotTableMetadata.finish_time:type_name -> google.protobuf.Timestamp - 4, // 15: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request:type_name -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest - 38, // 16: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time:type_name -> google.protobuf.Timestamp - 38, // 17: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time:type_name -> google.protobuf.Timestamp - 39, // 18: google.bigtable.admin.v2.CreateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup - 38, // 19: google.bigtable.admin.v2.CreateBackupMetadata.start_time:type_name -> google.protobuf.Timestamp - 38, // 20: google.bigtable.admin.v2.CreateBackupMetadata.end_time:type_name -> google.protobuf.Timestamp - 39, // 21: google.bigtable.admin.v2.UpdateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup - 40, // 22: google.bigtable.admin.v2.UpdateBackupRequest.update_mask:type_name -> google.protobuf.FieldMask - 39, // 23: google.bigtable.admin.v2.ListBackupsResponse.backups:type_name -> google.bigtable.admin.v2.Backup - 41, // 24: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create:type_name -> google.bigtable.admin.v2.ColumnFamily - 41, // 25: 
google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update:type_name -> google.bigtable.admin.v2.ColumnFamily - 3, // 26: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:input_type -> google.bigtable.admin.v2.CreateTableRequest - 4, // 27: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:input_type -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest - 6, // 28: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:input_type -> google.bigtable.admin.v2.ListTablesRequest - 8, // 29: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:input_type -> google.bigtable.admin.v2.GetTableRequest - 9, // 30: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:input_type -> google.bigtable.admin.v2.DeleteTableRequest - 10, // 31: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:input_type -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest - 5, // 32: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:input_type -> google.bigtable.admin.v2.DropRowRangeRequest - 11, // 33: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:input_type -> google.bigtable.admin.v2.GenerateConsistencyTokenRequest - 13, // 34: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:input_type -> google.bigtable.admin.v2.CheckConsistencyRequest - 15, // 35: google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:input_type -> google.bigtable.admin.v2.SnapshotTableRequest - 16, // 36: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:input_type -> google.bigtable.admin.v2.GetSnapshotRequest - 17, // 37: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:input_type -> google.bigtable.admin.v2.ListSnapshotsRequest - 19, // 38: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:input_type -> google.bigtable.admin.v2.DeleteSnapshotRequest - 22, // 39: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:input_type -> google.bigtable.admin.v2.CreateBackupRequest - 25, // 40: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:input_type -> google.bigtable.admin.v2.GetBackupRequest - 24, // 41: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:input_type -> google.bigtable.admin.v2.UpdateBackupRequest - 26, // 42: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:input_type -> google.bigtable.admin.v2.DeleteBackupRequest - 27, // 43: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:input_type -> google.bigtable.admin.v2.ListBackupsRequest - 0, // 44: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:input_type -> google.bigtable.admin.v2.RestoreTableRequest - 42, // 45: google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 43, // 46: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 44, // 47: google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 34, // 48: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:output_type -> google.bigtable.admin.v2.Table - 45, // 49: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:output_type -> google.longrunning.Operation - 7, // 50: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:output_type -> google.bigtable.admin.v2.ListTablesResponse - 34, // 51: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:output_type -> google.bigtable.admin.v2.Table - 46, // 52: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:output_type -> 
google.protobuf.Empty - 34, // 53: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:output_type -> google.bigtable.admin.v2.Table - 46, // 54: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:output_type -> google.protobuf.Empty - 12, // 55: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:output_type -> google.bigtable.admin.v2.GenerateConsistencyTokenResponse - 14, // 56: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:output_type -> google.bigtable.admin.v2.CheckConsistencyResponse - 45, // 57: google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:output_type -> google.longrunning.Operation - 37, // 58: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:output_type -> google.bigtable.admin.v2.Snapshot - 18, // 59: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:output_type -> google.bigtable.admin.v2.ListSnapshotsResponse - 46, // 60: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:output_type -> google.protobuf.Empty - 45, // 61: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:output_type -> google.longrunning.Operation - 39, // 62: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:output_type -> google.bigtable.admin.v2.Backup - 39, // 63: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:output_type -> google.bigtable.admin.v2.Backup - 46, // 64: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:output_type -> google.protobuf.Empty - 28, // 65: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:output_type -> google.bigtable.admin.v2.ListBackupsResponse - 45, // 66: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:output_type -> google.longrunning.Operation - 47, // 67: google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:output_type -> google.iam.v1.Policy - 47, // 68: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:output_type -> google.iam.v1.Policy - 48, // 69: google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 48, // [48:70] is the sub-list for method output_type - 26, // [26:48] is the sub-list for method input_type - 26, // [26:26] is the sub-list for extension type_name - 26, // [26:26] is the sub-list for extension extendee - 0, // [0:26] is the sub-list for field type_name + 35, // 0: google.bigtable.admin.v2.RestoreTableMetadata.source_type:type_name -> google.bigtable.admin.v2.RestoreSourceType + 36, // 1: google.bigtable.admin.v2.RestoreTableMetadata.backup_info:type_name -> google.bigtable.admin.v2.BackupInfo + 37, // 2: google.bigtable.admin.v2.RestoreTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress + 37, // 3: google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress + 38, // 4: google.bigtable.admin.v2.CreateTableRequest.table:type_name -> google.bigtable.admin.v2.Table + 33, // 5: google.bigtable.admin.v2.CreateTableRequest.initial_splits:type_name -> google.bigtable.admin.v2.CreateTableRequest.Split + 39, // 6: google.bigtable.admin.v2.ListTablesRequest.view:type_name -> google.bigtable.admin.v2.Table.View + 38, // 7: google.bigtable.admin.v2.ListTablesResponse.tables:type_name -> google.bigtable.admin.v2.Table + 39, // 8: google.bigtable.admin.v2.GetTableRequest.view:type_name -> google.bigtable.admin.v2.Table.View + 38, // 9: google.bigtable.admin.v2.UpdateTableRequest.table:type_name -> google.bigtable.admin.v2.Table + 40, // 10: 
google.bigtable.admin.v2.UpdateTableRequest.update_mask:type_name -> google.protobuf.FieldMask + 41, // 11: google.bigtable.admin.v2.UpdateTableMetadata.start_time:type_name -> google.protobuf.Timestamp + 41, // 12: google.bigtable.admin.v2.UpdateTableMetadata.end_time:type_name -> google.protobuf.Timestamp + 41, // 13: google.bigtable.admin.v2.UndeleteTableMetadata.start_time:type_name -> google.protobuf.Timestamp + 41, // 14: google.bigtable.admin.v2.UndeleteTableMetadata.end_time:type_name -> google.protobuf.Timestamp + 34, // 15: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications:type_name -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification + 42, // 16: google.bigtable.admin.v2.SnapshotTableRequest.ttl:type_name -> google.protobuf.Duration + 43, // 17: google.bigtable.admin.v2.ListSnapshotsResponse.snapshots:type_name -> google.bigtable.admin.v2.Snapshot + 19, // 18: google.bigtable.admin.v2.SnapshotTableMetadata.original_request:type_name -> google.bigtable.admin.v2.SnapshotTableRequest + 41, // 19: google.bigtable.admin.v2.SnapshotTableMetadata.request_time:type_name -> google.protobuf.Timestamp + 41, // 20: google.bigtable.admin.v2.SnapshotTableMetadata.finish_time:type_name -> google.protobuf.Timestamp + 4, // 21: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request:type_name -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest + 41, // 22: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time:type_name -> google.protobuf.Timestamp + 41, // 23: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time:type_name -> google.protobuf.Timestamp + 44, // 24: google.bigtable.admin.v2.CreateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup + 41, // 25: google.bigtable.admin.v2.CreateBackupMetadata.start_time:type_name -> google.protobuf.Timestamp + 41, // 26: google.bigtable.admin.v2.CreateBackupMetadata.end_time:type_name -> google.protobuf.Timestamp + 44, // 27: google.bigtable.admin.v2.UpdateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup + 40, // 28: google.bigtable.admin.v2.UpdateBackupRequest.update_mask:type_name -> google.protobuf.FieldMask + 44, // 29: google.bigtable.admin.v2.ListBackupsResponse.backups:type_name -> google.bigtable.admin.v2.Backup + 45, // 30: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create:type_name -> google.bigtable.admin.v2.ColumnFamily + 45, // 31: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update:type_name -> google.bigtable.admin.v2.ColumnFamily + 3, // 32: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:input_type -> google.bigtable.admin.v2.CreateTableRequest + 4, // 33: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:input_type -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest + 6, // 34: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:input_type -> google.bigtable.admin.v2.ListTablesRequest + 8, // 35: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:input_type -> google.bigtable.admin.v2.GetTableRequest + 9, // 36: google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable:input_type -> google.bigtable.admin.v2.UpdateTableRequest + 11, // 37: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:input_type -> google.bigtable.admin.v2.DeleteTableRequest + 12, // 38: google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable:input_type -> google.bigtable.admin.v2.UndeleteTableRequest + 14, // 39: 
google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:input_type -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest + 5, // 40: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:input_type -> google.bigtable.admin.v2.DropRowRangeRequest + 15, // 41: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:input_type -> google.bigtable.admin.v2.GenerateConsistencyTokenRequest + 17, // 42: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:input_type -> google.bigtable.admin.v2.CheckConsistencyRequest + 19, // 43: google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:input_type -> google.bigtable.admin.v2.SnapshotTableRequest + 20, // 44: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:input_type -> google.bigtable.admin.v2.GetSnapshotRequest + 21, // 45: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:input_type -> google.bigtable.admin.v2.ListSnapshotsRequest + 23, // 46: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:input_type -> google.bigtable.admin.v2.DeleteSnapshotRequest + 26, // 47: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:input_type -> google.bigtable.admin.v2.CreateBackupRequest + 29, // 48: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:input_type -> google.bigtable.admin.v2.GetBackupRequest + 28, // 49: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:input_type -> google.bigtable.admin.v2.UpdateBackupRequest + 30, // 50: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:input_type -> google.bigtable.admin.v2.DeleteBackupRequest + 31, // 51: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:input_type -> google.bigtable.admin.v2.ListBackupsRequest + 0, // 52: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:input_type -> google.bigtable.admin.v2.RestoreTableRequest + 46, // 53: google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 47, // 54: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 48, // 55: google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 38, // 56: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:output_type -> google.bigtable.admin.v2.Table + 49, // 57: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:output_type -> google.longrunning.Operation + 7, // 58: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:output_type -> google.bigtable.admin.v2.ListTablesResponse + 38, // 59: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:output_type -> google.bigtable.admin.v2.Table + 49, // 60: google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable:output_type -> google.longrunning.Operation + 50, // 61: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:output_type -> google.protobuf.Empty + 49, // 62: google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable:output_type -> google.longrunning.Operation + 38, // 63: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:output_type -> google.bigtable.admin.v2.Table + 50, // 64: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:output_type -> google.protobuf.Empty + 16, // 65: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:output_type -> google.bigtable.admin.v2.GenerateConsistencyTokenResponse + 18, // 66: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:output_type -> google.bigtable.admin.v2.CheckConsistencyResponse + 49, // 67: 
google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:output_type -> google.longrunning.Operation + 43, // 68: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:output_type -> google.bigtable.admin.v2.Snapshot + 22, // 69: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:output_type -> google.bigtable.admin.v2.ListSnapshotsResponse + 50, // 70: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:output_type -> google.protobuf.Empty + 49, // 71: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:output_type -> google.longrunning.Operation + 44, // 72: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:output_type -> google.bigtable.admin.v2.Backup + 44, // 73: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:output_type -> google.bigtable.admin.v2.Backup + 50, // 74: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:output_type -> google.protobuf.Empty + 32, // 75: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:output_type -> google.bigtable.admin.v2.ListBackupsResponse + 49, // 76: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:output_type -> google.longrunning.Operation + 51, // 77: google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:output_type -> google.iam.v1.Policy + 51, // 78: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:output_type -> google.iam.v1.Policy + 52, // 79: google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 56, // [56:80] is the sub-list for method output_type + 32, // [32:56] is the sub-list for method input_type + 32, // [32:32] is the sub-list for extension type_name + 32, // [32:32] is the sub-list for extension extendee + 0, // [0:32] is the sub-list for field type_name } func init() { file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() } @@ -3240,7 +3574,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteTableRequest); i { + switch v := v.(*UpdateTableRequest); i { case 0: return &v.state case 1: @@ -3252,7 +3586,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ModifyColumnFamiliesRequest); i { + switch v := v.(*UpdateTableMetadata); i { case 0: return &v.state case 1: @@ -3264,7 +3598,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GenerateConsistencyTokenRequest); i { + switch v := v.(*DeleteTableRequest); i { case 0: return &v.state case 1: @@ -3276,7 +3610,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GenerateConsistencyTokenResponse); i { + switch v := v.(*UndeleteTableRequest); i { case 0: return &v.state case 1: @@ -3288,7 +3622,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckConsistencyRequest); i { + switch v := v.(*UndeleteTableMetadata); i 
{ case 0: return &v.state case 1: @@ -3300,7 +3634,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckConsistencyResponse); i { + switch v := v.(*ModifyColumnFamiliesRequest); i { case 0: return &v.state case 1: @@ -3312,7 +3646,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SnapshotTableRequest); i { + switch v := v.(*GenerateConsistencyTokenRequest); i { case 0: return &v.state case 1: @@ -3324,7 +3658,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSnapshotRequest); i { + switch v := v.(*GenerateConsistencyTokenResponse); i { case 0: return &v.state case 1: @@ -3336,7 +3670,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListSnapshotsRequest); i { + switch v := v.(*CheckConsistencyRequest); i { case 0: return &v.state case 1: @@ -3348,7 +3682,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListSnapshotsResponse); i { + switch v := v.(*CheckConsistencyResponse); i { case 0: return &v.state case 1: @@ -3360,7 +3694,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteSnapshotRequest); i { + switch v := v.(*SnapshotTableRequest); i { case 0: return &v.state case 1: @@ -3372,7 +3706,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SnapshotTableMetadata); i { + switch v := v.(*GetSnapshotRequest); i { case 0: return &v.state case 1: @@ -3384,7 +3718,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTableFromSnapshotMetadata); i { + switch v := v.(*ListSnapshotsRequest); i { case 0: return &v.state case 1: @@ -3396,7 +3730,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateBackupRequest); i { + switch v := v.(*ListSnapshotsResponse); i { case 0: return &v.state case 1: @@ -3408,7 +3742,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateBackupMetadata); i { + switch v := v.(*DeleteSnapshotRequest); i { case 0: return &v.state case 1: @@ -3420,7 +3754,7 @@ func 
file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateBackupRequest); i { + switch v := v.(*SnapshotTableMetadata); i { case 0: return &v.state case 1: @@ -3432,7 +3766,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBackupRequest); i { + switch v := v.(*CreateTableFromSnapshotMetadata); i { case 0: return &v.state case 1: @@ -3444,7 +3778,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteBackupRequest); i { + switch v := v.(*CreateBackupRequest); i { case 0: return &v.state case 1: @@ -3456,7 +3790,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListBackupsRequest); i { + switch v := v.(*CreateBackupMetadata); i { case 0: return &v.state case 1: @@ -3468,7 +3802,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListBackupsResponse); i { + switch v := v.(*UpdateBackupRequest); i { case 0: return &v.state case 1: @@ -3480,7 +3814,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTableRequest_Split); i { + switch v := v.(*GetBackupRequest); i { case 0: return &v.state case 1: @@ -3492,6 +3826,54 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteBackupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListBackupsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListBackupsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateTableRequest_Split); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ModifyColumnFamiliesRequest_Modification); i { case 0: return &v.state @@ -3514,7 +3896,7 @@ func 
file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { (*DropRowRangeRequest_RowKeyPrefix)(nil), (*DropRowRangeRequest_DeleteAllDataFromTable)(nil), } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[34].OneofWrappers = []interface{}{ (*ModifyColumnFamiliesRequest_Modification_Create)(nil), (*ModifyColumnFamiliesRequest_Modification_Update)(nil), (*ModifyColumnFamiliesRequest_Modification_Drop)(nil), @@ -3525,7 +3907,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc, NumEnums: 0, - NumMessages: 31, + NumMessages: 35, NumExtensions: 0, NumServices: 1, }, @@ -3568,8 +3950,12 @@ type BigtableTableAdminClient interface { ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) // Gets metadata information about the specified table. GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*Table, error) + // Updates a specified table. + UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) // Permanently deletes a specified table and all of its data. DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Restores a specified table which was accidentally deleted. + UndeleteTable(ctx context.Context, in *UndeleteTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) // Performs a series of column family modifications on the specified table. // Either all or none of the modifications will occur before this method // returns, but data requests received prior to that point may see a table @@ -3703,6 +4089,15 @@ func (c *bigtableTableAdminClient) GetTable(ctx context.Context, in *GetTableReq return out, nil } +func (c *bigtableTableAdminClient) UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *bigtableTableAdminClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", in, out, opts...) @@ -3712,6 +4107,15 @@ func (c *bigtableTableAdminClient) DeleteTable(ctx context.Context, in *DeleteTa return out, nil } +func (c *bigtableTableAdminClient) UndeleteTable(ctx context.Context, in *UndeleteTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { + out := new(longrunning.Operation) + err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *bigtableTableAdminClient) ModifyColumnFamilies(ctx context.Context, in *ModifyColumnFamiliesRequest, opts ...grpc.CallOption) (*Table, error) { out := new(Table) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", in, out, opts...) 
@@ -3884,8 +4288,12 @@ type BigtableTableAdminServer interface { ListTables(context.Context, *ListTablesRequest) (*ListTablesResponse, error) // Gets metadata information about the specified table. GetTable(context.Context, *GetTableRequest) (*Table, error) + // Updates a specified table. + UpdateTable(context.Context, *UpdateTableRequest) (*longrunning.Operation, error) // Permanently deletes a specified table and all of its data. DeleteTable(context.Context, *DeleteTableRequest) (*emptypb.Empty, error) + // Restores a specified table which was accidentally deleted. + UndeleteTable(context.Context, *UndeleteTableRequest) (*longrunning.Operation, error) // Performs a series of column family modifications on the specified table. // Either all or none of the modifications will occur before this method // returns, but data requests received prior to that point may see a table @@ -3991,9 +4399,15 @@ func (*UnimplementedBigtableTableAdminServer) ListTables(context.Context, *ListT func (*UnimplementedBigtableTableAdminServer) GetTable(context.Context, *GetTableRequest) (*Table, error) { return nil, status.Errorf(codes.Unimplemented, "method GetTable not implemented") } +func (*UnimplementedBigtableTableAdminServer) UpdateTable(context.Context, *UpdateTableRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateTable not implemented") +} func (*UnimplementedBigtableTableAdminServer) DeleteTable(context.Context, *DeleteTableRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteTable not implemented") } +func (*UnimplementedBigtableTableAdminServer) UndeleteTable(context.Context, *UndeleteTableRequest) (*longrunning.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method UndeleteTable not implemented") +} func (*UnimplementedBigtableTableAdminServer) ModifyColumnFamilies(context.Context, *ModifyColumnFamiliesRequest) (*Table, error) { return nil, status.Errorf(codes.Unimplemented, "method ModifyColumnFamilies not implemented") } @@ -4122,6 +4536,24 @@ func _BigtableTableAdmin_GetTable_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } +func _BigtableTableAdmin_UpdateTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableAdminServer).UpdateTable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableAdminServer).UpdateTable(ctx, req.(*UpdateTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _BigtableTableAdmin_DeleteTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteTableRequest) if err := dec(in); err != nil { @@ -4140,6 +4572,24 @@ func _BigtableTableAdmin_DeleteTable_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _BigtableTableAdmin_UndeleteTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UndeleteTableRequest) + if err := dec(in); err != nil { + 
return nil, err + } + if interceptor == nil { + return srv.(BigtableTableAdminServer).UndeleteTable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableAdminServer).UndeleteTable(ctx, req.(*UndeleteTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _BigtableTableAdmin_ModifyColumnFamilies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ModifyColumnFamiliesRequest) if err := dec(in); err != nil { @@ -4466,10 +4916,18 @@ var _BigtableTableAdmin_serviceDesc = grpc.ServiceDesc{ MethodName: "GetTable", Handler: _BigtableTableAdmin_GetTable_Handler, }, + { + MethodName: "UpdateTable", + Handler: _BigtableTableAdmin_UpdateTable_Handler, + }, { MethodName: "DeleteTable", Handler: _BigtableTableAdmin_DeleteTable_Handler, }, + { + MethodName: "UndeleteTable", + Handler: _BigtableTableAdmin_UndeleteTable_Handler, + }, { MethodName: "ModifyColumnFamilies", Handler: _BigtableTableAdmin_ModifyColumnFamilies_Handler, diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go index b80d4379deb8..23212665937d 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.13.0 +// protoc v3.12.2 // source: google/bigtable/admin/v2/common.proto package admin @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" timestamppb "google.golang.org/protobuf/types/known/timestamppb" @@ -169,37 +168,36 @@ var file_google_bigtable_admin_v2_common_proto_rawDesc = []byte{ 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0xb0, 0x01, 0x0a, 0x11, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, - 0x73, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, - 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x2a, 0x3d, 0x0a, 0x0b, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x07, 0x0a, 0x03, 0x53, 0x53, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x48, 0x44, 0x44, - 0x10, 0x02, 0x42, 0xd3, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x32, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 
0x6e, - 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x6f, 0x22, 0xb0, 0x01, 0x0a, 0x11, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, + 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, + 0x64, 0x54, 0x69, 0x6d, 0x65, 0x2a, 0x3d, 0x0a, 0x0b, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x53, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x48, + 0x44, 0x44, 0x10, 0x02, 0x42, 0xd3, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, + 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, + 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go index c7d38fc17a80..b42ac1d9e98c 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go @@ -1,4 
+1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.13.0 +// protoc v3.12.2 // source: google/bigtable/admin/v2/instance.proto package admin @@ -103,13 +103,8 @@ const ( // An instance meant for production use. `serve_nodes` must be set // on the cluster. Instance_PRODUCTION Instance_Type = 1 - // The instance is meant for development and testing purposes only; it has - // no performance or uptime guarantees and is not covered by SLA. - // After a development instance is created, it can be upgraded by - // updating the instance to type `PRODUCTION`. An instance created - // as a production instance cannot be changed to a development instance. - // When creating a development instance, `serve_nodes` on the cluster must - // not be set. + // DEPRECATED: Prefer PRODUCTION for all use cases, as it no longer enforces + // a higher minimum node count than DEVELOPMENT. Instance_DEVELOPMENT Instance_Type = 2 ) @@ -259,6 +254,8 @@ type Instance struct { // For instances created before this field was added (August 2021), this value // is `seconds: 0, nanos: 1`. CreateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. Reserved for future use. + SatisfiesPzs *bool `protobuf:"varint,8,opt,name=satisfies_pzs,json=satisfiesPzs,proto3,oneof" json:"satisfies_pzs,omitempty"` } func (x *Instance) Reset() { @@ -335,6 +332,13 @@ func (x *Instance) GetCreateTime() *timestamppb.Timestamp { return nil } +func (x *Instance) GetSatisfiesPzs() bool { + if x != nil && x.SatisfiesPzs != nil { + return *x.SatisfiesPzs + } + return false +} + // The Autoscaling targets for a Cluster. These determine the recommended nodes. type AutoscalingTargets struct { state protoimpl.MessageState @@ -343,8 +347,16 @@ type AutoscalingTargets struct { // The cpu utilization that the Autoscaler should be trying to achieve. // This number is on a scale from 0 (no utilization) to - // 100 (total utilization). + // 100 (total utilization), and is limited between 10 and 80, otherwise it + // will return INVALID_ARGUMENT error. CpuUtilizationPercent int32 `protobuf:"varint,2,opt,name=cpu_utilization_percent,json=cpuUtilizationPercent,proto3" json:"cpu_utilization_percent,omitempty"` + // The storage utilization that the Autoscaler should be trying to achieve. + // This number is limited between 2560 (2.5TiB) and 5120 (5TiB) for a SSD + // cluster and between 8192 (8TiB) and 16384 (16TiB) for an HDD cluster; + // otherwise it will return INVALID_ARGUMENT error. If this value is set to 0, + // it will be treated as if it were set to the default value: 2560 for SSD, + // 8192 for HDD. + StorageUtilizationGibPerNode int32 `protobuf:"varint,3,opt,name=storage_utilization_gib_per_node,json=storageUtilizationGibPerNode,proto3" json:"storage_utilization_gib_per_node,omitempty"` } func (x *AutoscalingTargets) Reset() { @@ -386,6 +398,13 @@ func (x *AutoscalingTargets) GetCpuUtilizationPercent() int32 { return 0 } +func (x *AutoscalingTargets) GetStorageUtilizationGibPerNode() int32 { + if x != nil { + return x.StorageUtilizationGibPerNode + } + return 0 +} + // Limits for the number of nodes a Cluster can autoscale up/down to. 
type AutoscalingLimits struct { state protoimpl.MessageState @@ -455,13 +474,12 @@ type Cluster struct { // The unique name of the cluster. Values are of the form // `projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // (`CreationOnly`) - // The location where this cluster's nodes and storage reside. For best + // Immutable. The location where this cluster's nodes and storage reside. For best // performance, clients should be located as close as possible to this // cluster. Currently only zones are supported, so values should be of the // form `projects/{project}/locations/{zone}`. Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` - // The current state of the cluster. + // Output only. The current state of the cluster. State Cluster_State `protobuf:"varint,3,opt,name=state,proto3,enum=google.bigtable.admin.v2.Cluster_State" json:"state,omitempty"` // The number of nodes allocated to this cluster. More nodes enable higher // throughput and more consistent performance. @@ -469,8 +487,7 @@ type Cluster struct { // Types that are assignable to Config: // *Cluster_ClusterConfig_ Config isCluster_Config `protobuf_oneof:"config"` - // (`CreationOnly`) - // The type of storage used by this cluster to serve its + // Immutable. The type of storage used by this cluster to serve its // parent instance's tables, unless explicitly overridden. DefaultStorageType StorageType `protobuf:"varint,5,opt,name=default_storage_type,json=defaultStorageType,proto3,enum=google.bigtable.admin.v2.StorageType" json:"default_storage_type,omitempty"` // Immutable. The encryption configuration for CMEK-protected clusters. @@ -583,7 +600,6 @@ type AppProfile struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // (`OutputOnly`) // The unique name of the app profile. Values are of the form // `projects/{project}/instances/{instance}/appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -596,7 +612,7 @@ type AppProfile struct { // [RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more // details. Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"` - // Optional long form description of the use case for this AppProfile. + // Long form description of the use case for this AppProfile. Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The routing policy for all read/write requests that use this app profile. // A value must be explicitly set. @@ -699,6 +715,118 @@ func (*AppProfile_MultiClusterRoutingUseAny_) isAppProfile_RoutingPolicy() {} func (*AppProfile_SingleClusterRouting_) isAppProfile_RoutingPolicy() {} +// A tablet is a defined by a start and end key and is explained in +// https://cloud.google.com/bigtable/docs/overview#architecture and +// https://cloud.google.com/bigtable/docs/performance#optimization. +// A Hot tablet is a tablet that exhibits high average cpu usage during the time +// interval from start time to end time. +type HotTablet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The unique name of the hot tablet. Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/[a-zA-Z0-9_-]*`. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Name of the table that contains the tablet. Values are of the form + // `projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. + TableName string `protobuf:"bytes,2,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` + // Output only. The start time of the hot tablet. + StartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // Output only. The end time of the hot tablet. + EndTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // Tablet Start Key (inclusive). + StartKey string `protobuf:"bytes,5,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + // Tablet End Key (inclusive). + EndKey string `protobuf:"bytes,6,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + // Output only. The average CPU usage spent by a node on this tablet over the start_time to + // end_time time range. The percentage is the amount of CPU used by the node + // to serve the tablet, from 0% (tablet was not interacted with) to 100% (the + // node spent all cycles serving the hot tablet). + NodeCpuUsagePercent float32 `protobuf:"fixed32,7,opt,name=node_cpu_usage_percent,json=nodeCpuUsagePercent,proto3" json:"node_cpu_usage_percent,omitempty"` +} + +func (x *HotTablet) Reset() { + *x = HotTablet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HotTablet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HotTablet) ProtoMessage() {} + +func (x *HotTablet) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HotTablet.ProtoReflect.Descriptor instead. +func (*HotTablet) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_instance_proto_rawDescGZIP(), []int{5} +} + +func (x *HotTablet) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *HotTablet) GetTableName() string { + if x != nil { + return x.TableName + } + return "" +} + +func (x *HotTablet) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +func (x *HotTablet) GetEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EndTime + } + return nil +} + +func (x *HotTablet) GetStartKey() string { + if x != nil { + return x.StartKey + } + return "" +} + +func (x *HotTablet) GetEndKey() string { + if x != nil { + return x.EndKey + } + return "" +} + +func (x *HotTablet) GetNodeCpuUsagePercent() float32 { + if x != nil { + return x.NodeCpuUsagePercent + } + return 0 +} + // Autoscaling config for a cluster. 
type Cluster_ClusterAutoscalingConfig struct { state protoimpl.MessageState @@ -714,7 +842,7 @@ type Cluster_ClusterAutoscalingConfig struct { func (x *Cluster_ClusterAutoscalingConfig) Reset() { *x = Cluster_ClusterAutoscalingConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[6] + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -727,7 +855,7 @@ func (x *Cluster_ClusterAutoscalingConfig) String() string { func (*Cluster_ClusterAutoscalingConfig) ProtoMessage() {} func (x *Cluster_ClusterAutoscalingConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[6] + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -770,7 +898,7 @@ type Cluster_ClusterConfig struct { func (x *Cluster_ClusterConfig) Reset() { *x = Cluster_ClusterConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[7] + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -783,7 +911,7 @@ func (x *Cluster_ClusterConfig) String() string { func (*Cluster_ClusterConfig) ProtoMessage() {} func (x *Cluster_ClusterConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[7] + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -820,14 +948,16 @@ type Cluster_EncryptionConfig struct { // `cloudkms.cryptoKeyEncrypterDecrypter` role on the CMEK key. // 2) Only regional keys can be used and the region of the CMEK key must // match the region of the cluster. - // 3) All clusters within an instance must use the same CMEK key. + // 3) All clusters within an instance must use the same CMEK key. 
+ // Values are of the form + // `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}` KmsKeyName string `protobuf:"bytes,1,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"` } func (x *Cluster_EncryptionConfig) Reset() { *x = Cluster_EncryptionConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[8] + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -840,7 +970,7 @@ func (x *Cluster_EncryptionConfig) String() string { func (*Cluster_EncryptionConfig) ProtoMessage() {} func (x *Cluster_EncryptionConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[8] + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -881,7 +1011,7 @@ type AppProfile_MultiClusterRoutingUseAny struct { func (x *AppProfile_MultiClusterRoutingUseAny) Reset() { *x = AppProfile_MultiClusterRoutingUseAny{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[9] + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -894,7 +1024,7 @@ func (x *AppProfile_MultiClusterRoutingUseAny) String() string { func (*AppProfile_MultiClusterRoutingUseAny) ProtoMessage() {} func (x *AppProfile_MultiClusterRoutingUseAny) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[9] + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -936,7 +1066,7 @@ type AppProfile_SingleClusterRouting struct { func (x *AppProfile_SingleClusterRouting) Reset() { *x = AppProfile_SingleClusterRouting{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[10] + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -949,7 +1079,7 @@ func (x *AppProfile_SingleClusterRouting) String() string { func (*AppProfile_SingleClusterRouting) ProtoMessage() {} func (x *AppProfile_SingleClusterRouting) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[10] + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -994,192 +1124,228 @@ var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{ 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd8, 0x04, 0x0a, 0x08, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x64, - 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x46, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x35, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, - 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x01, 0x12, 0x0c, - 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x22, 0x3d, 0x0a, 0x04, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x50, 0x52, - 0x4f, 0x44, 0x55, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, - 0x56, 0x45, 0x4c, 0x4f, 0x50, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x3a, 0x53, 0xea, 0x41, 0x50, - 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, - 0x22, 
0x4c, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x74, - 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x22, 0x6d, - 0x0a, 0x11, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, - 0x12, 0x2b, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x5f, 0x6e, 0x6f, - 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, - 0x6d, 0x61, 0x78, 0x53, 0x65, 0x72, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0xf4, 0x08, - 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x0e, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x05, 0x0a, 0x08, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, + 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x3e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x3b, 0x0a, 0x04, 0x74, 
0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x46, 0x0a, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, + 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, + 0x50, 0x7a, 0x73, 0x88, 0x01, 0x01, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x35, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, + 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, + 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x22, 0x3d, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x56, 0x45, 0x4c, 0x4f, + 0x50, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x3a, 0x53, 0xea, 0x41, 0x50, 0x0a, 0x25, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, + 0x5f, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x22, 0x94, + 0x01, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x74, 0x69, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x63, 
0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x46, 0x0a, + 0x20, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x67, 0x69, 0x62, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x64, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x1c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x69, 0x62, 0x50, 0x65, + 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x22, 0x6d, 0x0a, 0x11, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, + 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6d, 0x69, + 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x72, 0x76, 0x65, 0x4e, + 0x6f, 0x64, 0x65, 0x73, 0x22, 0xf7, 0x08, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x23, 0x0a, 0x21, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x14, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, + 0x03, 0xe0, 0x41, 0x05, 0x52, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 
0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x64, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x12, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x64, - 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x10, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xdf, + 0x01, 0x0a, 0x18, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, + 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5f, 0x0a, 0x12, 0x61, + 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x61, 0x75, 0x74, 0x6f, 0x73, + 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, + 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x45, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, - 0x41, 0x05, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xdf, 0x01, 0x0a, 0x18, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x5f, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 
0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, - 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x11, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, - 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x12, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, 0x89, 0x01, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x78, 0x0a, 0x1a, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x18, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x61, 0x75, + 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, + 0x1a, 0x89, 0x01, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x78, 0x0a, 0x1a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, 0x75, + 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x1a, 0x5c, 0x0a, 0x10, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, - 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, - 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, - 0x22, 0x51, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, - 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, - 0x41, 0x54, 0x49, 0x4e, 0x47, 
0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x53, 0x49, 0x5a, - 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, - 0x44, 0x10, 0x04, 0x3a, 0x65, 0xea, 0x41, 0x62, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3a, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x22, 0x84, 0x05, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x82, 0x01, - 0x0a, 0x1d, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x75, 0x73, 0x65, 0x5f, 0x61, 0x6e, 0x79, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x4d, 0x75, 0x6c, 0x74, - 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, - 0x73, 0x65, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x19, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, - 0x6e, 0x79, 0x12, 0x71, 0x0a, 0x16, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, - 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, - 0x14, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x1a, 0x3c, 0x0a, 0x19, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, - 0x6e, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x49, 0x64, 0x73, 0x1a, 0x73, 0x0a, 0x14, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x73, 0x3a, 0x6f, 0xea, 0x41, 0x6c, 0x0a, 0x27, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x41, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, - 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x70, 0x70, - 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0xd0, 0x02, 0x0a, 0x1c, - 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x49, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, - 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, - 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, - 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, - 0x3a, 0x56, 0x32, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, - 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, - 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x67, 0x52, 0x18, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x75, 0x74, 0x6f, 0x73, + 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x5c, 0x0a, 0x10, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x48, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a, + 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x51, 0x0a, 0x05, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, + 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, + 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, + 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x53, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x3a, 0x65, 0xea, + 0x41, 0x62, 0x0a, 0x24, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x7d, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x84, + 0x05, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x82, 0x01, 0x0a, 0x1d, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x5f, 0x75, 0x73, 0x65, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x48, + 0x00, 0x52, 0x19, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x71, 0x0a, 0x16, + 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 
0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x14, 0x73, 0x69, 0x6e, 0x67, 0x6c, + 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x1a, + 0x3c, 0x0a, 0x19, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x1f, 0x0a, 0x0b, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x1a, 0x73, 0x0a, + 0x14, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x73, 0x3a, 0x6f, 0xea, 0x41, 0x6c, 0x0a, 0x27, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x12, 0x41, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xd4, 0x03, 0x0a, 0x09, 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xfa, 0x41, 0x24, + 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 
0xe0, + 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, + 0x79, 0x12, 0x38, 0x0a, 0x16, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, + 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x02, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x70, 0x75, 0x55, + 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x3a, 0x7f, 0xea, 0x41, 0x7c, + 0x0a, 0x26, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x48, + 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x52, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x2f, + 0x7b, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x7d, 0x42, 0xd0, 0x02, 0x0a, + 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x49, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, + 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, + 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, + 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, + 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, + 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x73, 0x2f, 
0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1195,7 +1361,7 @@ func file_google_bigtable_admin_v2_instance_proto_rawDescGZIP() []byte { } var file_google_bigtable_admin_v2_instance_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_google_bigtable_admin_v2_instance_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_google_bigtable_admin_v2_instance_proto_msgTypes = make([]protoimpl.MessageInfo, 12) var file_google_bigtable_admin_v2_instance_proto_goTypes = []interface{}{ (Instance_State)(0), // 0: google.bigtable.admin.v2.Instance.State (Instance_Type)(0), // 1: google.bigtable.admin.v2.Instance.Type @@ -1205,34 +1371,37 @@ var file_google_bigtable_admin_v2_instance_proto_goTypes = []interface{}{ (*AutoscalingLimits)(nil), // 5: google.bigtable.admin.v2.AutoscalingLimits (*Cluster)(nil), // 6: google.bigtable.admin.v2.Cluster (*AppProfile)(nil), // 7: google.bigtable.admin.v2.AppProfile - nil, // 8: google.bigtable.admin.v2.Instance.LabelsEntry - (*Cluster_ClusterAutoscalingConfig)(nil), // 9: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig - (*Cluster_ClusterConfig)(nil), // 10: google.bigtable.admin.v2.Cluster.ClusterConfig - (*Cluster_EncryptionConfig)(nil), // 11: google.bigtable.admin.v2.Cluster.EncryptionConfig - (*AppProfile_MultiClusterRoutingUseAny)(nil), // 12: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny - (*AppProfile_SingleClusterRouting)(nil), // 13: google.bigtable.admin.v2.AppProfile.SingleClusterRouting - (*timestamppb.Timestamp)(nil), // 14: google.protobuf.Timestamp - (StorageType)(0), // 15: google.bigtable.admin.v2.StorageType + (*HotTablet)(nil), // 8: google.bigtable.admin.v2.HotTablet + nil, // 9: google.bigtable.admin.v2.Instance.LabelsEntry + (*Cluster_ClusterAutoscalingConfig)(nil), // 10: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig + (*Cluster_ClusterConfig)(nil), // 11: google.bigtable.admin.v2.Cluster.ClusterConfig + (*Cluster_EncryptionConfig)(nil), // 12: google.bigtable.admin.v2.Cluster.EncryptionConfig + (*AppProfile_MultiClusterRoutingUseAny)(nil), // 13: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny + (*AppProfile_SingleClusterRouting)(nil), // 14: google.bigtable.admin.v2.AppProfile.SingleClusterRouting + (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp + (StorageType)(0), // 16: google.bigtable.admin.v2.StorageType } var file_google_bigtable_admin_v2_instance_proto_depIdxs = []int32{ 0, // 0: google.bigtable.admin.v2.Instance.state:type_name -> google.bigtable.admin.v2.Instance.State 1, // 1: google.bigtable.admin.v2.Instance.type:type_name -> google.bigtable.admin.v2.Instance.Type - 8, // 2: google.bigtable.admin.v2.Instance.labels:type_name -> google.bigtable.admin.v2.Instance.LabelsEntry - 14, // 3: google.bigtable.admin.v2.Instance.create_time:type_name -> google.protobuf.Timestamp + 9, // 2: google.bigtable.admin.v2.Instance.labels:type_name -> google.bigtable.admin.v2.Instance.LabelsEntry + 15, // 3: google.bigtable.admin.v2.Instance.create_time:type_name -> google.protobuf.Timestamp 2, // 4: google.bigtable.admin.v2.Cluster.state:type_name -> google.bigtable.admin.v2.Cluster.State - 10, // 5: google.bigtable.admin.v2.Cluster.cluster_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterConfig - 15, // 6: google.bigtable.admin.v2.Cluster.default_storage_type:type_name -> google.bigtable.admin.v2.StorageType - 11, // 7: 
google.bigtable.admin.v2.Cluster.encryption_config:type_name -> google.bigtable.admin.v2.Cluster.EncryptionConfig - 12, // 8: google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any:type_name -> google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny - 13, // 9: google.bigtable.admin.v2.AppProfile.single_cluster_routing:type_name -> google.bigtable.admin.v2.AppProfile.SingleClusterRouting - 5, // 10: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_limits:type_name -> google.bigtable.admin.v2.AutoscalingLimits - 4, // 11: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_targets:type_name -> google.bigtable.admin.v2.AutoscalingTargets - 9, // 12: google.bigtable.admin.v2.Cluster.ClusterConfig.cluster_autoscaling_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig - 13, // [13:13] is the sub-list for method output_type - 13, // [13:13] is the sub-list for method input_type - 13, // [13:13] is the sub-list for extension type_name - 13, // [13:13] is the sub-list for extension extendee - 0, // [0:13] is the sub-list for field type_name + 11, // 5: google.bigtable.admin.v2.Cluster.cluster_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterConfig + 16, // 6: google.bigtable.admin.v2.Cluster.default_storage_type:type_name -> google.bigtable.admin.v2.StorageType + 12, // 7: google.bigtable.admin.v2.Cluster.encryption_config:type_name -> google.bigtable.admin.v2.Cluster.EncryptionConfig + 13, // 8: google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any:type_name -> google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny + 14, // 9: google.bigtable.admin.v2.AppProfile.single_cluster_routing:type_name -> google.bigtable.admin.v2.AppProfile.SingleClusterRouting + 15, // 10: google.bigtable.admin.v2.HotTablet.start_time:type_name -> google.protobuf.Timestamp + 15, // 11: google.bigtable.admin.v2.HotTablet.end_time:type_name -> google.protobuf.Timestamp + 5, // 12: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_limits:type_name -> google.bigtable.admin.v2.AutoscalingLimits + 4, // 13: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_targets:type_name -> google.bigtable.admin.v2.AutoscalingTargets + 10, // 14: google.bigtable.admin.v2.Cluster.ClusterConfig.cluster_autoscaling_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_google_bigtable_admin_v2_instance_proto_init() } @@ -1302,8 +1471,8 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } - file_google_bigtable_admin_v2_instance_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Cluster_ClusterAutoscalingConfig); i { + file_google_bigtable_admin_v2_instance_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HotTablet); i { case 0: return &v.state case 1: @@ -1315,7 +1484,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { } } file_google_bigtable_admin_v2_instance_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Cluster_ClusterConfig); i { + switch v := v.(*Cluster_ClusterAutoscalingConfig); i { case 0: return &v.state case 1: @@ 
-1327,7 +1496,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { } } file_google_bigtable_admin_v2_instance_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Cluster_EncryptionConfig); i { + switch v := v.(*Cluster_ClusterConfig); i { case 0: return &v.state case 1: @@ -1339,7 +1508,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { } } file_google_bigtable_admin_v2_instance_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppProfile_MultiClusterRoutingUseAny); i { + switch v := v.(*Cluster_EncryptionConfig); i { case 0: return &v.state case 1: @@ -1351,6 +1520,18 @@ func file_google_bigtable_admin_v2_instance_proto_init() { } } file_google_bigtable_admin_v2_instance_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppProfile_MultiClusterRoutingUseAny); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_instance_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppProfile_SingleClusterRouting); i { case 0: return &v.state @@ -1363,6 +1544,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { } } } + file_google_bigtable_admin_v2_instance_proto_msgTypes[0].OneofWrappers = []interface{}{} file_google_bigtable_admin_v2_instance_proto_msgTypes[3].OneofWrappers = []interface{}{ (*Cluster_ClusterConfig_)(nil), } @@ -1376,7 +1558,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_admin_v2_instance_proto_rawDesc, NumEnums: 3, - NumMessages: 11, + NumMessages: 12, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go index 15e6612331cf..edd18c4f43d8 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.13.0 +// protoc v3.12.2 // source: google/bigtable/admin/v2/table.proto package admin @@ -152,7 +152,7 @@ const ( // Only populates `name` and fields related to the table's replication // state. Table_REPLICATION_VIEW Table_View = 3 - // Only populates 'name' and fields related to the table's encryption state. + // Only populates `name` and fields related to the table's encryption state. Table_ENCRYPTION_VIEW Table_View = 5 // Populates all fields. Table_FULL Table_View = 4 @@ -459,6 +459,7 @@ type RestoreInfo struct { // Information about the source used to restore the table. // // Types that are assignable to SourceInfo: + // // *RestoreInfo_BackupInfo SourceInfo isRestoreInfo_SourceInfo `protobuf_oneof:"source_info"` } @@ -545,19 +546,25 @@ type Table struct { // there will be an entry for the cluster with UNKNOWN `replication_status`. 
// Views: `REPLICATION_VIEW`, `ENCRYPTION_VIEW`, `FULL` ClusterStates map[string]*Table_ClusterState `protobuf:"bytes,2,rep,name=cluster_states,json=clusterStates,proto3" json:"cluster_states,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // (`CreationOnly`) // The column families configured for this table, mapped by column family ID. // Views: `SCHEMA_VIEW`, `FULL` ColumnFamilies map[string]*ColumnFamily `protobuf:"bytes,3,rep,name=column_families,json=columnFamilies,proto3" json:"column_families,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // (`CreationOnly`) - // The granularity (i.e. `MILLIS`) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. + // Immutable. The granularity (i.e. `MILLIS`) at which timestamps are stored in this + // table. Timestamps not matching the granularity will be rejected. // If unspecified at creation time, the value will be set to `MILLIS`. // Views: `SCHEMA_VIEW`, `FULL`. Granularity Table_TimestampGranularity `protobuf:"varint,4,opt,name=granularity,proto3,enum=google.bigtable.admin.v2.Table_TimestampGranularity" json:"granularity,omitempty"` - // Output only. If this table was restored from another data source (e.g. a - // backup), this field will be populated with information about the restore. + // Output only. If this table was restored from another data source (e.g. a backup), this + // field will be populated with information about the restore. RestoreInfo *RestoreInfo `protobuf:"bytes,6,opt,name=restore_info,json=restoreInfo,proto3" json:"restore_info,omitempty"` + // Set to true to make the table protected against data loss. i.e. deleting + // the following resources through Admin APIs are prohibited: + // - The table. + // - The column families in the table. + // - The instance containing the table. + // + // Note one can still delete the data stored in the table through Data APIs. + DeletionProtection bool `protobuf:"varint,9,opt,name=deletion_protection,json=deletionProtection,proto3" json:"deletion_protection,omitempty"` } func (x *Table) Reset() { @@ -627,6 +634,13 @@ func (x *Table) GetRestoreInfo() *RestoreInfo { return nil } +func (x *Table) GetDeletionProtection() bool { + if x != nil { + return x.DeletionProtection + } + return false +} + // A set of columns within a table which share a common configuration. type ColumnFamily struct { state protoimpl.MessageState @@ -690,6 +704,7 @@ type GcRule struct { // Garbage collection rules. // // Types that are assignable to Rule: + // // *GcRule_MaxNumVersions // *GcRule_MaxAge // *GcRule_Intersection_ @@ -809,12 +824,11 @@ type EncryptionInfo struct { // Output only. The type of encryption used to protect this resource. EncryptionType EncryptionInfo_EncryptionType `protobuf:"varint,3,opt,name=encryption_type,json=encryptionType,proto3,enum=google.bigtable.admin.v2.EncryptionInfo_EncryptionType" json:"encryption_type,omitempty"` - // Output only. The status of encrypt/decrypt calls on underlying data for - // this resource. Regardless of status, the existing data is always encrypted - // at rest. + // Output only. The status of encrypt/decrypt calls on underlying data for this resource. + // Regardless of status, the existing data is always encrypted at rest. EncryptionStatus *status.Status `protobuf:"bytes,4,opt,name=encryption_status,json=encryptionStatus,proto3" json:"encryption_status,omitempty"` - // Output only. 
The version of the Cloud KMS key specified in the parent - // cluster that is in use for the data underlying this table. + // Output only. The version of the Cloud KMS key specified in the parent cluster that is + // in use for the data underlying this table. KmsKeyVersion string `protobuf:"bytes,2,opt,name=kms_key_version,json=kmsKeyVersion,proto3" json:"kms_key_version,omitempty"` } @@ -993,10 +1007,12 @@ type Backup struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Output only. A globally unique identifier for the backup which cannot be + // A globally unique identifier for the backup which cannot be // changed. Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}/ - // backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*` + // + // backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*` + // // The final segment of the name must be between 1 and 50 characters // in length. // @@ -1004,8 +1020,8 @@ type Backup struct { // name of the form // `projects/{project}/instances/{instance}/clusters/{cluster}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. Immutable. Name of the table from which this backup was created. - // This needs to be in the same instance as the backup. Values are of the form + // Required. Immutable. Name of the table from which this backup was created. This needs + // to be in the same instance as the backup. Values are of the form // `projects/{project}/instances/{instance}/tables/{source_table}`. SourceTable string `protobuf:"bytes,2,opt,name=source_table,json=sourceTable,proto3" json:"source_table,omitempty"` // Required. The expiration time of the backup, with microseconds @@ -1016,9 +1032,8 @@ type Backup struct { ExpireTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` // Output only. `start_time` is the time that the backup was started // (i.e. approximately the time the - // [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup] - // request is received). The row data in this backup will be no older than - // this timestamp. + // [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup] request is received). The + // row data in this backup will be no older than this timestamp. StartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` // Output only. `end_time` is the time that the backup was finished. The row // data in the backup will be no newer than this timestamp. 
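The hunks above add a `deletion_protection` field (tag 9) to the admin `Table` message together with a `GetDeletionProtection` accessor. A minimal sketch, not part of this patch, of how code depending on the vendored google.golang.org/genproto/googleapis/bigtable/admin/v2 package might set and read the new field; the table name used here is a made-up placeholder:

package main

import (
	"fmt"

	adminpb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
)

func main() {
	// deletion_protection defaults to false; when set, the table is marked as
	// protected against deletion through the Admin APIs (per the field comment
	// added in this diff).
	tbl := &adminpb.Table{
		Name:               "projects/my-project/instances/my-instance/tables/my-table", // hypothetical resource name
		DeletionProtection: true,
	}
	fmt.Println(tbl.GetName(), "deletion protection:", tbl.GetDeletionProtection())
}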
@@ -1384,175 +1399,179 @@ var file_google_bigtable_admin_v2_table_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x0d, 0x0a, - 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0xd6, 0x09, 0x0a, + 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x9b, 0x0a, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x59, 0x0a, 0x0e, 0x63, 0x6c, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, - 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0e, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, - 0x69, 0x65, 0x73, 0x12, 0x56, 0x0a, 0x0b, 0x67, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, - 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0b, - 0x67, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x0c, 0x72, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0xe3, 0x02, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x6a, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0d, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x0f, 0x63, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 
0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, + 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, + 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x5b, 0x0a, 0x0b, 0x67, 0x72, 0x61, 0x6e, + 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, + 0x69, 0x74, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0b, 0x67, 0x72, 0x61, 0x6e, 0x75, 0x6c, + 0x61, 0x72, 0x69, 0x74, 0x79, 0x12, 0x4d, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xe8, 0x02, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x6f, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x56, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x56, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, + 0x8e, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, + 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 
0x4e, 0x49, + 0x54, 0x49, 0x41, 0x4c, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x50, + 0x4c, 0x41, 0x4e, 0x4e, 0x45, 0x44, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, + 0x43, 0x45, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x55, 0x4e, 0x50, 0x4c, 0x41, 0x4e, 0x4e, 0x45, + 0x44, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x12, + 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, + 0x41, 0x44, 0x59, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4d, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x05, + 0x1a, 0x6e, 0x0a, 0x12, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x42, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x69, 0x0a, 0x13, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x14, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x25, 0x0a, 0x21, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, + 0x5f, 0x47, 0x52, 0x41, 0x4e, 0x55, 0x4c, 0x41, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x49, + 0x4c, 0x4c, 0x49, 0x53, 0x10, 0x01, 0x22, 0x71, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, 0x12, 0x14, + 0x0a, 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x56, 0x49, + 0x45, 0x57, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x4e, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x05, 0x12, + 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x3a, 0x5f, 0xea, 0x41, 0x5c, 0x0a, 0x22, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x12, 0x36, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x7b, 
0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x7d, 0x22, 0x49, 0x0a, 0x0c, 0x43, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x39, 0x0a, 0x07, 0x67, 0x63, + 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x8e, 0x01, 0x0a, 0x10, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, 0x4f, - 0x57, 0x4e, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, 0x49, - 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x4c, 0x41, 0x4e, 0x4e, 0x45, - 0x44, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x02, 0x12, - 0x19, 0x0a, 0x15, 0x55, 0x4e, 0x50, 0x4c, 0x41, 0x4e, 0x4e, 0x45, 0x44, 0x5f, 0x4d, 0x41, 0x49, - 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, - 0x41, 0x44, 0x59, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x4f, - 0x50, 0x54, 0x49, 0x4d, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x1a, 0x6e, 0x0a, 0x12, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x42, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x69, 0x0a, 0x13, 0x43, - 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x14, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x12, 0x25, - 0x0a, 0x21, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x5f, 0x47, 0x52, 0x41, 0x4e, - 0x55, 0x4c, 0x41, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x49, 0x4c, 0x4c, 0x49, 0x53, 0x10, - 0x01, 0x22, 0x71, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, 0x12, 0x14, 0x0a, 0x10, 0x56, 0x49, 0x45, - 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 
0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x0f, - 0x0a, 0x0b, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x02, 0x12, - 0x14, 0x0a, 0x10, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x56, - 0x49, 0x45, 0x57, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x05, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, - 0x4c, 0x4c, 0x10, 0x04, 0x3a, 0x5f, 0xea, 0x41, 0x5c, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x36, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x7d, 0x22, 0x49, 0x0a, 0x0c, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, - 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x39, 0x0a, 0x07, 0x67, 0x63, 0x5f, 0x72, 0x75, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x06, 0x67, + 0x63, 0x52, 0x75, 0x6c, 0x65, 0x22, 0x90, 0x03, 0x0a, 0x06, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, + 0x12, 0x2a, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0e, 0x6d, 0x61, + 0x78, 0x4e, 0x75, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34, 0x0a, 0x07, + 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x41, + 0x67, 0x65, 0x12, 0x53, 0x0a, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x06, 0x67, 0x63, 0x52, 0x75, 0x6c, 0x65, - 0x22, 0x90, 0x03, 0x0a, 0x06, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x2a, 0x0a, 0x10, 0x6d, - 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x61, - 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x53, 0x0a, - 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x63, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, - 0x75, 0x6c, 0x65, 0x2e, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x05, 0x75, 0x6e, 0x69, - 0x6f, 0x6e, 0x1a, 0x46, 0x0a, 0x0c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, - 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x1a, 0x3f, 0x0a, 0x05, 0x55, 0x6e, - 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x63, - 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x42, 0x06, 0x0a, 0x04, 0x72, - 0x75, 0x6c, 0x65, 0x22, 0x8a, 0x03, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x65, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x44, 0x0a, - 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x58, 0x0a, 0x0f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, - 0x03, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x43, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, - 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x71, 0x0a, - 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, - 0x4c, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, - 0x1f, 0x0a, 0x1b, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4e, 0x41, - 0x47, 0x45, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, - 0x22, 0x9a, 0x04, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, - 0x64, 0x61, 0x74, 0x61, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3b, 0x0a, - 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x64, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x48, 0x00, + 0x52, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x1a, 0x46, 0x0a, 0x0c, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x05, 0x53, 0x74, 0x61, - 0x74, 0x65, 
0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, - 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x02, - 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x4f, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x7d, 0x22, 0xf9, 0x04, - 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x1a, + 0x3f, 0x0a, 0x05, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, + 0x42, 0x06, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x22, 0x8a, 0x03, 0x0a, 0x0e, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x65, 0x0a, 0x0f, 0x65, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x44, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x58, 0x0a, 0x0f, 0x6b, 0x6d, 0x73, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x30, 0xe0, 0x41, 0x03, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x71, 0x0a, 0x0e, 
0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, + 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, + 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, + 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, + 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x22, 0x9a, 0x04, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x0b, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, + 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, + 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, + 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, + 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x12, 0x4f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 
0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x7d, 0x22, 0xf4, 0x04, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x05, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go index b4b10b9c82ee..e0664f87033e 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go @@ -42,6 +42,63 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// The desired view into RequestStats that should be returned in the response. +// +// See also: RequestStats message. +type ReadRowsRequest_RequestStatsView int32 + +const ( + // The default / unset value. The API will default to the NONE option below. + ReadRowsRequest_REQUEST_STATS_VIEW_UNSPECIFIED ReadRowsRequest_RequestStatsView = 0 + // Do not include any RequestStats in the response. This will leave the + // RequestStats embedded message unset in the response. + ReadRowsRequest_REQUEST_STATS_NONE ReadRowsRequest_RequestStatsView = 1 + // Include the full set of available RequestStats in the response, + // applicable to this read. + ReadRowsRequest_REQUEST_STATS_FULL ReadRowsRequest_RequestStatsView = 2 +) + +// Enum value maps for ReadRowsRequest_RequestStatsView. +var ( + ReadRowsRequest_RequestStatsView_name = map[int32]string{ + 0: "REQUEST_STATS_VIEW_UNSPECIFIED", + 1: "REQUEST_STATS_NONE", + 2: "REQUEST_STATS_FULL", + } + ReadRowsRequest_RequestStatsView_value = map[string]int32{ + "REQUEST_STATS_VIEW_UNSPECIFIED": 0, + "REQUEST_STATS_NONE": 1, + "REQUEST_STATS_FULL": 2, + } +) + +func (x ReadRowsRequest_RequestStatsView) Enum() *ReadRowsRequest_RequestStatsView { + p := new(ReadRowsRequest_RequestStatsView) + *p = x + return p +} + +func (x ReadRowsRequest_RequestStatsView) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ReadRowsRequest_RequestStatsView) Descriptor() protoreflect.EnumDescriptor { + return file_google_bigtable_v2_bigtable_proto_enumTypes[0].Descriptor() +} + +func (ReadRowsRequest_RequestStatsView) Type() protoreflect.EnumType { + return &file_google_bigtable_v2_bigtable_proto_enumTypes[0] +} + +func (x ReadRowsRequest_RequestStatsView) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ReadRowsRequest_RequestStatsView.Descriptor instead. +func (ReadRowsRequest_RequestStatsView) EnumDescriptor() ([]byte, []int) { + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{0, 0} +} + // Request message for Bigtable.ReadRows. 
type ReadRowsRequest struct { state protoimpl.MessageState @@ -52,8 +109,8 @@ type ReadRowsRequest struct { // Values are of the form // `projects//instances//tables/
`. TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. + // This value specifies routing for replication. This API only accepts the + // empty value of app_profile_id. AppProfileId string `protobuf:"bytes,5,opt,name=app_profile_id,json=appProfileId,proto3" json:"app_profile_id,omitempty"` // The row keys and/or ranges to read sequentially. If not specified, reads // from all rows. @@ -64,6 +121,8 @@ type ReadRowsRequest struct { // The read will stop after committing to N rows' worth of results. The // default (zero) is to return all results. RowsLimit int64 `protobuf:"varint,4,opt,name=rows_limit,json=rowsLimit,proto3" json:"rows_limit,omitempty"` + // The view into RequestStats, as described above. + RequestStatsView ReadRowsRequest_RequestStatsView `protobuf:"varint,6,opt,name=request_stats_view,json=requestStatsView,proto3,enum=google.bigtable.v2.ReadRowsRequest_RequestStatsView" json:"request_stats_view,omitempty"` } func (x *ReadRowsRequest) Reset() { @@ -133,6 +192,13 @@ func (x *ReadRowsRequest) GetRowsLimit() int64 { return 0 } +func (x *ReadRowsRequest) GetRequestStatsView() ReadRowsRequest_RequestStatsView { + if x != nil { + return x.RequestStatsView + } + return ReadRowsRequest_REQUEST_STATS_VIEW_UNSPECIFIED +} + // Response message for Bigtable.ReadRows. type ReadRowsResponse struct { state protoimpl.MessageState @@ -149,6 +215,28 @@ type ReadRowsResponse struct { // lot of data that was filtered out since the last committed row // key, allowing the client to skip that work on a retry. LastScannedRowKey []byte `protobuf:"bytes,2,opt,name=last_scanned_row_key,json=lastScannedRowKey,proto3" json:"last_scanned_row_key,omitempty"` + // If requested, provide enhanced query performance statistics. The semantics + // dictate: + // - request_stats is empty on every (streamed) response, except + // - request_stats has non-empty information after all chunks have been + // streamed, where the ReadRowsResponse message only contains + // request_stats. + // - For example, if a read request would have returned an empty + // response instead a single ReadRowsResponse is streamed with empty + // chunks and request_stats filled. + // + // Visually, response messages will stream as follows: + // + // ... -> {chunks: [...]} -> {chunks: [], request_stats: {...}} + // \______________________/ \________________________________/ + // Primary response Trailer of RequestStats info + // + // Or if the read did not return any values: + // + // {chunks: [], request_stats: {...}} + // \________________________________/ + // Trailer of RequestStats info + RequestStats *RequestStats `protobuf:"bytes,3,opt,name=request_stats,json=requestStats,proto3" json:"request_stats,omitempty"` } func (x *ReadRowsResponse) Reset() { @@ -197,6 +285,13 @@ func (x *ReadRowsResponse) GetLastScannedRowKey() []byte { return nil } +func (x *ReadRowsResponse) GetRequestStats() *RequestStats { + if x != nil { + return x.RequestStats + } + return nil +} + // Request message for Bigtable.SampleRowKeys. type SampleRowKeysRequest struct { state protoimpl.MessageState @@ -1007,6 +1102,7 @@ type ReadRowsResponse_CellChunk struct { // Signals to the client concerning previous CellChunks received. 
// // Types that are assignable to RowStatus: + // // *ReadRowsResponse_CellChunk_ResetRow // *ReadRowsResponse_CellChunk_CommitRow RowStatus isReadRowsResponse_CellChunk_RowStatus `protobuf_oneof:"row_status"` @@ -1274,73 +1370,177 @@ var file_google_bigtable_v2_bigtable_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x88, 0x02, 0x0a, - 0x0f, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd4, 0x03, 0x0a, 0x0f, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, + 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x72, 0x6f, 0x77, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, + 0x53, 0x65, 0x74, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x35, 0x0a, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, + 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x6f, 0x77, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x62, 0x0a, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, + 0x5f, 0x76, 0x69, 0x65, 0x77, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x56, 0x69, 0x65, + 0x77, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x56, + 0x69, 0x65, 0x77, 0x22, 0x66, 0x0a, 0x10, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x56, 0x69, 0x65, 0x77, 0x12, 0x22, 0x0a, 0x1e, 0x52, 0x45, 0x51, 0x55, 0x45, + 0x53, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x53, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x52, + 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x53, 0x5f, 0x4e, 0x4f, 0x4e, + 0x45, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x53, 0x5f, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x02, 0x22, 0xb9, 0x04, 0x0a, 0x10, + 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x46, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, + 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x63, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x0d, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x1a, 0xe4, 0x02, 0x0a, 0x09, 0x43, 0x65, 0x6c, 0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x17, + 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x0b, 0x66, 0x61, 0x6d, 0x69, 0x6c, + 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x66, 0x61, 0x6d, 0x69, + 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x09, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, + 0x69, 0x63, 0x72, 0x6f, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 
0x63, 0x72, 0x6f, 0x73, 0x12, 0x16, 0x0a, 0x06, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x72, 0x65, 0x73, + 0x65, 0x74, 0x5f, 0x72, 0x6f, 0x77, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x65, 0x74, 0x52, 0x6f, 0x77, 0x12, 0x1f, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x5f, 0x72, 0x6f, 0x77, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, + 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x6f, 0x77, 0x42, 0x0c, 0x0a, 0x0a, 0x72, 0x6f, 0x77, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x53, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, - 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, + 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, - 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x53, 0x65, 0x74, 0x52, 0x04, 0x72, 0x6f, 0x77, - 0x73, 0x12, 0x35, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x6f, 0x77, 0x73, - 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x6f, - 0x77, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0xf2, 0x03, 0x0a, 0x10, 0x52, 0x65, 0x61, 0x64, - 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x06, - 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x63, 0x61, - 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 
0x64, 0x52, - 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x1a, 0xe4, 0x02, 0x0a, 0x09, 0x43, 0x65, 0x6c, 0x6c, 0x43, 0x68, - 0x75, 0x6e, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x0b, - 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x09, 0x71, - 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x71, 0x75, 0x61, - 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x69, 0x63, 0x72, 0x6f, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x69, 0x63, 0x72, 0x6f, - 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, - 0x0a, 0x09, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x72, 0x6f, 0x77, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x08, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x74, 0x52, 0x6f, 0x77, 0x12, 0x1f, 0x0a, - 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x72, 0x6f, 0x77, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x08, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x6f, 0x77, 0x42, 0x0c, - 0x0a, 0x0a, 0x72, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x87, 0x01, 0x0a, - 0x14, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x22, 0x53, 0x0a, 0x15, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x17, 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x73, - 0x65, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, - 0x6f, 0x66, 
0x66, 0x73, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe2, 0x01, 0x0a, 0x10, - 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x64, 0x22, 0x53, 0x0a, 0x15, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, + 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x72, 0x6f, + 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x6f, 0x77, + 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe2, 0x01, 0x0a, 0x10, 0x4d, 0x75, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x07, + 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x09, 0x6d, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x13, 0x0a, 0x11, 0x4d, + 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0xb3, 0x02, 0x0a, 0x11, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x4a, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x1a, 0x61, 0x0a, 
0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, + 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, + 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x6d, 0x75, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x12, 0x4d, 0x75, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, + 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, + 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x22, 0xff, 0x02, 0x0a, 0x18, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, + 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, + 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1c, + 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x48, 0x0a, 0x10, + 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0e, 0x74, 0x72, 0x75, 0x65, 0x5f, 0x6d, + 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x74, 0x72, + 0x75, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x12, 0x45, 0x0a, 0x0f, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x48, 0x0a, 0x19, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2b, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x22, 0x7d, 0x0a, 0x12, + 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, + 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x50, + 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0xee, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, + 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, @@ -1350,286 +1550,202 @@ var file_google_bigtable_v2_bigtable_proto_rawDesc = []byte{ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, - 0x3f, 0x0a, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0x13, 0x0a, 0x11, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb3, 0x02, 0x0a, 0x11, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x61, 
0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x4a, 0x0a, 0x07, - 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x61, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x17, 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x09, 0x6d, 0x75, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x12, - 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x05, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xff, 0x02, 0x0a, 0x18, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, - 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, - 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 
0xe0, 0x41, 0x02, 0x52, 0x06, 0x72, 0x6f, 0x77, 0x4b, 0x65, - 0x79, 0x12, 0x48, 0x0a, 0x10, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0f, 0x70, 0x72, 0x65, 0x64, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0e, 0x74, - 0x72, 0x75, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x0d, 0x74, 0x72, 0x75, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x45, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, - 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x4d, 0x75, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x48, 0x0a, 0x19, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x10, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x64, 0x22, 0x7d, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, - 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, - 0x22, 0x15, 0x0a, 0x13, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xee, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x61, 0x64, - 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 
0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x07, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x72, 0x6f, - 0x77, 0x4b, 0x65, 0x79, 0x12, 0x42, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, - 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x47, 0x0a, 0x1a, 0x52, 0x65, 0x61, 0x64, - 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x03, 0x72, 0x6f, 0x77, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x03, 0x72, 0x6f, - 0x77, 0x32, 0xb0, 0x14, 0x0a, 0x08, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x9b, - 0x02, 0x0a, 0x08, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x23, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc1, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x22, - 0x39, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, - 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, - 0x64, 0xda, 0x41, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, - 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, - 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x30, 0x01, 0x12, 0xac, 0x02, 0x0a, - 0x0d, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x28, + 0x42, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 
0x53, 0x61, - 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0xc3, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x12, 0x3e, 0x2f, 0x76, - 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, - 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, - 0xda, 0x41, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x19, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x30, 0x01, 0x12, 0xc1, 0x02, 0x0a, 0x09, - 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, - 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe6, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3f, 0x22, - 0x3a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, - 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x69, 0x64, 0xda, 0x41, 0x1c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, - 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0xda, 0x41, 0x2b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, - 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, - 0xb3, 0x02, 0x0a, 0x0a, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x25, + 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, + 0x69, 0x74, 
0x65, 0x52, 0x75, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x72, 0x75, + 0x6c, 0x65, 0x73, 0x22, 0x47, 0x0a, 0x1a, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, + 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x29, 0x0a, 0x03, 0x72, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd3, 0x01, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x6f, 0x77, 0x73, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, - 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x12, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0xda, - 0x41, 0x21, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x65, 0x6e, 0x74, - 0x72, 0x69, 0x65, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x5f, 0x69, 0x64, 0x30, 0x01, 0x12, 0xad, 0x03, 0x0a, 0x11, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, - 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x2c, 0x2e, 0x67, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x03, 0x72, 0x6f, 0x77, 0x32, 0xb0, 0x14, 0x0a, + 0x08, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x9b, 0x02, 0x0a, 0x08, 0x52, 0x65, + 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xba, 0x02, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x47, 0x22, 0x42, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 
0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x2f, 0x2a, 0x7d, 0x3a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, + 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0xc1, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x22, 0x39, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, + 0x64, 0x52, 0x6f, 0x77, 0x73, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, - 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x42, 0x74, + 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x0a, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x19, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x30, 0x01, 0x12, 0xac, 0x02, 0x0a, 0x0d, 0x53, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, + 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc3, + 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x12, 0x3e, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x52, 0x6f, 0x77, 0x4b, 0x65, 0x79, 0x73, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, + 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, + 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x0a, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 
0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x69, 0x64, 0x30, 0x01, 0x12, 0xc1, 0x02, 0x0a, 0x09, 0x4d, 0x75, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x6f, 0x77, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0xe6, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3f, 0x22, 0x3a, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x75, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, + 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, + 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x1c, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, + 0x65, 0x79, 0x2c, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41, 0x2b, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, + 0x79, 0x2c, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, + 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0xb3, 0x02, 0x0a, 0x0a, 0x4d, + 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, + 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd3, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x40, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x3a, 0x01, + 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, + 
0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x2c, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0xda, 0x41, 0x21, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x2c, + 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x30, 0x01, + 0x12, 0xad, 0x03, 0x0a, 0x11, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, + 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0xba, 0x02, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x22, 0x42, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, + 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x42, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x70, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x74, 0x72, + 0x75, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41, 0x51, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x74, 0x72, 0x75, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0xda, 0x41, 0x51, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, - 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x74, 0x72, 0x75, 0x65, 0x5f, 0x6d, 0x75, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0xee, 0x01, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, - 0x64, 0x57, 0x61, 0x72, 0x6d, 
0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x41, - 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8d, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x22, - 0x26, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, + 0x12, 0xee, 0x01, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, + 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, + 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x69, + 0x6e, 0x67, 0x41, 0x6e, 0x64, 0x57, 0x61, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x8d, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x22, 0x26, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70, 0x69, + 0x6e, 0x67, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x39, 0x12, 0x25, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x70, 0x69, 0x6e, 0x67, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x39, - 0x12, 0x25, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0xda, 0x41, 0x13, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x12, 0xdd, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4d, - 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x2d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, - 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe7, 0x01, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x48, 0x22, 0x43, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 
0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, - 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x4e, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x2c, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, - 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, - 0x64, 0xda, 0x41, 0x18, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, - 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x72, 0x75, 0x6c, 0x65, 0x73, 0xda, 0x41, 0x27, 0x74, + 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0xda, 0x41, 0x13, 0x6e, 0x61, + 0x6d, 0x65, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, + 0x64, 0x12, 0xdd, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, + 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe7, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x48, + 0x22, 0x43, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x52, 0x6f, 0x77, 0x3a, 0x01, 0x2a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x12, 0x3a, + 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x7b, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x10, 0x0a, 0x0e, 0x61, 0x70, + 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0xda, 0x41, 0x18, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, - 0x79, 0x2c, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x1a, 0xdb, 0x02, 0xca, 0x41, 0x17, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xbd, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, - 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 
0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x64, 0x61, 0x74, 0x61, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, - 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2c, + 0x79, 0x2c, 0x72, 0x75, 0x6c, 0x65, 0x73, 0xda, 0x41, 0x27, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x72, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x72, 0x75, 0x6c, + 0x65, 0x73, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, + 0x64, 0x1a, 0xdb, 0x02, 0xca, 0x41, 0x17, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, + 0xbd, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, + 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, - 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, - 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xeb, 0x02, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, - 0x0d, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2f, 0x76, 0x32, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, - 0x56, 
0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, - 0xea, 0x41, 0x5c, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x36, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, - 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x7d, 0xea, - 0x41, 0x50, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x72, 0x65, + 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, + 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, + 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x72, + 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, + 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, + 0xeb, 0x02, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x42, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 
0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0x50, 0x0a, 0x25, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0xea, 0x41, + 0x5c, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x36, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x7d, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1644,69 +1760,74 @@ func file_google_bigtable_v2_bigtable_proto_rawDescGZIP() []byte { return file_google_bigtable_v2_bigtable_proto_rawDescData } +var file_google_bigtable_v2_bigtable_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_bigtable_v2_bigtable_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_google_bigtable_v2_bigtable_proto_goTypes = []interface{}{ - (*ReadRowsRequest)(nil), // 0: google.bigtable.v2.ReadRowsRequest - (*ReadRowsResponse)(nil), // 1: google.bigtable.v2.ReadRowsResponse - (*SampleRowKeysRequest)(nil), // 2: google.bigtable.v2.SampleRowKeysRequest - (*SampleRowKeysResponse)(nil), // 3: google.bigtable.v2.SampleRowKeysResponse - (*MutateRowRequest)(nil), // 4: google.bigtable.v2.MutateRowRequest - (*MutateRowResponse)(nil), // 5: google.bigtable.v2.MutateRowResponse - (*MutateRowsRequest)(nil), // 6: google.bigtable.v2.MutateRowsRequest - (*MutateRowsResponse)(nil), // 7: google.bigtable.v2.MutateRowsResponse - (*CheckAndMutateRowRequest)(nil), // 8: google.bigtable.v2.CheckAndMutateRowRequest - (*CheckAndMutateRowResponse)(nil), // 9: google.bigtable.v2.CheckAndMutateRowResponse - (*PingAndWarmRequest)(nil), // 10: google.bigtable.v2.PingAndWarmRequest - (*PingAndWarmResponse)(nil), // 11: google.bigtable.v2.PingAndWarmResponse - (*ReadModifyWriteRowRequest)(nil), // 12: google.bigtable.v2.ReadModifyWriteRowRequest - (*ReadModifyWriteRowResponse)(nil), // 13: google.bigtable.v2.ReadModifyWriteRowResponse - (*ReadRowsResponse_CellChunk)(nil), // 14: google.bigtable.v2.ReadRowsResponse.CellChunk - (*MutateRowsRequest_Entry)(nil), // 15: google.bigtable.v2.MutateRowsRequest.Entry - (*MutateRowsResponse_Entry)(nil), // 16: google.bigtable.v2.MutateRowsResponse.Entry - (*RowSet)(nil), // 17: google.bigtable.v2.RowSet - (*RowFilter)(nil), // 18: google.bigtable.v2.RowFilter - (*Mutation)(nil), // 19: google.bigtable.v2.Mutation - (*ReadModifyWriteRule)(nil), // 20: google.bigtable.v2.ReadModifyWriteRule - (*Row)(nil), // 21: 
google.bigtable.v2.Row - (*wrapperspb.StringValue)(nil), // 22: google.protobuf.StringValue - (*wrapperspb.BytesValue)(nil), // 23: google.protobuf.BytesValue - (*status.Status)(nil), // 24: google.rpc.Status + (ReadRowsRequest_RequestStatsView)(0), // 0: google.bigtable.v2.ReadRowsRequest.RequestStatsView + (*ReadRowsRequest)(nil), // 1: google.bigtable.v2.ReadRowsRequest + (*ReadRowsResponse)(nil), // 2: google.bigtable.v2.ReadRowsResponse + (*SampleRowKeysRequest)(nil), // 3: google.bigtable.v2.SampleRowKeysRequest + (*SampleRowKeysResponse)(nil), // 4: google.bigtable.v2.SampleRowKeysResponse + (*MutateRowRequest)(nil), // 5: google.bigtable.v2.MutateRowRequest + (*MutateRowResponse)(nil), // 6: google.bigtable.v2.MutateRowResponse + (*MutateRowsRequest)(nil), // 7: google.bigtable.v2.MutateRowsRequest + (*MutateRowsResponse)(nil), // 8: google.bigtable.v2.MutateRowsResponse + (*CheckAndMutateRowRequest)(nil), // 9: google.bigtable.v2.CheckAndMutateRowRequest + (*CheckAndMutateRowResponse)(nil), // 10: google.bigtable.v2.CheckAndMutateRowResponse + (*PingAndWarmRequest)(nil), // 11: google.bigtable.v2.PingAndWarmRequest + (*PingAndWarmResponse)(nil), // 12: google.bigtable.v2.PingAndWarmResponse + (*ReadModifyWriteRowRequest)(nil), // 13: google.bigtable.v2.ReadModifyWriteRowRequest + (*ReadModifyWriteRowResponse)(nil), // 14: google.bigtable.v2.ReadModifyWriteRowResponse + (*ReadRowsResponse_CellChunk)(nil), // 15: google.bigtable.v2.ReadRowsResponse.CellChunk + (*MutateRowsRequest_Entry)(nil), // 16: google.bigtable.v2.MutateRowsRequest.Entry + (*MutateRowsResponse_Entry)(nil), // 17: google.bigtable.v2.MutateRowsResponse.Entry + (*RowSet)(nil), // 18: google.bigtable.v2.RowSet + (*RowFilter)(nil), // 19: google.bigtable.v2.RowFilter + (*RequestStats)(nil), // 20: google.bigtable.v2.RequestStats + (*Mutation)(nil), // 21: google.bigtable.v2.Mutation + (*ReadModifyWriteRule)(nil), // 22: google.bigtable.v2.ReadModifyWriteRule + (*Row)(nil), // 23: google.bigtable.v2.Row + (*wrapperspb.StringValue)(nil), // 24: google.protobuf.StringValue + (*wrapperspb.BytesValue)(nil), // 25: google.protobuf.BytesValue + (*status.Status)(nil), // 26: google.rpc.Status } var file_google_bigtable_v2_bigtable_proto_depIdxs = []int32{ - 17, // 0: google.bigtable.v2.ReadRowsRequest.rows:type_name -> google.bigtable.v2.RowSet - 18, // 1: google.bigtable.v2.ReadRowsRequest.filter:type_name -> google.bigtable.v2.RowFilter - 14, // 2: google.bigtable.v2.ReadRowsResponse.chunks:type_name -> google.bigtable.v2.ReadRowsResponse.CellChunk - 19, // 3: google.bigtable.v2.MutateRowRequest.mutations:type_name -> google.bigtable.v2.Mutation - 15, // 4: google.bigtable.v2.MutateRowsRequest.entries:type_name -> google.bigtable.v2.MutateRowsRequest.Entry - 16, // 5: google.bigtable.v2.MutateRowsResponse.entries:type_name -> google.bigtable.v2.MutateRowsResponse.Entry - 18, // 6: google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter:type_name -> google.bigtable.v2.RowFilter - 19, // 7: google.bigtable.v2.CheckAndMutateRowRequest.true_mutations:type_name -> google.bigtable.v2.Mutation - 19, // 8: google.bigtable.v2.CheckAndMutateRowRequest.false_mutations:type_name -> google.bigtable.v2.Mutation - 20, // 9: google.bigtable.v2.ReadModifyWriteRowRequest.rules:type_name -> google.bigtable.v2.ReadModifyWriteRule - 21, // 10: google.bigtable.v2.ReadModifyWriteRowResponse.row:type_name -> google.bigtable.v2.Row - 22, // 11: google.bigtable.v2.ReadRowsResponse.CellChunk.family_name:type_name -> google.protobuf.StringValue 
- 23, // 12: google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier:type_name -> google.protobuf.BytesValue - 19, // 13: google.bigtable.v2.MutateRowsRequest.Entry.mutations:type_name -> google.bigtable.v2.Mutation - 24, // 14: google.bigtable.v2.MutateRowsResponse.Entry.status:type_name -> google.rpc.Status - 0, // 15: google.bigtable.v2.Bigtable.ReadRows:input_type -> google.bigtable.v2.ReadRowsRequest - 2, // 16: google.bigtable.v2.Bigtable.SampleRowKeys:input_type -> google.bigtable.v2.SampleRowKeysRequest - 4, // 17: google.bigtable.v2.Bigtable.MutateRow:input_type -> google.bigtable.v2.MutateRowRequest - 6, // 18: google.bigtable.v2.Bigtable.MutateRows:input_type -> google.bigtable.v2.MutateRowsRequest - 8, // 19: google.bigtable.v2.Bigtable.CheckAndMutateRow:input_type -> google.bigtable.v2.CheckAndMutateRowRequest - 10, // 20: google.bigtable.v2.Bigtable.PingAndWarm:input_type -> google.bigtable.v2.PingAndWarmRequest - 12, // 21: google.bigtable.v2.Bigtable.ReadModifyWriteRow:input_type -> google.bigtable.v2.ReadModifyWriteRowRequest - 1, // 22: google.bigtable.v2.Bigtable.ReadRows:output_type -> google.bigtable.v2.ReadRowsResponse - 3, // 23: google.bigtable.v2.Bigtable.SampleRowKeys:output_type -> google.bigtable.v2.SampleRowKeysResponse - 5, // 24: google.bigtable.v2.Bigtable.MutateRow:output_type -> google.bigtable.v2.MutateRowResponse - 7, // 25: google.bigtable.v2.Bigtable.MutateRows:output_type -> google.bigtable.v2.MutateRowsResponse - 9, // 26: google.bigtable.v2.Bigtable.CheckAndMutateRow:output_type -> google.bigtable.v2.CheckAndMutateRowResponse - 11, // 27: google.bigtable.v2.Bigtable.PingAndWarm:output_type -> google.bigtable.v2.PingAndWarmResponse - 13, // 28: google.bigtable.v2.Bigtable.ReadModifyWriteRow:output_type -> google.bigtable.v2.ReadModifyWriteRowResponse - 22, // [22:29] is the sub-list for method output_type - 15, // [15:22] is the sub-list for method input_type - 15, // [15:15] is the sub-list for extension type_name - 15, // [15:15] is the sub-list for extension extendee - 0, // [0:15] is the sub-list for field type_name + 18, // 0: google.bigtable.v2.ReadRowsRequest.rows:type_name -> google.bigtable.v2.RowSet + 19, // 1: google.bigtable.v2.ReadRowsRequest.filter:type_name -> google.bigtable.v2.RowFilter + 0, // 2: google.bigtable.v2.ReadRowsRequest.request_stats_view:type_name -> google.bigtable.v2.ReadRowsRequest.RequestStatsView + 15, // 3: google.bigtable.v2.ReadRowsResponse.chunks:type_name -> google.bigtable.v2.ReadRowsResponse.CellChunk + 20, // 4: google.bigtable.v2.ReadRowsResponse.request_stats:type_name -> google.bigtable.v2.RequestStats + 21, // 5: google.bigtable.v2.MutateRowRequest.mutations:type_name -> google.bigtable.v2.Mutation + 16, // 6: google.bigtable.v2.MutateRowsRequest.entries:type_name -> google.bigtable.v2.MutateRowsRequest.Entry + 17, // 7: google.bigtable.v2.MutateRowsResponse.entries:type_name -> google.bigtable.v2.MutateRowsResponse.Entry + 19, // 8: google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter:type_name -> google.bigtable.v2.RowFilter + 21, // 9: google.bigtable.v2.CheckAndMutateRowRequest.true_mutations:type_name -> google.bigtable.v2.Mutation + 21, // 10: google.bigtable.v2.CheckAndMutateRowRequest.false_mutations:type_name -> google.bigtable.v2.Mutation + 22, // 11: google.bigtable.v2.ReadModifyWriteRowRequest.rules:type_name -> google.bigtable.v2.ReadModifyWriteRule + 23, // 12: google.bigtable.v2.ReadModifyWriteRowResponse.row:type_name -> google.bigtable.v2.Row + 24, // 13: 
google.bigtable.v2.ReadRowsResponse.CellChunk.family_name:type_name -> google.protobuf.StringValue + 25, // 14: google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier:type_name -> google.protobuf.BytesValue + 21, // 15: google.bigtable.v2.MutateRowsRequest.Entry.mutations:type_name -> google.bigtable.v2.Mutation + 26, // 16: google.bigtable.v2.MutateRowsResponse.Entry.status:type_name -> google.rpc.Status + 1, // 17: google.bigtable.v2.Bigtable.ReadRows:input_type -> google.bigtable.v2.ReadRowsRequest + 3, // 18: google.bigtable.v2.Bigtable.SampleRowKeys:input_type -> google.bigtable.v2.SampleRowKeysRequest + 5, // 19: google.bigtable.v2.Bigtable.MutateRow:input_type -> google.bigtable.v2.MutateRowRequest + 7, // 20: google.bigtable.v2.Bigtable.MutateRows:input_type -> google.bigtable.v2.MutateRowsRequest + 9, // 21: google.bigtable.v2.Bigtable.CheckAndMutateRow:input_type -> google.bigtable.v2.CheckAndMutateRowRequest + 11, // 22: google.bigtable.v2.Bigtable.PingAndWarm:input_type -> google.bigtable.v2.PingAndWarmRequest + 13, // 23: google.bigtable.v2.Bigtable.ReadModifyWriteRow:input_type -> google.bigtable.v2.ReadModifyWriteRowRequest + 2, // 24: google.bigtable.v2.Bigtable.ReadRows:output_type -> google.bigtable.v2.ReadRowsResponse + 4, // 25: google.bigtable.v2.Bigtable.SampleRowKeys:output_type -> google.bigtable.v2.SampleRowKeysResponse + 6, // 26: google.bigtable.v2.Bigtable.MutateRow:output_type -> google.bigtable.v2.MutateRowResponse + 8, // 27: google.bigtable.v2.Bigtable.MutateRows:output_type -> google.bigtable.v2.MutateRowsResponse + 10, // 28: google.bigtable.v2.Bigtable.CheckAndMutateRow:output_type -> google.bigtable.v2.CheckAndMutateRowResponse + 12, // 29: google.bigtable.v2.Bigtable.PingAndWarm:output_type -> google.bigtable.v2.PingAndWarmResponse + 14, // 30: google.bigtable.v2.Bigtable.ReadModifyWriteRow:output_type -> google.bigtable.v2.ReadModifyWriteRowResponse + 24, // [24:31] is the sub-list for method output_type + 17, // [17:24] is the sub-list for method input_type + 17, // [17:17] is the sub-list for extension type_name + 17, // [17:17] is the sub-list for extension extendee + 0, // [0:17] is the sub-list for field type_name } func init() { file_google_bigtable_v2_bigtable_proto_init() } @@ -1715,6 +1836,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { return } file_google_bigtable_v2_data_proto_init() + file_google_bigtable_v2_request_stats_proto_init() if !protoimpl.UnsafeEnabled { file_google_bigtable_v2_bigtable_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadRowsRequest); i { @@ -1930,13 +2052,14 @@ func file_google_bigtable_v2_bigtable_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_v2_bigtable_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 17, NumExtensions: 0, NumServices: 1, }, GoTypes: file_google_bigtable_v2_bigtable_proto_goTypes, DependencyIndexes: file_google_bigtable_v2_bigtable_proto_depIdxs, + EnumInfos: file_google_bigtable_v2_bigtable_proto_enumTypes, MessageInfos: file_google_bigtable_v2_bigtable_proto_msgTypes, }.Build() File_google_bigtable_v2_bigtable_proto = out.File diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/request_stats.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/request_stats.pb.go new file mode 100644 index 000000000000..e3f27c891360 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/request_stats.pb.go @@ -0,0 +1,497 
@@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/bigtable/v2/request_stats.proto + +package bigtable + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// ReadIterationStats captures information about the iteration of rows or cells +// over the course of a read, e.g. how many results were scanned in a read +// operation versus the results returned. +type ReadIterationStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The rows seen (scanned) as part of the request. This includes the count of + // rows returned, as captured below. + RowsSeenCount int64 `protobuf:"varint,1,opt,name=rows_seen_count,json=rowsSeenCount,proto3" json:"rows_seen_count,omitempty"` + // The rows returned as part of the request. + RowsReturnedCount int64 `protobuf:"varint,2,opt,name=rows_returned_count,json=rowsReturnedCount,proto3" json:"rows_returned_count,omitempty"` + // The cells seen (scanned) as part of the request. This includes the count of + // cells returned, as captured below. + CellsSeenCount int64 `protobuf:"varint,3,opt,name=cells_seen_count,json=cellsSeenCount,proto3" json:"cells_seen_count,omitempty"` + // The cells returned as part of the request. + CellsReturnedCount int64 `protobuf:"varint,4,opt,name=cells_returned_count,json=cellsReturnedCount,proto3" json:"cells_returned_count,omitempty"` +} + +func (x *ReadIterationStats) Reset() { + *x = ReadIterationStats{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadIterationStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadIterationStats) ProtoMessage() {} + +func (x *ReadIterationStats) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadIterationStats.ProtoReflect.Descriptor instead. 
+func (*ReadIterationStats) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_request_stats_proto_rawDescGZIP(), []int{0} +} + +func (x *ReadIterationStats) GetRowsSeenCount() int64 { + if x != nil { + return x.RowsSeenCount + } + return 0 +} + +func (x *ReadIterationStats) GetRowsReturnedCount() int64 { + if x != nil { + return x.RowsReturnedCount + } + return 0 +} + +func (x *ReadIterationStats) GetCellsSeenCount() int64 { + if x != nil { + return x.CellsSeenCount + } + return 0 +} + +func (x *ReadIterationStats) GetCellsReturnedCount() int64 { + if x != nil { + return x.CellsReturnedCount + } + return 0 +} + +// RequestLatencyStats provides a measurement of the latency of the request as +// it interacts with different systems over its lifetime, e.g. how long the +// request took to execute within a frontend server. +type RequestLatencyStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The latency measured by the frontend server handling this request, from + // when the request was received, to when this value is sent back in the + // response. For more context on the component that is measuring this latency, + // see: https://cloud.google.com/bigtable/docs/overview + // + // Note: This value may be slightly shorter than the value reported into + // aggregate latency metrics in Monitoring for this request + // (https://cloud.google.com/bigtable/docs/monitoring-instance) as this value + // needs to be sent in the response before the latency measurement including + // that transmission is finalized. + // + // Note: This value includes the end-to-end latency of contacting nodes in + // the targeted cluster, e.g. measuring from when the first byte arrives at + // the frontend server, to when this value is sent back as the last value in + // the response, including any latency incurred by contacting nodes, waiting + // for results from nodes, and finally sending results from nodes back to the + // caller. + FrontendServerLatency *durationpb.Duration `protobuf:"bytes,1,opt,name=frontend_server_latency,json=frontendServerLatency,proto3" json:"frontend_server_latency,omitempty"` +} + +func (x *RequestLatencyStats) Reset() { + *x = RequestLatencyStats{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestLatencyStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestLatencyStats) ProtoMessage() {} + +func (x *RequestLatencyStats) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestLatencyStats.ProtoReflect.Descriptor instead. +func (*RequestLatencyStats) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_request_stats_proto_rawDescGZIP(), []int{1} +} + +func (x *RequestLatencyStats) GetFrontendServerLatency() *durationpb.Duration { + if x != nil { + return x.FrontendServerLatency + } + return nil +} + +// FullReadStatsView captures all known information about a read. 
+type FullReadStatsView struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Iteration stats describe how efficient the read is, e.g. comparing + // rows seen vs. rows returned or cells seen vs cells returned can provide an + // indication of read efficiency (the higher the ratio of seen to retuned the + // better). + ReadIterationStats *ReadIterationStats `protobuf:"bytes,1,opt,name=read_iteration_stats,json=readIterationStats,proto3" json:"read_iteration_stats,omitempty"` + // Request latency stats describe the time taken to complete a request, from + // the server side. + RequestLatencyStats *RequestLatencyStats `protobuf:"bytes,2,opt,name=request_latency_stats,json=requestLatencyStats,proto3" json:"request_latency_stats,omitempty"` +} + +func (x *FullReadStatsView) Reset() { + *x = FullReadStatsView{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FullReadStatsView) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FullReadStatsView) ProtoMessage() {} + +func (x *FullReadStatsView) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FullReadStatsView.ProtoReflect.Descriptor instead. +func (*FullReadStatsView) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_request_stats_proto_rawDescGZIP(), []int{2} +} + +func (x *FullReadStatsView) GetReadIterationStats() *ReadIterationStats { + if x != nil { + return x.ReadIterationStats + } + return nil +} + +func (x *FullReadStatsView) GetRequestLatencyStats() *RequestLatencyStats { + if x != nil { + return x.RequestLatencyStats + } + return nil +} + +// RequestStats is the container for additional information pertaining to a +// single request, helpful for evaluating the performance of the sent request. +// Currently, there are the following supported methods: +// - google.bigtable.v2.ReadRows +type RequestStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Information pertaining to each request type received. The type is chosen + // based on the requested view. + // + // See the messages above for additional context. + // + // Types that are assignable to StatsView: + // + // *RequestStats_FullReadStatsView + StatsView isRequestStats_StatsView `protobuf_oneof:"stats_view"` +} + +func (x *RequestStats) Reset() { + *x = RequestStats{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestStats) ProtoMessage() {} + +func (x *RequestStats) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_request_stats_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestStats.ProtoReflect.Descriptor instead. 
+func (*RequestStats) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_request_stats_proto_rawDescGZIP(), []int{3} +} + +func (m *RequestStats) GetStatsView() isRequestStats_StatsView { + if m != nil { + return m.StatsView + } + return nil +} + +func (x *RequestStats) GetFullReadStatsView() *FullReadStatsView { + if x, ok := x.GetStatsView().(*RequestStats_FullReadStatsView); ok { + return x.FullReadStatsView + } + return nil +} + +type isRequestStats_StatsView interface { + isRequestStats_StatsView() +} + +type RequestStats_FullReadStatsView struct { + // Available with the ReadRowsRequest.RequestStatsView.REQUEST_STATS_FULL + // view, see package google.bigtable.v2. + FullReadStatsView *FullReadStatsView `protobuf:"bytes,1,opt,name=full_read_stats_view,json=fullReadStatsView,proto3,oneof"` +} + +func (*RequestStats_FullReadStatsView) isRequestStats_StatsView() {} + +var File_google_bigtable_v2_request_stats_proto protoreflect.FileDescriptor + +var file_google_bigtable_v2_request_stats_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc8, 0x01, 0x0a, + 0x12, 0x52, 0x65, 0x61, 0x64, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x73, 0x65, 0x65, 0x6e, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x72, 0x6f, + 0x77, 0x73, 0x53, 0x65, 0x65, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x72, + 0x6f, 0x77, 0x73, 0x5f, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x72, 0x6f, 0x77, 0x73, 0x52, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x73, 0x65, 0x65, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x53, 0x65, 0x65, 0x6e, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x12, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x68, 0x0a, 0x13, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x51, + 0x0a, 0x17, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x66, 0x72, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x22, 0xca, 0x01, 0x0a, 0x11, 0x46, 0x75, 0x6c, 0x6c, 
0x52, 0x65, 0x61, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x56, 0x69, 0x65, 0x77, 0x12, 0x58, 0x0a, 0x14, 0x72, 0x65, 0x61, 0x64, 0x5f, + 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x49, + 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x12, 0x72, + 0x65, 0x61, 0x64, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x12, 0x5b, 0x0a, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6c, 0x61, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x61, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0x76, + 0x0a, 0x0c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x56, + 0x69, 0x65, 0x77, 0x48, 0x00, 0x52, 0x11, 0x66, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x61, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x56, 0x69, 0x65, 0x77, 0x42, 0x0c, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x42, 0xbd, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, + 0x32, 0x42, 0x11, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_bigtable_v2_request_stats_proto_rawDescOnce sync.Once + file_google_bigtable_v2_request_stats_proto_rawDescData = file_google_bigtable_v2_request_stats_proto_rawDesc +) + +func file_google_bigtable_v2_request_stats_proto_rawDescGZIP() []byte { + file_google_bigtable_v2_request_stats_proto_rawDescOnce.Do(func() { + 
file_google_bigtable_v2_request_stats_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_bigtable_v2_request_stats_proto_rawDescData) + }) + return file_google_bigtable_v2_request_stats_proto_rawDescData +} + +var file_google_bigtable_v2_request_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_bigtable_v2_request_stats_proto_goTypes = []interface{}{ + (*ReadIterationStats)(nil), // 0: google.bigtable.v2.ReadIterationStats + (*RequestLatencyStats)(nil), // 1: google.bigtable.v2.RequestLatencyStats + (*FullReadStatsView)(nil), // 2: google.bigtable.v2.FullReadStatsView + (*RequestStats)(nil), // 3: google.bigtable.v2.RequestStats + (*durationpb.Duration)(nil), // 4: google.protobuf.Duration +} +var file_google_bigtable_v2_request_stats_proto_depIdxs = []int32{ + 4, // 0: google.bigtable.v2.RequestLatencyStats.frontend_server_latency:type_name -> google.protobuf.Duration + 0, // 1: google.bigtable.v2.FullReadStatsView.read_iteration_stats:type_name -> google.bigtable.v2.ReadIterationStats + 1, // 2: google.bigtable.v2.FullReadStatsView.request_latency_stats:type_name -> google.bigtable.v2.RequestLatencyStats + 2, // 3: google.bigtable.v2.RequestStats.full_read_stats_view:type_name -> google.bigtable.v2.FullReadStatsView + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_google_bigtable_v2_request_stats_proto_init() } +func file_google_bigtable_v2_request_stats_proto_init() { + if File_google_bigtable_v2_request_stats_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_bigtable_v2_request_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadIterationStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_request_stats_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestLatencyStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_request_stats_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FullReadStatsView); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_request_stats_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_bigtable_v2_request_stats_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*RequestStats_FullReadStatsView)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_bigtable_v2_request_stats_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_bigtable_v2_request_stats_proto_goTypes, + DependencyIndexes: file_google_bigtable_v2_request_stats_proto_depIdxs, + MessageInfos: file_google_bigtable_v2_request_stats_proto_msgTypes, + }.Build() + 
File_google_bigtable_v2_request_stats_proto = out.File + file_google_bigtable_v2_request_stats_proto_rawDesc = nil + file_google_bigtable_v2_request_stats_proto_goTypes = nil + file_google_bigtable_v2_request_stats_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/response_params.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/response_params.pb.go new file mode 100644 index 000000000000..7a3c8d680b9f --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/response_params.pb.go @@ -0,0 +1,191 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/bigtable/v2/response_params.proto + +package bigtable + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Response metadata proto +// This is an experimental feature that will be used to get zone_id and +// cluster_id from response trailers to tag the metrics. This should not be +// used by customers directly +type ResponseParams struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The cloud bigtable zone associated with the cluster. + ZoneId *string `protobuf:"bytes,1,opt,name=zone_id,json=zoneId,proto3,oneof" json:"zone_id,omitempty"` + // Identifier for a cluster that represents set of + // bigtable resources. + ClusterId *string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3,oneof" json:"cluster_id,omitempty"` +} + +func (x *ResponseParams) Reset() { + *x = ResponseParams{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_response_params_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseParams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseParams) ProtoMessage() {} + +func (x *ResponseParams) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_response_params_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseParams.ProtoReflect.Descriptor instead. 
+func (*ResponseParams) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_response_params_proto_rawDescGZIP(), []int{0} +} + +func (x *ResponseParams) GetZoneId() string { + if x != nil && x.ZoneId != nil { + return *x.ZoneId + } + return "" +} + +func (x *ResponseParams) GetClusterId() string { + if x != nil && x.ClusterId != nil { + return *x.ClusterId + } + return "" +} + +var File_google_bigtable_v2_response_params_proto protoreflect.FileDescriptor + +var file_google_bigtable_v2_response_params_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x22, 0x6d, + 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x12, 0x1c, 0x0a, 0x07, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x22, + 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x88, + 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x42, 0x0d, + 0x0a, 0x0b, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x42, 0xbf, 0x01, + 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x13, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, + 0x76, 0x32, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0xaa, 0x02, 0x18, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x56, + 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_bigtable_v2_response_params_proto_rawDescOnce sync.Once + file_google_bigtable_v2_response_params_proto_rawDescData = file_google_bigtable_v2_response_params_proto_rawDesc +) + +func file_google_bigtable_v2_response_params_proto_rawDescGZIP() []byte { + file_google_bigtable_v2_response_params_proto_rawDescOnce.Do(func() { + file_google_bigtable_v2_response_params_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_bigtable_v2_response_params_proto_rawDescData) + }) + return file_google_bigtable_v2_response_params_proto_rawDescData +} + +var file_google_bigtable_v2_response_params_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var 
file_google_bigtable_v2_response_params_proto_goTypes = []interface{}{ + (*ResponseParams)(nil), // 0: google.bigtable.v2.ResponseParams +} +var file_google_bigtable_v2_response_params_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_bigtable_v2_response_params_proto_init() } +func file_google_bigtable_v2_response_params_proto_init() { + if File_google_bigtable_v2_response_params_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_bigtable_v2_response_params_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseParams); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_bigtable_v2_response_params_proto_msgTypes[0].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_bigtable_v2_response_params_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_bigtable_v2_response_params_proto_goTypes, + DependencyIndexes: file_google_bigtable_v2_response_params_proto_depIdxs, + MessageInfos: file_google_bigtable_v2_response_params_proto_msgTypes, + }.Build() + File_google_bigtable_v2_response_params_proto = out.File + file_google_bigtable_v2_response_params_proto_rawDesc = nil + file_google_bigtable_v2_response_params_proto_goTypes = nil + file_google_bigtable_v2_response_params_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go new file mode 100644 index 000000000000..9fb745926a58 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go @@ -0,0 +1,208 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by aliasgen. DO NOT EDIT. + +// Package iam aliases all exported identifiers in package +// "cloud.google.com/go/iam/apiv1/iampb". +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb. +// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md +// for more details. 
+package iam + +import ( + src "cloud.google.com/go/iam/apiv1/iampb" + grpc "google.golang.org/grpc" +) + +// Deprecated: Please use consts in: cloud.google.com/go/iam/apiv1/iampb +const ( + AuditConfigDelta_ACTION_UNSPECIFIED = src.AuditConfigDelta_ACTION_UNSPECIFIED + AuditConfigDelta_ADD = src.AuditConfigDelta_ADD + AuditConfigDelta_REMOVE = src.AuditConfigDelta_REMOVE + AuditLogConfig_ADMIN_READ = src.AuditLogConfig_ADMIN_READ + AuditLogConfig_DATA_READ = src.AuditLogConfig_DATA_READ + AuditLogConfig_DATA_WRITE = src.AuditLogConfig_DATA_WRITE + AuditLogConfig_LOG_TYPE_UNSPECIFIED = src.AuditLogConfig_LOG_TYPE_UNSPECIFIED + BindingDelta_ACTION_UNSPECIFIED = src.BindingDelta_ACTION_UNSPECIFIED + BindingDelta_ADD = src.BindingDelta_ADD + BindingDelta_REMOVE = src.BindingDelta_REMOVE +) + +// Deprecated: Please use vars in: cloud.google.com/go/iam/apiv1/iampb +var ( + AuditConfigDelta_Action_name = src.AuditConfigDelta_Action_name + AuditConfigDelta_Action_value = src.AuditConfigDelta_Action_value + AuditLogConfig_LogType_name = src.AuditLogConfig_LogType_name + AuditLogConfig_LogType_value = src.AuditLogConfig_LogType_value + BindingDelta_Action_name = src.BindingDelta_Action_name + BindingDelta_Action_value = src.BindingDelta_Action_value + File_google_iam_v1_iam_policy_proto = src.File_google_iam_v1_iam_policy_proto + File_google_iam_v1_options_proto = src.File_google_iam_v1_options_proto + File_google_iam_v1_policy_proto = src.File_google_iam_v1_policy_proto +) + +// Specifies the audit configuration for a service. The configuration +// determines which permission types are logged, and what identities, if any, +// are exempted from logging. An AuditConfig must have one or more +// AuditLogConfigs. If there are AuditConfigs for both `allServices` and a +// specific service, the union of the two AuditConfigs is used for that +// service: the log_types specified in each AuditConfig are enabled, and the +// exempted_members in each AuditLogConfig are exempted. Example Policy with +// multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": +// "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", +// "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": +// "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For +// sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ +// logging. It also exempts jose@example.com from DATA_READ logging, and +// aliya@example.com from DATA_WRITE logging. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfig = src.AuditConfig + +// One delta entry for AuditConfig. Each individual change (only one +// exempted_member in each entry) to a AuditConfig will be a separate entry. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfigDelta = src.AuditConfigDelta + +// The type of action performed on an audit configuration in a policy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditConfigDelta_Action = src.AuditConfigDelta_Action + +// Provides the configuration for logging a type of permissions. 
Example: { +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables +// 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from +// DATA_READ logging. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditLogConfig = src.AuditLogConfig + +// The list of valid permission types for which logging can be configured. +// Admin writes are always logged, and are not configurable. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type AuditLogConfig_LogType = src.AuditLogConfig_LogType + +// Associates `members`, or principals, with a `role`. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type Binding = src.Binding + +// One delta entry for Binding. Each individual change (only one member in +// each entry) to a binding will be a separate entry. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type BindingDelta = src.BindingDelta + +// The type of action performed on a Binding in a policy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type BindingDelta_Action = src.BindingDelta_Action + +// Request message for `GetIamPolicy` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type GetIamPolicyRequest = src.GetIamPolicyRequest + +// Encapsulates settings provided to GetIamPolicy. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type GetPolicyOptions = src.GetPolicyOptions + +// IAMPolicyClient is the client API for IAMPolicy service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type IAMPolicyClient = src.IAMPolicyClient + +// IAMPolicyServer is the server API for IAMPolicy service. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type IAMPolicyServer = src.IAMPolicyServer + +// An Identity and Access Management (IAM) policy, which specifies access +// controls for Google Cloud resources. A `Policy` is a collection of +// `bindings`. A `binding` binds one or more `members`, or principals, to a +// single `role`. Principals can be user accounts, service accounts, Google +// groups, and domains (such as G Suite). A `role` is a named list of +// permissions; each `role` can be an IAM predefined role or a user-created +// custom role. For some types of Google Cloud resources, a `binding` can also +// specify a `condition`, which is a logical expression that allows access to a +// resource only if the expression evaluates to `true`. A condition can add +// constraints based on attributes of the request, the resource, or both. To +// learn which resources support conditions in their IAM policies, see the [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
+// **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": +// "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": +// "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - +// user:mike@example.com - group:admins@example.com - domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - user:eve@example.com +// role: roles/resourcemanager.organizationViewer condition: title: expirable +// access description: Does not grant access after Sep 2020 expression: +// request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= +// version: 3 For a description of IAM and its features, see the [IAM +// documentation](https://cloud.google.com/iam/docs/). +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type Policy = src.Policy + +// The difference delta between two policies. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type PolicyDelta = src.PolicyDelta + +// Request message for `SetIamPolicy` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type SetIamPolicyRequest = src.SetIamPolicyRequest + +// Request message for `TestIamPermissions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type TestIamPermissionsRequest = src.TestIamPermissionsRequest + +// Response message for `TestIamPermissions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type TestIamPermissionsResponse = src.TestIamPermissionsResponse + +// UnimplementedIAMPolicyServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb +type UnimplementedIAMPolicyServer = src.UnimplementedIAMPolicyServer + +// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb +func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { + return src.NewIAMPolicyClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb +func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { + src.RegisterIAMPolicyServer(s, srv) +} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go deleted file mode 100644 index f0a3048dbaef..000000000000 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go +++ /dev/null @@ -1,813 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.12.2 -// source: google/iam/v1/policy.proto - -package iam - -import ( - reflect "reflect" - sync "sync" - - _ "google.golang.org/genproto/googleapis/api/annotations" - expr "google.golang.org/genproto/googleapis/type/expr" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The type of action performed on a Binding in a policy. -type BindingDelta_Action int32 - -const ( - // Unspecified. - BindingDelta_ACTION_UNSPECIFIED BindingDelta_Action = 0 - // Addition of a Binding. - BindingDelta_ADD BindingDelta_Action = 1 - // Removal of a Binding. - BindingDelta_REMOVE BindingDelta_Action = 2 -) - -// Enum value maps for BindingDelta_Action. -var ( - BindingDelta_Action_name = map[int32]string{ - 0: "ACTION_UNSPECIFIED", - 1: "ADD", - 2: "REMOVE", - } - BindingDelta_Action_value = map[string]int32{ - "ACTION_UNSPECIFIED": 0, - "ADD": 1, - "REMOVE": 2, - } -) - -func (x BindingDelta_Action) Enum() *BindingDelta_Action { - p := new(BindingDelta_Action) - *p = x - return p -} - -func (x BindingDelta_Action) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (BindingDelta_Action) Descriptor() protoreflect.EnumDescriptor { - return file_google_iam_v1_policy_proto_enumTypes[0].Descriptor() -} - -func (BindingDelta_Action) Type() protoreflect.EnumType { - return &file_google_iam_v1_policy_proto_enumTypes[0] -} - -func (x BindingDelta_Action) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use BindingDelta_Action.Descriptor instead. -func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3, 0} -} - -// The type of action performed on an audit configuration in a policy. -type AuditConfigDelta_Action int32 - -const ( - // Unspecified. - AuditConfigDelta_ACTION_UNSPECIFIED AuditConfigDelta_Action = 0 - // Addition of an audit configuration. - AuditConfigDelta_ADD AuditConfigDelta_Action = 1 - // Removal of an audit configuration. - AuditConfigDelta_REMOVE AuditConfigDelta_Action = 2 -) - -// Enum value maps for AuditConfigDelta_Action. 
-var ( - AuditConfigDelta_Action_name = map[int32]string{ - 0: "ACTION_UNSPECIFIED", - 1: "ADD", - 2: "REMOVE", - } - AuditConfigDelta_Action_value = map[string]int32{ - "ACTION_UNSPECIFIED": 0, - "ADD": 1, - "REMOVE": 2, - } -) - -func (x AuditConfigDelta_Action) Enum() *AuditConfigDelta_Action { - p := new(AuditConfigDelta_Action) - *p = x - return p -} - -func (x AuditConfigDelta_Action) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (AuditConfigDelta_Action) Descriptor() protoreflect.EnumDescriptor { - return file_google_iam_v1_policy_proto_enumTypes[1].Descriptor() -} - -func (AuditConfigDelta_Action) Type() protoreflect.EnumType { - return &file_google_iam_v1_policy_proto_enumTypes[1] -} - -func (x AuditConfigDelta_Action) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use AuditConfigDelta_Action.Descriptor instead. -func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{4, 0} -} - -// Defines an Identity and Access Management (IAM) policy. It is used to -// specify access control policies for Cloud Platform resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or more -// `members` to a single `role`. Members can be user accounts, service accounts, -// Google groups, and domains (such as G Suite). A `role` is a named list of -// permissions (defined by IAM or configured by users). A `binding` can -// optionally specify a `condition`, which is a logic expression that further -// constrains the role binding based on attributes about the request and/or -// target resource. -// -// **JSON Example** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": ["user:eve@example.com"], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ] -// } -// -// **YAML Example** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < timestamp('2020-10-01T00:00:00.000Z') -// -// For a description of IAM and its features, see the -// [IAM developer's guide](https://cloud.google.com/iam/docs). -type Policy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Specifies the format of the policy. - // - // Valid values are 0, 1, and 3. Requests specifying an invalid value will be - // rejected. - // - // Operations affecting conditional bindings must specify version 3. This can - // be either setting a conditional policy, modifying a conditional binding, - // or removing a binding (conditional or unconditional) from the stored - // conditional policy. 
- // Operations on non-conditional policies may specify any valid value or - // leave the field unset. - // - // If no etag is provided in the call to `setIamPolicy`, version compliance - // checks against the stored policy is skipped. - Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - // Associates a list of `members` to a `role`. Optionally may specify a - // `condition` that determines when binding is in effect. - // `bindings` with no members will result in an error. - Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings,proto3" json:"bindings,omitempty"` - // `etag` is used for optimistic concurrency control as a way to help - // prevent simultaneous updates of a policy from overwriting each other. - // It is strongly suggested that systems make use of the `etag` in the - // read-modify-write cycle to perform policy updates in order to avoid race - // conditions: An `etag` is returned in the response to `getIamPolicy`, and - // systems are expected to put that etag in the request to `setIamPolicy` to - // ensure that their change will be applied to the same version of the policy. - // - // If no `etag` is provided in the call to `setIamPolicy`, then the existing - // policy is overwritten. Due to blind-set semantics of an etag-less policy, - // 'setIamPolicy' will not fail even if the incoming policy version does not - // meet the requirements for modifying the stored policy. - Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` -} - -func (x *Policy) Reset() { - *x = Policy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Policy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Policy) ProtoMessage() {} - -func (x *Policy) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Policy.ProtoReflect.Descriptor instead. -func (*Policy) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{0} -} - -func (x *Policy) GetVersion() int32 { - if x != nil { - return x.Version - } - return 0 -} - -func (x *Policy) GetBindings() []*Binding { - if x != nil { - return x.Bindings - } - return nil -} - -func (x *Policy) GetEtag() []byte { - if x != nil { - return x.Etag - } - return nil -} - -// Associates `members` with a `role`. -type Binding struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - // Specifies the identities requesting access for a Cloud Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. - // - // * `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . 
- // - // - // * `serviceAccount:{emailid}`: An email address that represents a service - // account. For example, `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all the - // users of that domain. For example, `google.com` or `example.com`. - // - // - Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` - // The condition that is associated with this binding. - // NOTE: An unsatisfied condition will not allow user access via current - // binding. Different bindings, including their conditions, are examined - // independently. - Condition *expr.Expr `protobuf:"bytes,3,opt,name=condition,proto3" json:"condition,omitempty"` -} - -func (x *Binding) Reset() { - *x = Binding{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Binding) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Binding) ProtoMessage() {} - -func (x *Binding) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Binding.ProtoReflect.Descriptor instead. -func (*Binding) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{1} -} - -func (x *Binding) GetRole() string { - if x != nil { - return x.Role - } - return "" -} - -func (x *Binding) GetMembers() []string { - if x != nil { - return x.Members - } - return nil -} - -func (x *Binding) GetCondition() *expr.Expr { - if x != nil { - return x.Condition - } - return nil -} - -// The difference delta between two policies. -type PolicyDelta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The delta for Bindings between two policies. - BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas,proto3" json:"binding_deltas,omitempty"` - // The delta for AuditConfigs between two policies. - AuditConfigDeltas []*AuditConfigDelta `protobuf:"bytes,2,rep,name=audit_config_deltas,json=auditConfigDeltas,proto3" json:"audit_config_deltas,omitempty"` -} - -func (x *PolicyDelta) Reset() { - *x = PolicyDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PolicyDelta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PolicyDelta) ProtoMessage() {} - -func (x *PolicyDelta) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PolicyDelta.ProtoReflect.Descriptor instead. 
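The removed `Policy`/`Binding` doc comments above describe an etag-based read-modify-write cycle for policy updates. Below is a minimal, hedged sketch of that pattern using the replacement `iampb` types this PR vendors (`cloud.google.com/go/iam/apiv1/iampb`); the `get`/`set` callbacks stand in for a resource-specific GetIamPolicy/SetIamPolicy RPC and the role/member strings are the illustrative values from the comment, not part of this change.

```go
package example

import (
	"context"
	"fmt"

	iampb "cloud.google.com/go/iam/apiv1/iampb"
)

// addBinding performs the read-modify-write cycle described above: read the
// current policy, append a binding, and write it back while keeping the etag
// returned by the read, so a concurrent update is detected rather than
// silently overwritten. get and set are hypothetical callbacks wrapping the
// resource's GetIamPolicy / SetIamPolicy calls.
func addBinding(ctx context.Context,
	get func(context.Context) (*iampb.Policy, error),
	set func(context.Context, *iampb.Policy) error,
	role, member string) error {

	policy, err := get(ctx)
	if err != nil {
		return fmt.Errorf("get policy: %w", err)
	}

	policy.Bindings = append(policy.Bindings, &iampb.Binding{
		Role:    role,             // e.g. "roles/resourcemanager.organizationViewer"
		Members: []string{member}, // e.g. "user:eve@example.com"
	})

	// If the binding carried a condition, the request would also need
	// policy.Version = 3, per the version rules in the comment above.
	// The etag from the read is still set on policy, so this write fails
	// instead of blindly overwriting a concurrent update.
	return set(ctx, policy)
}
```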
-func (*PolicyDelta) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{2} -} - -func (x *PolicyDelta) GetBindingDeltas() []*BindingDelta { - if x != nil { - return x.BindingDeltas - } - return nil -} - -func (x *PolicyDelta) GetAuditConfigDeltas() []*AuditConfigDelta { - if x != nil { - return x.AuditConfigDeltas - } - return nil -} - -// One delta entry for Binding. Each individual change (only one member in each -// entry) to a binding will be a separate entry. -type BindingDelta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The action that was performed on a Binding. - // Required - Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"` - // Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. - // Required - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - // A single identity requesting access for a Cloud Platform resource. - // Follows the same format of Binding.members. - // Required - Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"` - // The condition that is associated with this binding. - Condition *expr.Expr `protobuf:"bytes,4,opt,name=condition,proto3" json:"condition,omitempty"` -} - -func (x *BindingDelta) Reset() { - *x = BindingDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BindingDelta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BindingDelta) ProtoMessage() {} - -func (x *BindingDelta) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BindingDelta.ProtoReflect.Descriptor instead. -func (*BindingDelta) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3} -} - -func (x *BindingDelta) GetAction() BindingDelta_Action { - if x != nil { - return x.Action - } - return BindingDelta_ACTION_UNSPECIFIED -} - -func (x *BindingDelta) GetRole() string { - if x != nil { - return x.Role - } - return "" -} - -func (x *BindingDelta) GetMember() string { - if x != nil { - return x.Member - } - return "" -} - -func (x *BindingDelta) GetCondition() *expr.Expr { - if x != nil { - return x.Condition - } - return nil -} - -// One delta entry for AuditConfig. Each individual change (only one -// exempted_member in each entry) to a AuditConfig will be a separate entry. -type AuditConfigDelta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The action that was performed on an audit configuration in a policy. - // Required - Action AuditConfigDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.AuditConfigDelta_Action" json:"action,omitempty"` - // Specifies a service that was configured for Cloud Audit Logging. - // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. - // `allServices` is a special value that covers all services. 
- // Required - Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` - // A single identity that is exempted from "data access" audit - // logging for the `service` specified above. - // Follows the same format of Binding.members. - ExemptedMember string `protobuf:"bytes,3,opt,name=exempted_member,json=exemptedMember,proto3" json:"exempted_member,omitempty"` - // Specifies the log_type that was be enabled. ADMIN_ACTIVITY is always - // enabled, and cannot be configured. - // Required - LogType string `protobuf:"bytes,4,opt,name=log_type,json=logType,proto3" json:"log_type,omitempty"` -} - -func (x *AuditConfigDelta) Reset() { - *x = AuditConfigDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuditConfigDelta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuditConfigDelta) ProtoMessage() {} - -func (x *AuditConfigDelta) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuditConfigDelta.ProtoReflect.Descriptor instead. -func (*AuditConfigDelta) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{4} -} - -func (x *AuditConfigDelta) GetAction() AuditConfigDelta_Action { - if x != nil { - return x.Action - } - return AuditConfigDelta_ACTION_UNSPECIFIED -} - -func (x *AuditConfigDelta) GetService() string { - if x != nil { - return x.Service - } - return "" -} - -func (x *AuditConfigDelta) GetExemptedMember() string { - if x != nil { - return x.ExemptedMember - } - return "" -} - -func (x *AuditConfigDelta) GetLogType() string { - if x != nil { - return x.LogType - } - return "" -} - -var File_google_iam_v1_policy_proto protoreflect.FileDescriptor - -var file_google_iam_v1_policy_proto_rawDesc = []byte{ - 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x16, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x6a, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, - 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, - 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0x68, 0x0a, - 0x07, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, - 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa2, 0x01, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x69, 0x6e, 0x64, 0x69, - 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x0d, 0x62, 0x69, - 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x4f, 0x0a, 0x13, 0x61, - 0x75, 0x64, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x74, - 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x11, 0x61, 0x75, 0x64, 0x69, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x22, 0xde, 0x01, 0x0a, - 0x0c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x3a, 0x0a, - 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, - 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, - 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, - 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x02, 0x22, 0xe7, 0x01, - 0x0a, 0x10, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, - 0x74, 0x61, 0x12, 0x3e, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, - 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, - 0x6c, 0x74, 0x61, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 
0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, - 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x4d, - 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, - 0x22, 0x35, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, - 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x02, 0x42, 0x83, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, 0xf8, 0x01, - 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_iam_v1_policy_proto_rawDescOnce sync.Once - file_google_iam_v1_policy_proto_rawDescData = file_google_iam_v1_policy_proto_rawDesc -) - -func file_google_iam_v1_policy_proto_rawDescGZIP() []byte { - file_google_iam_v1_policy_proto_rawDescOnce.Do(func() { - file_google_iam_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_policy_proto_rawDescData) - }) - return file_google_iam_v1_policy_proto_rawDescData -} - -var file_google_iam_v1_policy_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_iam_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_google_iam_v1_policy_proto_goTypes = []interface{}{ - (BindingDelta_Action)(0), // 0: google.iam.v1.BindingDelta.Action - (AuditConfigDelta_Action)(0), // 1: google.iam.v1.AuditConfigDelta.Action - (*Policy)(nil), // 2: google.iam.v1.Policy - (*Binding)(nil), // 3: google.iam.v1.Binding - (*PolicyDelta)(nil), // 4: google.iam.v1.PolicyDelta - (*BindingDelta)(nil), // 5: google.iam.v1.BindingDelta - (*AuditConfigDelta)(nil), // 6: google.iam.v1.AuditConfigDelta - (*expr.Expr)(nil), // 7: google.type.Expr -} -var file_google_iam_v1_policy_proto_depIdxs = []int32{ - 3, // 0: google.iam.v1.Policy.bindings:type_name -> google.iam.v1.Binding - 7, // 1: google.iam.v1.Binding.condition:type_name -> google.type.Expr - 5, // 2: google.iam.v1.PolicyDelta.binding_deltas:type_name -> google.iam.v1.BindingDelta - 6, // 3: google.iam.v1.PolicyDelta.audit_config_deltas:type_name -> google.iam.v1.AuditConfigDelta - 0, // 4: google.iam.v1.BindingDelta.action:type_name -> google.iam.v1.BindingDelta.Action - 7, // 5: google.iam.v1.BindingDelta.condition:type_name -> google.type.Expr - 1, // 6: google.iam.v1.AuditConfigDelta.action:type_name -> google.iam.v1.AuditConfigDelta.Action - 7, // [7:7] is the sub-list for method output_type - 7, // [7:7] is the sub-list for method input_type - 
7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name -} - -func init() { file_google_iam_v1_policy_proto_init() } -func file_google_iam_v1_policy_proto_init() { - if File_google_iam_v1_policy_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Policy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Binding); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PolicyDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BindingDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuditConfigDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_iam_v1_policy_proto_rawDesc, - NumEnums: 2, - NumMessages: 5, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_iam_v1_policy_proto_goTypes, - DependencyIndexes: file_google_iam_v1_policy_proto_depIdxs, - EnumInfos: file_google_iam_v1_policy_proto_enumTypes, - MessageInfos: file_google_iam_v1_policy_proto_msgTypes, - }.Build() - File_google_iam_v1_policy_proto = out.File - file_google_iam_v1_policy_proto_rawDesc = nil - file_google_iam_v1_policy_proto_goTypes = nil - file_google_iam_v1_policy_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/longrunning/alias.go b/vendor/google.golang.org/genproto/googleapis/longrunning/alias.go new file mode 100644 index 000000000000..3addf3b11b1d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/longrunning/alias.go @@ -0,0 +1,115 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by aliasgen. DO NOT EDIT. + +// Package longrunning aliases all exported identifiers in package +// "cloud.google.com/go/longrunning/autogen/longrunningpb". +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb. 
+// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md +// for more details. +package longrunning + +import ( + src "cloud.google.com/go/longrunning/autogen/longrunningpb" + grpc "google.golang.org/grpc" +) + +// Deprecated: Please use vars in: cloud.google.com/go/longrunning/autogen/longrunningpb +var ( + E_OperationInfo = src.E_OperationInfo + File_google_longrunning_operations_proto = src.File_google_longrunning_operations_proto +) + +// The request message for +// [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type CancelOperationRequest = src.CancelOperationRequest + +// The request message for +// [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type DeleteOperationRequest = src.DeleteOperationRequest + +// The request message for +// [Operations.GetOperation][google.longrunning.Operations.GetOperation]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type GetOperationRequest = src.GetOperationRequest + +// The request message for +// [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type ListOperationsRequest = src.ListOperationsRequest + +// The response message for +// [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type ListOperationsResponse = src.ListOperationsResponse + +// This resource represents a long-running operation that is the result of a +// network API call. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type Operation = src.Operation + +// A message representing the message types used by a long-running operation. +// Example: rpc LongRunningRecognize(LongRunningRecognizeRequest) returns +// (google.longrunning.Operation) { option (google.longrunning.operation_info) +// = { response_type: "LongRunningRecognizeResponse" metadata_type: +// "LongRunningRecognizeMetadata" }; } +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type OperationInfo = src.OperationInfo +type Operation_Error = src.Operation_Error +type Operation_Response = src.Operation_Response + +// OperationsClient is the client API for Operations service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type OperationsClient = src.OperationsClient + +// OperationsServer is the server API for Operations service. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type OperationsServer = src.OperationsServer + +// UnimplementedOperationsServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type UnimplementedOperationsServer = src.UnimplementedOperationsServer + +// The request message for +// [Operations.WaitOperation][google.longrunning.Operations.WaitOperation]. 
+// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type WaitOperationRequest = src.WaitOperationRequest + +// Deprecated: Please use funcs in: cloud.google.com/go/longrunning/autogen/longrunningpb +func NewOperationsClient(cc grpc.ClientConnInterface) OperationsClient { + return src.NewOperationsClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/longrunning/autogen/longrunningpb +func RegisterOperationsServer(s *grpc.Server, srv OperationsServer) { + src.RegisterOperationsServer(s, srv) +} diff --git a/vendor/google.golang.org/genproto/googleapis/pubsub/v1/alias.go b/vendor/google.golang.org/genproto/googleapis/pubsub/v1/alias.go new file mode 100644 index 000000000000..0fc2391b85a0 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/pubsub/v1/alias.go @@ -0,0 +1,503 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by aliasgen. DO NOT EDIT. + +// Package pubsub aliases all exported identifiers in package +// "cloud.google.com/go/pubsub/apiv1/pubsubpb". +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb. +// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md +// for more details. 
+package pubsub + +import ( + src "cloud.google.com/go/pubsub/apiv1/pubsubpb" + grpc "google.golang.org/grpc" +) + +// Deprecated: Please use consts in: cloud.google.com/go/pubsub/apiv1/pubsubpb +const ( + BigQueryConfig_ACTIVE = src.BigQueryConfig_ACTIVE + BigQueryConfig_NOT_FOUND = src.BigQueryConfig_NOT_FOUND + BigQueryConfig_PERMISSION_DENIED = src.BigQueryConfig_PERMISSION_DENIED + BigQueryConfig_SCHEMA_MISMATCH = src.BigQueryConfig_SCHEMA_MISMATCH + BigQueryConfig_STATE_UNSPECIFIED = src.BigQueryConfig_STATE_UNSPECIFIED + Encoding_BINARY = src.Encoding_BINARY + Encoding_ENCODING_UNSPECIFIED = src.Encoding_ENCODING_UNSPECIFIED + Encoding_JSON = src.Encoding_JSON + SchemaView_BASIC = src.SchemaView_BASIC + SchemaView_FULL = src.SchemaView_FULL + SchemaView_SCHEMA_VIEW_UNSPECIFIED = src.SchemaView_SCHEMA_VIEW_UNSPECIFIED + Schema_AVRO = src.Schema_AVRO + Schema_PROTOCOL_BUFFER = src.Schema_PROTOCOL_BUFFER + Schema_TYPE_UNSPECIFIED = src.Schema_TYPE_UNSPECIFIED + Subscription_ACTIVE = src.Subscription_ACTIVE + Subscription_RESOURCE_ERROR = src.Subscription_RESOURCE_ERROR + Subscription_STATE_UNSPECIFIED = src.Subscription_STATE_UNSPECIFIED +) + +// Deprecated: Please use vars in: cloud.google.com/go/pubsub/apiv1/pubsubpb +var ( + BigQueryConfig_State_name = src.BigQueryConfig_State_name + BigQueryConfig_State_value = src.BigQueryConfig_State_value + Encoding_name = src.Encoding_name + Encoding_value = src.Encoding_value + File_google_pubsub_v1_pubsub_proto = src.File_google_pubsub_v1_pubsub_proto + File_google_pubsub_v1_schema_proto = src.File_google_pubsub_v1_schema_proto + SchemaView_name = src.SchemaView_name + SchemaView_value = src.SchemaView_value + Schema_Type_name = src.Schema_Type_name + Schema_Type_value = src.Schema_Type_value + Subscription_State_name = src.Subscription_State_name + Subscription_State_value = src.Subscription_State_value +) + +// Request for the Acknowledge method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type AcknowledgeRequest = src.AcknowledgeRequest + +// Configuration for a BigQuery subscription. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type BigQueryConfig = src.BigQueryConfig + +// Possible states for a BigQuery subscription. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type BigQueryConfig_State = src.BigQueryConfig_State + +// Request for the CreateSchema method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type CreateSchemaRequest = src.CreateSchemaRequest + +// Request for the `CreateSnapshot` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type CreateSnapshotRequest = src.CreateSnapshotRequest + +// Dead lettering is done on a best effort basis. The same message might be +// dead lettered multiple times. If validation on any of the fields fails at +// subscription creation/updation, the create/update subscription request will +// fail. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DeadLetterPolicy = src.DeadLetterPolicy + +// Request for the `DeleteSchema` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DeleteSchemaRequest = src.DeleteSchemaRequest + +// Request for the `DeleteSnapshot` method. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DeleteSnapshotRequest = src.DeleteSnapshotRequest + +// Request for the DeleteSubscription method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DeleteSubscriptionRequest = src.DeleteSubscriptionRequest + +// Request for the `DeleteTopic` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DeleteTopicRequest = src.DeleteTopicRequest + +// Request for the DetachSubscription method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DetachSubscriptionRequest = src.DetachSubscriptionRequest + +// Response for the DetachSubscription method. Reserved for future use. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type DetachSubscriptionResponse = src.DetachSubscriptionResponse + +// Possible encoding types for messages. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Encoding = src.Encoding + +// A policy that specifies the conditions for resource expiration (i.e., +// automatic resource deletion). +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ExpirationPolicy = src.ExpirationPolicy + +// Request for the GetSchema method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type GetSchemaRequest = src.GetSchemaRequest + +// Request for the GetSnapshot method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type GetSnapshotRequest = src.GetSnapshotRequest + +// Request for the GetSubscription method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type GetSubscriptionRequest = src.GetSubscriptionRequest + +// Request for the GetTopic method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type GetTopicRequest = src.GetTopicRequest + +// Request for the `ListSchemas` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListSchemasRequest = src.ListSchemasRequest + +// Response for the `ListSchemas` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListSchemasResponse = src.ListSchemasResponse + +// Request for the `ListSnapshots` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListSnapshotsRequest = src.ListSnapshotsRequest + +// Response for the `ListSnapshots` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListSnapshotsResponse = src.ListSnapshotsResponse + +// Request for the `ListSubscriptions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListSubscriptionsRequest = src.ListSubscriptionsRequest + +// Response for the `ListSubscriptions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListSubscriptionsResponse = src.ListSubscriptionsResponse + +// Request for the `ListTopicSnapshots` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListTopicSnapshotsRequest = src.ListTopicSnapshotsRequest + +// Response for the `ListTopicSnapshots` method. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListTopicSnapshotsResponse = src.ListTopicSnapshotsResponse + +// Request for the `ListTopicSubscriptions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListTopicSubscriptionsRequest = src.ListTopicSubscriptionsRequest + +// Response for the `ListTopicSubscriptions` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListTopicSubscriptionsResponse = src.ListTopicSubscriptionsResponse + +// Request for the `ListTopics` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListTopicsRequest = src.ListTopicsRequest + +// Response for the `ListTopics` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ListTopicsResponse = src.ListTopicsResponse + +// A policy constraining the storage of messages published to the topic. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type MessageStoragePolicy = src.MessageStoragePolicy + +// Request for the ModifyAckDeadline method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ModifyAckDeadlineRequest = src.ModifyAckDeadlineRequest + +// Request for the ModifyPushConfig method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ModifyPushConfigRequest = src.ModifyPushConfigRequest + +// Request for the Publish method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PublishRequest = src.PublishRequest + +// Response for the `Publish` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PublishResponse = src.PublishResponse + +// PublisherClient is the client API for Publisher service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PublisherClient = src.PublisherClient + +// PublisherServer is the server API for Publisher service. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PublisherServer = src.PublisherServer + +// A message that is published by publishers and consumed by subscribers. The +// message must contain either a non-empty data field or at least one +// attribute. Note that client libraries represent this object differently +// depending on the language. See the corresponding [client library +// documentation](https://cloud.google.com/pubsub/docs/reference/libraries) for +// more information. See [quotas and limits] +// (https://cloud.google.com/pubsub/quotas) for more information about message +// limits. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PubsubMessage = src.PubsubMessage + +// Request for the `Pull` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PullRequest = src.PullRequest + +// Response for the `Pull` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PullResponse = src.PullResponse + +// Configuration for a push delivery endpoint. 
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PushConfig = src.PushConfig + +// Contains information needed for generating an [OpenID Connect +// token](https://developers.google.com/identity/protocols/OpenIDConnect). +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type PushConfig_OidcToken = src.PushConfig_OidcToken +type PushConfig_OidcToken_ = src.PushConfig_OidcToken_ + +// A message and its corresponding acknowledgment ID. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ReceivedMessage = src.ReceivedMessage + +// A policy that specifies how Cloud Pub/Sub retries message delivery. Retry +// delay will be exponential based on provided minimum and maximum backoffs. +// https://en.wikipedia.org/wiki/Exponential_backoff. RetryPolicy will be +// triggered on NACKs or acknowledgement deadline exceeded events for a given +// message. Retry Policy is implemented on a best effort basis. At times, the +// delay between consecutive deliveries may not match the configuration. That +// is, delay can be more or less than configured backoff. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type RetryPolicy = src.RetryPolicy + +// A schema resource. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Schema = src.Schema + +// SchemaServiceClient is the client API for SchemaService service. For +// semantics around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SchemaServiceClient = src.SchemaServiceClient + +// SchemaServiceServer is the server API for SchemaService service. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SchemaServiceServer = src.SchemaServiceServer + +// Settings for validating messages published against a schema. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SchemaSettings = src.SchemaSettings + +// View of Schema object fields to be returned by GetSchema and ListSchemas. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SchemaView = src.SchemaView + +// Possible schema definition types. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Schema_Type = src.Schema_Type + +// Request for the `Seek` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SeekRequest = src.SeekRequest +type SeekRequest_Snapshot = src.SeekRequest_Snapshot +type SeekRequest_Time = src.SeekRequest_Time + +// Response for the `Seek` method (this response is empty). +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SeekResponse = src.SeekResponse + +// A snapshot resource. Snapshots are used in +// [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, +// which allow you to manage message acknowledgments in bulk. That is, you can +// set the acknowledgment state of messages in an existing subscription to the +// state captured by a snapshot. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Snapshot = src.Snapshot + +// Request for the `StreamingPull` streaming RPC method. 
This request is used +// to establish the initial stream as well as to stream acknowledgements and +// ack deadline modifications from the client to the server. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type StreamingPullRequest = src.StreamingPullRequest + +// Response for the `StreamingPull` method. This response is used to stream +// messages from the server to the client. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type StreamingPullResponse = src.StreamingPullResponse + +// Acknowledgement IDs sent in one or more previous requests to acknowledge a +// previously received message. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type StreamingPullResponse_AcknowledgeConfirmation = src.StreamingPullResponse_AcknowledgeConfirmation + +// Acknowledgement IDs sent in one or more previous requests to modify the +// deadline for a specific message. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type StreamingPullResponse_ModifyAckDeadlineConfirmation = src.StreamingPullResponse_ModifyAckDeadlineConfirmation + +// Subscription properties sent as part of the response. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type StreamingPullResponse_SubscriptionProperties = src.StreamingPullResponse_SubscriptionProperties + +// SubscriberClient is the client API for Subscriber service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SubscriberClient = src.SubscriberClient + +// SubscriberServer is the server API for Subscriber service. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type SubscriberServer = src.SubscriberServer +type Subscriber_StreamingPullClient = src.Subscriber_StreamingPullClient +type Subscriber_StreamingPullServer = src.Subscriber_StreamingPullServer + +// A subscription resource. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Subscription = src.Subscription + +// Possible states for a subscription. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Subscription_State = src.Subscription_State + +// A topic resource. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type Topic = src.Topic + +// UnimplementedPublisherServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type UnimplementedPublisherServer = src.UnimplementedPublisherServer + +// UnimplementedSchemaServiceServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type UnimplementedSchemaServiceServer = src.UnimplementedSchemaServiceServer + +// UnimplementedSubscriberServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type UnimplementedSubscriberServer = src.UnimplementedSubscriberServer + +// Request for the UpdateSnapshot method. 
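The `RetryPolicy` and `DeadLetterPolicy` comments above describe bounded exponential redelivery backoff and best-effort dead lettering. As a rough illustration, the sketch below builds a `Subscription` message with both policies using the replacement `pubsubpb` package this alias file points to; the field names are assumed from the Pub/Sub v1 proto and the resource names are placeholders.

```go
package example

import (
	"time"

	pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

// exampleSubscription builds a Subscription whose redelivery backoff is
// bounded between 10s and 10m and whose messages are forwarded to a
// dead-letter topic after five delivery attempts (both best effort, per the
// comments above).
func exampleSubscription() *pubsubpb.Subscription {
	return &pubsubpb.Subscription{
		Name:  "projects/my-project/subscriptions/my-sub",
		Topic: "projects/my-project/topics/my-topic",
		RetryPolicy: &pubsubpb.RetryPolicy{
			MinimumBackoff: durationpb.New(10 * time.Second),
			MaximumBackoff: durationpb.New(10 * time.Minute),
		},
		DeadLetterPolicy: &pubsubpb.DeadLetterPolicy{
			DeadLetterTopic:     "projects/my-project/topics/my-dead-letter",
			MaxDeliveryAttempts: 5,
		},
	}
}
```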
+// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type UpdateSnapshotRequest = src.UpdateSnapshotRequest + +// Request for the UpdateSubscription method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type UpdateSubscriptionRequest = src.UpdateSubscriptionRequest + +// Request for the UpdateTopic method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type UpdateTopicRequest = src.UpdateTopicRequest + +// Request for the `ValidateMessage` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ValidateMessageRequest = src.ValidateMessageRequest +type ValidateMessageRequest_Name = src.ValidateMessageRequest_Name +type ValidateMessageRequest_Schema = src.ValidateMessageRequest_Schema + +// Response for the `ValidateMessage` method. Empty for now. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ValidateMessageResponse = src.ValidateMessageResponse + +// Request for the `ValidateSchema` method. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ValidateSchemaRequest = src.ValidateSchemaRequest + +// Response for the `ValidateSchema` method. Empty for now. +// +// Deprecated: Please use types in: cloud.google.com/go/pubsub/apiv1/pubsubpb +type ValidateSchemaResponse = src.ValidateSchemaResponse + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/apiv1/pubsubpb +func NewPublisherClient(cc grpc.ClientConnInterface) PublisherClient { + return src.NewPublisherClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/apiv1/pubsubpb +func NewSchemaServiceClient(cc grpc.ClientConnInterface) SchemaServiceClient { + return src.NewSchemaServiceClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/apiv1/pubsubpb +func NewSubscriberClient(cc grpc.ClientConnInterface) SubscriberClient { + return src.NewSubscriberClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/apiv1/pubsubpb +func RegisterPublisherServer(s *grpc.Server, srv PublisherServer) { + src.RegisterPublisherServer(s, srv) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/apiv1/pubsubpb +func RegisterSchemaServiceServer(s *grpc.Server, srv SchemaServiceServer) { + src.RegisterSchemaServiceServer(s, srv) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/pubsub/apiv1/pubsubpb +func RegisterSubscriberServer(s *grpc.Server, srv SubscriberServer) { + src.RegisterSubscriberServer(s, srv) +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index 1258803152f3..3a47b902c9a9 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -37,7 +37,6 @@ const ( // The canonical error codes for gRPC APIs. // -// // Sometimes multiple error codes may apply. Services should return // the most specific error code that applies. For example, prefer // `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. 
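The two new `alias.go` files above exist only so that code importing the old genproto paths keeps compiling; the linked migration guide recommends moving to the canonical packages. A minimal sketch of what that migration looks like, assuming only the import paths change (the alias files state that every exported identifier is a pure alias, so type names are unchanged):

```go
package main

import (
	"fmt"

	// These packages replace the deprecated genproto aliases added above:
	//   google.golang.org/genproto/googleapis/longrunning -> longrunningpb
	//   google.golang.org/genproto/googleapis/pubsub/v1   -> pubsubpb
	longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
	pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
)

func main() {
	// Because the old packages are aliases, a value of one of these types is
	// the same type the deprecated package exposes; existing call sites keep
	// working once the import path is updated.
	t := &pubsubpb.Topic{Name: "projects/my-project/topics/my-topic"}
	op := &longrunningpb.Operation{Name: "operations/example", Done: true}
	fmt.Println(t.GetName(), op.GetDone())
}
```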
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 1c7b93ec160b..2f3ab9249482 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -219,25 +219,25 @@ func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { // Example of an error when contacting the "pubsub.googleapis.com" API when it // is not enabled: // -// { "reason": "API_DISABLED" -// "domain": "googleapis.com" -// "metadata": { -// "resource": "projects/123", -// "service": "pubsub.googleapis.com" -// } -// } +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } // // This response indicates that the pubsub.googleapis.com API is not enabled. // // Example of an error that is returned when attempting to create a Spanner // instance in a region that is out of stock: // -// { "reason": "STOCKOUT" -// "domain": "spanner.googleapis.com", -// "metadata": { -// "availableRegions": "us-central1,us-east2" -// } -// } +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } type ErrorInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go new file mode 100644 index 000000000000..72afd8b000e5 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go @@ -0,0 +1,200 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/type/date.proto + +package date + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Represents a whole or partial calendar date, such as a birthday. The time of +// day and time zone are either specified elsewhere or are insignificant. The +// date is relative to the Gregorian Calendar. 
This can represent one of the +// following: +// +// * A full date, with non-zero year, month, and day values +// * A month and day value, with a zero year, such as an anniversary +// * A year on its own, with zero month and day values +// * A year and month value, with a zero day, such as a credit card expiration +// date +// +// Related types are [google.type.TimeOfDay][google.type.TimeOfDay] and +// `google.protobuf.Timestamp`. +type Date struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Year of the date. Must be from 1 to 9999, or 0 to specify a date without + // a year. + Year int32 `protobuf:"varint,1,opt,name=year,proto3" json:"year,omitempty"` + // Month of a year. Must be from 1 to 12, or 0 to specify a year without a + // month and day. + Month int32 `protobuf:"varint,2,opt,name=month,proto3" json:"month,omitempty"` + // Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 + // to specify a year by itself or a year and month where the day isn't + // significant. + Day int32 `protobuf:"varint,3,opt,name=day,proto3" json:"day,omitempty"` +} + +func (x *Date) Reset() { + *x = Date{} + if protoimpl.UnsafeEnabled { + mi := &file_google_type_date_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Date) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Date) ProtoMessage() {} + +func (x *Date) ProtoReflect() protoreflect.Message { + mi := &file_google_type_date_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Date.ProtoReflect.Descriptor instead. 
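The `Date` comment above lists the full and partial forms the message can carry (zero year, month, or day meaning "unspecified"). A small illustrative sketch using the vendored package path added in this hunk; the concrete dates are placeholders:

```go
package main

import (
	"fmt"

	datepb "google.golang.org/genproto/googleapis/type/date"
)

func main() {
	full := &datepb.Date{Year: 2023, Month: 2, Day: 13} // complete calendar date
	anniversary := &datepb.Date{Month: 4, Day: 1}       // recurring date: zero year
	cardExpiry := &datepb.Date{Year: 2025, Month: 9}    // year and month only: zero day

	fmt.Println(full, anniversary, cardExpiry)
}
```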
+func (*Date) Descriptor() ([]byte, []int) { + return file_google_type_date_proto_rawDescGZIP(), []int{0} +} + +func (x *Date) GetYear() int32 { + if x != nil { + return x.Year + } + return 0 +} + +func (x *Date) GetMonth() int32 { + if x != nil { + return x.Month + } + return 0 +} + +func (x *Date) GetDay() int32 { + if x != nil { + return x.Day + } + return 0 +} + +var File_google_type_date_proto protoreflect.FileDescriptor + +var file_google_type_date_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x22, 0x42, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x79, 0x65, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x79, 0x65, 0x61, + 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x61, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x64, 0x61, 0x79, 0x42, 0x5d, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x42, 0x09, 0x44, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3b, 0x64, 0x61, 0x74, 0x65, 0xf8, + 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_type_date_proto_rawDescOnce sync.Once + file_google_type_date_proto_rawDescData = file_google_type_date_proto_rawDesc +) + +func file_google_type_date_proto_rawDescGZIP() []byte { + file_google_type_date_proto_rawDescOnce.Do(func() { + file_google_type_date_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_date_proto_rawDescData) + }) + return file_google_type_date_proto_rawDescData +} + +var file_google_type_date_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_type_date_proto_goTypes = []interface{}{ + (*Date)(nil), // 0: google.type.Date +} +var file_google_type_date_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_type_date_proto_init() } +func file_google_type_date_proto_init() { + if File_google_type_date_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_type_date_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Date); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_type_date_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_type_date_proto_goTypes, + DependencyIndexes: file_google_type_date_proto_depIdxs, + MessageInfos: file_google_type_date_proto_msgTypes, + }.Build() + 
File_google_type_date_proto = out.File + file_google_type_date_proto_rawDesc = nil + file_google_type_date_proto_goTypes = nil + file_google_type_date_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go index 2857e26eb386..38ef56f73cad 100644 --- a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go @@ -41,27 +41,27 @@ const ( // // Example (Comparison): // -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" +// title: "Summary size limit" +// description: "Determines if a summary is less than 100 chars" +// expression: "document.summary.size() < 100" // // Example (Equality): // -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == request.auth.claims.email" +// title: "Requestor is owner" +// description: "Determines if requestor is the document owner" +// expression: "document.owner == request.auth.claims.email" // // Example (Logic): // -// title: "Public documents" -// description: "Determine whether the document should be publicly visible" -// expression: "document.type != 'private' && document.type != 'internal'" +// title: "Public documents" +// description: "Determine whether the document should be publicly visible" +// expression: "document.type != 'private' && document.type != 'internal'" // // Example (Data Manipulation): // -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + string(document.create_time)" +// title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + string(document.create_time)" // // The exact variables and functions that may be referenced within an expression // are determined by the service that evaluates it. See the service diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index cd03f8c76888..52338d004ce3 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -53,9 +53,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index ae13ddac14e0..02f5dc531891 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -19,7 +19,7 @@ // Package attributes defines a generic key/value store used in various gRPC // components. 
// -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index 542594f5cc51..29475e31c979 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -48,7 +48,7 @@ type BackoffConfig struct { // here for more details: // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index bcc6f5451c90..392b21fb2d8e 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -109,6 +110,11 @@ type SubConn interface { UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) } // NewSubConnOptions contains options to create new SubConn. @@ -192,7 +198,7 @@ type BuildOptions struct { // server can ignore this field. Authority string // ChannelzParentID is the parent ClientConn's channelz ID. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. @@ -243,7 +249,7 @@ type DoneInfo struct { // ServerLoad is the load received from server. It's usually sent as part of // trailing metadata. // - // The only supported type now is *orca_v1.LoadReport. + // The only supported type now is *orca_v3.LoadReport. ServerLoad interface{} } @@ -371,55 +377,20 @@ type ClientConnState struct { // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") -// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns -// and returns one aggregated connectivity state. -// -// It's not thread safe. -type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transient failure state. - numIdle uint64 // Number of addrConns in idle state. +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as interface{} to avoid a + // dependency cycle. Should also return a close function that will be + // called when all references to the Producer have been given up. 
+ Build(grpcClientConnInterface interface{}) (p Producer, close func()) } -// RecordTransition records state change happening in subConn and based on that -// it evaluates what aggregated state should be. -// -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure; -// - Else if at least one SubConn is Idle, the aggregated state is Idle; -// - Else there are no subconns and the aggregated state is Transient Failure -// -// Shutdown is not considered. -func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal - case connectivity.Idle: - cse.numIdle += updateVal - } - } - - // Evaluate. - if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - if cse.numTransientFailure > 0 { - return connectivity.TransientFailure - } - if cse.numIdle > 0 { - return connectivity.Idle - } - return connectivity.TransientFailure +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer interface { } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a67074a3ad06..3929c26d31e1 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -45,6 +45,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, + state: connectivity.Connecting, } // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we @@ -134,6 +135,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) return nil } @@ -153,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error { // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is -// - errPicker if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. 
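Because Producer is an empty interface and ProducerBuilder has a single Build method, implementing the new contract is mostly boilerplate. A hedged sketch of a do-nothing producer, using hypothetical names (noopProducer, noopBuilder) and no real background work:

package example

import "google.golang.org/grpc/balancer"

// noopProducer satisfies balancer.Producer (an empty interface); a real
// implementation would expose methods for configuration or subscriptions.
type noopProducer struct{}

// noopBuilder satisfies balancer.ProducerBuilder.
type noopBuilder struct{}

// Build receives the SubConn's ClientConnInterface (typed as interface{} to
// avoid a dependency cycle) and returns the producer together with a close
// function that runs once every reference has been released.
func (noopBuilder) Build(grpcClientConnInterface interface{}) (balancer.Producer, func()) {
	p := &noopProducer{}
	stop := func() {
		// Stop any background work started in Build here.
	}
	return p, stop
}

A balancer would then call sc.GetOrBuildProducer(noopBuilder{}) on a balancer.SubConn and invoke the returned close function once it no longer needs the producer.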
func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { b.picker = NewErrPicker(b.mergeErrors()) diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go new file mode 100644 index 000000000000..c33413581091 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + return cse.CurrentState() +} + +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index c393d7ffd3b2..1205aff23f79 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,14 +19,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
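The aggregation rules in the new conn_state_evaluator.go can be exercised directly, since the type is exported from the balancer package. A small sketch, assuming transitions for a single SubConn whose previous state starts at Shutdown (which the counters ignore):

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}

	// A freshly created SubConn starts connecting; Shutdown is not counted,
	// so only the Connecting counter is incremented.
	fmt.Println(cse.RecordTransition(connectivity.Shutdown, connectivity.Connecting)) // CONNECTING

	// The same SubConn becomes Ready; Ready dominates the aggregate state.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.Ready)) // READY

	fmt.Println(cse.CurrentState()) // READY
}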
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -42,16 +41,13 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type LoadBalanceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to LoadBalanceRequestType: + // // *LoadBalanceRequest_InitialRequest // *LoadBalanceRequest_ClientStats LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` @@ -340,6 +336,7 @@ type LoadBalanceResponse struct { unknownFields protoimpl.UnknownFields // Types that are assignable to LoadBalanceResponseType: + // // *LoadBalanceResponse_InitialResponse // *LoadBalanceResponse_ServerList // *LoadBalanceResponse_FallbackResponse diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index cb4b3c203c51..cf1034830d58 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -1,3 +1,22 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines the GRPCLB LoadBalancing protocol. +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/lb/v1/load_balancer.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index fe423af182a4..dd15810d0aeb 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -19,7 +19,8 @@ // Package grpclb defines a grpclb balancer. // // To install grpclb balancer, import this package as: -// import _ "google.golang.org/grpc/balancer/grpclb" +// +// import _ "google.golang.org/grpc/balancer/grpclb" package grpclb import ( @@ -229,8 +230,9 @@ type lbBalancer struct { // regeneratePicker takes a snapshot of the balancer, and generates a picker from // it. The picker -// - always returns ErrTransientFailure if the balancer is in TransientFailure, -// - does two layer roundrobin pick otherwise. +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - does two layer roundrobin pick otherwise. 
+// // Caller must hold lb.mu. func (lb *lbBalancer) regeneratePicker(resetDrop bool) { if lb.state == connectivity.TransientFailure { @@ -290,14 +292,14 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) { // fallback and grpclb). lb.scState contains states for all SubConns, including // those in cache (SubConns are cached for 10 seconds after remove). // -// The aggregated state is: -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting; -// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE, -// they start to connect immediately. But there's a race between the overall -// state is reported, and when the new SubConn state arrives. And SubConns -// never go back to IDLE. -// - Else the aggregated state is TransientFailure. +// The aggregated state is: +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting; +// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE, +// they start to connect immediately. But there's a race between the overall +// state is reported, and when the new SubConn state arrives. And SubConns +// never go back to IDLE. +// - Else the aggregated state is TransientFailure. func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { var numConnecting uint64 @@ -413,8 +415,8 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { // this target is sent in the first message on the stream. if gc != nil { target := lb.dialTarget - if gc.TargetName != "" { - target = gc.TargetName + if gc.ServiceName != "" { + target = gc.ServiceName } if target != lb.target { lb.target = target diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go index b4e23dee0172..8942c31310af 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -34,7 +34,7 @@ const ( type grpclbServiceConfig struct { serviceconfig.LoadBalancingConfig ChildPolicy *[]map[string]json.RawMessage - TargetName string + ServiceName string } func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index 330df4baa218..dab1959418e1 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -33,8 +33,8 @@ import ( "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/channelz" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -228,7 +228,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { } else if bundle := lb.grpclbClientConnCreds; bundle != nil { dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) } else { - dopts = append(dopts, grpc.WithInsecure()) + dopts = append(dopts, grpc.WithTransportCredentials(insecure.NewCredentials())) } if lb.opt.Dialer != nil { dopts = append(dopts, 
grpc.WithContextDialer(lb.opt.Dialer)) @@ -239,9 +239,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // Explicitly set pickfirst as the balancer. dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) - if channelz.IsOn() { - dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) - } + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) // Enable Keepalive for grpclb client. dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 274eb2f85802..f7031ad2251b 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: grpcrand.Intn(len(scs)), + next: uint32(grpcrand.Intn(len(scs))), } } @@ -69,15 +69,13 @@ type rrPicker struct { // created. The slice is immutable. Each Get() will do a round robin // selection from it and return the selected SubConn. subConns []balancer.SubConn - - mu sync.Mutex - next int + next uint32 } func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - sc := p.subConns[p.next] - p.next = (p.next + 1) % len(p.subConns) - p.mu.Unlock() + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] return balancer.PickResult{SubConn: sc}, nil } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index f4ea61746823..0359956d36fa 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -19,131 +19,182 @@ package grpc import ( + "context" "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} - -// exitIdle contains no data and is just a signal sent on the updateCh in -// ccBalancerWrapper to instruct the balancer to exit idle. -type exitIdle struct{} - -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. 
+// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - hasExitIdle bool - updateCh *buffer.Unbounded - closed *grpcsync.Event - done *grpcsync.Event + cc *ClientConn + + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. + balancer *gracefulswitch.Balancer + curBalancerName string - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. + closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. } -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) - _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler) + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. 
func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.updateCh.Get(): + case u := <-ccb.updateCh.Get(): ccb.updateCh.Load() if ccb.closed.HasFired() { break } - switch u := t.(type) { + switch update := u.(type) { + case *ccStateUpdate: + ccb.handleClientConnStateChange(update.ccs) case *scStateUpdate: - ccb.balancerMu.Lock() - ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) - ccb.balancerMu.Unlock() - case *acBalancerWrapper: - ccb.mu.Lock() - if ccb.subConns != nil { - delete(ccb.subConns, u) - ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) - } - ccb.mu.Unlock() - case exitIdle: - if ccb.cc.GetState() == connectivity.Idle { - if ei, ok := ccb.balancer.(balancer.ExitIdler); ok { - // We already checked that the balancer implements - // ExitIdle before pushing the event to updateCh, but - // check conditionally again as defensive programming. - ccb.balancerMu.Lock() - ei.ExitIdle() - ccb.balancerMu.Unlock() - } - } + ccb.handleSubConnStateChange(update) + case *exitIdleUpdate: + ccb.handleExitIdle() + case *resolverErrorUpdate: + ccb.handleResolverError(update.err) + case *switchToUpdate: + ccb.handleSwitchTo(update.name) + case *subConnUpdate: + ccb.handleRemoveSubConn(update.acbw) default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) } case <-ccb.closed.Done(): } if ccb.closed.HasFired() { - ccb.balancerMu.Lock() - ccb.balancer.Close() - ccb.balancerMu.Unlock() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) - ccb.done.Fire() - // Fire done before removing the addr conns. We can safely unblock - // ccb.close and allow the removeAddrConns to happen - // asynchronously. - for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } + ccb.handleClose() return } } } -func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +// +// Unlike other methods invoked by grpc to push updates to the underlying +// balancer, this method cannot simply push the update onto the update channel +// and return. It needs to return the error returned by the underlying balancer +// back to grpc which propagates that to the resolver. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) + + var res interface{} + select { + case res = <-ccb.resultCh.Get(): + ccb.resultCh.Load() + case <-ccb.closed.Done(): + // Return early if the balancer wrapper is closed while we are waiting for + // the underlying balancer to process a ClientConnState update. + return nil + } + // If the returned error is nil, attempting to type assert to error leads to + // panic. So, this needs to handled separately. + if res == nil { + return nil + } + return res.(error) } -func (ccb *ccBalancerWrapper) exitIdle() bool { - if !ccb.hasExitIdle { - return false +// handleClientConnStateChange handles a ClientConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. 
+// +// If the addresses specified in the update contain addresses of type "grpclb" +// and the selected LB policy is not "grpclb", these addresses will be filtered +// out and ccs will be modified with the updated address list. +func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { + if ccb.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) + } + ccs.ResolverState.Addresses = addrs } - ccb.updateCh.Put(exitIdle{}) - return true + ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) } -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be // created. tearDown() generates a state change with Shutdown state, we @@ -161,44 +212,125 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co }) } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) +// handleSubConnStateChange handles a SubConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { + ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) +} + +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.updateCh.Put(&exitIdleUpdate{}) +} + +func (ccb *ccBalancerWrapper) handleExitIdle() { + if ccb.cc.GetState() != connectivity.Idle { + return + } + ccb.balancer.ExitIdle() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() + ccb.updateCh.Put(&resolverErrorUpdate{err: err}) +} + +func (ccb *ccBalancerWrapper) handleResolverError(err error) { ccb.balancer.ResolverError(err) } +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.updateCh.Put(&switchToUpdate{name: name}) +} + +// handleSwitchTo handles a balancer switch update from the update channel. It +// calls the SwitchTo() method on the gracefulswitch.Balancer with a +// balancer.Builder corresponding to name. If no balancer.Builder is registered +// for the given name, it uses the default LB policy which is "pick_first". 
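From an application's point of view, the name eventually handed to switchTo() normally comes from the service config chosen by the resolver or supplied at dial time. A hedged sketch of selecting a policy through a default service config (grpc.WithDefaultServiceConfig is the same option the grpclb code above uses for pick_first); the target address is illustrative, and the blank import is included only in case round_robin is not already registered:

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers the round_robin policy
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}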
+func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { + // TODO: Other languages use case-insensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + + // TODO: Ensure that name is a registered LB policy when we get here. + // We currently only validate the `loadBalancingConfig` field. We need to do + // the same for the `loadBalancingPolicy` field and reject the service config + // if the specified policy is not registered. + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +// handleRemoveSucConn handles a request from the underlying balancer to remove +// a subConn. +// +// See comments in RemoveSubConn() for more details. +func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) close() { + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) handleClose() { + ccb.balancer.Close() + ccb.done.Fire() +} + func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { if len(addrs) <= 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") - } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac} + acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock - // during switchBalancer() if the old balancer calls RemoveSubConn() in its - // Close(). - ccb.updateCh.Put(sc) + // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it + // was required to handle the RemoveSubConn() method asynchronously by pushing + // the update onto the update channel. This was done to avoid a deadlock as + // switchBalancer() was holding cc.mu when calling Close() on the old + // balancer, which would in turn call RemoveSubConn(). + // + // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this + // asynchronously is probably not required anymore since the switchTo() method + // handles the balancer switch by pushing the update onto the channel. + // TODO(easwars): Handle this inline. 
+ acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -210,11 +342,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is @@ -235,8 +362,9 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn + mu sync.Mutex + ac *addrConn + producers map[balancer.ProducerBuilder]*refCountedProducer } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { @@ -290,3 +418,64 @@ func (acbw *acBalancerWrapper) getAddrConn() *addrConn { defer acbw.mu.Unlock() return acbw.ac } + +var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, returns errSubConnNotReady. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, errSubConnNotReady + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. 
+ unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ed75290cdf34..66d141fce707 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,14 +18,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. @@ -261,6 +256,7 @@ type GrpcLogEntry struct { // according to the type of the log entry. // // Types that are assignable to Payload: + // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message @@ -694,12 +690,12 @@ func (x *Message) GetData() []byte { // Header keys added by gRPC are omitted. To be more specific, // implementations will not log the following entries, and this is // not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials // // Implementations must always log grpc-trace-bin if it is present. // Practically speaking it will only be visible on server side because diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 000000000000..32b7fa5794e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 28f09dc87073..045668904519 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -79,7 +79,7 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") @@ -146,6 +146,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + for _, opt := range extraDialOptions { + opt.apply(&cc.dopts) + } + for _, opt := range opts { opt.apply(&cc.dopts) } @@ -159,23 +163,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") + pid := cc.dopts.channelzParentID + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, } - cc.csMgr.channelzID = cc.channelzID } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { return nil, errNoTransportSecurity @@ -281,7 +282,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if 
creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = balancer.BuildOptions{ + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, @@ -289,7 +290,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, - } + }) // Build the resolver. rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) @@ -398,7 +399,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -464,34 +465,36 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. It also handles errors on established connections by // re-resolving the name and reconnecting. type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error @@ -500,7 +503,7 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. 
// -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -519,7 +522,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // GetState returns the connectivity.State of ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. @@ -531,19 +534,12 @@ func (cc *ClientConn) GetState() connectivity.State { // the channel is idle. Does not wait for the connection attempts to begin // before returning. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() { - return - } - for ac := range cc.conns { - go ac.connect() - } + cc.balancerWrapper.exitIdle() } func (cc *ClientConn) scWatcher() { @@ -623,9 +619,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. cc.mu.Unlock() @@ -653,16 +647,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) - } - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -670,24 +658,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -696,56 +672,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. 
+// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. // // Caller must hold cc.mu. -func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - // Don't hold cc.mu while closing the balancers. The balancers may call - // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex - // would cause a deadlock in that case. - cc.mu.Unlock() - cc.balancerWrapper.close() - cc.mu.Lock() - } - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. @@ -764,23 +712,26 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. 
cc.mu.Lock() + defer cc.mu.Unlock() if cc.conns == nil { - cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} - cc.mu.Unlock() return ac, nil } @@ -810,7 +761,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { // Target returns the target string of the ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -837,10 +788,16 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } @@ -853,21 +810,36 @@ func (ac *addrConn) connect() error { return nil } +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + // tryUpdateAddrs tries to update ac.addrs with the new addresses list. // -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// // If ac is TransientFailure, it updates ac.addrs and returns true. The updated // addresses will be picked up by retry in the next iteration after backoff. // // If ac is Shutdown or Idle, it updates ac.addrs and returns true. // +// If the addresses is the same as the old list, it does nothing and returns +// true. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. 
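The new equalAddresses helper and the reordered tryUpdateAddrs documentation above describe an ordered, element-wise comparison of resolver address lists. A minimal, self-contained sketch of that comparison (the helper is reused verbatim from the hunk; the literal addresses are made up):

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// equalAddresses mirrors the helper added to clientconn.go: two lists are
// equal only if they have the same length and equal entries in the same order.
func equalAddresses(a, b []resolver.Address) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if !v.Equal(b[i]) {
			return false
		}
	}
	return true
}

func main() {
	a := []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}
	b := []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}
	fmt.Println(equalAddresses(a, b))                              // true
	fmt.Println(equalAddresses(a, b[:1]))                          // false: lengths differ
	fmt.Println(equalAddresses(a, []resolver.Address{b[1], b[0]})) // false: order matters
}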
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() @@ -879,6 +851,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return true } + if equalAddresses(ac.addrs, addrs) { + return true + } + if ac.state == connectivity.Connecting { return false } @@ -959,14 +935,10 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil } func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { @@ -991,35 +963,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel cc.retryThrottler.Store((*retryThrottler)(nil)) } - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. - var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } + cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1041,7 +1004,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { // However, if a previously unavailable network becomes available, this may be // used to trigger an immediate reconnect. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1070,11 +1033,11 @@ func (cc *ClientConn) Close() error { rWrapper := cc.resolverWrapper cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil cc.mu.Unlock() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. 
cc.blockingpicker.close() - if bWrapper != nil { bWrapper.close() } @@ -1085,22 +1048,22 @@ func (cc *ClientConn) Close() error { for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1130,7 +1093,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. + channelzID *channelz.Identifier czData *channelzData } @@ -1271,38 +1234,33 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T // address was not successfully connected, or updates ac appropriately with the // new transport. func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { - // TODO: Delete prefaceReceived and move the logic to wait for it into the - // transport. - prefaceReceived := grpcsync.NewEvent() - connClosed := grpcsync.NewEvent() - addr.ServerName = ac.cc.getServerName(addr) hctx, hcancel := context.WithCancel(ac.ctx) - hcStarted := false // protected by ac.mu - onClose := func() { + onClose := grpcsync.OnceFunc(func() { ac.mu.Lock() defer ac.mu.Unlock() - defer connClosed.Fire() - if !hcStarted || hctx.Err() != nil { - // We didn't start the health check or set the state to READY, so - // no need to do anything else here. - // - // OR, we have already cancelled the health check context, meaning - // we have already called onClose once for this transport. In this - // case it would be dangerous to clear the transport and update the - // state, since there may be a new transport in this addrConn. + if ac.state == connectivity.Shutdown { + // Already shut down. tearDown() already cleared the transport and + // canceled hctx via ac.ctx, and we expected this connection to be + // closed, so do nothing here. return } hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. + return + } ac.transport = nil - // Refresh the name resolver + // Refresh the name resolver on any connection loss. 
ac.cc.resolveNow(resolver.ResolveNowOptions{}) - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - } - + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. + ac.updateConnectivityState(connectivity.Idle, nil) + }) onGoAway := func(r transport.GoAwayReason) { ac.mu.Lock() ac.adjustParams(r) @@ -1312,71 +1270,47 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err) + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) return err } - select { - case <-connectCtx.Done(): - // We didn't get the preface in time. + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. - newTr.Close(transport.ErrConnClosing) - if connectCtx.Err() == context.DeadlineExceeded { - err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) - return err - } + go newTr.Close(transport.ErrConnClosing) return nil - case <-prefaceReceived.Done(): - // We got the preface - huzzah! things are good. - ac.mu.Lock() - defer ac.mu.Unlock() - if connClosed.HasFired() { - // onClose called first; go idle but do nothing else. - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - return nil - } - if ac.state == connectivity.Shutdown { - // This can happen if the subConn was removed while in `Connecting` - // state. tearDown() would have set the state to `Shutdown`, but - // would not have closed the transport since ac.transport would not - // been set at that point. - // - // We run this in a goroutine because newTr.Close() calls onClose() - // inline, which requires locking ac.mu. - // - // The error we pass to Close() is immaterial since there are no open - // streams at this point, so no trailers with error details will be sent - // out. We just need to pass a non-nil error. - go newTr.Close(transport.ErrConnClosing) - return nil - } - ac.curAddr = addr - ac.transport = newTr - hcStarted = true - ac.startHealthCheck(hctx) // Will set state to READY if appropriate. 
+ } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) return nil - case <-connClosed.Done(): - // The transport has already closed. If we received the preface, too, - // this is not an error. - select { - case <-prefaceReceived.Done(): - return nil - default: - return errors.New("connection closed before server preface received") - } } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1497,19 +1431,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } @@ -1628,7 +1561,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget return rb, nil @@ -1649,9 +1582,9 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { return nil, err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme) + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget return rb, nil diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go index 8bc7ceee0aff..7b953a520e5b 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -158,7 +158,7 @@ type altsHandshaker struct { // stub created using the passed conn and used to talk to the ALTS Handshaker // service in the metadata server. 
func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) if err != nil { return nil, err } @@ -174,7 +174,7 @@ func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, // stub created using the passed conn and used to talk to the ALTS Handshaker // service in the metadata server. func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go index 77d759cd956f..2de2c4affdaa 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -24,6 +24,7 @@ import ( "sync" grpc "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) var ( @@ -49,7 +50,7 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { // Create a new connection to the handshaker service. Note that // this connection stays open until the application is closed. var err error - hsConn, err = hsDialer(hsAddress, grpc.WithInsecure()) + hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 703b48da753b..1a40e17e8d3f 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/gcp/altscontext.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type AltsContext struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 40570e9bf2de..50eefa53830e 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/gcp/handshaker.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type HandshakeProtocol int32 const ( @@ -216,6 +211,7 @@ type Identity struct { unknownFields protoimpl.UnknownFields // Types that are assignable to IdentityOneof: + // // *Identity_ServiceAccount // *Identity_Hostname IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` @@ -664,6 +660,7 @@ type HandshakerReq struct { unknownFields protoimpl.UnknownFields // Types that are assignable to ReqOneof: + // // *HandshakerReq_ClientStart // *HandshakerReq_ServerStart // *HandshakerReq_Next diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index fd55176b9b69..d3562c6d5e62 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/handshaker.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 4fc3c79d6a39..b07412f18585 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/gcp/transport_security_common.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - // The security level of the created channel. The list is sorted in increasing // level of security. This order must always be maintained. type SecurityLevel int32 diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 96ff1877e754..5feac3aa0e41 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -36,16 +36,16 @@ import ( // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. If a status code is returned, it will be used as the status - // for the RPC. uri is the URI of the entry point for the request. - // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. Additionally, RequestInfo data will be - // available via ctx to this call. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go index 63625a4b6803..fbdf7dc2997a 100644 --- a/vendor/google.golang.org/grpc/credentials/google/google.go +++ b/vendor/google.golang.org/grpc/credentials/google/google.go @@ -50,7 +50,7 @@ func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credential ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) defer cancel() var err error - opts.PerRPCCreds, err = oauth.NewApplicationDefault(ctx) + opts.PerRPCCreds, err = newADC(ctx) if err != nil { logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) } @@ -112,6 +112,9 @@ var ( newALTS = func() credentials.TransportCredentials { return alts.NewClientCreds(alts.DefaultClientOptions()) } + newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) { + return oauth.NewApplicationDefault(ctx) + } ) // NewWithMode should make a copy of Bundle, and switch mode. 
Modifying the diff --git a/vendor/google.golang.org/grpc/credentials/google/xds.go b/vendor/google.golang.org/grpc/credentials/google/xds.go index b8c2e8f9204c..2c5c8b9eee13 100644 --- a/vendor/google.golang.org/grpc/credentials/google/xds.go +++ b/vendor/google.golang.org/grpc/credentials/google/xds.go @@ -21,6 +21,7 @@ package google import ( "context" "net" + "net/url" "strings" "google.golang.org/grpc/credentials" @@ -28,13 +29,18 @@ import ( ) const cfeClusterNamePrefix = "google_cfe_" +const cfeClusterResourceNamePrefix = "/envoy.config.cluster.v3.Cluster/google_cfe_" +const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com" // clusterTransportCreds is a combo of TLS + ALTS. // // On the client, ClientHandshake picks TLS or ALTS based on address attributes. // - if attributes has cluster name -// - if cluster name has prefix "google_cfe_", use TLS +// - if cluster name has prefix "google_cfe_", or +// "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_", +// use TLS // - otherwise, use ALTS +// // - else, do TLS // // On the server, ServerHandshake always does TLS. @@ -50,18 +56,49 @@ func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clust } } -func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { +// clusterName returns the xDS cluster name stored in the attributes in the +// context. +func clusterName(ctx context.Context) string { chi := credentials.ClientHandshakeInfoFromContext(ctx) if chi.Attributes == nil { - return c.tls.ClientHandshake(ctx, authority, rawConn) + return "" + } + cluster, _ := internal.GetXDSHandshakeClusterName(chi.Attributes) + return cluster +} + +// isDirectPathCluster returns true if the cluster in the context is a +// directpath cluster, meaning ALTS should be used. +func isDirectPathCluster(ctx context.Context) bool { + cluster := clusterName(ctx) + if cluster == "" { + // No cluster; not xDS; use TLS. + return false + } + if strings.HasPrefix(cluster, cfeClusterNamePrefix) { + // xDS cluster prefixed by "google_cfe_"; use TLS. + return false } - cn, ok := internal.GetXDSHandshakeClusterName(chi.Attributes) - if !ok || strings.HasPrefix(cn, cfeClusterNamePrefix) { - return c.tls.ClientHandshake(ctx, authority, rawConn) + if !strings.HasPrefix(cluster, "xdstp:") { + // Other xDS cluster name; use ALTS. + return true + } + u, err := url.Parse(cluster) + if err != nil { + // Shouldn't happen, but assume ALTS. + return true + } + // If authority AND path match our CFE checks, use TLS; otherwise use ALTS. + return u.Host != cfeClusterAuthorityName || !strings.HasPrefix(u.Path, cfeClusterResourceNamePrefix) +} + +func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if isDirectPathCluster(ctx) { + // If attributes have cluster name, and cluster name is not cfe, it's a + // backend address, use ALTS. + return c.alts.ClientHandshake(ctx, authority, rawConn) } - // If attributes have cluster name, and cluster name is not cfe, it's a - // backend address, use ALTS. 
- return c.alts.ClientHandshake(ctx, authority, rawConn) + return c.tls.ClientHandshake(ctx, authority, rawConn) } func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 4fbed12565fd..82bee1443bfe 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -70,3 +70,29 @@ type info struct { func (info) AuthType() string { return "insecure" } + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns an nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. +func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 784822d0560a..ce2bbc10a142 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error // TLSChannelzSecurityValue defines the struct that TLS protocol should return // from GetSecurityValue(), containing security info like cipher and certificate used. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index c4bf09f9e940..8f5b536f11eb 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -20,22 +20,32 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + extraDialOptions = append(extraDialOptions, opt...) + } + internal.ClearGlobalDialOptions = func() { + extraDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger +} + // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. 
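The insecure package additions above introduce NewBundle, a credentials.Bundle with transport security disabled and no per-RPC credentials. Purely as an illustration (not part of the vendored change; the target is a placeholder), such a bundle can be plugged into a dial, or the bare insecure transport credentials it wraps can be used directly:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Equivalent ways to dial without transport security, replacing the
	// deprecated grpc.WithInsecure():
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithCredentialsBundle(insecure.NewBundle()))
	// or: grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}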
type dialOptions struct { @@ -45,19 +55,18 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. - balancerBuilder balancer.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + binaryLogger binarylog.Logger + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -73,10 +82,12 @@ type DialOption interface { apply(*dialOptions) } +var extraDialOptions []DialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -105,8 +116,9 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -116,8 +128,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s @@ -195,25 +208,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -286,7 +280,7 @@ func WithBlock() DialOption { // the context.DeadlineExceeded error. 
// Implies WithBlock() // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -304,8 +298,8 @@ func WithReturnConnectionError() DialOption { // WithCredentialsBundle or WithPerRPCCredentials) which require transport // security is incompatible and will cause grpc.Dial() to fail. // -// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead. -// Will be supported throughout 1.x. +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.TransportCredentials = insecure.NewCredentials() @@ -315,7 +309,7 @@ func WithInsecure() DialOption { // WithNoProxy returns a DialOption which disables the use of proxies for this // ClientConn. This is ignored if WithDialer or WithContextDialer are used. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -346,7 +340,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // the ClientConn.WithCreds. This should not be used together with // WithTransportCredentials. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -402,7 +396,21 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl }) } @@ -414,7 +422,7 @@ func WithStatsHandler(h stats.Handler) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -494,11 +502,11 @@ func WithAuthority(a string) DialOption { // current ClientConn's parent. This function is used in nested channel creation // (e.g. grpclb dial). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. -func WithChannelzParentID(id int64) DialOption { +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) @@ -539,9 +547,6 @@ func WithDefaultServiceConfig(s string) DialOption { // service config enables them. This does not impact transparent retries, which // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. -// -// Retry support is currently enabled by default, but may be disabled by -// setting the environment variable "GRPC_GO_RETRY" to "off". 
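The WithStatsHandler change above now ignores nil handlers (logging an error instead of panicking later) and appends each handler to copts.StatsHandlers, so several handlers can coexist. An illustrative toy handler wired in at dial time (sketch only; the target address is a placeholder):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// rpcLogger is a toy stats.Handler that just logs RPC and connection events.
type rpcLogger struct{}

func (rpcLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (rpcLogger) HandleRPC(_ context.Context, s stats.RPCStats)                     { log.Printf("rpc event: %T", s) }
func (rpcLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (rpcLogger) HandleConn(_ context.Context, s stats.ConnStats)                   { log.Printf("conn event: %T", s) }

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(rpcLogger{}), // nil handlers are now rejected with an error log
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}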
func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true @@ -559,7 +564,7 @@ func WithMaxHeaderListSize(s uint32) DialOption { // WithDisableHealthCheck disables the LB channel health checking for all // SubConns of this ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -606,7 +611,7 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { // resolver.Register. They will be matched against the scheme used for the // current Dial only, and will take precedence over the global registry. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 6d84f74c7d08..711763d54fb7 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -19,7 +19,7 @@ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. @@ -28,6 +28,8 @@ package encoding import ( "io" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) // Identity specifies the optional encoding for uncompressed streams. @@ -73,6 +75,7 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) } // GetCompressor returns Compressor for the given compressor name. @@ -108,7 +111,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go index ce2f15ed288f..ca820bd47d44 100644 --- a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go +++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go @@ -19,7 +19,7 @@ // Package gzip implements and registers the gzip compressor // during the initialization. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 7c1f66409034..b5560b47ec4b 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -242,7 +242,7 @@ func (g *loggerT) V(l int) bool { // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
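The encoding changes above make RegisterCompressor record every registered name in grpcutil.RegisteredCompressorNames (used for compressor negotiation). For reference, the stock gzip compressor registers itself on import and is then selected per channel or per call; a minimal illustrative client-side use (sketch only; the target address is a placeholder):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/encoding/gzip" // init() calls encoding.RegisterCompressor
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Ask for gzip compression on every RPC made on this channel;
		// individual calls can still override with their own CallOptions.
		grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}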
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index a66024d23e30..8e29a62f164f 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/health/v1/health.proto package grpc_health_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type HealthCheckResponse_ServingStatus int32 const ( diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 69f525d1baeb..a332dfd7b54e 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 668e0adcf0a9..bb96ef57be89 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -72,9 +72,12 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. 
info diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 000000000000..08666f62a7cb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,384 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. 
+func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. 
+func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + gsb.mu.Lock() + defer gsb.mu.Unlock() + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. 
+ bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + bw.gsb.cc.RemoveSubConn(sc) + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. + bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + bw.gsb.cc.RemoveSubConn(sc) + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. 
+ if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 5cc3aeddb213..809d73ccafb0 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -31,35 +31,42 @@ import ( // Logger is the global binary logger. It can be used to get binary logger for // each method. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be // built at init time from the configuration (environment variable or flags). // -// It is used to get a methodLogger for each individual method. +// It is used to get a MethodLogger for each individual method. var binLogger Logger var grpclogLogger = grpclog.Component("binarylog") -// SetLogger sets the binarg logger. +// SetLogger sets the binary logger. // // Only call this at init time. func SetLogger(l Logger) { binLogger = l } -// GetMethodLogger returns the methodLogger for the given methodName. +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + +// GetMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +75,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. 
The map fields need to be filled in @@ -88,83 +107,83 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // -// New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +// New MethodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // -// New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +// New MethodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } -// getMethodLogger returns the methodLogger for the given methodName. +// getMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. 
-func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index d8f4e7602fde..f9e80e27ab68 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -30,15 +30,15 @@ import ( // to build a new logger and assign it to binarylog.Logger. // // Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. -// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. // // If two configs exist for one certain method or service, the one specified // later overrides the previous config. @@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger { return l } -// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds // it to the right map in the logger. func (l *logger) fillMethodLoggerWithConfigString(config string) error { // "" is invalid. 
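For reference, the filter grammar above drives the exported config API introduced in this change. What follows is only an illustrative sketch, not part of the patch; it assumes code compiled inside the grpc-go module itself, since the binarylog package is internal and cannot be imported from outside:
package main
import (
	"fmt"
	"google.golang.org/grpc/internal/binarylog"
)
func main() {
	// "*{h}" logs only headers for every method; "Foo/Bar{m:256}" additionally
	// caps logged messages for /Foo/Bar at 256 bytes.
	l := binarylog.NewLoggerFromConfigString("*{h},Foo/Bar{m:256}")
	if l == nil {
		fmt.Println("invalid binary logging config string")
		return
	}
	// Each call returns a new MethodLogger, so sequence ids are scoped per call.
	ml := l.GetMethodLogger("/Foo/Bar")
	fmt.Printf("method logger for /Foo/Bar: %T\n", ml)
}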
@@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0cdb41831509..85e3ff2816ae 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -48,7 +48,13 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +type MethodLogger interface { + Log(LogEntryConfig) +} + +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. +type TruncatingMethodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,8 +63,9 @@ type MethodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +// NewTruncatingMethodLogger returns a new truncating method logger. +func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { + return &TruncatingMethodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -69,8 +76,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { } } -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal only method for building the proto message out of the +// input event. It's made public to enable other library to reuse as much logic +// in TruncatingMethodLogger as possible. +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -85,11 +94,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { case *pb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -108,7 +121,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { // but not counted towards the size limit. 
continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } @@ -119,7 +132,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index cd1807543eee..777cbcd7921d 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,6 +24,8 @@ package channelz import ( + "context" + "errors" "fmt" "sort" "sync" @@ -49,7 +51,8 @@ var ( // TurnOn turns on channelz data collection. func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + idGen.reset() atomic.StoreInt32(&curState, 1) } } @@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. +func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) idGen.reset() + return func() error { - var err error cm := db.get() if cm == nil { return nil } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. 
+ + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { return nil } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) + <-ticker.C } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err } } @@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid) - } else { - db.get().addChannel(id, cn, false, pid) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. -func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") } id := idGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, - pid: pid, + pid: pid.Int(), trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid) - return id + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { id := idGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + svr := &server{ refName: ref, s: s, @@ -244,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 { id: id, } db.get().addServer(id, svr) - return id + return newIdentifer(RefServer, id, nil) } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a ListenSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") } id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid) - return id + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil } // RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent +// with ref as its reference name, and adds it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a NormalSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") } id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid) - return id + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil } // RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. 
-func RemoveEntry(id int64) { - db.get().removeEntry(id) +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { - for d := desc; d != nil; d = d.Parent { - switch d.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, d.Desc) - case CtWarning: - l.WarningDepth(depth+1, d.Desc) - case CtError: - l.ErrorDepth(depth+1, d.Desc) - } +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottom most entity. + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) } + if getMaxTraceEntry() == 0 { return } - db.get().traceEvent(id, desc) + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } } // channelMap is the storage data structure for channelz. @@ -326,6 +367,17 @@ type channelMap struct { normalSockets map[int64]*normalSocket } +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + func (c *channelMap) addServer(id int64, s *server) { c.mu.Lock() s.cm = c diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 000000000000..c9a27acd3710 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. 
+type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index b0013f9c8865..8e13a3d2ce7b 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. 
-func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 3c595d154bd3..7b2f350e2e64 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the channel itself from the channelz database. // The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. func (c *channel) deleteSelfIfReady() { if !c.deleteSelfFromTree() { return @@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the subchannel itself from the channelz database. // The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. func (sc *subChannel) deleteSelfIfReady() { if !sc.deleteSelfFromTree() { return @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. 
+ RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 6f0272543110..7edd196bd3da 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -25,11 +25,15 @@ import ( ) const ( - prefix = "GRPC_GO_" - txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" + prefix = "GRPC_GO_" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" + advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS" ) var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + // AdvertiseCompressors is set if registered compressor should be advertised + // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). + AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 000000000000..821dd0a7c198 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. 
+ ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 9bad03cec64f..af09711a3e88 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -26,13 +26,13 @@ import ( const ( // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. // Do not use this and read from env directly. Its value is read and kept in - // variable BootstrapFileName. + // variable XDSBootstrapFileName. // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" - // XDSBootstrapFileContentEnv is the env variable to set bootstrapp file + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file // content. Do not use this and read from env directly. Its value is read - // and kept in variable BootstrapFileName. + // and kept in variable XDSBootstrapFileContent. // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" @@ -41,6 +41,7 @@ const ( clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" @@ -76,13 +77,16 @@ var ( // environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to // "true". - XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false") // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") - + // XDSOutlierDetection indicates whether outlier detection support is + // enabled, which can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". + XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false") // XDSFederation indicates whether federation support is enabled. XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go index d6c9e03fc4c8..6717b757f80d 100644 --- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go @@ -20,13 +20,6 @@ package googlecloud import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "regexp" "runtime" "strings" "sync" @@ -35,43 +28,9 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const ( - linuxProductNameFile = "/sys/class/dmi/id/product_name" - windowsCheckCommand = "powershell.exe" - windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" - powershellOutputFilter = "Manufacturer" - windowsManufacturerRegex = ":(.*)" - - logPrefix = "[googlecloud]" -) +const logPrefix = "[googlecloud]" var ( - // The following two variables will be reassigned in tests. 
- runningOS = runtime.GOOS - manufacturerReader = func() (io.Reader, error) { - switch runningOS { - case "linux": - return os.Open(linuxProductNameFile) - case "windows": - cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) - out, err := cmd.Output() - if err != nil { - return nil, err - } - for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { - if strings.HasPrefix(line, powershellOutputFilter) { - re := regexp.MustCompile(windowsManufacturerRegex) - name := re.FindString(line) - name = strings.TrimLeft(name, ":") - return strings.NewReader(name), nil - } - } - return nil, errors.New("cannot determine the machine's manufacturer") - default: - return nil, fmt.Errorf("%s is not supported", runningOS) - } - } - vmOnGCEOnce sync.Once vmOnGCE bool @@ -84,21 +43,21 @@ var ( // package. We keep this to avoid depending on the cloud library module. func OnGCE() bool { vmOnGCEOnce.Do(func() { - vmOnGCE = isRunningOnGCE() + mf, err := manufacturer() + if err != nil { + logger.Infof("failed to read manufacturer, setting onGCE=false: %v", err) + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) }) return vmOnGCE } -// isRunningOnGCE checks whether the local system, without doing a network request is +// isRunningOnGCE checks whether the local system, without doing a network request, is // running on GCP. -func isRunningOnGCE() bool { - manufacturer, err := readManufacturer() - if err != nil { - logger.Infof("failed to read manufacturer %v, returning OnGCE=false", err) - return false - } +func isRunningOnGCE(manufacturer []byte, goos string) bool { name := string(manufacturer) - switch runningOS { + switch goos { case "linux": name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" @@ -111,18 +70,3 @@ func isRunningOnGCE() bool { return false } } - -func readManufacturer() ([]byte, error) { - reader, err := manufacturerReader() - if err != nil { - return nil, err - } - if reader == nil { - return nil, errors.New("got nil reader") - } - manufacturer, err := ioutil.ReadAll(reader) - if err != nil { - return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err) - } - return manufacturer, nil -} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go new file mode 100644 index 000000000000..ffa0f1ddee5d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go @@ -0,0 +1,26 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package googlecloud + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go new file mode 100644 index 000000000000..e53b8ffc837f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go @@ -0,0 +1,27 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import "io/ioutil" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return ioutil.ReadFile(linuxProductNameFile) +} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go new file mode 100644 index 000000000000..2d7aaaaa70fe --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index 30a3b4258fc0..b68e26a36493 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -110,7 +110,7 @@ type LoggerV2 interface { // This is a copy of the DepthLoggerV2 defined in the external grpclog package. // It is defined here to avoid a circular dependency. 
// -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 740f83c2b766..517ea70642a1 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -52,6 +52,13 @@ func Intn(n int) int { return r.Intn(n) } +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + mu.Lock() + defer mu.Unlock() + return r.Int31n(n) +} + // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go new file mode 100644 index 000000000000..6635f7bca96d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" +) + +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 000000000000..9f4090967980 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. 
+func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index 4e7475060c1c..ec62b4775e5b 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -25,7 +25,6 @@ import ( // ParseMethod splits service and method from the input. It expects format // "/service/method". -// func ParseMethod(methodName string) (service, method string, _ error) { if !strings.HasPrefix(methodName, "/") { return "", "", errors.New("invalid method name: should start with /") @@ -39,6 +38,11 @@ func ParseMethod(methodName string) (service, method string, _ error) { return methodName[:pos], methodName[pos+1:], nil } +// baseContentType is the base content-type for gRPC. This is a valid +// content-type on it's own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. const baseContentType = "application/grpc" // ContentSubtype returns the content-subtype for the given content-type. The diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 1b596bf3579f..fd0ee3dcaf1e 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -38,11 +38,10 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and - // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the // there's difference in white space. EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool @@ -64,6 +63,70 @@ var ( // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. DrainServerTransports interface{} // func(*grpc.Server, string) + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + AddGlobalServerOptions interface{} // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + AddGlobalDialOptions interface{} // func(opt ...DialOption) + // ClearGlobalDialOptions clears the array of extra DialOption. 
This + // method is useful in testing and benchmarking. + ClearGlobalDialOptions func() + // JoinServerOptions combines the server options passed as arguments into a + // single server option. + JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -86,3 +149,9 @@ const ( // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. 
+const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index b8733dbf340d..b2980f8ac44a 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -22,6 +22,9 @@ package metadata import ( + "fmt" + "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -72,3 +75,46 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } + +// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. +func Validate(md metadata.MD) error { + for k, vals := range md { + // pseudo-header will be ignored + if k[0] == ':' { + continue + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(k); i++ { + r := k[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) + } + } + if strings.HasSuffix(k, "-bin") { + continue + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) + } + } + } + return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // for i that saving a conversion if not using for range + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 000000000000..0177af4b5114 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). 
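For the metadata Validate helper introduced above, the key rule is: non-pseudo header names must use only characters from [0-9 a-z _ - .], and values are checked for printable ASCII unless the key ends in "-bin". A rough standalone sketch of the key check (validKey is an illustrative name, not the library API):

package main

import (
	"fmt"
	"strings"
)

// validKey reports whether a non-pseudo metadata key satisfies the rule
// enforced by Validate: one or more characters from [0-9 a-z _ - .].
func validKey(k string) bool {
	if len(k) == 0 {
		return false
	}
	for i := 0; i < len(k); i++ {
		r := k[i]
		if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
			return false
		}
	}
	return true
}

func main() {
	for _, k := range []string{"grpc-timeout", "trace-bin", "Bad Key"} {
		// Values of "-bin" keys are not validated; other values must be printable ASCII.
		fmt.Printf("%q: valid key=%v, binary=%v\n", k, validKey(k), strings.HasSuffix(k, "-bin"))
	}
}
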
+func ToJSON(e interface{}) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 75301c514913..b08ac30adfef 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { + if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.Authority) + d.resolver, err = customAuthorityResolver(target.URL.Host) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 520d9229e1ed..c6e08221ff64 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. 
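The pretty package above is internal to grpc; for non-proto values its behaviour reduces to json.MarshalIndent with a two-space indent, falling back to fmt.Sprintf("%+v") on error. A minimal sketch of that default branch only (toJSON is an illustrative name; proto handling is omitted):

package main

import (
	"encoding/json"
	"fmt"
)

const jsonIndent = "  "

// toJSON mirrors the default (non-protobuf) branch of internal/pretty.ToJSON.
func toJSON(e interface{}) string {
	ret, err := json.MarshalIndent(e, "", jsonIndent)
	if err != nil {
		return fmt.Sprintf("%+v", e) // fall back to a flat %+v rendering
	}
	return string(ret)
}

func main() {
	cfg := map[string]interface{}{"loadBalancingConfig": []string{"round_robin"}}
	fmt.Println(toJSON(cfg))
}
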
package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 20852e59df29..160911687738 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -34,8 +34,8 @@ type builder struct { } func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.Authority != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) } // gRPC was parsing the dial target manually before PR #4817, and we @@ -49,8 +49,9 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv } addr := resolver.Address{Addr: endpoint} if b.scheme == unixAbstractScheme { - // prepend "\x00" to address for unix-abstract - addr.Addr = "\x00" + addr.Addr + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. + addr.Addr = "@" + addr.Addr } cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) return &nopResolver{}, nil diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index badbdbf597f3..51e733e495a3 100644 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { // ServiceConfig contains a list of loadBalancingConfigs, each with a name and // config. This method iterates through that list in order, and stops at the // first policy that is supported. -// - If the config for the first supported policy is invalid, the whole service -// config is invalid. -// - If the list doesn't contain any supported policy, the whole service config -// is invalid. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { var ir intermediateBalancerConfig err := json.Unmarshal(b, &ir) diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index e5c6513edd13..b0ead4f54f82 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool { } return proto.Equal(e.s.s, tse.s.s) } + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. 
+func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 8394d252df03..aaa9c859a349 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -137,6 +137,7 @@ type earlyAbortStream struct { streamID uint32 contentSubtype string status *status.Status + rst bool } func (*earlyAbortStream) isTransportResponseFrame() bool { return false } @@ -190,7 +191,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure } func (*goAway) isTransportResponseFrame() bool { return false } @@ -208,6 +209,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -407,7 +416,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -518,18 +527,6 @@ const minBatchSize = 1000 // As an optimization, to increase the batch size for each flush, loopy yields the processor, once // if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { - defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. %v", err) - } - err = nil - } - }() for { it, err := l.cbuf.get(true) if err != nil { @@ -573,7 +570,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -661,11 +657,10 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { func (l *loopyWriter) originateStream(str *outStream) error { hdr := str.itl.dequeue().(*headerFrame) if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err + if err == errStreamDrain { // errStreamDrain need not close transport + return nil } - // Other errors(errStreamDrain) need not close transport. 
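The restricted codes checked by IsRestrictedControlPlaneCode above come from gRFC A54 (codes a control plane may not generate). A small standalone sketch of the same check using the public codes package (isRestricted is an illustrative name):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
)

// isRestricted mirrors internal/status.IsRestrictedControlPlaneCode: these
// codes are disallowed for control-plane-generated statuses per gRFC A54.
func isRestricted(c codes.Code) bool {
	switch c {
	case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists,
		codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
		return true
	}
	return false
}

func main() {
	fmt.Println(isRestricted(codes.NotFound))    // true
	fmt.Println(isRestricted(codes.Unavailable)) // false
}
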
- return nil + return err } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err @@ -763,7 +758,7 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { } } if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("finished processing active streams while in draining mode") } return nil } @@ -786,6 +781,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { return err } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } return nil } @@ -793,7 +793,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("received GOAWAY with no active streams") } } return nil @@ -811,6 +811,14 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } +func (l *loopyWriter) closeConnectionHandler() error { + l.framer.writer.Flush() + // Exit loopyWriter entirely by returning an error here. This will lead to + // the transport closing the connection, and, ultimately, transport + // closure. + return ErrConnClosing +} + func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: @@ -839,6 +847,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.goAwayHandler(i) case *outFlowControlSizeRequest: return l.outFlowControlSizeRequestHandler(i) + case closeConnection: + return l.closeConnectionHandler() default: return fmt.Errorf("transport: unknown control message type %T", i) } @@ -880,9 +890,9 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possilbe HTTP2 frame size. + // maximum possible HTTP2 frame size. if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame // Client sends out empty data frame with endStream = true diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 1c3459c2b4c5..ebe8bfe330a5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -46,24 +46,32 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. 
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } st := &serverHandlerTransport{ @@ -79,7 +87,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed time-out: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +107,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -138,15 +150,18 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats stats.Handler + stats []stats.Handler } -func (ht *serverHandlerTransport) Close() { - ht.closeOnce.Do(ht.closeCloseChanOnce) +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if logger.V(logLevel) { + logger.Infof("Closing serverHandlerTransport: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or @@ -228,15 +243,15 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) if err == nil { // transport has not been closed - if ht.stats != nil { - // Note: The trailer fields are compressed with hpack after this call returns. - // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
+ for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -314,10 +329,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { }) if err == nil { - if ht.stats != nil { + for _, sh := range ht.stats { // Note: The header fields are compressed with hpack after this call returns. // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + sh.HandleRPC(s.Context(), &stats.OutHeader{ Header: md.Copy(), Compression: s.sendCompress, }) @@ -346,7 +361,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() req := ht.req @@ -369,14 +384,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) - if ht.stats != nil { - s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range ht.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: ht.RemoteAddr(), Compression: s.recvCompress, } - ht.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, @@ -442,10 +457,10 @@ func (ht *serverHandlerTransport) Drain() { // mapRecvMsgError returns the non-nil err into the appropriate // error value as expected by callers of *grpc.parser.recvMsg. // In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index f0c72d337105..3e582a2853c6 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -38,8 +38,10 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" @@ -57,11 +59,15 @@ var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. 
+ userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -78,6 +84,7 @@ type http2Client struct { framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. + // Do not access controlBuf with mu held. controlBuf *controlBuffer fc *trInFlow // The scheme used: https if TLS is on, http otherwise. @@ -90,7 +97,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandler stats.Handler + statsHandlers []stats.Handler initialWindowSize int32 @@ -98,17 +105,15 @@ type http2Client struct { maxSendHeaderListSize *uint32 bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon - // receiving server preface to signal that a succefull HTTP2 - // connection was established. - onPrefaceReceipt func() maxConcurrentStreams uint32 streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 nextID uint32 + registeredCompressors string + // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables state transportState activeStreams map[uint32]*Stream @@ -132,7 +137,7 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData onGoAway func(GoAwayReason) @@ -192,7 +197,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -214,12 +219,38 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) } + // Any further errors will close the underlying connection defer func(conn net.Conn) { if err != nil { conn.Close() } }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. 
+ if err := connectCtx.Err(); err != nil { + // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) + } + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams // Validate keepalive parameters. if kp.Time == 0 { @@ -251,15 +282,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - rawConn := conn - // Pull the deadline from the connectCtx, which will be used for - // timeouts in the authentication protocol handshake. Can ignore the - // boolean as the deadline will return the zero value, which will make - // the conn not timeout on I/O operations. - deadline, _ := connectCtx.Deadline() - rawConn.SetDeadline(deadline) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) - rawConn.SetDeadline(time.Time{}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -297,6 +320,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ctxDone: ctx.Done(), // Cache Done chan. cancel: cancel, userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -311,19 +336,20 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandler: opts.StatsHandler, + statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), onGoAway: onGoAway, - onClose: onClose, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), + onClose: onClose, } + // Add peer information to the http2client context. + t.ctx = peer.NewContext(t.ctx, t.getPeer()) if md, ok := addr.Metadata.(*metadata.MD); ok { t.md = *md @@ -341,38 +367,50 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - if t.statsHandler != nil { - t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. 
Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() // Send connection preface to server. n, err := t.conn.Write(clientPreface) if err != nil { err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) - t.Close(err) return nil, err } if n != len(clientPreface) { err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - t.Close(err) return nil, err } var ss []http2.Setting @@ -392,14 +430,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts err = t.framer.fr.WriteSettings(ss...) if err != nil { err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) - t.Close(err) return nil, err } // Adjust the connection flow control window if needed. if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) - t.Close(err) return nil, err } } @@ -412,10 +448,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } // Do not close the transport. Let reader goroutine handle it since // there might be data in the buffers. @@ -466,7 +500,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ Addr: t.remoteAddr, - AuthInfo: t.authInfo, + AuthInfo: t.authInfo, // Can be nil } } @@ -502,9 +536,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } + registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. + if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. 
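The createHeaderFields change above advertises all registered compressors in grpc-accept-encoding and additionally appends the per-call compressor when it was set via WithCompressor rather than encoding.RegisterCompressor. A hedged sketch of that string composition (acceptEncoding and the compressor names below are illustrative only):

package main

import (
	"fmt"
	"strings"
)

// acceptEncoding joins the registered compressor names and appends the
// outgoing compressor if it is not already among them, approximating the
// grpc-accept-encoding value built in createHeaderFields.
func acceptEncoding(registered []string, sendCompress string) string {
	out := strings.Join(registered, ",")
	if sendCompress != "" {
		found := false
		for _, c := range registered {
			if c == sendCompress {
				found = true
				break
			}
		}
		if !found {
			if out != "" {
				out += ","
			}
			out += sendCompress
		}
	}
	return out
}

func main() {
	fmt.Println(acceptEncoding([]string{"gzip"}, "snappy")) // "gzip,snappy"
	fmt.Println(acceptEncoding([]string{"gzip"}, "gzip"))   // "gzip"
}
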
@@ -584,7 +631,11 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } return nil, err } @@ -613,7 +664,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) } callAuthData = make(map[string]string, len(data)) for k, v := range data { @@ -629,18 +687,17 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // NewStream errors result in transparent retry, as they mean nothing went onto // the wire. However, there are two notable exceptions: // -// 1. If the stream headers violate the max header list size allowed by the -// server. In this case there is no reason to retry at all, as it is -// assumed the RPC would continue to fail on subsequent attempts. -// 2. If the credentials errored when requesting their headers. In this case, -// it's possible a retry can fix the problem, but indefinitely transparently -// retrying is not appropriate as it is likely the credentials, if they can -// eventually succeed, would need I/O to do so. +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. type NewStreamError struct { Err error - DoNotRetry bool - DoNotTransparentRetry bool + AllowTransparentRetry bool } func (e NewStreamError) Error() string { @@ -649,11 +706,23 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. 
+ if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true} + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -685,7 +754,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea cleanup(err) return err } - t.activeStreams[id] = s if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) @@ -719,6 +787,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.nextID += 2 s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.mu.Lock() + if t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + t.activeStreams[s.id] = s + t.mu.Unlock() if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -744,22 +819,17 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } for { success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { - if !checkForStreamQuota(it) { - return false - } - if !checkForHeaderListSize(it) { - return false - } - return true + return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { - return nil, &NewStreamError{Err: err} + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { @@ -767,29 +837,32 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea case <-ctx.Done(): return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, &NewStreamError{Err: errStreamDrain} + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, &NewStreamError{Err: ErrConnClosing} + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if t.statsHandler != nil { + if len(t.statsHandlers) != 0 { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) } - t.statsHandler.HandleRPC(s.ctx, outHeader) } return s, nil } @@ -872,19 +945,18 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. 
// Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. -// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. func (t *http2Client) Close(err error) { t.mu.Lock() - // Make sure we only Close once. + // Make sure we only close once. if t.state == closing { t.mu.Unlock() return } - // Call t.onClose before setting the state to closing to prevent the client - // from attempting to create new streams ASAP. + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. t.onClose() t.state = closing streams := t.activeStreams @@ -898,9 +970,7 @@ func (t *http2Client) Close(err error) { t.controlBuf.finish() t.cancel() t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. _, goAwayDebugMessage := t.GetGoAwayReason() @@ -917,11 +987,11 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { connEnd := &stats.ConnEnd{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } @@ -937,11 +1007,14 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: GracefulClose called") + } t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close(ErrConnClosing) + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) @@ -1001,13 +1074,13 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.mu.Unlock() updateIWS := func(interface{}) bool { t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() return true } t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) @@ -1213,7 +1286,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { default: t.setGoAwayReason(f) close(t.goAway) - t.controlBuf.put(&incomingGoAway{}) + defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. @@ -1226,18 +1299,29 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { if upperLimit == 0 { // This is the first GoAway Frame. upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. 
} + + t.prevGoAwayID = id + if len(t.activeStreams) == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + return + } + + streamsToClose := make([]*Stream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) - t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + if streamID > id && streamID <= upperLimit { + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) + } } } - t.prevGoAwayID = id - active := len(t.activeStreams) t.mu.Unlock() - if active == 0 { - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. + for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } } @@ -1433,7 +1517,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { if isHeader { inHeader := &stats.InHeader{ Client: true, @@ -1441,14 +1525,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { Header: metadata.MD(mdata).Copy(), Compression: s.recvCompress, } - t.statsHandler.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: metadata.MD(mdata).Copy(), } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + sh.HandleRPC(s.ctx, inTrailer) } } @@ -1465,33 +1549,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - defer close(t.readerDone) - // Check the validity of server preface. +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { frame, err := t.framer.fr.ReadFrame() if err != nil { - err = connectionErrorf(true, err, "error reading server preface: %v", err) - t.Close(err) // this kicks off resetTransport, so must be last before return - return - } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) - if t.keepaliveEnabled { - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + return connectionErrorf(true, err, "error reading server preface: %v", err) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - // this kicks off resetTransport, so must be last before return - t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) - return + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) } - t.onPrefaceReceipt() t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. 
If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } // loop to keep reading incoming messages on this transport. for { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 2c6eaf0e59cf..37e089bc8433 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -36,11 +36,13 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -52,10 +54,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -82,7 +84,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats stats.Handler + stats []stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -101,13 +103,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when Drain() is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. 
@@ -117,7 +119,7 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool @@ -231,6 +233,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime @@ -252,7 +259,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandler, + stats: config.StatsHandlers, kp: kp, idle: time.Now(), kep: kep, @@ -260,6 +267,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } + // Add peer information to the http2server context. + t.ctx = peer.NewContext(t.ctx, t.getPeer()) + t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ @@ -267,25 +277,25 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - if t.stats != nil { - t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.stats { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{} - t.stats.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { if err != nil { - t.Close() + t.Close(err) } }() @@ -323,10 +333,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + err := t.loopy.run() + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } t.conn.Close() t.controlBuf.finish() @@ -336,8 +345,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -353,15 +363,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeFrameSize, onWrite: func() {}, }) - return false + return nil } if streamID%2 != 1 || streamID <= t.maxStreamID { // illegal gRPC stream id. - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - return true + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) } t.maxStreamID = streamID @@ -443,8 +450,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) - return false + return nil } if !isGRPC || headerError { @@ -454,7 +462,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeProtocol, onWrite: func() {}, }) - return false + return nil } // "If :authority is missing, Host must be renamed to :authority." - A41 @@ -479,14 +487,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } - pr := &peer.Peer{ - Addr: t.remoteAddr, - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) @@ -501,7 +502,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -512,21 +513,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false + return nil } if httpMethod != http.MethodPost { t.mu.Unlock() + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + logger.Infof("transport: %v", errMsg) } - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) s.cancel() - return false + return nil } if t.inTapHandle != nil { var err error @@ -544,8 +547,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: s.id, contentSubtype: s.contentSubtype, status: stat, + rst: !frame.StreamEnded(), }) - return false + return nil } } t.activeStreams[streamID] = s @@ -561,8 +565,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) - if t.stats != nil { - s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, 
&stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: t.remoteAddr, @@ -571,7 +575,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( WireLength: int(frame.Header().Length), Header: metadata.MD(mdata).Copy(), } - t.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) @@ -592,7 +596,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -625,19 +629,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() + t.Close(err) return } - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - } - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() + if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + t.Close(err) break } case *http2.DataFrame: @@ -838,8 +839,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. @@ -881,10 +882,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -925,12 +923,27 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { + return t.streamContextErr(s) + } + + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } - s.hdrMu.Lock() + if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) @@ -939,10 +952,8 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } - s.hdrMu.Unlock() return nil } @@ -973,14 +984,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - if t.stats != nil { + for _, sh := range t.stats { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. 
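The hunks around here replace the transport's single stats handler with a slice that is ranged over, so several handlers can observe the same connection and RPC events. As a rough, hedged sketch of what one element of that slice looks like, the following implements the stats.Handler interface from google.golang.org/grpc/stats; the type loggingHandler and the context key are hypothetical names, not part of this patch.

package example

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

// loggingHandler is a hypothetical stats.Handler. After this change the
// server transport calls TagConn/HandleConn and TagRPC/HandleRPC on every
// handler in ServerConfig.StatsHandlers rather than on a single handler.
type loggingHandler struct{}

type rpcMethodKey struct{}

func (loggingHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	// Stash the full method name so HandleRPC can report it later.
	return context.WithValue(ctx, rpcMethodKey{}, info.FullMethodName)
}

func (loggingHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if _, ok := s.(*stats.InHeader); ok {
		log.Printf("headers received for %v", ctx.Value(rpcMethodKey{}))
	}
}

func (loggingHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}

func (loggingHandler) HandleConn(context.Context, stats.ConnStats) {}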
outHeader := &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, } - t.stats.HandleRPC(s.Context(), outHeader) + sh.HandleRPC(s.Context(), outHeader) } return nil } @@ -990,17 +1001,19 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { return nil } - s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !s.updateHeaderSent() { // No headers have been sent. if len(s.header) > 0 { // Send a separate header frame. if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() return err } } else { // Send a trailer only response. @@ -1029,7 +1042,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { endStream: true, onWrite: t.setResetPingStrikes, } - s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) if !success { if err != nil { @@ -1041,10 +1054,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - if t.stats != nil { + for _, sh := range t.stats { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1056,23 +1069,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? 
- s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -1082,12 +1084,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -1149,7 +1146,7 @@ func (t *http2Server) keepalive() { if logger.V(logLevel) { logger.Infof("transport: closing server transport due to maximum connection age.") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return @@ -1165,10 +1162,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1195,12 +1189,15 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -1210,25 +1207,19 @@ func (t *http2Server) Close() { if err := t.conn.Close(); err != nil && logger.V(logLevel) { logger.Infof("transport: error closing conn during Close: %v", err) } - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() } - if t.stats != nil { + for _, sh := range t.stats { connEnd := &stats.ConnEnd{} - t.stats.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1250,6 +1241,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1269,6 +1265,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. 
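The server keepalive changes in this file (applying the keepalive timeout as TCP_USER_TIMEOUT on the accepted connection, routing max-connection-age shutdown through the control buffer, and closing the transport with a descriptive error when a ping goes unacked) are all driven by keepalive.ServerParameters. A minimal sketch of the knobs involved, with purely illustrative durations:

package example

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// newKeepaliveServer wires up the parameters the patched transport reads:
// Time/Timeout become kp.Time/kp.Timeout, and Timeout is now also set as
// TCP_USER_TIMEOUT on the connection. The values below are examples only.
func newKeepaliveServer() *grpc.Server {
	return grpc.NewServer(
		grpc.KeepaliveParams(keepalive.ServerParameters{
			Time:    2 * time.Hour,    // ping an idle client after this long
			Timeout: 20 * time.Second, // wait this long for the ack before closing
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             5 * time.Minute, // reject clients that ping more often
			PermitWithoutStream: false,
		}),
	)
}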
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) @@ -1287,10 +1288,10 @@ func (t *http2Server) RemoteAddr() net.Addr { func (t *http2Server) Drain() { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) + t.drainEvent = grpcsync.NewEvent() t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } @@ -1311,19 +1312,20 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // Stop accepting more streams now. t.state = draining sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { + if retErr != nil { // Abruptly close the connection following the GoAway (via // loopywriter). But flush out what's inside the buffer first. t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + return false, retErr } return true, nil } @@ -1345,7 +1347,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return @@ -1404,6 +1406,13 @@ func (t *http2Server) getOutFlowWindow() int64 { } } +func (t *http2Server) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + } +} + func getJitter(v time.Duration) time.Duration { if v == infinity { return 0 diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index d8247bcdf692..2c601a864d99 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -20,7 +20,6 @@ package transport import ( "bufio" - "bytes" "encoding/base64" "fmt" "io" @@ -45,14 +44,8 @@ import ( const ( // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues http2InitHeaderTableSize = 4096 - // baseContentType is the base content-type for gRPC. This is a valid - // content-type on it's own, but can also include a content-subtype such as - // "proto" as a suffix after "+" or ";". See - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests - // for more details. - ) var ( @@ -257,13 +250,13 @@ func encodeGrpcMessage(msg string) string { } func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder for len(msg) > 0 { r, size := utf8.DecodeRuneInString(msg) for _, b := range []byte(string(r)) { if size > 1 { // If size > 1, r is not ascii. Always do percent encoding. 
- buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) continue } @@ -272,14 +265,14 @@ func encodeGrpcMessageUnchecked(msg string) string { // // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". if b >= spaceByte && b <= tildeByte && b != percentByte { - buf.WriteByte(b) + sb.WriteByte(b) } else { - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) } } msg = msg[size:] } - return buf.String() + return sb.String() } // decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. @@ -297,23 +290,23 @@ func decodeGrpcMessage(msg string) string { } func decodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder lenMsg := len(msg) for i := 0; i < lenMsg; i++ { c := msg[i] if c == percentByte && i+2 < lenMsg { parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) if err != nil { - buf.WriteByte(c) + sb.WriteByte(c) } else { - buf.WriteByte(byte(parsed)) + sb.WriteByte(byte(parsed)) i += 2 } } else { - buf.WriteByte(c) + sb.WriteByte(c) } } - return buf.String() + return sb.String() } type bufWriter struct { @@ -322,8 +315,6 @@ type bufWriter struct { batchSize int conn net.Conn err error - - onFlush func() } func newBufWriter(conn net.Conn, batchSize int) *bufWriter { @@ -360,9 +351,6 @@ func (w *bufWriter) Flush() error { if w.offset == 0 { return nil } - if w.onFlush != nil { - w.onFlush() - } _, w.err = w.conn.Write(w.buf[:w.offset]) w.offset = 0 return w.err diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index d3bf65b2bdff..6cff20c8e022 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -42,6 +43,10 @@ import ( "google.golang.org/grpc/tap" ) +// ErrNoHeaders is used as a signal that a trailers only response was received, +// and is not a real error. +var ErrNoHeaders = errors.New("stream has no headers") + const logLevel = 2 type bufferPool struct { @@ -365,9 +370,15 @@ func (s *Stream) Header() (metadata.MD, error) { return s.header.Copy(), nil } s.waitOnHeader() + if !s.headerValid { return nil, s.status.Err() } + + if s.noHeaders { + return nil, ErrNoHeaders + } + return s.header.Copy(), nil } @@ -522,14 +533,14 @@ type ServerConfig struct { ConnectionTimeout time.Duration Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandler stats.Handler + StatsHandlers []stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } @@ -552,8 +563,8 @@ type ConnectOptions struct { CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters - // StatsHandler stores the handler for stats. - StatsHandler stats.Handler + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler // InitialWindowSize sets the initial window size for a stream. 
InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. @@ -563,7 +574,7 @@ type ConnectOptions struct { // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. @@ -572,8 +583,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose) } // Options provides additional hints and information for message @@ -690,7 +701,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() + Close(err error) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr @@ -741,6 +752,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 3604c7819fdc..fb4a88f59bd3 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -41,16 +41,17 @@ type MD map[string][]string // New creates an MD from a given key-value map. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may // result in errors if set in metadata. func New(m map[string]string) MD { - md := MD{} + md := make(MD, len(m)) for k, val := range m { key := strings.ToLower(k) md[key] = append(md[key], val) @@ -62,10 +63,11 @@ func New(m map[string]string) MD { // Pairs panics if len(kv) is odd. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. 
+// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -74,7 +76,7 @@ func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } - md := MD{} + md := make(MD, len(kv)/2) for i := 0; i < len(kv); i += 2 { key := strings.ToLower(kv[i]) md[key] = append(md[key], kv[i+1]) @@ -182,17 +184,51 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { if !ok { return nil, false } - out := MD{} + out := make(MD, len(md)) for k, v := range md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + out[key] = copyOf(v) } return out, true } +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Key must be lower-case. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + if strings.ToLower(k) == key { + return copyOf(v) + } + } + return nil +} + +// the returned slice must not be modified in place +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals +} + // FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // // Remember to perform strings.ToLower on the keys, for both the returned MD (MD @@ -220,13 +256,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { return nil, false } - out := MD{} + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) for k, v := range raw.md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + out[key] = copyOf(v) } for _, added := range raw.added { if len(added)%2 == 1 { diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index e8367cb8993b..a5d5516ee060 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/channelz" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) @@ -129,9 +130,13 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if err == balancer.ErrNoSubConnAvailable { continue } - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + // First restrict the code to the list allowed by gRFC A54. 
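ValueFromIncomingContext is the one genuinely new API in the metadata.go hunk above (it is still marked experimental). A short usage sketch, assuming a server handler that only needs a single key; the "authorization" key is an example and, per the doc comment, must be lower-case:

package example

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// authTokens reads one metadata key without copying the whole incoming MD.
func authTokens(ctx context.Context) []string {
	return metadata.ValueFromIncomingContext(ctx, "authorization")
}

// allMetadata copies the full incoming MD when several keys are needed;
// after this patch the returned value slices are defensive copies.
func allMetadata(ctx context.Context) metadata.MD {
	md, _ := metadata.FromIncomingContext(ctx)
	return md
}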
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } + return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -175,3 +180,9 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 5168b62b078a..b3a55481b944 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -44,79 +44,107 @@ func (*pickfirstBuilder) Name() string { } type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. 
+ b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + return nil + } + + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) - b.sc.Connect() + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + b.state = state.ConnectivityState + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { + switch state.ConnectivityState { case connectivity.Ready: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) case connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: 
s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } } @@ -125,8 +153,8 @@ func (b *pickfirstBalancer) Close() { } func (b *pickfirstBalancer) ExitIdle() { - if b.sc != nil && b.state == connectivity.Idle { - b.sc.Connect() + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() } } @@ -135,18 +163,18 @@ type picker struct { err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. type idlePicker struct { - sc balancer.SubConn + subConn balancer.SubConn } -func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - i.sc.Connect() +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 0a1e975ad916..cd45547854f0 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -25,7 +25,7 @@ import ( // PreparedMsg is responsible for creating a Marshalled and Compressed object. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index 58c802f8aec7..a6f26c8ab0f0 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} echo "remove existing generated files" -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -57,7 +57,8 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - reflection/grpc_reflection_v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto ) # Generates only the new gRPC Service symbols @@ -68,7 +69,6 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto ${WORKDIR}/grpc-proto/grpc/testing/*.proto ${WORKDIR}/grpc-proto/grpc/core/*.proto ) @@ -80,8 +80,7 @@ SOURCES=( # Note that the protos listed here are all for testing purposes. All protos to # be used externally should have a go_package option (and they don't need to be # listed here). 
-OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ -Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ @@ -117,15 +116,8 @@ done mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go - -# grpc/service_config/service_config.proto does not have a go_package option. -mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config - -# grpc/testing does not have a go_package option. -mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ -mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index e87ecd0eeb38..efcb7f3efd82 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -28,25 +28,40 @@ type addressMapEntry struct { // Multiple accesses may not be performed concurrently. Must be created via // NewAddressMap; do not construct directly. type AddressMap struct { - m map[string]addressMapEntryList + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. + // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. + // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. + m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} } type addressMapEntryList []*addressMapEntry // NewAddressMap creates a new AddressMap. func NewAddressMap() *AddressMap { - return &AddressMap{m: make(map[string]addressMapEntryList)} + return &AddressMap{m: make(map[Address]addressMapEntryList)} } // find returns the index of addr in the addressMapEntry slice, or -1 if not // present. 
func (l addressMapEntryList) find(addr Address) int { - if len(l) == 0 { - return -1 - } for i, entry := range l { - if entry.addr.ServerName == addr.ServerName && - entry.addr.Attributes.Equal(addr.Attributes) { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { return i } } @@ -55,7 +70,8 @@ func (l addressMapEntryList) find(addr Address) int { // Get returns the value for the address in the map, if present. func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { return entryList[entry].value, true } @@ -64,17 +80,19 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { // Set updates or adds the value to the address in the map. func (a *AddressMap) Set(addr Address, value interface{}) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { - a.m[addr.Addr][entry].value = value + entryList[entry].value = value return } - a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) } // Delete removes addr from the map. func (a *AddressMap) Delete(addr Address) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] entry := entryList.find(addr) if entry == -1 { return @@ -85,7 +103,7 @@ func (a *AddressMap) Delete(addr Address) { copy(entryList[entry:], entryList[entry+1:]) entryList = entryList[:len(entryList)-1] } - a.m[addr.Addr] = entryList + a.m[addrKey] = entryList } // Len returns the number of entries in the map. @@ -107,3 +125,14 @@ func (a *AddressMap) Keys() []Address { } return ret } + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []interface{} { + ret := make([]interface{}, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index e28b68026062..967cbc7373ab 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -95,7 +96,7 @@ const ( // Address represents a server the client connects to. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -139,13 +140,18 @@ type Address struct { // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. -func (a *Address) Equal(o Address) bool { +func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && a.Type == o.Type && a.Metadata == o.Metadata } +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + return pretty.ToJSON(a) +} + // BuildOptions includes additional information for the builder to create // the resolver. 
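AddressMap and the Values accessor added above are exported from google.golang.org/grpc/resolver, so a small usage sketch may help; it only demonstrates the keying behaviour described in the comments (Addr and ServerName make up the map key, Attributes are matched within an entry list), and the addresses and stored values are illustrative:

package example

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// addressMapDemo stores two addresses that share an Addr but differ in
// ServerName; they are kept in separate entries because the key is built
// from both fields.
func addressMapDemo() {
	m := resolver.NewAddressMap()

	a := resolver.Address{Addr: "10.0.0.1:443", ServerName: "foo.example.com"}
	b := resolver.Address{Addr: "10.0.0.1:443", ServerName: "bar.example.com"}

	m.Set(a, "conn-a")
	m.Set(b, "conn-b")

	if v, ok := m.Get(b); ok {
		fmt.Println(v) // conn-b
	}
	fmt.Println(m.Len())         // 2
	fmt.Println(len(m.Values())) // 2, via the Values accessor added in this hunk
}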
type BuildOptions struct { @@ -230,12 +236,12 @@ type ClientConn interface { // // Examples: // -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { // Deprecated: use URL.Scheme instead. Scheme string diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 2c47cd54f07c..05a9d4e0bac0 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,7 +19,6 @@ package grpc import ( - "fmt" "strings" "sync" @@ -27,6 +26,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -97,10 +97,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { if ccr.done.HasFired() { return nil } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } + ccr.addChannelzTraceEvent(s) ccr.curState = s if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { return balancer.ErrBadResolverState @@ -125,10 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) ccr.curState.Addresses = addrs ccr.cc.updateResolverState(ccr.curState, nil) } @@ -141,7 +135,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) if ccr.cc.dopts.disableServiceConfig { channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return @@ -151,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) return } - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) ccr.curState.ServiceConfig = scpr ccr.cc.updateResolverState(ccr.curState, nil) } @@ -180,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) 
== 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 5d407b004b0e..934fc1aa015e 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -198,7 +198,7 @@ func Header(md *metadata.MD) CallOption { // HeaderCallOption is a CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -220,7 +220,7 @@ func Trailer(md *metadata.MD) CallOption { // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -242,7 +242,7 @@ func Peer(p *peer.Peer) CallOption { // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -282,7 +282,7 @@ func FailFast(failFast bool) CallOption { // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -305,7 +305,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption { // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can receive. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -328,7 +328,7 @@ func MaxCallSendMsgSize(bytes int) CallOption { // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can send. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -351,7 +351,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -369,7 +369,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -379,7 +379,7 @@ func UseCompressor(name string) CallOption { // CompressorCallOption is a CallOption that indicates the compressor to use. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
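The rpc_util.go hunks above only reflow the CallOption doc comments into the Go 1.19 doc-comment heading style, but a compact usage sketch of those options may still be useful. The method name, message types, and size limit below are placeholders, and gzip additionally requires the gRPC gzip encoding package to be linked into the binary.

package example

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
	"google.golang.org/protobuf/types/known/emptypb"
)

// callWithOptions issues one unary RPC and collects the response metadata
// and peer identity after it completes, as the doc comments describe.
func callWithOptions(ctx context.Context, cc *grpc.ClientConn) error {
	var (
		hdr, trl metadata.MD
		p        peer.Peer
	)
	err := cc.Invoke(ctx, "/example.Service/Method",
		&emptypb.Empty{}, &emptypb.Empty{},
		grpc.Header(&hdr),              // response headers, populated after the RPC
		grpc.Trailer(&trl),             // response trailers
		grpc.Peer(&p),                  // identity of the remote peer
		grpc.MaxCallRecvMsgSize(4<<20), // per-call receive limit
		grpc.UseCompressor("gzip"),     // request compression by registered name
	)
	if err != nil {
		return err
	}
	log.Printf("peer=%v header=%v trailer=%v", p.Addr, hdr, trl)
	return nil
}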
@@ -416,7 +416,7 @@ func CallContentSubtype(contentSubtype string) CallOption { // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -444,7 +444,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -455,7 +455,7 @@ func ForceCodec(codec encoding.Codec) CallOption { // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -480,7 +480,7 @@ func CallCustomCodec(codec Codec) CallOption { // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -497,7 +497,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -508,7 +508,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption { // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -548,10 +548,11 @@ type parser struct { // format. The caller owns the returned msg memory. // // If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index eadf9e05fd18..2808b7c83e80 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -73,6 +73,14 @@ func init() { internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + extraServerOptions = append(extraServerOptions, opt...) 
+ } + internal.ClearGlobalServerOptions = func() { + extraServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption } var statusOK = status.New(codes.OK, "") @@ -134,7 +142,7 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData serverWorkerChannels []chan *serverWorkerData @@ -149,8 +157,9 @@ type serverOptions struct { streamInt StreamServerInterceptor chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger inTapHandle tap.ServerInHandle - statsHandler stats.Handler + statsHandlers []stats.Handler maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int @@ -174,6 +183,7 @@ var defaultServerOptions = serverOptions{ writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } +var extraServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -183,7 +193,7 @@ type ServerOption interface { // EmptyServerOption does not alter the server configuration. It can be embedded // in another structure to build custom server options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -207,10 +217,27 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption { } } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -218,11 +245,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. 
func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -298,7 +324,7 @@ func CustomCodec(codec Codec) ServerOption { // https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. // Will be supported throughout 1.x. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -419,7 +445,7 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -435,7 +461,21 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.statsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. +func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl }) } @@ -462,7 +502,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // new connections. If this is not set, the default is 120 seconds. A zero or // negative value will result in an immediate timeout. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -483,7 +523,7 @@ func MaxHeaderListSize(s uint32) ServerOption { // HeaderTableSize returns a ServerOption that sets the size of dynamic // header table for stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -498,7 +538,7 @@ func HeaderTableSize(s uint32) ServerOption { // zero (default) will disable workers and spawn a new goroutine for each // stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -560,6 +600,9 @@ func (s *Server) stopServerWorkers() { // started to accept requests yet. 
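Two server-option changes in this hunk lend themselves to a usage sketch: the WriteBufferSize/ReadBufferSize docs now state that zero or negative values disable the buffers, and grpc.StatsHandler appends to a slice (ignoring nil) so multiple handlers can be registered instead of the last one silently winning. A minimal, hedged construction example; the handlers and sizes are supplied by the caller and purely illustrative:

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// newInstrumentedServer registers two stats handlers (both now receive every
// connection and RPC event) and tunes the transport buffers.
func newInstrumentedServer(metrics, tracing stats.Handler) *grpc.Server {
	return grpc.NewServer(
		grpc.StatsHandler(metrics),
		grpc.StatsHandler(tracing),
		grpc.WriteBufferSize(64*1024), // batch writes up to 64KB before hitting the conn
		grpc.ReadBufferSize(0),        // zero (or negative) now disables the read buffer
	)
}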
func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions + for _, o := range extraServerOptions { + o.apply(&opts) + } for _, o := range opt { o.apply(&opts) } @@ -584,9 +627,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } @@ -712,7 +754,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -724,9 +766,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -759,11 +800,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -773,8 +809,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -866,7 +910,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ConnectionTimeout: s.opts.connectionTimeout, Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, + StatsHandlers: s.opts.statsHandlers, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -887,7 +931,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { if err != credentials.ErrConnDispatched { // Don't log on ErrConnDispatched and io.EOF to prevent log spam. 
if err != io.EOF { - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) } c.Close() } @@ -898,7 +942,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { } func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() + defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup var roundRobinCounter uint32 @@ -945,26 +989,27 @@ var _ http.Handler = (*Server)(nil) // To share one port (such as 443 for https) between gRPC and an // existing http.Handler, use a root http.Handler such as: // -// if r.ProtoMajor == 2 && strings.HasPrefix( -// r.Header.Get("Content-Type"), "application/grpc") { -// grpcServer.ServeHTTP(w, r) -// } else { -// yourMux.ServeHTTP(w, r) -// } +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } // // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally // separate from grpc-go's HTTP/2 server. Performance and features may vary // between the two paths. ServeHTTP does not support some gRPC features // available through grpc-go's HTTP/2 server. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. return } if !s.addConn(listenerAddressForServeHTTP, st) { @@ -1002,7 +1047,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { @@ -1075,8 +1120,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) - if err == nil && s.opts.statsHandler != nil { - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } } return err } @@ -1104,32 +1151,27 @@ func chainUnaryServerInterceptors(s *Server) { func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. 
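// The ServeHTTP doc comment above sketches a root http.Handler that shares one
// port between gRPC and an existing mux. A self-contained version of that
// sketch, assuming TLS; the certificate paths and the /healthz route are
// placeholders.

package main

import (
	"log"
	"net/http"
	"strings"

	"google.golang.org/grpc"
)

func main() {
	grpcServer := grpc.NewServer()
	httpMux := http.NewServeMux()
	httpMux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	root := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Same split as the doc comment: HTTP/2 requests with the gRPC
		// content type go to the gRPC server, everything else to the mux.
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
		} else {
			httpMux.ServeHTTP(w, r)
		}
	})

	// net/http only negotiates HTTP/2 over TLS by default, and ServeHTTP
	// rides on that HTTP/2 server rather than grpc-go's own transport.
	log.Fatal(http.ListenAndServeTLS(":443", "server.crt", "server.key", root))
}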
- var state struct { - i int - next UnaryHandler - } - state.next = func(ctx context.Context, req interface{}) (interface{}, error) { - if state.i == len(interceptors)-1 { - return interceptors[state.i](ctx, req, info, handler) - } - state.i++ - return interceptors[state.i-1](ctx, req, info, state.next) - } - return state.next(ctx, req) + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - sh := s.opts.statsHandler - if sh != nil || trInfo != nil || channelz.IsOn() { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - if sh != nil { + for _, sh := range shs { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, @@ -1160,7 +1202,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.Finish() } - if sh != nil { + for _, sh := range shs { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1180,9 +1222,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } }() } - - binlog := binarylog.GetMethodLogger(stream.Method()) - if binlog != nil { + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ @@ -1202,7 +1251,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if peer, ok := peer.FromContext(ctx); ok { logEntry.PeerAddr = peer.Addr } - binlog.Log(logEntry) + for _, binlog := range binlogs { + binlog.Log(logEntry) + } } // comp and cp are used for compression. decomp and dc are used for @@ -1242,7 +1293,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } var payInfo *payloadInfo - if sh != nil || binlog != nil { + if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) @@ -1259,7 +1310,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - if sh != nil { + for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ RecvTime: time.Now(), Payload: v, @@ -1268,10 +1319,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
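// chainUnaryInterceptors above now builds the chain with the recursive
// getChainUnaryHandler instead of the closure-with-state variant; registration
// order and behavior are unchanged. A hedged sketch of installing such a
// chain; the logging and recovery interceptors are illustrative only.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func logging(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("unary call %s", info.FullMethod)
	return handler(ctx, req) // hands off to the next interceptor, then the method handler
}

func recovery(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = status.Errorf(codes.Internal, "panic in %s: %v", info.FullMethod, r)
		}
	}()
	return handler(ctx, req)
}

func main() {
	// With the chaining code above, logging wraps recovery, which wraps the
	// registered method handler.
	_ = grpc.NewServer(grpc.ChainUnaryInterceptor(logging, recovery))
}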
Length: len(d), }) } - if binlog != nil { - binlog.Log(&binarylog.ClientMessage{ + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: d, - }) + } + for _, binlog := range binlogs { + binlog.Log(cm) + } } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) @@ -1283,9 +1337,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1294,18 +1349,24 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if e := t.WriteStatus(stream, appStatus); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } - if binlog != nil { + if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { // Only log serverHeader if there was header. Otherwise it can // be trailer only. - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + } } - binlog.Log(&binarylog.ServerTrailer{ + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(st) + } } return appErr } @@ -1331,26 +1392,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerTrailer{ + } + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(st) + } } return err } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerMessage{ + } + sm := &binarylog.ServerMessage{ Message: reply, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(sm) + } } if channelz.IsOn() { t.IncrMsgSent() @@ -1362,11 +1431,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? err = t.WriteStatus(stream, statusOK) - if binlog != nil { - binlog.Log(&binarylog.ServerTrailer{ + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(st) + } } return err } @@ -1394,21 +1466,16 @@ func chainStreamServerInterceptors(s *Server) { func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. 
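// The appErr handling above now converts non-status errors with
// status.FromContextError, so handlers that return context errors surface as
// Canceled or DeadlineExceeded instead of Unknown. A small illustration:

package main

import (
	"context"
	"errors"
	"fmt"

	"google.golang.org/grpc/status"
)

func main() {
	// Context errors map to their dedicated codes...
	fmt.Println(status.FromContextError(context.DeadlineExceeded).Code()) // DeadlineExceeded
	fmt.Println(status.FromContextError(context.Canceled).Code())         // Canceled
	// ...while any other plain error still becomes Unknown, as before.
	fmt.Println(status.FromContextError(errors.New("boom")).Code()) // Unknown
}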
This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. - var state struct { - i int - next StreamHandler - } - state.next = func(srv interface{}, ss ServerStream) error { - if state.i == len(interceptors)-1 { - return interceptors[state.i](srv, ss, info, handler) - } - state.i++ - return interceptors[state.i-1](srv, ss, info, state.next) - } - return state.next(srv, ss) + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv interface{}, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1416,16 +1483,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if channelz.IsOn() { s.incrCallsStarted() } - sh := s.opts.statsHandler + shs := s.opts.statsHandlers var statsBegin *stats.Begin - if sh != nil { + if len(shs) != 0 { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - sh.HandleRPC(stream.Context(), statsBegin) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), statsBegin) + } } ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ @@ -1437,10 +1506,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: sh, + statsHandler: shs, } - if sh != nil || trInfo != nil || channelz.IsOn() { + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. defer func() { if trInfo != nil { @@ -1454,7 +1523,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } - if sh != nil { + if len(shs) != 0 { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1462,7 +1531,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), end) + } } if channelz.IsOn() { @@ -1475,8 +1546,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp }() } - ss.binlog = binarylog.GetMethodLogger(stream.Method()) - if ss.binlog != nil { + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1495,7 +1573,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if peer, ok := peer.FromContext(ss.Context()); ok { logEntry.PeerAddr = peer.Addr } - ss.binlog.Log(logEntry) + for _, binlog := range ss.binlogs { + binlog.Log(logEntry) + } } // If dc is set and matches the stream's compression, use it. 
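// chainStreamInterceptors above gets the same recursive rewrite as the unary
// path. A sketch of wiring stream interceptors, mirroring the unary example;
// both interceptors here are illustrative placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
)

func streamLogging(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	log.Printf("stream %s (client-streaming=%v, server-streaming=%v)",
		info.FullMethod, info.IsClientStream, info.IsServerStream)
	return handler(srv, ss)
}

func streamAuth(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	// A real check would inspect metadata from ss.Context() before handing
	// off to the next handler in the chain.
	return handler(srv, ss)
}

func main() {
	// getChainStreamHandler above arranges streamLogging to wrap streamAuth,
	// which wraps the registered stream handler.
	_ = grpc.NewServer(grpc.ChainStreamInterceptor(streamLogging, streamAuth))
}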
Otherwise, try @@ -1549,7 +1629,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { @@ -1559,11 +1641,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } t.WriteStatus(ss.s, appStatus) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } } // TODO: Should we log an error from WriteStatus here and below? return appErr @@ -1574,11 +1659,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } err = t.WriteStatus(ss.s, statusOK) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } } return err } @@ -1654,7 +1742,7 @@ type streamKey struct{} // NewContextWithServerTransportStream creates a new context from ctx and // attaches stream to it. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1669,7 +1757,7 @@ func NewContextWithServerTransportStream(ctx context.Context, stream ServerTrans // // See also NewContextWithServerTransportStream. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1684,7 +1772,7 @@ type ServerTransportStream interface { // ctx. Returns nil if the given context has no stream associated with it // (which implies it is not an RPC invocation context). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1706,11 +1794,7 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis @@ -1726,7 +1810,7 @@ func (s *Server) Stop() { } for _, cs := range conns { for st := range cs { - st.Close() + st.Close(errors.New("Server.Stop called")) } } if s.opts.numServerWorkers > 0 { @@ -1748,11 +1832,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() @@ -1805,12 +1885,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. 
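// Stop and GracefulStop above now unregister from channelz unconditionally and
// pass an explanatory error into transport Close; the caller-side shutdown
// pattern is unchanged. A minimal sketch of graceful shutdown on SIGTERM; the
// listen address is a placeholder.

package main

import (
	"log"
	"net"
	"os"
	"os/signal"
	"syscall"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()

	go func() {
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
		<-sig
		// Stops accepting new RPCs and waits for in-flight ones to drain;
		// Stop would instead close live transports immediately.
		s.GracefulStop()
	}()

	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}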
+// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1822,8 +1916,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1837,6 +1937,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 22c4240cf7e8..01bbb2025aed 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -57,10 +57,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancerName will override this. This is deprecated; - // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig - // will be used. + // LB is the load balancer the service providers recommends. This is + // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, + // lbConfig will be used. LB *string // lbConfig is the service config's load balancing configuration. 
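// The expanded SetHeader/SendHeader/SetTrailer docs above spell out exactly
// when header metadata is flushed. A hedged sketch of a unary handler using
// all three; echoRequest/echoResponse stand in for generated message types
// and the handler is not wired to a real service here.

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

type echoRequest struct{ Msg string }
type echoResponse struct{ Msg string }

func echo(ctx context.Context, req *echoRequest) (*echoResponse, error) {
	// Buffered and merged; sent with the first response or the RPC status,
	// or earlier if SendHeader is called.
	if err := grpc.SetHeader(ctx, metadata.Pairs("request-id", "abc-123")); err != nil {
		return nil, err
	}
	// Forces the merged headers onto the wire now; SetHeader fails afterwards.
	if err := grpc.SendHeader(ctx, metadata.Pairs("region", "us-east1")); err != nil {
		return nil, err
	}
	// Trailers are buffered until the RPC status is sent.
	if err := grpc.SetTrailer(ctx, metadata.Pairs("elapsed-ms", "12")); err != nil {
		return nil, err
	}
	return &echoResponse{Msg: req.Msg}, nil
}

func main() {}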
If @@ -218,7 +217,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { @@ -381,6 +380,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go index 73a2f926613e..35e7a20a04ba 100644 --- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -19,7 +19,7 @@ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 6d163b6e3842..623be39f26ba 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -76,14 +76,14 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // -// - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, the appropriate Status is returned. +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. // -// - If err is nil, a Status is returned with codes.OK and no message. +// - If err is nil, a Status is returned with codes.OK and no message. // -// - Otherwise, err is an error not compatible with this package. In this -// case, a Status is returned with codes.Unknown and err's Error() message, -// and ok is false. +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 625d47b34e59..0f8e6c0149da 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -36,8 +36,10 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -46,10 +48,12 @@ import ( ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. 
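// The FromError doc comment above is only being reflowed for the Go 1.19
// gofmt list style; its contract is unchanged. A small illustration:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// An error produced by the status package round-trips with ok == true.
	s, ok := status.FromError(status.Error(codes.NotFound, "missing"))
	fmt.Println(s.Code(), ok) // NotFound true

	// nil maps to codes.OK.
	s, ok = status.FromError(nil)
	fmt.Println(s.Code(), ok) // OK true

	// A plain error is not status-compatible: codes.Unknown and ok == false.
	s, ok = status.FromError(errors.New("boom"))
	fmt.Println(s.Code(), ok) // Unknown false
}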
Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used @@ -137,13 +141,13 @@ type ClientStream interface { // To ensure resources are not leaked due to the stream returned, one of the following // actions must be performed: // -// 1. Call Close on the ClientConn. -// 2. Cancel the context provided. -// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated -// client-streaming RPC, for instance, might use the helper function -// CloseAndRecv (note that CloseSend does not Recv, therefore is not -// guaranteed to release all resources). -// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. // // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. @@ -164,6 +168,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -187,6 +196,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } return nil, toRPCErr(err) } @@ -293,20 +309,35 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } - cs.binlog = binarylog.GetMethodLogger(method) - - if err := cs.newAttemptLocked(false /* isTransparent */); err != nil { - cs.finish(err) - return nil, err + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. 
+ cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } - if cs.binlog != nil { + if len(cs.binlogs) != 0 { md, _ := metadata.FromOutgoingContext(ctx) logEntry := &binarylog.ClientHeader{ OnClientSide: true, @@ -320,7 +351,9 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client logEntry.Timeout = 0 } } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } if desc != unaryStreamDesc { @@ -341,14 +374,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { +// newAttemptLocked creates a new csAttempt without a transport or stream. +func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) + } + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) method := cs.callHdr.Method - sh := cs.cc.dopts.copts.StatsHandler var beginTime time.Time - if sh != nil { + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) beginTime = time.Now() begin := &stats.Begin{ @@ -377,44 +416,39 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { ctx = trace.NewContext(ctx, trInfo.tr) } - newAttempt := &csAttempt{ - ctx: ctx, - beginTime: beginTime, - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, - } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) - } - }() - - if err := ctx.Err(); err != nil { - return toRPCErr(err) - } - - if cs.cc.parsedTarget.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } @@ -423,12 +457,21 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - // Return without converting to an RPC error so retry code can - // inspect. 
- return err + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. + return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s} return nil } @@ -454,7 +497,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlogs []binarylog.MethodLogger // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -504,8 +547,13 @@ type csAttempt struct { // and cleared when the finish method is called. trInfo *traceInfo - statsHandler stats.Handler - beginTime time.Time + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { @@ -525,41 +573,21 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. If the RPC should be // retried, the bool indicates whether it is being retried transparently. -func (cs *clientStream) shouldRetry(err error) (bool, error) { - if cs.attempt.s == nil { - // Error from NewClientStream. - nse, ok := err.(*transport.NewStreamError) - if !ok { - // Unexpected, but assume no I/O was performed and the RPC is not - // fatal, so retry indefinitely. - return true, nil - } - - // Unwrap and convert error. - err = toRPCErr(nse.Err) - - // Never retry DoNotRetry errors, which indicate the RPC should not be - // retried due to max header list size violation, etc. - if nse.DoNotRetry { - return false, err - } +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs - // In the event of a non-IO operation error from NewStream, we never - // attempted to write anything to the wire, so we can retry - // indefinitely. - if !nse.DoNotTransparentRetry { - return true, nil - } - } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. return false, err } + if a.s == nil && a.allowTransparentRetry { + return true, nil + } // Wait for the trailers. unprocessed := false - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. @@ -571,14 +599,14 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { + if a.s != nil { + if !a.s.TrailersOnly() { return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. 
- sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { @@ -595,10 +623,10 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } rp := cs.methodConfig.RetryPolicy @@ -643,19 +671,24 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } // Returns nil if a retry was performed and succeeded; error otherwise. -func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(toRPCErr(lastErr)) - isTransparent, err := cs.shouldRetry(lastErr) + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(isTransparent); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -665,7 +698,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -679,6 +715,18 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } + if len(cs.buffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. + var err error + if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.mu.Unlock() + cs.finish(err) + return err + } + } a := cs.attempt cs.mu.Unlock() err := op(a) @@ -695,7 +743,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -704,17 +752,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD + noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() + if err == transport.ErrNoHeaders { + noHeader = true + return nil + } return toRPCErr(err) }, cs.commitAttemptLocked) + if err != nil { cs.finish(err) return nil, err } - if cs.binlog != nil && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. 
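// shouldRetry and retryLocked above move retry bookkeeping onto csAttempt but
// keep the configured retry policy and grpc-retry-pushback-ms semantics. For
// context, a hedged sketch of a client opting into retries through a default
// service config; the service, method, and address names are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

const retryConfig = `{
  "methodConfig": [{
    "name": [{ "service": "echo.Echo", "method": "UnaryEcho" }],
    "retryPolicy": {
      "MaxAttempts": 4,
      "InitialBackoff": ".01s",
      "MaxBackoff": "1s",
      "BackoffMultiplier": 2.0,
      "RetryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(retryConfig),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	// RPCs on conn that fail with UNAVAILABLE are retried per the policy,
	// subject to server pushback and the transparent-retry rules above.
}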
+ + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. logEntry := &binarylog.ServerHeader{ OnClientSide: true, Header: m, @@ -723,10 +779,12 @@ func (cs *clientStream) Header() (metadata.MD, error) { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } - return m, err + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -744,10 +802,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -795,47 +852,48 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. - m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ClientMessage{ + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, - }) + Message: data, + } + for _, binlog := range cs.binlogs { + binlog.Log(cm) + } } - return + return err } func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.binlog != nil && !cs.serverHeaderBinlogged { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() } var recvInfo *payloadInfo - if cs.binlog != nil { + if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) }, cs.commitAttemptLocked) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ServerMessage{ + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ OnClientSide: true, Message: recvInfo.uncompressedBytes, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(sm) + } } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - if cs.binlog != nil { + if len(cs.binlogs) != 0 { // finish will not log Trailer. Log Trailer here. 
logEntry := &binarylog.ServerTrailer{ OnClientSide: true, @@ -848,7 +906,9 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } } return err @@ -869,10 +929,13 @@ func (cs *clientStream) CloseSend() error { return nil } cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if cs.binlog != nil { - cs.binlog.Log(&binarylog.ClientHalfClose{ + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(chc) + } } // We never returned an error here for reasons. return nil @@ -905,10 +968,13 @@ func (cs *clientStream) finish(err error) { // // Only one of cancel or trailer needs to be logged. In the cases where // users don't call RecvMsg, users must have already canceled the RPC. - if cs.binlog != nil && status.Code(err) == codes.Canceled { - cs.binlog.Log(&binarylog.Cancel{ + if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { + c := &binarylog.Cancel{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(c) + } } if err == nil { cs.retryThrottler.successfulRPC() @@ -941,8 +1007,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { } return io.EOF } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -952,7 +1018,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { cs := a.cs - if a.statsHandler != nil && payInfo == nil { + if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} } @@ -980,6 +1046,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } return io.EOF // indicates successful end of stream. } + return toRPCErr(err) } if a.trInfo != nil { @@ -989,8 +1056,8 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1049,7 +1116,7 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - if a.statsHandler != nil { + for _, sh := range a.statsHandlers { end := &stats.End{ Client: true, BeginTime: a.beginTime, @@ -1057,7 +1124,7 @@ func (a *csAttempt) finish(err error) { Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.ctx, end) + sh.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1362,8 +1429,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. 
// When call multiple times, all the provided metadata will be merged. @@ -1424,9 +1493,9 @@ type serverStream struct { maxSendMessageSize int trInfo *traceInfo - statsHandler stats.Handler + statsHandler []stats.Handler - binlog *binarylog.MethodLogger + binlogs []binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). @@ -1446,17 +1515,29 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) - if ss.binlog != nil && !ss.serverHeaderBinlogged { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } } return err } @@ -1465,6 +1546,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } @@ -1510,20 +1594,28 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if ss.binlog != nil { + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } } - ss.binlog.Log(&binarylog.ServerMessage{ + sm := &binarylog.ServerMessage{ Message: data, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(sm) + } } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } } return nil } @@ -1557,13 +1649,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { if err == io.EOF { - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientHalfClose{}) + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(chc) + } } return err } @@ -1572,20 +1667,25 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return toRPCErr(err) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. 
- Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), - }) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), + }) + } } - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientMessage{ + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: payInfo.uncompressedBytes, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(cm) + } } return nil } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index dbf34e6bb5f5..bfa5dfa40e4d 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -19,7 +19,7 @@ // Package tap defines the function handles which are executed on the transport // layer of gRPC-Go and related information. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 9d3fd73da94e..6410a0b96c92 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.44.1-dev" +const Version = "1.52.3" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index d923187a7b3a..1d03c0914810 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -67,7 +67,9 @@ elif [[ "$#" -ne 0 ]]; then fi # - Ensure all source files contain a copyright message. -not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. not grep 'func Test[^(]' *_test.go @@ -81,7 +83,7 @@ not git grep -l 'x/net/context' -- "*.go" git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' # - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" @@ -107,7 +109,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" + golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy git status --porcelain 2>&1 | fail_on_output || \ @@ -119,8 +121,9 @@ done # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. 
+# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. @@ -147,7 +150,6 @@ grpc.NewGZIPDecompressor grpc.RPCCompressor grpc.RPCDecompressor grpc.ServiceConfig -grpc.WithBalancerName grpc.WithCompressor grpc.WithDecompressor grpc.WithDialer diff --git a/vendor/google.golang.org/protobuf/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS deleted file mode 100644 index 2b00ddba0dfe..000000000000 --- a/vendor/google.golang.org/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/google.golang.org/protobuf/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS deleted file mode 100644 index 1fbd3e976faf..000000000000 --- a/vendor/google.golang.org/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 07da5db3450e..5f28148d805b 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -19,7 +19,7 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/set" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -113,7 +113,7 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { } // unmarshalMessage unmarshals a message into the given protoreflect.Message. -func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { +func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { return unmarshal(d, m) } @@ -159,10 +159,10 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { } // Get the FieldDescriptor. - var fd pref.FieldDescriptor + var fd protoreflect.FieldDescriptor if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { // Only extension names are in [name] format. 
- extName := pref.FullName(name[1 : len(name)-1]) + extName := protoreflect.FullName(name[1 : len(name)-1]) extType, err := d.opts.Resolver.FindExtensionByName(extName) if err != nil && err != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) @@ -240,23 +240,23 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { } } -func isKnownValue(fd pref.FieldDescriptor) bool { +func isKnownValue(fd protoreflect.FieldDescriptor) bool { md := fd.Message() return md != nil && md.FullName() == genid.Value_message_fullname } -func isNullValue(fd pref.FieldDescriptor) bool { +func isNullValue(fd protoreflect.FieldDescriptor) bool { ed := fd.Enum() return ed != nil && ed.FullName() == genid.NullValue_enum_fullname } // unmarshalSingular unmarshals to the non-repeated field specified // by the given FieldDescriptor. -func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) error { - var val pref.Value +func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { + var val protoreflect.Value var err error switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: val = m.NewField(fd) err = d.unmarshalMessage(val.Message(), false) default: @@ -272,63 +272,63 @@ func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) erro // unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by // the given FieldDescriptor. -func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { +func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { const b32 int = 32 const b64 int = 64 tok, err := d.Read() if err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: if tok.Kind() == json.Bool { - return pref.ValueOfBool(tok.Bool()), nil + return protoreflect.ValueOfBool(tok.Bool()), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if v, ok := unmarshalInt(tok, b32); ok { return v, nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if v, ok := unmarshalInt(tok, b64); ok { return v, nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if v, ok := unmarshalUint(tok, b32); ok { return v, nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if v, ok := unmarshalUint(tok, b64); ok { return v, nil } - case pref.FloatKind: + case protoreflect.FloatKind: if v, ok := unmarshalFloat(tok, b32); ok { return v, nil } - case pref.DoubleKind: + case protoreflect.DoubleKind: if v, ok := unmarshalFloat(tok, b64); ok { return v, nil } - case pref.StringKind: + case protoreflect.StringKind: if tok.Kind() == json.String { - return pref.ValueOfString(tok.ParsedString()), nil + return protoreflect.ValueOfString(tok.ParsedString()), nil } - case pref.BytesKind: + case protoreflect.BytesKind: if v, ok := unmarshalBytes(tok); ok { return v, nil } - case pref.EnumKind: + case protoreflect.EnumKind: if v, ok := unmarshalEnum(tok, fd); ok { return v, nil } @@ -337,10 +337,10 @@ func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { 
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) } - return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) } -func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) { +func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { switch tok.Kind() { case json.Number: return getInt(tok, bitSize) @@ -349,30 +349,30 @@ func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) { // Decode number from string. s := strings.TrimSpace(tok.ParsedString()) if len(s) != len(tok.ParsedString()) { - return pref.Value{}, false + return protoreflect.Value{}, false } dec := json.NewDecoder([]byte(s)) tok, err := dec.Read() if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } return getInt(tok, bitSize) } - return pref.Value{}, false + return protoreflect.Value{}, false } -func getInt(tok json.Token, bitSize int) (pref.Value, bool) { +func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { n, ok := tok.Int(bitSize) if !ok { - return pref.Value{}, false + return protoreflect.Value{}, false } if bitSize == 32 { - return pref.ValueOfInt32(int32(n)), true + return protoreflect.ValueOfInt32(int32(n)), true } - return pref.ValueOfInt64(n), true + return protoreflect.ValueOfInt64(n), true } -func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) { +func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { switch tok.Kind() { case json.Number: return getUint(tok, bitSize) @@ -381,30 +381,30 @@ func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) { // Decode number from string. s := strings.TrimSpace(tok.ParsedString()) if len(s) != len(tok.ParsedString()) { - return pref.Value{}, false + return protoreflect.Value{}, false } dec := json.NewDecoder([]byte(s)) tok, err := dec.Read() if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } return getUint(tok, bitSize) } - return pref.Value{}, false + return protoreflect.Value{}, false } -func getUint(tok json.Token, bitSize int) (pref.Value, bool) { +func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { n, ok := tok.Uint(bitSize) if !ok { - return pref.Value{}, false + return protoreflect.Value{}, false } if bitSize == 32 { - return pref.ValueOfUint32(uint32(n)), true + return protoreflect.ValueOfUint32(uint32(n)), true } - return pref.ValueOfUint64(n), true + return protoreflect.ValueOfUint64(n), true } -func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) { +func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { switch tok.Kind() { case json.Number: return getFloat(tok, bitSize) @@ -414,49 +414,49 @@ func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) { switch s { case "NaN": if bitSize == 32 { - return pref.ValueOfFloat32(float32(math.NaN())), true + return protoreflect.ValueOfFloat32(float32(math.NaN())), true } - return pref.ValueOfFloat64(math.NaN()), true + return protoreflect.ValueOfFloat64(math.NaN()), true case "Infinity": if bitSize == 32 { - return pref.ValueOfFloat32(float32(math.Inf(+1))), true + return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true } - return pref.ValueOfFloat64(math.Inf(+1)), true + return protoreflect.ValueOfFloat64(math.Inf(+1)), true case "-Infinity": if bitSize == 32 { - return pref.ValueOfFloat32(float32(math.Inf(-1))), true + return 
protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true } - return pref.ValueOfFloat64(math.Inf(-1)), true + return protoreflect.ValueOfFloat64(math.Inf(-1)), true } // Decode number from string. if len(s) != len(strings.TrimSpace(s)) { - return pref.Value{}, false + return protoreflect.Value{}, false } dec := json.NewDecoder([]byte(s)) tok, err := dec.Read() if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } return getFloat(tok, bitSize) } - return pref.Value{}, false + return protoreflect.Value{}, false } -func getFloat(tok json.Token, bitSize int) (pref.Value, bool) { +func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { n, ok := tok.Float(bitSize) if !ok { - return pref.Value{}, false + return protoreflect.Value{}, false } if bitSize == 32 { - return pref.ValueOfFloat32(float32(n)), true + return protoreflect.ValueOfFloat32(float32(n)), true } - return pref.ValueOfFloat64(n), true + return protoreflect.ValueOfFloat64(n), true } -func unmarshalBytes(tok json.Token) (pref.Value, bool) { +func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { if tok.Kind() != json.String { - return pref.Value{}, false + return protoreflect.Value{}, false } s := tok.ParsedString() @@ -469,36 +469,36 @@ func unmarshalBytes(tok json.Token) (pref.Value, bool) { } b, err := enc.DecodeString(s) if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } - return pref.ValueOfBytes(b), true + return protoreflect.ValueOfBytes(b), true } -func unmarshalEnum(tok json.Token, fd pref.FieldDescriptor) (pref.Value, bool) { +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { switch tok.Kind() { case json.String: // Lookup EnumNumber based on name. s := tok.ParsedString() - if enumVal := fd.Enum().Values().ByName(pref.Name(s)); enumVal != nil { - return pref.ValueOfEnum(enumVal.Number()), true + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), true } case json.Number: if n, ok := tok.Int(32); ok { - return pref.ValueOfEnum(pref.EnumNumber(n)), true + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true } case json.Null: // This is only valid for google.protobuf.NullValue. 
if isNullValue(fd) { - return pref.ValueOfEnum(0), true + return protoreflect.ValueOfEnum(0), true } } - return pref.Value{}, false + return protoreflect.Value{}, false } -func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { +func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { tok, err := d.Read() if err != nil { return err @@ -508,7 +508,7 @@ func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { } switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: for { tok, err := d.Peek() if err != nil { @@ -549,7 +549,7 @@ func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { return nil } -func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { +func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { tok, err := d.Read() if err != nil { return err @@ -561,18 +561,18 @@ func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { // Determine ahead whether map entry is a scalar type or a message type in // order to call the appropriate unmarshalMapValue func inside the for loop // below. - var unmarshalMapValue func() (pref.Value, error) + var unmarshalMapValue func() (protoreflect.Value, error) switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: - unmarshalMapValue = func() (pref.Value, error) { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { val := mmap.NewValue() if err := d.unmarshalMessage(val.Message(), false); err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } return val, nil } default: - unmarshalMapValue = func() (pref.Value, error) { + unmarshalMapValue = func() (protoreflect.Value, error) { return d.unmarshalScalar(fd.MapValue()) } } @@ -618,7 +618,7 @@ Loop: // unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. // A map key type is any integral or string type. -func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref.MapKey, error) { +func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { const b32 = 32 const b64 = 64 const base10 = 10 @@ -626,40 +626,40 @@ func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref. 
name := tok.Name() kind := fd.Kind() switch kind { - case pref.StringKind: - return pref.ValueOfString(name).MapKey(), nil + case protoreflect.StringKind: + return protoreflect.ValueOfString(name).MapKey(), nil - case pref.BoolKind: + case protoreflect.BoolKind: switch name { case "true": - return pref.ValueOfBool(true).MapKey(), nil + return protoreflect.ValueOfBool(true).MapKey(), nil case "false": - return pref.ValueOfBool(false).MapKey(), nil + return protoreflect.ValueOfBool(false).MapKey(), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if n, err := strconv.ParseInt(name, base10, b32); err == nil { - return pref.ValueOfInt32(int32(n)).MapKey(), nil + return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if n, err := strconv.ParseInt(name, base10, b64); err == nil { - return pref.ValueOfInt64(int64(n)).MapKey(), nil + return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if n, err := strconv.ParseUint(name, base10, b32); err == nil { - return pref.ValueOfUint32(uint32(n)).MapKey(), nil + return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if n, err := strconv.ParseUint(name, base10, b64); err == nil { - return pref.ValueOfUint64(uint64(n)).MapKey(), nil + return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil } default: panic(fmt.Sprintf("invalid kind for map key: %v", kind)) } - return pref.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) + return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index ba971f07810c..d09d22e139bc 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -18,7 +18,6 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -164,8 +163,8 @@ type typeURLFieldRanger struct { typeURL string } -func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { - if !f(typeFieldDesc, pref.ValueOfString(m.typeURL)) { +func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) { return } m.FieldRanger.Range(f) @@ -173,9 +172,9 @@ func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) // unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range // method to additionally iterate over unpopulated fields. 
-type unpopulatedFieldRanger struct{ pref.Message } +type unpopulatedFieldRanger struct{ protoreflect.Message } -func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { fds := m.Descriptor().Fields() for i := 0; i < fds.Len(); i++ { fd := fds.Get(i) @@ -184,10 +183,10 @@ func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) b } v := m.Get(fd) - isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil + isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() + isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil if isProto2Scalar || isSingularMessage { - v = pref.Value{} // use invalid value to emit null + v = protoreflect.Value{} // use invalid value to emit null } if !f(fd, v) { return @@ -199,7 +198,7 @@ func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) b // marshalMessage marshals the fields in the given protoreflect.Message. // If the typeURL is non-empty, then a synthetic "@type" field is injected // containing the URL as the value. -func (e encoder) marshalMessage(m pref.Message, typeURL string) error { +func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { return errors.New("no support for proto1 MessageSets") } @@ -220,7 +219,7 @@ func (e encoder) marshalMessage(m pref.Message, typeURL string) error { } var err error - order.RangeFields(fields, order.IndexNameFieldOrder, func(fd pref.FieldDescriptor, v pref.Value) bool { + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { name := fd.JSONName() if e.opts.UseProtoNames { name = fd.TextName() @@ -238,7 +237,7 @@ func (e encoder) marshalMessage(m pref.Message, typeURL string) error { } // marshalValue marshals the given protoreflect.Value. -func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { switch { case fd.IsList(): return e.marshalList(val.List(), fd) @@ -251,44 +250,44 @@ func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error { // marshalSingular marshals the given non-repeated field value. This includes // all scalar types, enums, messages, and groups. 
-func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { if !val.IsValid() { e.WriteNull() return nil } switch kind := fd.Kind(); kind { - case pref.BoolKind: + case protoreflect.BoolKind: e.WriteBool(val.Bool()) - case pref.StringKind: + case protoreflect.StringKind: if e.WriteString(val.String()) != nil { return errors.InvalidUTF8(string(fd.FullName())) } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: e.WriteInt(val.Int()) - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: e.WriteUint(val.Uint()) - case pref.Int64Kind, pref.Sint64Kind, pref.Uint64Kind, - pref.Sfixed64Kind, pref.Fixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind, + protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind: // 64-bit integers are written out as JSON string. e.WriteString(val.String()) - case pref.FloatKind: + case protoreflect.FloatKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 32) - case pref.DoubleKind: + case protoreflect.DoubleKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 64) - case pref.BytesKind: + case protoreflect.BytesKind: e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) - case pref.EnumKind: + case protoreflect.EnumKind: if fd.Enum().FullName() == genid.NullValue_enum_fullname { e.WriteNull() } else { @@ -300,7 +299,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: if err := e.marshalMessage(val.Message(), ""); err != nil { return err } @@ -312,7 +311,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } // marshalList marshals the given protoreflect.List. -func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error { +func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { e.StartArray() defer e.EndArray() @@ -326,12 +325,12 @@ func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error { } // marshalMap marshals given protoreflect.Map. 
-func (e encoder) marshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { +func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { e.StartObject() defer e.EndObject() var err error - order.RangeEntries(mmap, order.GenericKeyOrder, func(k pref.MapKey, v pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { if err = e.WriteName(k.String()); err != nil { return false } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 72924a9050cf..c85f8469480a 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -17,14 +17,14 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) -type marshalFunc func(encoder, pref.Message) error +type marshalFunc func(encoder, protoreflect.Message) error // wellKnownTypeMarshaler returns a marshal function if the message type // has specialized serialization behavior. It returns nil otherwise. -func wellKnownTypeMarshaler(name pref.FullName) marshalFunc { +func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc { if name.Parent() == genid.GoogleProtobuf_package { switch name.Name() { case genid.Any_message_name: @@ -58,11 +58,11 @@ func wellKnownTypeMarshaler(name pref.FullName) marshalFunc { return nil } -type unmarshalFunc func(decoder, pref.Message) error +type unmarshalFunc func(decoder, protoreflect.Message) error // wellKnownTypeUnmarshaler returns a unmarshal function if the message type // has specialized serialization behavior. It returns nil otherwise. -func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc { +func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc { if name.Parent() == genid.GoogleProtobuf_package { switch name.Name() { case genid.Any_message_name: @@ -102,7 +102,7 @@ func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc { // custom JSON representation, that representation will be embedded adding a // field `value` which holds the custom JSON in addition to the `@type` field. -func (e encoder) marshalAny(m pref.Message) error { +func (e encoder) marshalAny(m protoreflect.Message) error { fds := m.Descriptor().Fields() fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) fdValue := fds.ByNumber(genid.Any_Value_field_number) @@ -163,7 +163,7 @@ func (e encoder) marshalAny(m pref.Message) error { return nil } -func (d decoder) unmarshalAny(m pref.Message) error { +func (d decoder) unmarshalAny(m protoreflect.Message) error { // Peek to check for json.ObjectOpen to avoid advancing a read. start, err := d.Peek() if err != nil { @@ -233,8 +233,8 @@ func (d decoder) unmarshalAny(m pref.Message) error { fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) fdValue := fds.ByNumber(genid.Any_Value_field_number) - m.Set(fdType, pref.ValueOfString(typeURL)) - m.Set(fdValue, pref.ValueOfBytes(b)) + m.Set(fdType, protoreflect.ValueOfString(typeURL)) + m.Set(fdValue, protoreflect.ValueOfBytes(b)) return nil } @@ -354,7 +354,7 @@ func (d decoder) skipJSONValue() error { // unmarshalAnyValue unmarshals the given custom-type message from the JSON // object's "value" field. 
-func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) error { +func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error { // Skip ObjectOpen, and start reading the fields. d.Read() @@ -402,13 +402,13 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) erro // Wrapper types are encoded as JSON primitives like string, number or boolean. -func (e encoder) marshalWrapperType(m pref.Message) error { +func (e encoder) marshalWrapperType(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) val := m.Get(fd) return e.marshalSingular(val, fd) } -func (d decoder) unmarshalWrapperType(m pref.Message) error { +func (d decoder) unmarshalWrapperType(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) val, err := d.unmarshalScalar(fd) if err != nil { @@ -420,13 +420,13 @@ func (d decoder) unmarshalWrapperType(m pref.Message) error { // The JSON representation for Empty is an empty JSON object. -func (e encoder) marshalEmpty(pref.Message) error { +func (e encoder) marshalEmpty(protoreflect.Message) error { e.StartObject() e.EndObject() return nil } -func (d decoder) unmarshalEmpty(pref.Message) error { +func (d decoder) unmarshalEmpty(protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -462,12 +462,12 @@ func (d decoder) unmarshalEmpty(pref.Message) error { // The JSON representation for Struct is a JSON object that contains the encoded // Struct.fields map and follows the serialization rules for a map. -func (e encoder) marshalStruct(m pref.Message) error { +func (e encoder) marshalStruct(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) return e.marshalMap(m.Get(fd).Map(), fd) } -func (d decoder) unmarshalStruct(m pref.Message) error { +func (d decoder) unmarshalStruct(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) return d.unmarshalMap(m.Mutable(fd).Map(), fd) } @@ -476,12 +476,12 @@ func (d decoder) unmarshalStruct(m pref.Message) error { // ListValue.values repeated field and follows the serialization rules for a // repeated field. -func (e encoder) marshalListValue(m pref.Message) error { +func (e encoder) marshalListValue(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) return e.marshalList(m.Get(fd).List(), fd) } -func (d decoder) unmarshalListValue(m pref.Message) error { +func (d decoder) unmarshalListValue(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) return d.unmarshalList(m.Mutable(fd).List(), fd) } @@ -490,7 +490,7 @@ func (d decoder) unmarshalListValue(m pref.Message) error { // set. Each of the field in the oneof has its own custom serialization rule. A // Value message needs to be a oneof field set, else it is an error. 
-func (e encoder) marshalKnownValue(m pref.Message) error { +func (e encoder) marshalKnownValue(m protoreflect.Message) error { od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) fd := m.WhichOneof(od) if fd == nil { @@ -504,19 +504,19 @@ func (e encoder) marshalKnownValue(m pref.Message) error { return e.marshalSingular(m.Get(fd), fd) } -func (d decoder) unmarshalKnownValue(m pref.Message) error { +func (d decoder) unmarshalKnownValue(m protoreflect.Message) error { tok, err := d.Peek() if err != nil { return err } - var fd pref.FieldDescriptor - var val pref.Value + var fd protoreflect.FieldDescriptor + var val protoreflect.Value switch tok.Kind() { case json.Null: d.Read() fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) - val = pref.ValueOfEnum(0) + val = protoreflect.ValueOfEnum(0) case json.Bool: tok, err := d.Read() @@ -524,7 +524,7 @@ func (d decoder) unmarshalKnownValue(m pref.Message) error { return err } fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) - val = pref.ValueOfBool(tok.Bool()) + val = protoreflect.ValueOfBool(tok.Bool()) case json.Number: tok, err := d.Read() @@ -550,7 +550,7 @@ func (d decoder) unmarshalKnownValue(m pref.Message) error { return err } fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) - val = pref.ValueOfString(tok.ParsedString()) + val = protoreflect.ValueOfString(tok.ParsedString()) case json.ObjectOpen: fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) @@ -591,7 +591,7 @@ const ( maxSecondsInDuration = 315576000000 ) -func (e encoder) marshalDuration(m pref.Message) error { +func (e encoder) marshalDuration(m protoreflect.Message) error { fds := m.Descriptor().Fields() fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) @@ -623,7 +623,7 @@ func (e encoder) marshalDuration(m pref.Message) error { return nil } -func (d decoder) unmarshalDuration(m pref.Message) error { +func (d decoder) unmarshalDuration(m protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -646,8 +646,8 @@ func (d decoder) unmarshalDuration(m pref.Message) error { fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) - m.Set(fdSeconds, pref.ValueOfInt64(secs)) - m.Set(fdNanos, pref.ValueOfInt32(nanos)) + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) return nil } @@ -779,7 +779,7 @@ const ( minTimestampSeconds = -62135596800 ) -func (e encoder) marshalTimestamp(m pref.Message) error { +func (e encoder) marshalTimestamp(m protoreflect.Message) error { fds := m.Descriptor().Fields() fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) @@ -805,7 +805,7 @@ func (e encoder) marshalTimestamp(m pref.Message) error { return nil } -func (d decoder) unmarshalTimestamp(m pref.Message) error { +func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -829,8 +829,8 @@ func (d decoder) unmarshalTimestamp(m pref.Message) error { fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) - m.Set(fdSeconds, pref.ValueOfInt64(secs)) - m.Set(fdNanos, pref.ValueOfInt32(int32(t.Nanosecond()))) + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, 
protoreflect.ValueOfInt32(int32(t.Nanosecond()))) return nil } @@ -839,14 +839,14 @@ func (d decoder) unmarshalTimestamp(m pref.Message) error { // lower-camel naming conventions. Encoding should fail if the path name would // end up differently after a round-trip. -func (e encoder) marshalFieldMask(m pref.Message) error { +func (e encoder) marshalFieldMask(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) list := m.Get(fd).List() paths := make([]string, 0, list.Len()) for i := 0; i < list.Len(); i++ { s := list.Get(i).String() - if !pref.FullName(s).IsValid() { + if !protoreflect.FullName(s).IsValid() { return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) } // Return error if conversion to camelCase is not reversible. @@ -861,7 +861,7 @@ func (e encoder) marshalFieldMask(m pref.Message) error { return nil } -func (d decoder) unmarshalFieldMask(m pref.Message) error { +func (d decoder) unmarshalFieldMask(m protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -880,10 +880,10 @@ func (d decoder) unmarshalFieldMask(m pref.Message) error { for _, s0 := range paths { s := strs.JSONSnakeCase(s0) - if strings.Contains(s0, "_") || !pref.FullName(s).IsValid() { + if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() { return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) } - list.Append(pref.ValueOfString(s)) + list.Append(protoreflect.ValueOfString(s)) } return nil } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 179d6e8fc1ce..4921b2d4a76f 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -17,7 +17,7 @@ import ( "google.golang.org/protobuf/internal/set" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -103,7 +103,7 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { } // unmarshalMessage unmarshals into the given protoreflect.Message. -func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { +func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) error { messageDesc := m.Descriptor() if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { return errors.New("no support for proto1 MessageSets") @@ -150,24 +150,24 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { } // Resolve the field descriptor. - var name pref.Name - var fd pref.FieldDescriptor - var xt pref.ExtensionType + var name protoreflect.Name + var fd protoreflect.FieldDescriptor + var xt protoreflect.ExtensionType var xtErr error var isFieldNumberName bool switch tok.NameKind() { case text.IdentName: - name = pref.Name(tok.IdentName()) + name = protoreflect.Name(tok.IdentName()) fd = fieldDescs.ByTextName(string(name)) case text.TypeName: // Handle extensions only. This code path is not for Any. 
- xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) + xt, xtErr = d.opts.Resolver.FindExtensionByName(protoreflect.FullName(tok.TypeName())) case text.FieldNumber: isFieldNumberName = true - num := pref.FieldNumber(tok.FieldNumber()) + num := protoreflect.FieldNumber(tok.FieldNumber()) if !num.IsValid() { return d.newError(tok.Pos(), "invalid field number: %d", num) } @@ -215,7 +215,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { switch { case fd.IsList(): kind := fd.Kind() - if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -232,7 +232,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { default: kind := fd.Kind() - if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -262,11 +262,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { // unmarshalSingular unmarshals a non-repeated field value specified by the // given FieldDescriptor. -func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { - var val pref.Value +func (d decoder) unmarshalSingular(fd protoreflect.FieldDescriptor, m protoreflect.Message) error { + var val protoreflect.Value var err error switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: val = m.NewField(fd) err = d.unmarshalMessage(val.Message(), true) default: @@ -280,94 +280,94 @@ func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) erro // unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the // given FieldDescriptor. 
-func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { +func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { tok, err := d.Read() if err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } if tok.Kind() != text.Scalar { - return pref.Value{}, d.unexpectedTokenError(tok) + return protoreflect.Value{}, d.unexpectedTokenError(tok) } kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: if b, ok := tok.Bool(); ok { - return pref.ValueOfBool(b), nil + return protoreflect.ValueOfBool(b), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if n, ok := tok.Int32(); ok { - return pref.ValueOfInt32(n), nil + return protoreflect.ValueOfInt32(n), nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if n, ok := tok.Int64(); ok { - return pref.ValueOfInt64(n), nil + return protoreflect.ValueOfInt64(n), nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if n, ok := tok.Uint32(); ok { - return pref.ValueOfUint32(n), nil + return protoreflect.ValueOfUint32(n), nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if n, ok := tok.Uint64(); ok { - return pref.ValueOfUint64(n), nil + return protoreflect.ValueOfUint64(n), nil } - case pref.FloatKind: + case protoreflect.FloatKind: if n, ok := tok.Float32(); ok { - return pref.ValueOfFloat32(n), nil + return protoreflect.ValueOfFloat32(n), nil } - case pref.DoubleKind: + case protoreflect.DoubleKind: if n, ok := tok.Float64(); ok { - return pref.ValueOfFloat64(n), nil + return protoreflect.ValueOfFloat64(n), nil } - case pref.StringKind: + case protoreflect.StringKind: if s, ok := tok.String(); ok { if strs.EnforceUTF8(fd) && !utf8.ValidString(s) { - return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") + return protoreflect.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") } - return pref.ValueOfString(s), nil + return protoreflect.ValueOfString(s), nil } - case pref.BytesKind: + case protoreflect.BytesKind: if b, ok := tok.String(); ok { - return pref.ValueOfBytes([]byte(b)), nil + return protoreflect.ValueOfBytes([]byte(b)), nil } - case pref.EnumKind: + case protoreflect.EnumKind: if lit, ok := tok.Enum(); ok { // Lookup EnumNumber based on name. - if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil { - return pref.ValueOfEnum(enumVal.Number()), nil + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(lit)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), nil } } if num, ok := tok.Int32(); ok { - return pref.ValueOfEnum(pref.EnumNumber(num)), nil + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(num)), nil } default: panic(fmt.Sprintf("invalid scalar kind %v", kind)) } - return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) } // unmarshalList unmarshals into given protoreflect.List. A list value can // either be in [] syntax or simply just a single scalar/message value. 
-func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { +func (d decoder) unmarshalList(fd protoreflect.FieldDescriptor, list protoreflect.List) error { tok, err := d.Peek() if err != nil { return err } switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: switch tok.Kind() { case text.ListOpen: d.Read() @@ -441,22 +441,22 @@ func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { // unmarshalMap unmarshals into given protoreflect.Map. A map value is a // textproto message containing {key: , value: }. -func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { +func (d decoder) unmarshalMap(fd protoreflect.FieldDescriptor, mmap protoreflect.Map) error { // Determine ahead whether map entry is a scalar type or a message type in // order to call the appropriate unmarshalMapValue func inside // unmarshalMapEntry. - var unmarshalMapValue func() (pref.Value, error) + var unmarshalMapValue func() (protoreflect.Value, error) switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: - unmarshalMapValue = func() (pref.Value, error) { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { pval := mmap.NewValue() if err := d.unmarshalMessage(pval.Message(), true); err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } return pval, nil } default: - unmarshalMapValue = func() (pref.Value, error) { + unmarshalMapValue = func() (protoreflect.Value, error) { return d.unmarshalScalar(fd.MapValue()) } } @@ -494,9 +494,9 @@ func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { // unmarshalMap unmarshals into given protoreflect.Map. A map value is a // textproto message containing {key: , value: }. -func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error { - var key pref.MapKey - var pval pref.Value +func (d decoder) unmarshalMapEntry(fd protoreflect.FieldDescriptor, mmap protoreflect.Map, unmarshalMapValue func() (protoreflect.Value, error)) error { + var key protoreflect.MapKey + var pval protoreflect.Value Loop: for { // Read field name. @@ -520,7 +520,7 @@ Loop: return d.unexpectedTokenError(tok) } - switch name := pref.Name(tok.IdentName()); name { + switch name := protoreflect.Name(tok.IdentName()); name { case genid.MapEntry_Key_field_name: if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") @@ -535,7 +535,7 @@ Loop: key = val.MapKey() case genid.MapEntry_Value_field_name: - if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { + if kind := fd.MapValue().Kind(); (kind != protoreflect.MessageKind) && (kind != protoreflect.GroupKind) { if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -561,7 +561,7 @@ Loop: } if !pval.IsValid() { switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: // If value field is not set for message/group types, construct an // empty one as default. pval = mmap.NewValue() @@ -575,7 +575,7 @@ Loop: // unmarshalAny unmarshals an Any textproto. It can either be in expanded form // or non-expanded form. 
-func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { +func (d decoder) unmarshalAny(m protoreflect.Message, checkDelims bool) error { var typeURL string var bValue []byte var seenTypeUrl bool @@ -619,7 +619,7 @@ Loop: return d.syntaxError(tok.Pos(), "missing field separator :") } - switch name := pref.Name(tok.IdentName()); name { + switch name := protoreflect.Name(tok.IdentName()); name { case genid.Any_TypeUrl_field_name: if seenTypeUrl { return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) @@ -686,10 +686,10 @@ Loop: fds := m.Descriptor().Fields() if len(typeURL) > 0 { - m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), protoreflect.ValueOfString(typeURL)) } if len(bValue) > 0 { - m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) + m.Set(fds.ByNumber(genid.Any_Value_field_number), protoreflect.ValueOfBytes(bValue)) } return nil } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 8d5304dc5b32..ebf6c65284dd 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -20,7 +20,6 @@ import ( "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -150,7 +149,7 @@ type encoder struct { } // marshalMessage marshals the given protoreflect.Message. -func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { +func (e encoder) marshalMessage(m protoreflect.Message, inclDelims bool) error { messageDesc := m.Descriptor() if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { return errors.New("no support for proto1 MessageSets") @@ -190,7 +189,7 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { } // marshalField marshals the given field with protoreflect.Value. -func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalField(name string, val protoreflect.Value, fd protoreflect.FieldDescriptor) error { switch { case fd.IsList(): return e.marshalList(name, val.List(), fd) @@ -204,40 +203,40 @@ func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescript // marshalSingular marshals the given non-repeated field value. This includes // all scalar types, enums, messages, and groups. 
-func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: e.WriteBool(val.Bool()) - case pref.StringKind: + case protoreflect.StringKind: s := val.String() if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) { return errors.InvalidUTF8(string(fd.FullName())) } e.WriteString(s) - case pref.Int32Kind, pref.Int64Kind, - pref.Sint32Kind, pref.Sint64Kind, - pref.Sfixed32Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Int64Kind, + protoreflect.Sint32Kind, protoreflect.Sint64Kind, + protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: e.WriteInt(val.Int()) - case pref.Uint32Kind, pref.Uint64Kind, - pref.Fixed32Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, + protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: e.WriteUint(val.Uint()) - case pref.FloatKind: + case protoreflect.FloatKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 32) - case pref.DoubleKind: + case protoreflect.DoubleKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 64) - case pref.BytesKind: + case protoreflect.BytesKind: e.WriteString(string(val.Bytes())) - case pref.EnumKind: + case protoreflect.EnumKind: num := val.Enum() if desc := fd.Enum().Values().ByNumber(num); desc != nil { e.WriteLiteral(string(desc.Name())) @@ -246,7 +245,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error e.WriteInt(int64(num)) } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return e.marshalMessage(val.Message(), true) default: @@ -256,7 +255,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } // marshalList marshals the given protoreflect.List as multiple name-value fields. -func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error { +func (e encoder) marshalList(name string, list protoreflect.List, fd protoreflect.FieldDescriptor) error { size := list.Len() for i := 0; i < size; i++ { e.WriteName(name) @@ -268,9 +267,9 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto } // marshalMap marshals the given protoreflect.Map as multiple name-value fields. -func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { +func (e encoder) marshalMap(name string, mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { var err error - order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(key protoreflect.MapKey, val protoreflect.Value) bool { e.WriteName(name) e.StartMessage() defer e.EndMessage() @@ -334,7 +333,7 @@ func (e encoder) marshalUnknown(b []byte) { // marshalAny marshals the given google.protobuf.Any message in expanded form. // It returns true if it was able to marshal, else false. -func (e encoder) marshalAny(any pref.Message) bool { +func (e encoder) marshalAny(any protoreflect.Message) bool { // Construct the embedded message. 
fds := any.Descriptor().Fields() fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index a427f8b7043b..ce57f57ebd48 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -21,10 +21,11 @@ import ( type Number int32 const ( - MinValidNumber Number = 1 - FirstReservedNumber Number = 19000 - LastReservedNumber Number = 19999 - MaxValidNumber Number = 1<<29 - 1 + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 + DefaultRecursionLimit = 10000 ) // IsValid reports whether the field number is semantically valid. @@ -55,6 +56,7 @@ const ( errCodeOverflow errCodeReserved errCodeEndGroup + errCodeRecursionDepth ) var ( @@ -112,6 +114,10 @@ func ConsumeField(b []byte) (Number, Type, int) { // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + return consumeFieldValueD(num, typ, b, DefaultRecursionLimit) +} + +func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) { switch typ { case VarintType: _, n = ConsumeVarint(b) @@ -126,6 +132,9 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { _, n = ConsumeBytes(b) return n case StartGroupType: + if depth < 0 { + return errCodeRecursionDepth + } n0 := len(b) for { num2, typ2, n := ConsumeTag(b) @@ -140,7 +149,7 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { return n0 - len(b) } - n = ConsumeFieldValue(num2, typ2, b) + n = consumeFieldValueD(num2, typ2, b, depth-1) if n < 0 { return n // forward error code } @@ -507,6 +516,7 @@ func EncodeTag(num Number, typ Type) uint64 { } // DecodeZigZag decodes a zig-zag-encoded uint64 as an int64. +// // Input: {…, 5, 3, 1, 0, 2, 4, 6, …} // Output: {…, -3, -2, -1, 0, +1, +2, +3, …} func DecodeZigZag(x uint64) int64 { @@ -514,6 +524,7 @@ func DecodeZigZag(x uint64) int64 { } // EncodeZigZag encodes an int64 as a zig-zag-encoded uint64. +// // Input: {…, -3, -2, -1, 0, +1, +2, +3, …} // Output: {…, 5, 3, 1, 0, 2, 4, 6, …} func EncodeZigZag(x int64) uint64 { @@ -521,6 +532,7 @@ func EncodeZigZag(x int64) uint64 { } // DecodeBool decodes a uint64 as a bool. +// // Input: { 0, 1, 2, …} // Output: {false, true, true, …} func DecodeBool(x uint64) bool { @@ -528,6 +540,7 @@ func DecodeBool(x uint64) bool { } // EncodeBool encodes a bool as a uint64. 
+// // Input: {false, true} // Output: { 0, 1} func EncodeBool(x bool) uint64 { diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index 360c63329d4d..db5248e1b512 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type list interface { @@ -30,17 +30,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { if isRoot { var name string switch vs.(type) { - case pref.Names: + case protoreflect.Names: name = "Names" - case pref.FieldNumbers: + case protoreflect.FieldNumbers: name = "FieldNumbers" - case pref.FieldRanges: + case protoreflect.FieldRanges: name = "FieldRanges" - case pref.EnumRanges: + case protoreflect.EnumRanges: name = "EnumRanges" - case pref.FileImports: + case protoreflect.FileImports: name = "FileImports" - case pref.Descriptor: + case protoreflect.Descriptor: name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" default: name = reflect.ValueOf(vs).Elem().Type().Name() @@ -50,17 +50,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { var ss []string switch vs := vs.(type) { - case pref.Names: + case protoreflect.Names: for i := 0; i < vs.Len(); i++ { ss = append(ss, fmt.Sprint(vs.Get(i))) } return start + joinStrings(ss, false) + end - case pref.FieldNumbers: + case protoreflect.FieldNumbers: for i := 0; i < vs.Len(); i++ { ss = append(ss, fmt.Sprint(vs.Get(i))) } return start + joinStrings(ss, false) + end - case pref.FieldRanges: + case protoreflect.FieldRanges: for i := 0; i < vs.Len(); i++ { r := vs.Get(i) if r[0]+1 == r[1] { @@ -70,7 +70,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } } return start + joinStrings(ss, false) + end - case pref.EnumRanges: + case protoreflect.EnumRanges: for i := 0; i < vs.Len(); i++ { r := vs.Get(i) if r[0] == r[1] { @@ -80,7 +80,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } } return start + joinStrings(ss, false) + end - case pref.FileImports: + case protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") @@ -88,11 +88,11 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } return start + joinStrings(ss, allowMulti) + end default: - _, isEnumValue := vs.(pref.EnumValueDescriptors) + _, isEnumValue := vs.(protoreflect.EnumValueDescriptors) for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } @@ -106,20 +106,20 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { // // Using a list allows us to print the accessors in a sensible order. 
var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, + reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, + reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, + reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, + reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt + reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, + reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, + reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, + reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, } -func FormatDesc(s fmt.State, r rune, t pref.Descriptor) { +func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) } -func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -128,7 +128,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { start = rt.Name() + "{" } - _, isFile := t.(pref.FileDescriptor) + _, isFile := t.(protoreflect.FileDescriptor) rs := records{allowMulti: allowMulti} if t.IsPlaceholder() { if isFile { @@ -146,7 +146,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { rs.Append(rv, "Name") } switch t := t.(type) { - case pref.FieldDescriptor: + case protoreflect.FieldDescriptor: for _, s := range descriptorAccessors[rt] { switch s { case "MapKey": @@ -156,9 +156,9 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { case "MapValue": if v := t.MapValue(); v != nil { switch v.Kind() { - case pref.EnumKind: + case protoreflect.EnumKind: 
rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) default: rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) @@ -180,7 +180,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { rs.Append(rv, s) } } - case pref.OneofDescriptor: + case protoreflect.OneofDescriptor: var ss []string fs := t.Fields() for i := 0; i < fs.Len(); i++ { @@ -216,7 +216,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { if !rv.IsValid() { panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) } - if _, ok := rv.Interface().(pref.Value); ok { + if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] if !rv.IsNil() { rv = rv.Elem() @@ -250,9 +250,9 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { switch v := v.(type) { case list: s = formatListOpt(v, false, rs.allowMulti) - case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor: - s = string(v.(pref.Descriptor).Name()) - case pref.Descriptor: + case protoreflect.FieldDescriptor, protoreflect.OneofDescriptor, protoreflect.EnumValueDescriptor, protoreflect.MethodDescriptor: + s = string(v.(protoreflect.Descriptor).Name()) + case protoreflect.Descriptor: s = string(v.FullName()) case string: s = strconv.Quote(v) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go index fdd9b13f2fcf..328dc733b042 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go @@ -15,8 +15,8 @@ import ( "strconv" ptext "google.golang.org/protobuf/internal/encoding/text" - errors "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" ) // Format is the serialization format used to represent the default value. @@ -35,56 +35,56 @@ const ( // Unmarshal deserializes the default string s according to the given kind k. // When k is an enum, a list of enum value descriptors must be provided. -func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) { +func Unmarshal(s string, k protoreflect.Kind, evs protoreflect.EnumValueDescriptors, f Format) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { switch k { - case pref.BoolKind: + case protoreflect.BoolKind: if f == GoTag { switch s { case "1": - return pref.ValueOfBool(true), nil, nil + return protoreflect.ValueOfBool(true), nil, nil case "0": - return pref.ValueOfBool(false), nil, nil + return protoreflect.ValueOfBool(false), nil, nil } } else { switch s { case "true": - return pref.ValueOfBool(true), nil, nil + return protoreflect.ValueOfBool(true), nil, nil case "false": - return pref.ValueOfBool(false), nil, nil + return protoreflect.ValueOfBool(false), nil, nil } } - case pref.EnumKind: + case protoreflect.EnumKind: if f == GoTag { // Go tags use the numeric form of the enum value. 
if n, err := strconv.ParseInt(s, 10, 32); err == nil { - if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil { - return pref.ValueOfEnum(ev.Number()), ev, nil + if ev := evs.ByNumber(protoreflect.EnumNumber(n)); ev != nil { + return protoreflect.ValueOfEnum(ev.Number()), ev, nil } } } else { // Descriptor default_value use the enum identifier. - ev := evs.ByName(pref.Name(s)) + ev := evs.ByName(protoreflect.Name(s)) if ev != nil { - return pref.ValueOfEnum(ev.Number()), ev, nil + return protoreflect.ValueOfEnum(ev.Number()), ev, nil } } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if v, err := strconv.ParseInt(s, 10, 32); err == nil { - return pref.ValueOfInt32(int32(v)), nil, nil + return protoreflect.ValueOfInt32(int32(v)), nil, nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if v, err := strconv.ParseInt(s, 10, 64); err == nil { - return pref.ValueOfInt64(int64(v)), nil, nil + return protoreflect.ValueOfInt64(int64(v)), nil, nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if v, err := strconv.ParseUint(s, 10, 32); err == nil { - return pref.ValueOfUint32(uint32(v)), nil, nil + return protoreflect.ValueOfUint32(uint32(v)), nil, nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if v, err := strconv.ParseUint(s, 10, 64); err == nil { - return pref.ValueOfUint64(uint64(v)), nil, nil + return protoreflect.ValueOfUint64(uint64(v)), nil, nil } - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: var v float64 var err error switch s { @@ -98,29 +98,29 @@ func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) ( v, err = strconv.ParseFloat(s, 64) } if err == nil { - if k == pref.FloatKind { - return pref.ValueOfFloat32(float32(v)), nil, nil + if k == protoreflect.FloatKind { + return protoreflect.ValueOfFloat32(float32(v)), nil, nil } else { - return pref.ValueOfFloat64(float64(v)), nil, nil + return protoreflect.ValueOfFloat64(float64(v)), nil, nil } } - case pref.StringKind: + case protoreflect.StringKind: // String values are already unescaped and can be used as is. - return pref.ValueOfString(s), nil, nil - case pref.BytesKind: + return protoreflect.ValueOfString(s), nil, nil + case protoreflect.BytesKind: if b, ok := unmarshalBytes(s); ok { - return pref.ValueOfBytes(b), nil, nil + return protoreflect.ValueOfBytes(b), nil, nil } } - return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) + return protoreflect.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) } // Marshal serializes v as the default string according to the given kind k. // When specifying the Descriptor format for an enum kind, the associated // enum value descriptor must be provided. 
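// Illustrative sketch (not part of the vendored change): the Unmarshal function
// above and the Marshal function below round-trip default values in two formats —
// GoTag (the compact form stored in generated struct tags) and Descriptor (the form
// used in descriptor default_value fields). As the branches above show, a bool
// default is "1"/"0" in GoTag form but "true"/"false" in Descriptor form, and an
// enum default is the numeric value in GoTag form but the identifier in Descriptor
// form. A minimal stand-alone mirror of the bool branch, for orientation only:
//
//	func boolDefault(s string, goTag bool) (val, ok bool) {
//		if goTag {
//			return s == "1", s == "1" || s == "0"
//		}
//		return s == "true", s == "true" || s == "false"
//	}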
-func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) { +func Marshal(v protoreflect.Value, ev protoreflect.EnumValueDescriptor, k protoreflect.Kind, f Format) (string, error) { switch k { - case pref.BoolKind: + case protoreflect.BoolKind: if f == GoTag { if v.Bool() { return "1", nil @@ -134,17 +134,17 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) ( return "false", nil } } - case pref.EnumKind: + case protoreflect.EnumKind: if f == GoTag { return strconv.FormatInt(int64(v.Enum()), 10), nil } else { return string(ev.Name()), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: return strconv.FormatInt(v.Int(), 10), nil - case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: return strconv.FormatUint(v.Uint(), 10), nil - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: f := v.Float() switch { case math.IsInf(f, -1): @@ -154,16 +154,16 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) ( case math.IsNaN(f): return "nan", nil default: - if k == pref.FloatKind { + if k == protoreflect.FloatKind { return strconv.FormatFloat(f, 'g', -1, 32), nil } else { return strconv.FormatFloat(f, 'g', -1, 64), nil } } - case pref.StringKind: + case protoreflect.StringKind: // String values are serialized as is without any escaping. return v.String(), nil - case pref.BytesKind: + case protoreflect.BytesKind: if s, ok := marshalBytes(v.Bytes()); ok { return s, nil } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go index c1866f3c1a78..a6693f0a2f39 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // The MessageSet wire format is equivalent to a message defined as follows, @@ -33,6 +33,7 @@ const ( // ExtensionName is the field name for extensions of MessageSet. // // A valid MessageSet extension must be of the form: +// // message MyMessage { // extend proto2.bridge.MessageSet { // optional MyMessage message_set_extension = 1234; @@ -42,13 +43,13 @@ const ( const ExtensionName = "message_set_extension" // IsMessageSet returns whether the message uses the MessageSet wire format. -func IsMessageSet(md pref.MessageDescriptor) bool { +func IsMessageSet(md protoreflect.MessageDescriptor) bool { xmd, ok := md.(interface{ IsMessageSet() bool }) return ok && xmd.IsMessageSet() } // IsMessageSetExtension reports this field properly extends a MessageSet. 
-func IsMessageSetExtension(fd pref.FieldDescriptor) bool { +func IsMessageSetExtension(fd protoreflect.FieldDescriptor) bool { switch { case fd.Name() != ExtensionName: return false diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 38f1931c6fd1..373d208374f8 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -11,10 +11,10 @@ import ( "strconv" "strings" - defval "google.golang.org/protobuf/internal/encoding/defval" - fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) var byteType = reflect.TypeOf(byte(0)) @@ -29,9 +29,9 @@ var byteType = reflect.TypeOf(byte(0)) // This does not populate the Enum or Message (except for weak message). // // This function is a best effort attempt; parsing errors are ignored. -func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor { - f := new(fdesc.Field) - f.L0.ParentFile = fdesc.SurrogateProto2 +func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { + f := new(filedesc.Field) + f.L0.ParentFile = filedesc.SurrogateProto2 for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -39,68 +39,68 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p } switch s := tag[:i]; { case strings.HasPrefix(s, "name="): - f.L0.FullName = pref.FullName(s[len("name="):]) + f.L0.FullName = protoreflect.FullName(s[len("name="):]) case strings.Trim(s, "0123456789") == "": n, _ := strconv.ParseUint(s, 10, 32) - f.L1.Number = pref.FieldNumber(n) + f.L1.Number = protoreflect.FieldNumber(n) case s == "opt": - f.L1.Cardinality = pref.Optional + f.L1.Cardinality = protoreflect.Optional case s == "req": - f.L1.Cardinality = pref.Required + f.L1.Cardinality = protoreflect.Required case s == "rep": - f.L1.Cardinality = pref.Repeated + f.L1.Cardinality = protoreflect.Repeated case s == "varint": switch goType.Kind() { case reflect.Bool: - f.L1.Kind = pref.BoolKind + f.L1.Kind = protoreflect.BoolKind case reflect.Int32: - f.L1.Kind = pref.Int32Kind + f.L1.Kind = protoreflect.Int32Kind case reflect.Int64: - f.L1.Kind = pref.Int64Kind + f.L1.Kind = protoreflect.Int64Kind case reflect.Uint32: - f.L1.Kind = pref.Uint32Kind + f.L1.Kind = protoreflect.Uint32Kind case reflect.Uint64: - f.L1.Kind = pref.Uint64Kind + f.L1.Kind = protoreflect.Uint64Kind } case s == "zigzag32": if goType.Kind() == reflect.Int32 { - f.L1.Kind = pref.Sint32Kind + f.L1.Kind = protoreflect.Sint32Kind } case s == "zigzag64": if goType.Kind() == reflect.Int64 { - f.L1.Kind = pref.Sint64Kind + f.L1.Kind = protoreflect.Sint64Kind } case s == "fixed32": switch goType.Kind() { case reflect.Int32: - f.L1.Kind = pref.Sfixed32Kind + f.L1.Kind = protoreflect.Sfixed32Kind case reflect.Uint32: - f.L1.Kind = pref.Fixed32Kind + f.L1.Kind = protoreflect.Fixed32Kind case reflect.Float32: - f.L1.Kind = pref.FloatKind + f.L1.Kind = protoreflect.FloatKind } case s == "fixed64": switch goType.Kind() { case reflect.Int64: - f.L1.Kind = pref.Sfixed64Kind + f.L1.Kind = protoreflect.Sfixed64Kind case reflect.Uint64: - f.L1.Kind = pref.Fixed64Kind + 
f.L1.Kind = protoreflect.Fixed64Kind case reflect.Float64: - f.L1.Kind = pref.DoubleKind + f.L1.Kind = protoreflect.DoubleKind } case s == "bytes": switch { case goType.Kind() == reflect.String: - f.L1.Kind = pref.StringKind + f.L1.Kind = protoreflect.StringKind case goType.Kind() == reflect.Slice && goType.Elem() == byteType: - f.L1.Kind = pref.BytesKind + f.L1.Kind = protoreflect.BytesKind default: - f.L1.Kind = pref.MessageKind + f.L1.Kind = protoreflect.MessageKind } case s == "group": - f.L1.Kind = pref.GroupKind + f.L1.Kind = protoreflect.GroupKind case strings.HasPrefix(s, "enum="): - f.L1.Kind = pref.EnumKind + f.L1.Kind = protoreflect.EnumKind case strings.HasPrefix(s, "json="): jsonName := s[len("json="):] if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { @@ -111,23 +111,23 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p f.L1.IsPacked = true case strings.HasPrefix(s, "weak="): f.L1.IsWeak = true - f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):])) + f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. s, i = tag[len("def="):], len(tag) v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag) - f.L1.Default = fdesc.DefaultValue(v, ev) + f.L1.Default = filedesc.DefaultValue(v, ev) case s == "proto3": - f.L0.ParentFile = fdesc.SurrogateProto3 + f.L0.ParentFile = filedesc.SurrogateProto3 } tag = strings.TrimPrefix(tag[i:], ",") } // The generator uses the group message name instead of the field name. // We obtain the real field name by lowercasing the group name. - if f.L1.Kind == pref.GroupKind { - f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName))) + if f.L1.Kind == protoreflect.GroupKind { + f.L0.FullName = protoreflect.FullName(strings.ToLower(string(f.L0.FullName))) } return f } @@ -140,38 +140,38 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p // Depending on the context on how Marshal is called, there are different ways // through which that information is determined. As such it is the caller's // responsibility to provide a function to obtain that information. 
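// Illustrative sketch (not part of the vendored change): the comma-separated tag
// grammar that Unmarshal above consumes and Marshal below emits is what appears in
// the `protobuf:"..."` struct tags of generated message types. The struct and field
// names here are hypothetical, shown only to make the format concrete:
//
//	type Example struct {
//		Id   int32  `protobuf:"varint,1,opt,name=id,proto3"`
//		Name string `protobuf:"bytes,2,opt,name=name,proto3"`
//	}
//
//	// Reading the raw tag back with the standard library:
//	f, _ := reflect.TypeOf(Example{}).FieldByName("Id")
//	fmt.Println(f.Tag.Get("protobuf")) // varint,1,opt,name=id,proto3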
-func Marshal(fd pref.FieldDescriptor, enumName string) string { +func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { var tag []string switch fd.Kind() { - case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind: + case protoreflect.BoolKind, protoreflect.EnumKind, protoreflect.Int32Kind, protoreflect.Uint32Kind, protoreflect.Int64Kind, protoreflect.Uint64Kind: tag = append(tag, "varint") - case pref.Sint32Kind: + case protoreflect.Sint32Kind: tag = append(tag, "zigzag32") - case pref.Sint64Kind: + case protoreflect.Sint64Kind: tag = append(tag, "zigzag64") - case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind: + case protoreflect.Sfixed32Kind, protoreflect.Fixed32Kind, protoreflect.FloatKind: tag = append(tag, "fixed32") - case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind: + case protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind, protoreflect.DoubleKind: tag = append(tag, "fixed64") - case pref.StringKind, pref.BytesKind, pref.MessageKind: + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind: tag = append(tag, "bytes") - case pref.GroupKind: + case protoreflect.GroupKind: tag = append(tag, "group") } tag = append(tag, strconv.Itoa(int(fd.Number()))) switch fd.Cardinality() { - case pref.Optional: + case protoreflect.Optional: tag = append(tag, "opt") - case pref.Required: + case protoreflect.Required: tag = append(tag, "req") - case pref.Repeated: + case protoreflect.Repeated: tag = append(tag, "rep") } if fd.IsPacked() { tag = append(tag, "packed") } name := string(fd.Name()) - if fd.Kind() == pref.GroupKind { + if fd.Kind() == protoreflect.GroupKind { // The name of the FieldDescriptor for a group field is // lowercased. To find the original capitalization, we // look in the field's MessageType. @@ -189,10 +189,10 @@ func Marshal(fd pref.FieldDescriptor, enumName string) string { // The previous implementation does not tag extension fields as proto3, // even when the field is defined in a proto3 file. Match that behavior // for consistency. - if fd.Syntax() == pref.Proto3 && !fd.IsExtension() { + if fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension() { tag = append(tag, "proto3") } - if fd.Kind() == pref.EnumKind && enumName != "" { + if fd.Kind() == protoreflect.EnumKind && enumName != "" { tag = append(tag, "enum="+enumName) } if fd.ContainingOneof() != nil { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index eb10ea10261a..427c62d037fc 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -8,7 +8,6 @@ import ( "bytes" "fmt" "io" - "regexp" "strconv" "unicode/utf8" @@ -381,7 +380,7 @@ func (d *Decoder) currentOpenKind() (Kind, byte) { case '[': return ListOpen, ']' } - panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh))) + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh)) } func (d *Decoder) pushOpenStack(ch byte) { @@ -421,7 +420,7 @@ func (d *Decoder) parseFieldName() (tok Token, err error) { return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) } - return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in)) + return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in)) } // parseTypeName parses Any type URL or extension field name. 
The name is @@ -571,7 +570,7 @@ func (d *Decoder) parseScalar() (Token, error) { return tok, nil } - return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in)) + return Token{}, d.newSyntaxError("invalid scalar value: %s", errId(d.in)) } // parseLiteralValue parses a literal value. A literal value is used for @@ -653,8 +652,29 @@ func consume(b []byte, n int) []byte { return b } -// Any sequence that looks like a non-delimiter (for error reporting). -var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`) +// errId extracts a byte sequence that looks like an invalid ID +// (for the purposes of error reporting). +func errId(seq []byte) []byte { + const maxLen = 32 + for i := 0; i < len(seq); { + if i > maxLen { + return append(seq[:i:i], "…"...) + } + r, size := utf8.DecodeRune(seq[i:]) + if r > utf8.RuneSelf || (r != '/' && isDelim(byte(r))) { + if i == 0 { + // Either the first byte is invalid UTF-8 or a + // delimiter, or the first rune is non-ASCII. + // Return it as-is. + i = size + } + return seq[:i:i] + } + i += size + } + // No delimiter found. + return seq +} // isDelim returns true if given byte is a delimiter character. func isDelim(c byte) bool { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go index f2d90b78999f..81a5d8c86139 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -50,8 +50,10 @@ type number struct { // parseNumber constructs a number object from given input. It allows for the // following patterns: -// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) -// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// +// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) +// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// // It also returns the number of parsed bytes for the given number, 0 if it is // not a number. func parseNumber(input []byte) number { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go index 0ce8d6fb83d9..7ae6c2a3c26d 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go @@ -24,6 +24,6 @@ // the Go implementation should as well. // // The text format is almost a superset of JSON except: -// * message keys are not quoted strings, but identifiers -// * the top-level value must be a message without the delimiters +// - message keys are not quoted strings, but identifiers +// - the top-level value must be a message without the delimiters package text diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go index f90e909b37ab..fbcd349207dd 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
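// Illustrative sketch (not part of the vendored change): the integer and float
// grammars documented on parseNumber in decode_number.go above. The patterns below
// are adapted from that doc comment, anchored at both ends here so they act as
// whole-string checks, only to show which literals qualify:
//
//	intPat := regexp.MustCompile(`^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*)$`)
//	floatPat := regexp.MustCompile(`^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?)$`)
//	for _, s := range []string{"42", "0x2A", "052", "-1.5e3", "2.5f"} {
//		fmt.Printf("%q integer=%v float=%v\n", s, intPat.MatchString(s), floatPat.MatchString(s))
//	}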
+//go:build !go1.13 // +build !go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go index dc05f4191c01..5e72f1cde9e1 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.13 // +build go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go index b293b6947361..7cac1c19016f 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -12,8 +12,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" ) // Builder construct a protoreflect.FileDescriptor from the raw descriptor. @@ -38,7 +37,7 @@ type Builder struct { // TypeResolver resolves extension field types for descriptor options. // If nil, it uses protoregistry.GlobalTypes. TypeResolver interface { - preg.ExtensionTypeResolver + protoregistry.ExtensionTypeResolver } // FileRegistry is use to lookup file, enum, and message dependencies. @@ -46,8 +45,8 @@ type Builder struct { // If nil, it uses protoregistry.GlobalFiles. FileRegistry interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) - FindDescriptorByName(pref.FullName) (pref.Descriptor, error) - RegisterFile(pref.FileDescriptor) error + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) + RegisterFile(protoreflect.FileDescriptor) error } } @@ -55,8 +54,8 @@ type Builder struct { // If so, it permits looking up an enum or message dependency based on the // sub-list and element index into filetype.Builder.DependencyIndexes. type resolverByIndex interface { - FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor - FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor + FindEnumByIndex(int32, int32, []Enum, []Message) protoreflect.EnumDescriptor + FindMessageByIndex(int32, int32, []Enum, []Message) protoreflect.MessageDescriptor } // Indexes of each sub-list in filetype.Builder.DependencyIndexes. @@ -70,7 +69,7 @@ const ( // Out is the output of the Builder. type Out struct { - File pref.FileDescriptor + File protoreflect.FileDescriptor // Enums is all enum descriptors in "flattened ordering". Enums []Enum @@ -97,10 +96,10 @@ func (db Builder) Build() (out Out) { // Initialize resolvers and registries if unpopulated. 
if db.TypeResolver == nil { - db.TypeResolver = preg.GlobalTypes + db.TypeResolver = protoregistry.GlobalTypes } if db.FileRegistry == nil { - db.FileRegistry = preg.GlobalFiles + db.FileRegistry = protoregistry.GlobalFiles } fd := newRawFile(db) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 98ab142aeee6..7c3689baee8a 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -17,7 +17,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -43,9 +43,9 @@ type ( L2 *FileL2 } FileL1 struct { - Syntax pref.Syntax + Syntax protoreflect.Syntax Path string - Package pref.FullName + Package protoreflect.FullName Enums Enums Messages Messages @@ -53,36 +53,36 @@ type ( Services Services } FileL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Imports FileImports Locations SourceLocations } ) -func (fd *File) ParentFile() pref.FileDescriptor { return fd } -func (fd *File) Parent() pref.Descriptor { return nil } -func (fd *File) Index() int { return 0 } -func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax } -func (fd *File) Name() pref.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() pref.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } -func (fd *File) Options() pref.ProtoMessage { +func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } +func (fd *File) Parent() protoreflect.Descriptor { return nil } +func (fd *File) Index() int { return 0 } +func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() } return descopts.File } -func (fd *File) Path() string { return fd.L1.Path } -func (fd *File) Package() pref.FullName { return fd.L1.Package } -func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports } -func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums } -func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages } -func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions } -func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services } -func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations } -func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } -func (fd *File) ProtoType(pref.FileDescriptor) {} -func (fd *File) ProtoInternal(pragma.DoNotImplement) {} +func (fd *File) Path() string { return fd.L1.Path } +func (fd *File) Package() protoreflect.FullName { return fd.L1.Package } +func (fd *File) Imports() protoreflect.FileImports { return &fd.lazyInit().Imports } +func (fd *File) Enums() protoreflect.EnumDescriptors { return &fd.L1.Enums } +func (fd *File) Messages() protoreflect.MessageDescriptors { return &fd.L1.Messages } +func (fd *File) Extensions() protoreflect.ExtensionDescriptors { return 
&fd.L1.Extensions } +func (fd *File) Services() protoreflect.ServiceDescriptors { return &fd.L1.Services } +func (fd *File) SourceLocations() protoreflect.SourceLocations { return &fd.lazyInit().Locations } +func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *File) ProtoType(protoreflect.FileDescriptor) {} +func (fd *File) ProtoInternal(pragma.DoNotImplement) {} func (fd *File) lazyInit() *FileL2 { if atomic.LoadUint32(&fd.once) == 0 { @@ -119,7 +119,7 @@ type ( eagerValues bool // controls whether EnumL2.Values is already populated } EnumL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Values EnumValues ReservedNames Names ReservedRanges EnumRanges @@ -130,41 +130,41 @@ type ( L1 EnumValueL1 } EnumValueL1 struct { - Options func() pref.ProtoMessage - Number pref.EnumNumber + Options func() protoreflect.ProtoMessage + Number protoreflect.EnumNumber } ) -func (ed *Enum) Options() pref.ProtoMessage { +func (ed *Enum) Options() protoreflect.ProtoMessage { if f := ed.lazyInit().Options; f != nil { return f() } return descopts.Enum } -func (ed *Enum) Values() pref.EnumValueDescriptors { +func (ed *Enum) Values() protoreflect.EnumValueDescriptors { if ed.L1.eagerValues { return &ed.L2.Values } return &ed.lazyInit().Values } -func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames } -func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges } -func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } -func (ed *Enum) ProtoType(pref.EnumDescriptor) {} +func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit().ReservedNames } +func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges } +func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {} func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 } -func (ed *EnumValue) Options() pref.ProtoMessage { +func (ed *EnumValue) Options() protoreflect.ProtoMessage { if f := ed.L1.Options; f != nil { return f() } return descopts.EnumValue } -func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number } -func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } -func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {} +func (ed *EnumValue) Number() protoreflect.EnumNumber { return ed.L1.Number } +func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *EnumValue) ProtoType(protoreflect.EnumValueDescriptor) {} type ( Message struct { @@ -180,14 +180,14 @@ type ( IsMessageSet bool // promoted from google.protobuf.MessageOptions } MessageL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Fields Fields Oneofs Oneofs ReservedNames Names ReservedRanges FieldRanges RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality ExtensionRanges FieldRanges - ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges + ExtensionRangeOptions []func() protoreflect.ProtoMessage // must be same length as ExtensionRanges } Field struct { @@ -195,10 +195,10 @@ type ( L1 FieldL1 } FieldL1 struct { - Options func() pref.ProtoMessage - Number pref.FieldNumber - Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers - Kind pref.Kind + Options func() 
protoreflect.ProtoMessage + Number protoreflect.FieldNumber + Cardinality protoreflect.Cardinality // must be consistent with Message.RequiredNumbers + Kind protoreflect.Kind StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions @@ -207,9 +207,9 @@ type ( HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions EnforceUTF8 bool // promoted from google.protobuf.FieldOptions Default defaultValue - ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields - Enum pref.EnumDescriptor - Message pref.MessageDescriptor + ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields + Enum protoreflect.EnumDescriptor + Message protoreflect.MessageDescriptor } Oneof struct { @@ -217,35 +217,35 @@ type ( L1 OneofL1 } OneofL1 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Fields OneofFields // must be consistent with Message.Fields.ContainingOneof } ) -func (md *Message) Options() pref.ProtoMessage { +func (md *Message) Options() protoreflect.ProtoMessage { if f := md.lazyInit().Options; f != nil { return f() } return descopts.Message } -func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } -func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields } -func (md *Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs } -func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames } -func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges } -func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers } -func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges } -func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage { +func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } +func (md *Message) Fields() protoreflect.FieldDescriptors { return &md.lazyInit().Fields } +func (md *Message) Oneofs() protoreflect.OneofDescriptors { return &md.lazyInit().Oneofs } +func (md *Message) ReservedNames() protoreflect.Names { return &md.lazyInit().ReservedNames } +func (md *Message) ReservedRanges() protoreflect.FieldRanges { return &md.lazyInit().ReservedRanges } +func (md *Message) RequiredNumbers() protoreflect.FieldNumbers { return &md.lazyInit().RequiredNumbers } +func (md *Message) ExtensionRanges() protoreflect.FieldRanges { return &md.lazyInit().ExtensionRanges } +func (md *Message) ExtensionRangeOptions(i int) protoreflect.ProtoMessage { if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil { return f() } return descopts.ExtensionRange } -func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums } -func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages } -func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions } -func (md *Message) ProtoType(pref.MessageDescriptor) {} -func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Message) Enums() protoreflect.EnumDescriptors { return &md.L1.Enums } +func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L1.Messages } +func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions } +func (md *Message) ProtoType(protoreflect.MessageDescriptor) {} +func (md *Message) Format(s fmt.State, r rune) { 
descfmt.FormatDesc(s, r, md) } func (md *Message) lazyInit() *MessageL2 { md.L0.ParentFile.lazyInit() // implicitly initializes L2 return md.L2 @@ -260,28 +260,28 @@ func (md *Message) IsMessageSet() bool { return md.L1.IsMessageSet } -func (fd *Field) Options() pref.ProtoMessage { +func (fd *Field) Options() protoreflect.ProtoMessage { if f := fd.L1.Options; f != nil { return f() } return descopts.Field } -func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } -func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } +func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } +func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { - return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } func (fd *Field) HasOptionalKeyword() bool { - return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional + return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated { + if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { switch fd.L1.Kind { - case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind: + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: default: return true } @@ -290,40 +290,40 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } -func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() } +func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } -func (fd *Field) MapKey() pref.FieldDescriptor { +func (fd *Field) MapKey() protoreflect.FieldDescriptor { if !fd.IsMap() { return nil } return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) } -func (fd *Field) MapValue() pref.FieldDescriptor { +func (fd *Field) MapValue() protoreflect.FieldDescriptor { if !fd.IsMap() { return nil } return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) } -func (fd *Field) HasDefault() bool { return fd.L1.Default.has } -func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } -func (fd *Field) DefaultEnumValue() 
pref.EnumValueDescriptor { return fd.L1.Default.enum } -func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof } -func (fd *Field) ContainingMessage() pref.MessageDescriptor { - return fd.L0.Parent.(pref.MessageDescriptor) +func (fd *Field) HasDefault() bool { return fd.L1.Default.has } +func (fd *Field) Default() protoreflect.Value { return fd.L1.Default.get(fd) } +func (fd *Field) DefaultEnumValue() protoreflect.EnumValueDescriptor { return fd.L1.Default.enum } +func (fd *Field) ContainingOneof() protoreflect.OneofDescriptor { return fd.L1.ContainingOneof } +func (fd *Field) ContainingMessage() protoreflect.MessageDescriptor { + return fd.L0.Parent.(protoreflect.MessageDescriptor) } -func (fd *Field) Enum() pref.EnumDescriptor { +func (fd *Field) Enum() protoreflect.EnumDescriptor { return fd.L1.Enum } -func (fd *Field) Message() pref.MessageDescriptor { +func (fd *Field) Message() protoreflect.MessageDescriptor { if fd.L1.IsWeak { if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(pref.MessageDescriptor) + return d.(protoreflect.MessageDescriptor) } } return fd.L1.Message } -func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } -func (fd *Field) ProtoType(pref.FieldDescriptor) {} +func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8 // validation for the string field. This exists for Google-internal use only @@ -336,21 +336,21 @@ func (fd *Field) EnforceUTF8() bool { if fd.L1.HasEnforceUTF8 { return fd.L1.EnforceUTF8 } - return fd.L0.ParentFile.L1.Syntax == pref.Proto3 + return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 } func (od *Oneof) IsSynthetic() bool { - return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() + return od.L0.ParentFile.L1.Syntax == protoreflect.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() } -func (od *Oneof) Options() pref.ProtoMessage { +func (od *Oneof) Options() protoreflect.ProtoMessage { if f := od.L1.Options; f != nil { return f() } return descopts.Oneof } -func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields } -func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } -func (od *Oneof) ProtoType(pref.OneofDescriptor) {} +func (od *Oneof) Fields() protoreflect.FieldDescriptors { return &od.L1.Fields } +func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } +func (od *Oneof) ProtoType(protoreflect.OneofDescriptor) {} type ( Extension struct { @@ -359,55 +359,57 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number pref.FieldNumber - Extendee pref.MessageDescriptor - Cardinality pref.Cardinality - Kind pref.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind } ExtensionL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue - Enum pref.EnumDescriptor - Message pref.MessageDescriptor + Enum protoreflect.EnumDescriptor + Message protoreflect.MessageDescriptor } ) -func (xd *Extension) 
Options() pref.ProtoMessage { +func (xd *Extension) Options() protoreflect.ProtoMessage { if f := xd.lazyInit().Options; f != nil { return f() } return descopts.Field } -func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } -func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } -func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } -func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } -func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } -func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } -func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } +func (xd *Extension) Number() protoreflect.FieldNumber { return xd.L1.Number } +func (xd *Extension) Cardinality() protoreflect.Cardinality { return xd.L1.Cardinality } +func (xd *Extension) Kind() protoreflect.Kind { return xd.L1.Kind } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } +func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != protoreflect.Repeated } func (xd *Extension) HasOptionalKeyword() bool { - return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional -} -func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } -func (xd *Extension) IsExtension() bool { return true } -func (xd *Extension) IsWeak() bool { return false } -func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated } -func (xd *Extension) IsMap() bool { return false } -func (xd *Extension) MapKey() pref.FieldDescriptor { return nil } -func (xd *Extension) MapValue() pref.FieldDescriptor { return nil } -func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } -func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) } -func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum } -func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil } -func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee } -func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum } -func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message } -func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } -func (xd *Extension) ProtoType(pref.FieldDescriptor) {} -func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} + return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional +} +func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsExtension() bool { return true } +func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } +func (xd *Extension) IsMap() bool { return false } +func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } +func (xd *Extension) MapValue() protoreflect.FieldDescriptor { return nil } +func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } +func (xd *Extension) Default() protoreflect.Value { return xd.lazyInit().Default.get(xd) } +func (xd 
*Extension) DefaultEnumValue() protoreflect.EnumValueDescriptor { + return xd.lazyInit().Default.enum +} +func (xd *Extension) ContainingOneof() protoreflect.OneofDescriptor { return nil } +func (xd *Extension) ContainingMessage() protoreflect.MessageDescriptor { return xd.L1.Extendee } +func (xd *Extension) Enum() protoreflect.EnumDescriptor { return xd.lazyInit().Enum } +func (xd *Extension) Message() protoreflect.MessageDescriptor { return xd.lazyInit().Message } +func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } +func (xd *Extension) ProtoType(protoreflect.FieldDescriptor) {} +func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} func (xd *Extension) lazyInit() *ExtensionL2 { xd.L0.ParentFile.lazyInit() // implicitly initializes L2 return xd.L2 @@ -421,7 +423,7 @@ type ( } ServiceL1 struct{} ServiceL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Methods Methods } @@ -430,48 +432,48 @@ type ( L1 MethodL1 } MethodL1 struct { - Options func() pref.ProtoMessage - Input pref.MessageDescriptor - Output pref.MessageDescriptor + Options func() protoreflect.ProtoMessage + Input protoreflect.MessageDescriptor + Output protoreflect.MessageDescriptor IsStreamingClient bool IsStreamingServer bool } ) -func (sd *Service) Options() pref.ProtoMessage { +func (sd *Service) Options() protoreflect.ProtoMessage { if f := sd.lazyInit().Options; f != nil { return f() } return descopts.Service } -func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods } -func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } -func (sd *Service) ProtoType(pref.ServiceDescriptor) {} -func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} +func (sd *Service) Methods() protoreflect.MethodDescriptors { return &sd.lazyInit().Methods } +func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } +func (sd *Service) ProtoType(protoreflect.ServiceDescriptor) {} +func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} func (sd *Service) lazyInit() *ServiceL2 { sd.L0.ParentFile.lazyInit() // implicitly initializes L2 return sd.L2 } -func (md *Method) Options() pref.ProtoMessage { +func (md *Method) Options() protoreflect.ProtoMessage { if f := md.L1.Options; f != nil { return f() } return descopts.Method } -func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input } -func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output } -func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } -func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } -func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } -func (md *Method) ProtoType(pref.MethodDescriptor) {} -func (md *Method) ProtoInternal(pragma.DoNotImplement) {} +func (md *Method) Input() protoreflect.MessageDescriptor { return md.L1.Input } +func (md *Method) Output() protoreflect.MessageDescriptor { return md.L1.Output } +func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } +func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } +func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Method) ProtoType(protoreflect.MethodDescriptor) {} +func (md *Method) ProtoInternal(pragma.DoNotImplement) {} // Surrogate files are can be used to create standalone descriptors // where the syntax is only information derived from the parent file. 
var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}} + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} ) type ( @@ -479,24 +481,24 @@ type ( L0 BaseL0 } BaseL0 struct { - FullName pref.FullName // must be populated - ParentFile *File // must be populated - Parent pref.Descriptor + FullName protoreflect.FullName // must be populated + ParentFile *File // must be populated + Parent protoreflect.Descriptor Index int } ) -func (d *Base) Name() pref.Name { return d.L0.FullName.Name() } -func (d *Base) FullName() pref.FullName { return d.L0.FullName } -func (d *Base) ParentFile() pref.FileDescriptor { +func (d *Base) Name() protoreflect.Name { return d.L0.FullName.Name() } +func (d *Base) FullName() protoreflect.FullName { return d.L0.FullName } +func (d *Base) ParentFile() protoreflect.FileDescriptor { if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 { return nil // surrogate files are not real parents } return d.L0.ParentFile } -func (d *Base) Parent() pref.Descriptor { return d.L0.Parent } +func (d *Base) Parent() protoreflect.Descriptor { return d.L0.Parent } func (d *Base) Index() int { return d.L0.Index } -func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() } +func (d *Base) Syntax() protoreflect.Syntax { return d.L0.ParentFile.Syntax() } func (d *Base) IsPlaceholder() bool { return false } func (d *Base) ProtoInternal(pragma.DoNotImplement) {} @@ -513,7 +515,7 @@ func (s *stringName) InitJSON(name string) { s.nameJSON = name } -func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { +func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { s.once.Do(func() { if fd.IsExtension() { // For extensions, JSON and text are formatted the same way. @@ -533,7 +535,7 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { // Format the text name. 
s.nameText = string(fd.Name()) - if fd.Kind() == pref.GroupKind { + if fd.Kind() == protoreflect.GroupKind { s.nameText = string(fd.Message().Name()) } } @@ -541,10 +543,10 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { return s } -func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } -func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } +func (s *stringName) getJSON(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +func (s *stringName) getText(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameText } -func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { +func DefaultValue(v protoreflect.Value, ev protoreflect.EnumValueDescriptor) defaultValue { dv := defaultValue{has: v.IsValid(), val: v, enum: ev} if b, ok := v.Interface().([]byte); ok { // Store a copy of the default bytes, so that we can detect @@ -554,9 +556,9 @@ func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { return dv } -func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue { - var evs pref.EnumValueDescriptors - if k == pref.EnumKind { +func unmarshalDefault(b []byte, k protoreflect.Kind, pf *File, ed protoreflect.EnumDescriptor) defaultValue { + var evs protoreflect.EnumValueDescriptors + if k == protoreflect.EnumKind { // If the enum is declared within the same file, be careful not to // blindly call the Values method, lest we bind ourselves in a deadlock. if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf { @@ -567,9 +569,9 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d // If we are unable to resolve the enum dependency, use a placeholder // enum value since we will not be able to parse the default value. - if ed.IsPlaceholder() && pref.Name(b).IsValid() { - v := pref.ValueOfEnum(0) - ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b))) + if ed.IsPlaceholder() && protoreflect.Name(b).IsValid() { + v := protoreflect.ValueOfEnum(0) + ev := PlaceholderEnumValue(ed.FullName().Parent().Append(protoreflect.Name(b))) return DefaultValue(v, ev) } } @@ -583,41 +585,41 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d type defaultValue struct { has bool - val pref.Value - enum pref.EnumValueDescriptor + val protoreflect.Value + enum protoreflect.EnumValueDescriptor bytes []byte } -func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value { +func (dv *defaultValue) get(fd protoreflect.FieldDescriptor) protoreflect.Value { // Return the zero value as the default if unpopulated. 
if !dv.has { - if fd.Cardinality() == pref.Repeated { - return pref.Value{} + if fd.Cardinality() == protoreflect.Repeated { + return protoreflect.Value{} } switch fd.Kind() { - case pref.BoolKind: - return pref.ValueOfBool(false) - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: - return pref.ValueOfInt32(0) - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - return pref.ValueOfInt64(0) - case pref.Uint32Kind, pref.Fixed32Kind: - return pref.ValueOfUint32(0) - case pref.Uint64Kind, pref.Fixed64Kind: - return pref.ValueOfUint64(0) - case pref.FloatKind: - return pref.ValueOfFloat32(0) - case pref.DoubleKind: - return pref.ValueOfFloat64(0) - case pref.StringKind: - return pref.ValueOfString("") - case pref.BytesKind: - return pref.ValueOfBytes(nil) - case pref.EnumKind: + case protoreflect.BoolKind: + return protoreflect.ValueOfBool(false) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return protoreflect.ValueOfInt32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return protoreflect.ValueOfInt64(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return protoreflect.ValueOfUint32(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return protoreflect.ValueOfUint64(0) + case protoreflect.FloatKind: + return protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + return protoreflect.ValueOfFloat64(0) + case protoreflect.StringKind: + return protoreflect.ValueOfString("") + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes(nil) + case protoreflect.EnumKind: if evs := fd.Enum().Values(); evs.Len() > 0 { - return pref.ValueOfEnum(evs.Get(0).Number()) + return protoreflect.ValueOfEnum(evs.Get(0).Number()) } - return pref.ValueOfEnum(0) + return protoreflect.ValueOfEnum(0) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 66e1fee52243..4a1584c9d29f 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // fileRaw is a data struct used when initializing a file descriptor from @@ -95,7 +95,7 @@ func (fd *File) unmarshalSeed(b []byte) { sb := getBuilder() defer putBuilder(sb) - var prevField pref.FieldNumber + var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int b0 := b @@ -110,16 +110,16 @@ func (fd *File) unmarshalSeed(b []byte) { case genid.FileDescriptorProto_Syntax_field_number: switch string(v) { case "proto2": - fd.L1.Syntax = pref.Proto2 + fd.L1.Syntax = protoreflect.Proto2 case "proto3": - fd.L1.Syntax = pref.Proto3 + fd.L1.Syntax = protoreflect.Proto3 default: panic("invalid syntax") } case genid.FileDescriptorProto_Name_field_number: fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: - fd.L1.Package = pref.FullName(sb.MakeString(v)) + fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) case genid.FileDescriptorProto_EnumType_field_number: if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -163,7 +163,7 @@ func (fd 
*File) unmarshalSeed(b []byte) { // If syntax is missing, it is assumed to be proto2. if fd.L1.Syntax == 0 { - fd.L1.Syntax = pref.Proto2 + fd.L1.Syntax = protoreflect.Proto2 } // Must allocate all declarations before parsing each descriptor type @@ -219,7 +219,7 @@ func (fd *File) unmarshalSeed(b []byte) { } } -func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd ed.L0.Index = i @@ -271,12 +271,12 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc } } -func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i - var prevField pref.FieldNumber + var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int var posEnums, posMessages, posExtensions int b0 := b @@ -387,7 +387,7 @@ func (md *Message) unmarshalSeedOptions(b []byte) { } } -func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { xd.L0.ParentFile = pf xd.L0.Parent = pd xd.L0.Index = i @@ -401,11 +401,11 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref b = b[m:] switch num { case genid.FieldDescriptorProto_Number_field_number: - xd.L1.Number = pref.FieldNumber(v) + xd.L1.Number = protoreflect.FieldNumber(v) case genid.FieldDescriptorProto_Label_field_number: - xd.L1.Cardinality = pref.Cardinality(v) + xd.L1.Cardinality = protoreflect.Cardinality(v) case genid.FieldDescriptorProto_Type_field_number: - xd.L1.Kind = pref.Kind(v) + xd.L1.Kind = protoreflect.Kind(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -423,7 +423,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref } } -func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { sd.L0.ParentFile = pf sd.L0.Parent = pd sd.L0.Index = i @@ -459,13 +459,13 @@ func putBuilder(b *strs.Builder) { // makeFullName converts b to a protoreflect.FullName, // where b must start with a leading dot. -func makeFullName(sb *strs.Builder, b []byte) pref.FullName { +func makeFullName(sb *strs.Builder, b []byte) protoreflect.FullName { if len(b) == 0 || b[0] != '.' 
{ panic("name reference must be fully qualified") } - return pref.FullName(sb.MakeString(b[1:])) + return protoreflect.FullName(sb.MakeString(b[1:])) } -func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName { - return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix))) +func appendFullName(sb *strs.Builder, prefix protoreflect.FullName, suffix []byte) protoreflect.FullName { + return sb.AppendFullName(prefix, protoreflect.Name(strs.UnsafeString(suffix))) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 198451e3ec94..736a19a75bc7 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -13,7 +13,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) func (fd *File) lazyRawInit() { @@ -39,10 +39,10 @@ func (file *File) resolveMessages() { // Resolve message field dependency. switch fd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ } @@ -62,10 +62,10 @@ func (file *File) resolveExtensions() { // Resolve extension field dependency. switch xd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx) depIdx++ } @@ -92,7 +92,7 @@ func (file *File) resolveServices() { } } -func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor { +func (file *File) resolveEnumDependency(ed protoreflect.EnumDescriptor, i, j int32) protoreflect.EnumDescriptor { r := file.builder.FileRegistry if r, ok := r.(resolverByIndex); ok { if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil { @@ -105,12 +105,12 @@ func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref } } if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil { - return d.(pref.EnumDescriptor) + return d.(protoreflect.EnumDescriptor) } return ed } -func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor { +func (file *File) resolveMessageDependency(md protoreflect.MessageDescriptor, i, j int32) protoreflect.MessageDescriptor { r := file.builder.FileRegistry if r, ok := r.(resolverByIndex); ok { if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil { @@ -123,7 +123,7 @@ func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32 } } if d, _ := r.FindDescriptorByName(md.FullName()); d != nil { - return d.(pref.MessageDescriptor) + return d.(protoreflect.MessageDescriptor) } return md } @@ -158,7 +158,7 @@ func (fd *File) unmarshalFull(b []byte) { if imp == nil { imp = PlaceholderFile(path) } - fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) + fd.L2.Imports = 
append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp}) case genid.FileDescriptorProto_EnumType_field_number: fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ @@ -199,7 +199,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { case genid.EnumDescriptorProto_Value_field_number: rawValues = append(rawValues, v) case genid.EnumDescriptorProto_ReservedName_field_number: - ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) case genid.EnumDescriptorProto_ReservedRange_field_number: ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) case genid.EnumDescriptorProto_Options_field_number: @@ -219,7 +219,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions) } -func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { +func unmarshalEnumReservedRange(b []byte) (r [2]protoreflect.EnumNumber) { for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) b = b[n:] @@ -229,9 +229,9 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { b = b[m:] switch num { case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: - r[0] = pref.EnumNumber(v) + r[0] = protoreflect.EnumNumber(v) case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: - r[1] = pref.EnumNumber(v) + r[1] = protoreflect.EnumNumber(v) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -241,7 +241,7 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { return r } -func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { vd.L0.ParentFile = pf vd.L0.Parent = pd vd.L0.Index = i @@ -256,7 +256,7 @@ func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref b = b[m:] switch num { case genid.EnumValueDescriptorProto_Number_field_number: - vd.L1.Number = pref.EnumNumber(v) + vd.L1.Number = protoreflect.EnumNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -294,7 +294,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { case genid.DescriptorProto_OneofDecl_field_number: rawOneofs = append(rawOneofs, v) case genid.DescriptorProto_ReservedName_field_number: - md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) case genid.DescriptorProto_ReservedRange_field_number: md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) case genid.DescriptorProto_ExtensionRange_field_number: @@ -326,7 +326,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { for i, b := range rawFields { fd := &md.L2.Fields.List[i] fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i) - if fd.L1.Cardinality == pref.Required { + if fd.L1.Cardinality == protoreflect.Required { md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number) } } @@ -359,7 +359,7 @@ func (md *Message) unmarshalOptions(b []byte) { } } -func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { +func unmarshalMessageReservedRange(b []byte) (r [2]protoreflect.FieldNumber) { for len(b) > 0 { num, typ, n := 
protowire.ConsumeTag(b) b = b[n:] @@ -369,9 +369,9 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { b = b[m:] switch num { case genid.DescriptorProto_ReservedRange_Start_field_number: - r[0] = pref.FieldNumber(v) + r[0] = protoreflect.FieldNumber(v) case genid.DescriptorProto_ReservedRange_End_field_number: - r[1] = pref.FieldNumber(v) + r[1] = protoreflect.FieldNumber(v) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -381,7 +381,7 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { return r } -func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) { +func unmarshalMessageExtensionRange(b []byte) (r [2]protoreflect.FieldNumber, rawOptions []byte) { for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) b = b[n:] @@ -391,9 +391,9 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions b = b[m:] switch num { case genid.DescriptorProto_ExtensionRange_Start_field_number: - r[0] = pref.FieldNumber(v) + r[0] = protoreflect.FieldNumber(v) case genid.DescriptorProto_ExtensionRange_End_field_number: - r[1] = pref.FieldNumber(v) + r[1] = protoreflect.FieldNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -410,7 +410,7 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions return r, rawOptions } -func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i @@ -426,11 +426,11 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des b = b[m:] switch num { case genid.FieldDescriptorProto_Number_field_number: - fd.L1.Number = pref.FieldNumber(v) + fd.L1.Number = protoreflect.FieldNumber(v) case genid.FieldDescriptorProto_Label_field_number: - fd.L1.Cardinality = pref.Cardinality(v) + fd.L1.Cardinality = protoreflect.Cardinality(v) case genid.FieldDescriptorProto_Type_field_number: - fd.L1.Kind = pref.Kind(v) + fd.L1.Kind = protoreflect.Kind(v) case genid.FieldDescriptorProto_OneofIndex_field_number: // In Message.unmarshalFull, we allocate slices for both // the field and oneof descriptors before unmarshaling either @@ -453,7 +453,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des case genid.FieldDescriptorProto_JsonName_field_number: fd.L1.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: - fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages + fd.L1.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: @@ -468,9 +468,9 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: fd.L1.Enum = PlaceholderEnum(name) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = PlaceholderMessage(name) } } @@ -504,7 +504,7 @@ func (fd *Field) unmarshalOptions(b []byte) { } } -func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (od *Oneof) 
unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { od.L0.ParentFile = pf od.L0.Parent = pd od.L0.Index = i @@ -553,7 +553,7 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { case genid.FieldDescriptorProto_JsonName_field_number: xd.L2.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: - xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions + xd.L2.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: @@ -568,9 +568,9 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: xd.L2.Enum = PlaceholderEnum(name) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: xd.L2.Message = PlaceholderMessage(name) } } @@ -627,7 +627,7 @@ func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions) } -func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i @@ -680,18 +680,18 @@ func appendOptions(dst, src []byte) []byte { // // The type of message to unmarshal to is passed as a pointer since the // vars in descopts may not yet be populated at the time this function is called. 
-func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage { +func (db *Builder) optionsUnmarshaler(p *protoreflect.ProtoMessage, b []byte) func() protoreflect.ProtoMessage { if b == nil { return nil } - var opts pref.ProtoMessage + var opts protoreflect.ProtoMessage var once sync.Once - return func() pref.ProtoMessage { + return func() protoreflect.ProtoMessage { once.Do(func() { if *p == nil { panic("Descriptor.Options called without importing the descriptor package") } - opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage) + opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(protoreflect.ProtoMessage) if err := (proto.UnmarshalOptions{ AllowPartial: true, Resolver: db.TypeResolver, diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go index aa294fff99a8..e3b6587da63a 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -17,31 +17,30 @@ import ( "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" ) -type FileImports []pref.FileImport +type FileImports []protoreflect.FileImport func (p *FileImports) Len() int { return len(*p) } -func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] } +func (p *FileImports) Get(i int) protoreflect.FileImport { return (*p)[i] } func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {} type Names struct { - List []pref.Name + List []protoreflect.Name once sync.Once - has map[pref.Name]int // protected by once + has map[protoreflect.Name]int // protected by once } func (p *Names) Len() int { return len(p.List) } -func (p *Names) Get(i int) pref.Name { return p.List[i] } -func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 } +func (p *Names) Get(i int) protoreflect.Name { return p.List[i] } +func (p *Names) Has(s protoreflect.Name) bool { return p.lazyInit().has[s] > 0 } func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *Names) ProtoInternal(pragma.DoNotImplement) {} func (p *Names) lazyInit() *Names { p.once.Do(func() { if len(p.List) > 0 { - p.has = make(map[pref.Name]int, len(p.List)) + p.has = make(map[protoreflect.Name]int, len(p.List)) for _, s := range p.List { p.has[s] = p.has[s] + 1 } @@ -67,14 +66,14 @@ func (p *Names) CheckValid() error { } type EnumRanges struct { - List [][2]pref.EnumNumber // start inclusive; end inclusive + List [][2]protoreflect.EnumNumber // start inclusive; end inclusive once sync.Once - sorted [][2]pref.EnumNumber // protected by once + sorted [][2]protoreflect.EnumNumber // protected by once } -func (p *EnumRanges) Len() int { return len(p.List) } -func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] } -func (p *EnumRanges) Has(n pref.EnumNumber) bool { +func (p *EnumRanges) Len() int { return len(p.List) } +func (p *EnumRanges) Get(i int) [2]protoreflect.EnumNumber { return p.List[i] } +func (p *EnumRanges) Has(n protoreflect.EnumNumber) bool { for ls := p.lazyInit().sorted; len(ls) > 0; { i := len(ls) / 2 switch r := enumRange(ls[i]); { @@ -129,14 +128,14 @@ func (r enumRange) String() string { } type FieldRanges struct { - List [][2]pref.FieldNumber // start 
inclusive; end exclusive + List [][2]protoreflect.FieldNumber // start inclusive; end exclusive once sync.Once - sorted [][2]pref.FieldNumber // protected by once + sorted [][2]protoreflect.FieldNumber // protected by once } -func (p *FieldRanges) Len() int { return len(p.List) } -func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] } -func (p *FieldRanges) Has(n pref.FieldNumber) bool { +func (p *FieldRanges) Len() int { return len(p.List) } +func (p *FieldRanges) Get(i int) [2]protoreflect.FieldNumber { return p.List[i] } +func (p *FieldRanges) Has(n protoreflect.FieldNumber) bool { for ls := p.lazyInit().sorted; len(ls) > 0; { i := len(ls) / 2 switch r := fieldRange(ls[i]); { @@ -221,17 +220,17 @@ func (r fieldRange) String() string { } type FieldNumbers struct { - List []pref.FieldNumber + List []protoreflect.FieldNumber once sync.Once - has map[pref.FieldNumber]struct{} // protected by once + has map[protoreflect.FieldNumber]struct{} // protected by once } -func (p *FieldNumbers) Len() int { return len(p.List) } -func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] } -func (p *FieldNumbers) Has(n pref.FieldNumber) bool { +func (p *FieldNumbers) Len() int { return len(p.List) } +func (p *FieldNumbers) Get(i int) protoreflect.FieldNumber { return p.List[i] } +func (p *FieldNumbers) Has(n protoreflect.FieldNumber) bool { p.once.Do(func() { if len(p.List) > 0 { - p.has = make(map[pref.FieldNumber]struct{}, len(p.List)) + p.has = make(map[protoreflect.FieldNumber]struct{}, len(p.List)) for _, n := range p.List { p.has[n] = struct{}{} } @@ -244,30 +243,38 @@ func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {} type OneofFields struct { - List []pref.FieldDescriptor + List []protoreflect.FieldDescriptor once sync.Once - byName map[pref.Name]pref.FieldDescriptor // protected by once - byJSON map[string]pref.FieldDescriptor // protected by once - byText map[string]pref.FieldDescriptor // protected by once - byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once + byName map[protoreflect.Name]protoreflect.FieldDescriptor // protected by once + byJSON map[string]protoreflect.FieldDescriptor // protected by once + byText map[string]protoreflect.FieldDescriptor // protected by once + byNum map[protoreflect.FieldNumber]protoreflect.FieldDescriptor // protected by once } -func (p *OneofFields) Len() int { return len(p.List) } -func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } -func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } -func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } -func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } -func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } -func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } -func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} +func (p *OneofFields) Len() int { return len(p.List) } +func (p *OneofFields) Get(i int) protoreflect.FieldDescriptor { return p.List[i] } +func (p *OneofFields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + return p.lazyInit().byName[s] +} +func (p *OneofFields) ByJSONName(s string) protoreflect.FieldDescriptor { + return p.lazyInit().byJSON[s] +} +func (p *OneofFields) ByTextName(s string) protoreflect.FieldDescriptor 
{ + return p.lazyInit().byText[s] +} +func (p *OneofFields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + return p.lazyInit().byNum[n] +} +func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} func (p *OneofFields) lazyInit() *OneofFields { p.once.Do(func() { if len(p.List) > 0 { - p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) - p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) - p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) - p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) + p.byName = make(map[protoreflect.Name]protoreflect.FieldDescriptor, len(p.List)) + p.byJSON = make(map[string]protoreflect.FieldDescriptor, len(p.List)) + p.byText = make(map[string]protoreflect.FieldDescriptor, len(p.List)) + p.byNum = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor, len(p.List)) for _, f := range p.List { // Field names and numbers are guaranteed to be unique. p.byName[f.Name()] = f @@ -284,123 +291,123 @@ type SourceLocations struct { // List is a list of SourceLocations. // The SourceLocation.Next field does not need to be populated // as it will be lazily populated upon first need. - List []pref.SourceLocation + List []protoreflect.SourceLocation // File is the parent file descriptor that these locations are relative to. // If non-nil, ByDescriptor verifies that the provided descriptor // is a child of this file descriptor. - File pref.FileDescriptor + File protoreflect.FileDescriptor once sync.Once byPath map[pathKey]int } -func (p *SourceLocations) Len() int { return len(p.List) } -func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } -func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) protoreflect.SourceLocation { return p.lazyInit().List[i] } +func (p *SourceLocations) byKey(k pathKey) protoreflect.SourceLocation { if i, ok := p.lazyInit().byPath[k]; ok { return p.List[i] } - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } -func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { +func (p *SourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation { return p.byKey(newPathKey(path)) } -func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { +func (p *SourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation { if p.File != nil && desc != nil && p.File != desc.ParentFile() { - return pref.SourceLocation{} // mismatching parent files + return protoreflect.SourceLocation{} // mismatching parent files } var pathArr [16]int32 path := pathArr[:0] for { switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: // Reverse the path since it was constructed in reverse. 
for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { path[i], path[j] = path[j], path[i] } return p.byKey(newPathKey(path)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.FieldDescriptor: - isExtension := desc.(pref.FieldDescriptor).IsExtension() + case protoreflect.FieldDescriptor: + isExtension := desc.(protoreflect.FieldDescriptor).IsExtension() path = append(path, int32(desc.Index())) desc = desc.Parent() if isExtension { switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_Extension_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } else { switch desc.(type) { - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_Field_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } - case pref.OneofDescriptor: + case protoreflect.OneofDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.EnumDescriptor: + case protoreflect.EnumDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.EnumValueDescriptor: + case protoreflect.EnumValueDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.EnumDescriptor: + case protoreflect.EnumDescriptor: path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.ServiceDescriptor: + case protoreflect.ServiceDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.MethodDescriptor: + case protoreflect.MethodDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.ServiceDescriptor: + case protoreflect.ServiceDescriptor: path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) default: - return pref.SourceLocation{} + return 
protoreflect.SourceLocation{} } default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } } @@ -435,7 +442,7 @@ type pathKey struct { str string // used if the path does not fit in arr } -func newPathKey(p pref.SourcePath) (k pathKey) { +func newPathKey(p protoreflect.SourcePath) (k pathKey) { if len(p) < len(k.arr) { for i, ps := range p { if ps < 0 || math.MaxUint8 <= ps { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go index dbf2c605bfe5..28240ebc5c4a 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -7,7 +7,7 @@ package filedesc import ( "google.golang.org/protobuf/internal/descopts" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) var ( @@ -30,78 +30,80 @@ var ( // PlaceholderFile is a placeholder, representing only the file path. type PlaceholderFile string -func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f } -func (f PlaceholderFile) Parent() pref.Descriptor { return nil } -func (f PlaceholderFile) Index() int { return 0 } -func (f PlaceholderFile) Syntax() pref.Syntax { return 0 } -func (f PlaceholderFile) Name() pref.Name { return "" } -func (f PlaceholderFile) FullName() pref.FullName { return "" } -func (f PlaceholderFile) IsPlaceholder() bool { return true } -func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File } -func (f PlaceholderFile) Path() string { return string(f) } -func (f PlaceholderFile) Package() pref.FullName { return "" } -func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles } -func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages } -func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums } -func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions } -func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices } -func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations } -func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return } -func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } +func (f PlaceholderFile) ParentFile() protoreflect.FileDescriptor { return f } +func (f PlaceholderFile) Parent() protoreflect.Descriptor { return nil } +func (f PlaceholderFile) Index() int { return 0 } +func (f PlaceholderFile) Syntax() protoreflect.Syntax { return 0 } +func (f PlaceholderFile) Name() protoreflect.Name { return "" } +func (f PlaceholderFile) FullName() protoreflect.FullName { return "" } +func (f PlaceholderFile) IsPlaceholder() bool { return true } +func (f PlaceholderFile) Options() protoreflect.ProtoMessage { return descopts.File } +func (f PlaceholderFile) Path() string { return string(f) } +func (f PlaceholderFile) Package() protoreflect.FullName { return "" } +func (f PlaceholderFile) Imports() protoreflect.FileImports { return emptyFiles } +func (f PlaceholderFile) Messages() protoreflect.MessageDescriptors { return emptyMessages } +func (f PlaceholderFile) Enums() protoreflect.EnumDescriptors { return emptyEnums } +func (f PlaceholderFile) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } +func (f PlaceholderFile) Services() protoreflect.ServiceDescriptors { return emptyServices 
} +func (f PlaceholderFile) SourceLocations() protoreflect.SourceLocations { return emptySourceLocations } +func (f PlaceholderFile) ProtoType(protoreflect.FileDescriptor) { return } +func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderEnum is a placeholder, representing only the full name. -type PlaceholderEnum pref.FullName +type PlaceholderEnum protoreflect.FullName -func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil } -func (e PlaceholderEnum) Parent() pref.Descriptor { return nil } -func (e PlaceholderEnum) Index() int { return 0 } -func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 } -func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() } -func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) } -func (e PlaceholderEnum) IsPlaceholder() bool { return true } -func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum } -func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues } -func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames } -func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges } -func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return } -func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } +func (e PlaceholderEnum) ParentFile() protoreflect.FileDescriptor { return nil } +func (e PlaceholderEnum) Parent() protoreflect.Descriptor { return nil } +func (e PlaceholderEnum) Index() int { return 0 } +func (e PlaceholderEnum) Syntax() protoreflect.Syntax { return 0 } +func (e PlaceholderEnum) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } +func (e PlaceholderEnum) FullName() protoreflect.FullName { return protoreflect.FullName(e) } +func (e PlaceholderEnum) IsPlaceholder() bool { return true } +func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return descopts.Enum } +func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } +func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } +func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } +func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderEnumValue is a placeholder, representing only the full name. 
-type PlaceholderEnumValue pref.FullName +type PlaceholderEnumValue protoreflect.FullName -func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil } -func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil } -func (e PlaceholderEnumValue) Index() int { return 0 } -func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 } -func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() } -func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) } -func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } -func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue } -func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 } -func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return } -func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } +func (e PlaceholderEnumValue) ParentFile() protoreflect.FileDescriptor { return nil } +func (e PlaceholderEnumValue) Parent() protoreflect.Descriptor { return nil } +func (e PlaceholderEnumValue) Index() int { return 0 } +func (e PlaceholderEnumValue) Syntax() protoreflect.Syntax { return 0 } +func (e PlaceholderEnumValue) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } +func (e PlaceholderEnumValue) FullName() protoreflect.FullName { return protoreflect.FullName(e) } +func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } +func (e PlaceholderEnumValue) Options() protoreflect.ProtoMessage { return descopts.EnumValue } +func (e PlaceholderEnumValue) Number() protoreflect.EnumNumber { return 0 } +func (e PlaceholderEnumValue) ProtoType(protoreflect.EnumValueDescriptor) { return } +func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderMessage is a placeholder, representing only the full name. 
-type PlaceholderMessage pref.FullName +type PlaceholderMessage protoreflect.FullName -func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil } -func (m PlaceholderMessage) Parent() pref.Descriptor { return nil } -func (m PlaceholderMessage) Index() int { return 0 } -func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 } -func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() } -func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) } -func (m PlaceholderMessage) IsPlaceholder() bool { return true } -func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message } -func (m PlaceholderMessage) IsMapEntry() bool { return false } -func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields } -func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs } -func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames } -func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges } -func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers } -func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges } -func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") } -func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages } -func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums } -func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions } -func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return } -func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } +func (m PlaceholderMessage) ParentFile() protoreflect.FileDescriptor { return nil } +func (m PlaceholderMessage) Parent() protoreflect.Descriptor { return nil } +func (m PlaceholderMessage) Index() int { return 0 } +func (m PlaceholderMessage) Syntax() protoreflect.Syntax { return 0 } +func (m PlaceholderMessage) Name() protoreflect.Name { return protoreflect.FullName(m).Name() } +func (m PlaceholderMessage) FullName() protoreflect.FullName { return protoreflect.FullName(m) } +func (m PlaceholderMessage) IsPlaceholder() bool { return true } +func (m PlaceholderMessage) Options() protoreflect.ProtoMessage { return descopts.Message } +func (m PlaceholderMessage) IsMapEntry() bool { return false } +func (m PlaceholderMessage) Fields() protoreflect.FieldDescriptors { return emptyFields } +func (m PlaceholderMessage) Oneofs() protoreflect.OneofDescriptors { return emptyOneofs } +func (m PlaceholderMessage) ReservedNames() protoreflect.Names { return emptyNames } +func (m PlaceholderMessage) ReservedRanges() protoreflect.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) RequiredNumbers() protoreflect.FieldNumbers { return emptyFieldNumbers } +func (m PlaceholderMessage) ExtensionRanges() protoreflect.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) ExtensionRangeOptions(int) protoreflect.ProtoMessage { + panic("index out of range") +} +func (m PlaceholderMessage) Messages() protoreflect.MessageDescriptors { return emptyMessages } +func (m PlaceholderMessage) Enums() protoreflect.EnumDescriptors { return emptyEnums } +func (m PlaceholderMessage) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } +func (m PlaceholderMessage) ProtoType(protoreflect.MessageDescriptor) { return } +func (m 
PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index 0a0dd35de5a7..f0e38c4ef4e0 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -10,17 +10,16 @@ import ( "reflect" "google.golang.org/protobuf/internal/descopts" - fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/filedesc" pimpl "google.golang.org/protobuf/internal/impl" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) // Builder constructs type descriptors from a raw file descriptor // and associated Go types for each enum and message declaration. // -// -// Flattened Ordering +// # Flattened Ordering // // The protobuf type system represents declarations as a tree. Certain nodes in // the tree require us to either associate it with a concrete Go type or to @@ -52,7 +51,7 @@ import ( // that children themselves may have. type Builder struct { // File is the underlying file descriptor builder. - File fdesc.Builder + File filedesc.Builder // GoTypes is a unique set of the Go types for all declarations and // dependencies. Each type is represented as a zero value of the Go type. @@ -108,22 +107,22 @@ type Builder struct { // TypeRegistry is the registry to register each type descriptor. // If nil, it uses protoregistry.GlobalTypes. TypeRegistry interface { - RegisterMessage(pref.MessageType) error - RegisterEnum(pref.EnumType) error - RegisterExtension(pref.ExtensionType) error + RegisterMessage(protoreflect.MessageType) error + RegisterEnum(protoreflect.EnumType) error + RegisterExtension(protoreflect.ExtensionType) error } } // Out is the output of the builder. type Out struct { - File pref.FileDescriptor + File protoreflect.FileDescriptor } func (tb Builder) Build() (out Out) { // Replace the resolver with one that resolves dependencies by index, // which is faster and more reliable than relying on the global registry. if tb.File.FileRegistry == nil { - tb.File.FileRegistry = preg.GlobalFiles + tb.File.FileRegistry = protoregistry.GlobalFiles } tb.File.FileRegistry = &resolverByIndex{ goTypes: tb.GoTypes, @@ -133,7 +132,7 @@ func (tb Builder) Build() (out Out) { // Initialize registry if unpopulated. 
if tb.TypeRegistry == nil { - tb.TypeRegistry = preg.GlobalTypes + tb.TypeRegistry = protoregistry.GlobalTypes } fbOut := tb.File.Build() @@ -183,23 +182,23 @@ func (tb Builder) Build() (out Out) { for i := range fbOut.Messages { switch fbOut.Messages[i].Name() { case "FileOptions": - descopts.File = messageGoTypes[i].(pref.ProtoMessage) + descopts.File = messageGoTypes[i].(protoreflect.ProtoMessage) case "EnumOptions": - descopts.Enum = messageGoTypes[i].(pref.ProtoMessage) + descopts.Enum = messageGoTypes[i].(protoreflect.ProtoMessage) case "EnumValueOptions": - descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage) + descopts.EnumValue = messageGoTypes[i].(protoreflect.ProtoMessage) case "MessageOptions": - descopts.Message = messageGoTypes[i].(pref.ProtoMessage) + descopts.Message = messageGoTypes[i].(protoreflect.ProtoMessage) case "FieldOptions": - descopts.Field = messageGoTypes[i].(pref.ProtoMessage) + descopts.Field = messageGoTypes[i].(protoreflect.ProtoMessage) case "OneofOptions": - descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage) + descopts.Oneof = messageGoTypes[i].(protoreflect.ProtoMessage) case "ExtensionRangeOptions": - descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage) + descopts.ExtensionRange = messageGoTypes[i].(protoreflect.ProtoMessage) case "ServiceOptions": - descopts.Service = messageGoTypes[i].(pref.ProtoMessage) + descopts.Service = messageGoTypes[i].(protoreflect.ProtoMessage) case "MethodOptions": - descopts.Method = messageGoTypes[i].(pref.ProtoMessage) + descopts.Method = messageGoTypes[i].(protoreflect.ProtoMessage) } } } @@ -216,11 +215,11 @@ func (tb Builder) Build() (out Out) { const listExtDeps = 2 var goType reflect.Type switch fbOut.Extensions[i].L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) goType = reflect.TypeOf(tb.GoTypes[j]) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) goType = reflect.TypeOf(tb.GoTypes[j]) depIdx++ @@ -242,22 +241,22 @@ func (tb Builder) Build() (out Out) { return out } -var goTypeForPBKind = map[pref.Kind]reflect.Type{ - pref.BoolKind: reflect.TypeOf(bool(false)), - pref.Int32Kind: reflect.TypeOf(int32(0)), - pref.Sint32Kind: reflect.TypeOf(int32(0)), - pref.Sfixed32Kind: reflect.TypeOf(int32(0)), - pref.Int64Kind: reflect.TypeOf(int64(0)), - pref.Sint64Kind: reflect.TypeOf(int64(0)), - pref.Sfixed64Kind: reflect.TypeOf(int64(0)), - pref.Uint32Kind: reflect.TypeOf(uint32(0)), - pref.Fixed32Kind: reflect.TypeOf(uint32(0)), - pref.Uint64Kind: reflect.TypeOf(uint64(0)), - pref.Fixed64Kind: reflect.TypeOf(uint64(0)), - pref.FloatKind: reflect.TypeOf(float32(0)), - pref.DoubleKind: reflect.TypeOf(float64(0)), - pref.StringKind: reflect.TypeOf(string("")), - pref.BytesKind: reflect.TypeOf([]byte(nil)), +var goTypeForPBKind = map[protoreflect.Kind]reflect.Type{ + protoreflect.BoolKind: reflect.TypeOf(bool(false)), + protoreflect.Int32Kind: reflect.TypeOf(int32(0)), + protoreflect.Sint32Kind: reflect.TypeOf(int32(0)), + protoreflect.Sfixed32Kind: reflect.TypeOf(int32(0)), + protoreflect.Int64Kind: reflect.TypeOf(int64(0)), + protoreflect.Sint64Kind: reflect.TypeOf(int64(0)), + protoreflect.Sfixed64Kind: reflect.TypeOf(int64(0)), + protoreflect.Uint32Kind: reflect.TypeOf(uint32(0)), + protoreflect.Fixed32Kind: reflect.TypeOf(uint32(0)), + protoreflect.Uint64Kind: reflect.TypeOf(uint64(0)), + protoreflect.Fixed64Kind: 
reflect.TypeOf(uint64(0)), + protoreflect.FloatKind: reflect.TypeOf(float32(0)), + protoreflect.DoubleKind: reflect.TypeOf(float64(0)), + protoreflect.StringKind: reflect.TypeOf(string("")), + protoreflect.BytesKind: reflect.TypeOf([]byte(nil)), } type depIdxs []int32 @@ -274,13 +273,13 @@ type ( fileRegistry } fileRegistry interface { - FindFileByPath(string) (pref.FileDescriptor, error) - FindDescriptorByName(pref.FullName) (pref.Descriptor, error) - RegisterFile(pref.FileDescriptor) error + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) + RegisterFile(protoreflect.FileDescriptor) error } ) -func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor { +func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.EnumDescriptor { if depIdx := int(r.depIdxs.Get(i, j)); int(depIdx) < len(es)+len(ms) { return &es[depIdx] } else { @@ -288,7 +287,7 @@ func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdes } } -func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor { +func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.MessageDescriptor { if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) { return &ms[depIdx-len(es)] } else { diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go index a72995f02d9e..bda8e8cf3fce 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !protolegacy // +build !protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go index 772e2f0e4d69..6d8d9bd6b01a 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build protolegacy // +build protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index abee5f30e9fd..a371f98de143 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -12,8 +12,8 @@ import ( "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Export is a zero-length named type that exists only to export a set of @@ -32,11 +32,11 @@ type enum = interface{} // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. 
-func (Export) EnumOf(e enum) pref.Enum { +func (Export) EnumOf(e enum) protoreflect.Enum { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e default: return legacyWrapEnum(reflect.ValueOf(e)) @@ -45,11 +45,11 @@ func (Export) EnumOf(e enum) pref.Enum { // EnumDescriptorOf returns the protoreflect.EnumDescriptor for e. // It returns nil if e is nil. -func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { +func (Export) EnumDescriptorOf(e enum) protoreflect.EnumDescriptor { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e.Descriptor() default: return LegacyLoadEnumDesc(reflect.TypeOf(e)) @@ -58,11 +58,11 @@ func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { // EnumTypeOf returns the protoreflect.EnumType for e. // It returns nil if e is nil. -func (Export) EnumTypeOf(e enum) pref.EnumType { +func (Export) EnumTypeOf(e enum) protoreflect.EnumType { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e.Type() default: return legacyLoadEnumType(reflect.TypeOf(e)) @@ -71,7 +71,7 @@ func (Export) EnumTypeOf(e enum) pref.EnumType { // EnumStringOf returns the enum value as a string, either as the name if // the number is resolvable, or the number formatted as a string. -func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { +func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNumber) string { ev := ed.Values().ByNumber(n) if ev != nil { return string(ev.Name()) @@ -84,7 +84,7 @@ func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { type message = interface{} // legacyMessageWrapper wraps a v2 message as a v1 message. -type legacyMessageWrapper struct{ m pref.ProtoMessage } +type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) } func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) } @@ -92,30 +92,30 @@ func (m legacyMessageWrapper) ProtoMessage() {} // ProtoMessageV1Of converts either a v1 or v2 message to a v1 message. // It returns nil if m is nil. -func (Export) ProtoMessageV1Of(m message) piface.MessageV1 { +func (Export) ProtoMessageV1Of(m message) protoiface.MessageV1 { switch mv := m.(type) { case nil: return nil - case piface.MessageV1: + case protoiface.MessageV1: return mv case unwrapper: return Export{}.ProtoMessageV1Of(mv.protoUnwrap()) - case pref.ProtoMessage: + case protoreflect.ProtoMessage: return legacyMessageWrapper{mv} default: panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) } } -func (Export) protoMessageV2Of(m message) pref.ProtoMessage { +func (Export) protoMessageV2Of(m message) protoreflect.ProtoMessage { switch mv := m.(type) { case nil: return nil - case pref.ProtoMessage: + case protoreflect.ProtoMessage: return mv case legacyMessageWrapper: return mv.m - case piface.MessageV1: + case protoiface.MessageV1: return nil default: panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) @@ -124,7 +124,7 @@ func (Export) protoMessageV2Of(m message) pref.ProtoMessage { // ProtoMessageV2Of converts either a v1 or v2 message to a v2 message. // It returns nil if m is nil. 
-func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { +func (Export) ProtoMessageV2Of(m message) protoreflect.ProtoMessage { if m == nil { return nil } @@ -136,7 +136,7 @@ func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { // MessageOf returns the protoreflect.Message interface over m. // It returns nil if m is nil. -func (Export) MessageOf(m message) pref.Message { +func (Export) MessageOf(m message) protoreflect.Message { if m == nil { return nil } @@ -148,7 +148,7 @@ func (Export) MessageOf(m message) pref.Message { // MessageDescriptorOf returns the protoreflect.MessageDescriptor for m. // It returns nil if m is nil. -func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { +func (Export) MessageDescriptorOf(m message) protoreflect.MessageDescriptor { if m == nil { return nil } @@ -160,7 +160,7 @@ func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { // MessageTypeOf returns the protoreflect.MessageType for m. // It returns nil if m is nil. -func (Export) MessageTypeOf(m message) pref.MessageType { +func (Export) MessageTypeOf(m message) protoreflect.MessageType { if m == nil { return nil } @@ -172,6 +172,6 @@ func (Export) MessageTypeOf(m message) pref.MessageType { // MessageStringOf returns the message value as a string, // which is the message serialized in the protobuf text format. -func (Export) MessageStringOf(m pref.ProtoMessage) string { +func (Export) MessageStringOf(m protoreflect.ProtoMessage) string { return prototext.MarshalOptions{Multiline: false}.Format(m) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index b82341e575cb..bff041edc946 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -8,18 +8,18 @@ import ( "sync" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) -func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) { +func (mi *MessageInfo) checkInitialized(in protoiface.CheckInitializedInput) (protoiface.CheckInitializedOutput, error) { var p pointer if ms, ok := in.Message.(*messageState); ok { p = ms.pointer() } else { p = in.Message.(*messageReflectWrapper).pointer() } - return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) + return protoiface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) } func (mi *MessageInfo) checkInitializedPointer(p pointer) error { @@ -90,7 +90,7 @@ var ( // needsInitCheck reports whether a message needs to be checked for partial initialization. // // It returns true if the message transitively includes any required or extension fields. -func needsInitCheck(md pref.MessageDescriptor) bool { +func needsInitCheck(md protoreflect.MessageDescriptor) bool { if v, ok := needsInitCheckMap.Load(md); ok { if has, ok := v.(bool); ok { return has @@ -101,7 +101,7 @@ func needsInitCheck(md pref.MessageDescriptor) bool { return needsInitCheckLocked(md) } -func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) { +func needsInitCheckLocked(md protoreflect.MessageDescriptor) (has bool) { if v, ok := needsInitCheckMap.Load(md); ok { // If has is true, we've previously determined that this message // needs init checks. 
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 08d35170b66c..e74cefdc506f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type extensionFieldInfo struct { @@ -23,7 +23,7 @@ type extensionFieldInfo struct { var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo -func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { +func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info @@ -32,7 +32,7 @@ func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { } // legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. -func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { +func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { return xi.(*extensionFieldInfo) } @@ -43,7 +43,7 @@ func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { return e } -func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { +func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { var wiretag uint64 if !xd.IsPacked() { wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()]) @@ -59,10 +59,10 @@ func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { // This is true for composite types, where we pass in a message, list, or map to fill in, // and for enums, where we pass in a prototype value to specify the concrete enum type. switch xd.Kind() { - case pref.MessageKind, pref.GroupKind, pref.EnumKind: + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.EnumKind: e.unmarshalNeedsValue = true default: - if xd.Cardinality() == pref.Repeated { + if xd.Cardinality() == protoreflect.Repeated { e.unmarshalNeedsValue = true } } @@ -73,21 +73,21 @@ type lazyExtensionValue struct { atomicOnce uint32 // atomically set if value is valid mu sync.Mutex xi *extensionFieldInfo - value pref.Value + value protoreflect.Value b []byte - fn func() pref.Value + fn func() protoreflect.Value } type ExtensionField struct { - typ pref.ExtensionType + typ protoreflect.ExtensionType // value is either the value of GetValue, // or a *lazyExtensionValue that then returns the value of GetValue. - value pref.Value + value protoreflect.Value lazy *lazyExtensionValue } -func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { +func (f *ExtensionField) appendLazyBytes(xt protoreflect.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { if f.lazy == nil { f.lazy = &lazyExtensionValue{xi: xi} } @@ -97,7 +97,7 @@ func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFie f.lazy.b = append(f.lazy.b, b...) 
} -func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool { +func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { if f.typ == nil { return true } @@ -154,7 +154,7 @@ func (f *ExtensionField) lazyInit() { // Set sets the type and value of the extension field. // This must not be called concurrently. -func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { +func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) { f.typ = t f.value = v f.lazy = nil @@ -162,14 +162,14 @@ func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { // SetLazy sets the type and a value that is to be lazily evaluated upon first use. // This must not be called concurrently. -func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) { +func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { f.typ = t f.lazy = &lazyExtensionValue{fn: fn} } // Value returns the value of the extension field. // This may be called concurrently. -func (f *ExtensionField) Value() pref.Value { +func (f *ExtensionField) Value() protoreflect.Value { if f.lazy != nil { if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { f.lazyInit() @@ -181,7 +181,7 @@ func (f *ExtensionField) Value() pref.Value { // Type returns the type of the extension field. // This may be called concurrently. -func (f ExtensionField) Type() pref.ExtensionType { +func (f ExtensionField) Type() protoreflect.ExtensionType { return f.typ } @@ -193,7 +193,7 @@ func (f ExtensionField) IsSet() bool { // IsLazy reports whether a field is lazily encoded. // It is exported for testing. -func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { +func IsLazy(m protoreflect.Message, fd protoreflect.FieldDescriptor) bool { var mi *MessageInfo var p pointer switch m := m.(type) { @@ -206,7 +206,7 @@ func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { default: return false } - xd, ok := fd.(pref.ExtensionTypeDescriptor) + xd, ok := fd.(protoreflect.ExtensionTypeDescriptor) if !ok { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index cb4b482d166f..3fadd241e1c4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -12,9 +12,9 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) type errInvalidUTF8 struct{} @@ -30,7 +30,7 @@ func (errInvalidUTF8) Unwrap() error { return errors.Error } // to the appropriate field-specific function as necessary. // // The unmarshal function is set on each field individually as usual. 
-func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) { +func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si structInfo) { fs := si.oneofsByName[od.Name()] ft := fs.Type oneofFields := make(map[reflect.Type]*coderFieldInfo) @@ -118,13 +118,13 @@ func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structIn } } -func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { +func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { var once sync.Once - var messageType pref.MessageType + var messageType protoreflect.MessageType lazyInit := func() { once.Do(func() { messageName := fd.Message().FullName() - messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) }) } @@ -190,7 +190,7 @@ func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { } } -func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ size: sizeMessageInfo, @@ -280,7 +280,7 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh if n < 0 { return out, errDecode } - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: m.ProtoReflect(), }) @@ -288,27 +288,27 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh return out, err } out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } -func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int { +func sizeMessageValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { m := v.Message().Interface() return sizeMessage(m, tagsize, opts) } -func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendMessageValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { m := v.Message().Interface() return appendMessage(b, m, wiretag, opts) } -func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { +func consumeMessageValue(b []byte, v protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { m := v.Message().Interface() out, err := consumeMessage(b, m, wtyp, opts) return v, out, err } -func isInitMessageValue(v pref.Value) error { +func isInitMessageValue(v protoreflect.Value) error { m := v.Message().Interface() return proto.CheckInitialized(m) } @@ -321,17 +321,17 @@ var coderMessageValue = valueCoderFuncs{ merge: mergeMessageValue, } -func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int { +func sizeGroupValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { m := v.Message().Interface() return sizeGroup(m, tagsize, opts) } -func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendGroupValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { m := v.Message().Interface() return appendGroup(b, m, wiretag, opts) } -func 
consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { +func consumeGroupValue(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { m := v.Message().Interface() out, err := consumeGroup(b, m, num, wtyp, opts) return v, out, err @@ -345,7 +345,7 @@ var coderGroupValue = valueCoderFuncs{ merge: mergeMessageValue, } -func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeGroupFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { num := fd.Number() if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ @@ -424,7 +424,7 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir if n < 0 { return out, errDecode } - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: m.ProtoReflect(), }) @@ -432,11 +432,11 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir return out, err } out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } -func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeMessageSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ size: sizeMessageSliceInfo, @@ -555,7 +555,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir return out, errDecode } mp := reflect.New(goType.Elem()) - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: asMessage(mp).ProtoReflect(), }) @@ -564,7 +564,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir } p.AppendPointerSlice(pointerOfValue(mp)) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } @@ -581,7 +581,7 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { // Slices of messages -func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { +func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { @@ -591,7 +591,7 @@ func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) i return n } -func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() mopts := opts.Options() for i, llen := 0, list.Len(); i < llen; i++ { @@ -608,30 +608,30 @@ func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts ma return b, nil } -func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { +func consumeMessageSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, 
err error) { list := listv.List() if wtyp != protowire.BytesType { - return pref.Value{}, out, errUnknown + return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return pref.Value{}, out, errDecode + return protoreflect.Value{}, out, errDecode } m := list.NewElement() - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: m.Message(), }) if err != nil { - return pref.Value{}, out, err + return protoreflect.Value{}, out, err } list.Append(m) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return listv, out, nil } -func isInitMessageSliceValue(listv pref.Value) error { +func isInitMessageSliceValue(listv protoreflect.Value) error { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() @@ -650,7 +650,7 @@ var coderMessageSliceValue = valueCoderFuncs{ merge: mergeMessageListValue, } -func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { +func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { @@ -660,7 +660,7 @@ func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int return n } -func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendGroupSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() mopts := opts.Options() for i, llen := 0, list.Len(); i < llen; i++ { @@ -676,26 +676,26 @@ func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts mars return b, nil } -func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { +func consumeGroupSliceValue(b []byte, listv protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.StartGroupType { - return pref.Value{}, out, errUnknown + return protoreflect.Value{}, out, errUnknown } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return pref.Value{}, out, errDecode + return protoreflect.Value{}, out, errDecode } m := list.NewElement() - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: m.Message(), }) if err != nil { - return pref.Value{}, out, err + return protoreflect.Value{}, out, err } list.Append(m) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return listv, out, nil } @@ -707,7 +707,7 @@ var coderGroupSliceValue = valueCoderFuncs{ merge: mergeMessageListValue, } -func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { num := fd.Number() if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ @@ -772,7 +772,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire return out, errDecode } mp := reflect.New(goType.Elem()) - o, err := 
opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: asMessage(mp).ProtoReflect(), }) @@ -781,7 +781,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire } p.AppendPointerSlice(pointerOfValue(mp)) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } @@ -822,8 +822,8 @@ func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFie return out, nil } -func asMessage(v reflect.Value) pref.ProtoMessage { - if m, ok := v.Interface().(pref.ProtoMessage); ok { +func asMessage(v reflect.Value) protoreflect.ProtoMessage { + if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { return m } return legacyWrapMessage(v).Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index c1245fef4876..111b9d16f993 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type mapInfo struct { @@ -19,12 +19,12 @@ type mapInfo struct { valWiretag uint64 keyFuncs valueCoderFuncs valFuncs valueCoderFuncs - keyZero pref.Value - keyKind pref.Kind + keyZero protoreflect.Value + keyKind protoreflect.Kind conv *mapConverter } -func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { +func encoderFuncsForMap(fd protoreflect.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { // TODO: Consider generating specialized map coders. 
keyField := fd.MapKey() valField := fd.MapValue() @@ -44,7 +44,7 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage keyKind: keyField.Kind(), conv: conv, } - if valField.Kind() == pref.MessageKind { + if valField.Kind() == protoreflect.MessageKind { valueMessage = getMessageInfo(ft.Elem()) } @@ -68,9 +68,9 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage }, } switch valField.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: funcs.merge = mergeMapOfMessage - case pref.BytesKind: + case protoreflect.BytesKind: funcs.merge = mergeMapOfBytes default: funcs.merge = mergeMap @@ -135,7 +135,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo err := errUnknown switch num { case genid.MapEntry_Key_field_number: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) if err != nil { @@ -144,7 +144,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo key = v n = o.n case genid.MapEntry_Value_field_number: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) if err != nil { @@ -192,7 +192,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi err := errUnknown switch num { case 1: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) if err != nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go index 2706bb67f5d8..4b15493f2f43 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.12 // +build !go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go index 1533ef600cd8..0b31b66eaf84 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.12 // +build go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index cd40527ff646..6b2fdbb739a2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -12,15 +12,15 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // coderMessageInfo contains per-message information used by the fast-path functions. // This is a different type from MessageInfo to keep MessageInfo as general-purpose as // possible. 
type coderMessageInfo struct { - methods piface.Methods + methods protoiface.Methods orderedCoderFields []*coderFieldInfo denseCoderFields []*coderFieldInfo @@ -38,13 +38,13 @@ type coderFieldInfo struct { funcs pointerCoderFuncs // fast-path per-field functions mi *MessageInfo // field's message ft reflect.Type - validation validationInfo // information used by message validation - num pref.FieldNumber // field number - offset offset // struct field offset - wiretag uint64 // field tag (number + wire type) - tagsize int // size of the varint-encoded tag - isPointer bool // true if IsNil may be called on the struct field - isRequired bool // true if field is required + validation validationInfo // information used by message validation + num protoreflect.FieldNumber // field number + offset offset // struct field offset + wiretag uint64 // field tag (number + wire type) + tagsize int // size of the varint-encoded tag + isPointer bool // true if IsNil may be called on the struct field + isRequired bool // true if field is required } func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { @@ -125,8 +125,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { funcs: funcs, mi: childMessage, validation: newFieldValidationInfo(mi, si, fd, ft), - isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(), - isRequired: fd.Cardinality() == pref.Required, + isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(), + isRequired: fd.Cardinality() == protoreflect.Required, } mi.orderedCoderFields = append(mi.orderedCoderFields, cf) mi.coderFields[cf.num] = cf @@ -149,7 +149,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num }) - var maxDense pref.FieldNumber + var maxDense protoreflect.FieldNumber for _, cf := range mi.orderedCoderFields { if cf.num >= 16 && cf.num >= 2*maxDense { break @@ -175,12 +175,12 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.needsInitCheck = needsInitCheck(mi.Desc) if mi.methods.Marshal == nil && mi.methods.Size == nil { - mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Flags |= protoiface.SupportMarshalDeterministic mi.methods.Marshal = mi.marshal mi.methods.Size = mi.size } if mi.methods.Unmarshal == nil { - mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Flags |= protoiface.SupportUnmarshalDiscardUnknown mi.methods.Unmarshal = mi.unmarshal } if mi.methods.CheckInitialized == nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go index 90705e3aea74..145c577bd6b2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index e89971238879..576dcf3aac50 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // pointerCoderFuncs is a set of pointer encoding functions. @@ -25,83 +25,83 @@ type pointerCoderFuncs struct { // valueCoderFuncs is a set of protoreflect.Value encoding functions. type valueCoderFuncs struct { - size func(v pref.Value, tagsize int, opts marshalOptions) int - marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) - unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) - isInit func(v pref.Value) error - merge func(dst, src pref.Value, opts mergeOptions) pref.Value + size func(v protoreflect.Value, tagsize int, opts marshalOptions) int + marshal func(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) + isInit func(v protoreflect.Value) error + merge func(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value } // fieldCoder returns pointer functions for a field, used for operating on // struct fields. -func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { +func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { switch { case fd.IsMap(): return encoderFuncsForMap(fd, ft) - case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): // Repeated fields (not packed). 
if ft.Kind() != reflect.Slice { break } ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolSlice } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumSlice } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32Slice } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32Slice } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32Slice } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64Slice } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64Slice } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64Slice } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32Slice } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32Slice } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatSlice } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64Slice } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64Slice } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoubleSlice } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringSliceValidateUTF8 } @@ -114,19 +114,19 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesSlice } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringSlice } if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesSlice } - case pref.MessageKind: + case protoreflect.MessageKind: return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft) - case pref.GroupKind: + case protoreflect.GroupKind: return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft) } - case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): // Packed repeated fields. 
// // Only repeated fields of primitive numeric types @@ -136,128 +136,128 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer } ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolPackedSlice } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumPackedSlice } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32PackedSlice } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32PackedSlice } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32PackedSlice } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64PackedSlice } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64PackedSlice } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64PackedSlice } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32PackedSlice } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32PackedSlice } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatPackedSlice } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64PackedSlice } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64PackedSlice } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoublePackedSlice } } - case fd.Kind() == pref.MessageKind: + case fd.Kind() == protoreflect.MessageKind: return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) - case fd.Kind() == pref.GroupKind: + case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil: + case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. 
switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolNoZero } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumNoZero } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32NoZero } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32NoZero } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32NoZero } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64NoZero } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64NoZero } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64NoZero } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32NoZero } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32NoZero } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatNoZero } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64NoZero } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64NoZero } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoubleNoZero } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringNoZeroValidateUTF8 } @@ -270,7 +270,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesNoZero } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringNoZero } @@ -281,133 +281,133 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer case ft.Kind() == reflect.Ptr: ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolPtr } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumPtr } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32Ptr } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32Ptr } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32Ptr } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64Ptr } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64Ptr } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64Ptr } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32Ptr } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32Ptr } - case pref.FloatKind: + case 
protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatPtr } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64Ptr } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64Ptr } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoublePtr } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringPtrValidateUTF8 } if ft.Kind() == reflect.String { return nil, coderStringPtr } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringPtr } } default: switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBool } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnum } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32 } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32 } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32 } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64 } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64 } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64 } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32 } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32 } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloat } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64 } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64 } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDouble } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringValidateUTF8 } @@ -420,7 +420,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytes } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderString } @@ -434,122 +434,122 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer // encoderFuncsForValue returns value functions for a field, used for // extension values and map encoding. 
-func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs { +func encoderFuncsForValue(fd protoreflect.FieldDescriptor) valueCoderFuncs { switch { - case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolSliceValue - case pref.EnumKind: + case protoreflect.EnumKind: return coderEnumSliceValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32SliceValue - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32SliceValue - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32SliceValue - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64SliceValue - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64SliceValue - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64SliceValue - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32SliceValue - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32SliceValue - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatSliceValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64SliceValue - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64SliceValue - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoubleSliceValue - case pref.StringKind: + case protoreflect.StringKind: // We don't have a UTF-8 validating coder for repeated string fields. // Value coders are used for extensions and maps. // Extensions are never proto3, and maps never contain lists. return coderStringSliceValue - case pref.BytesKind: + case protoreflect.BytesKind: return coderBytesSliceValue - case pref.MessageKind: + case protoreflect.MessageKind: return coderMessageSliceValue - case pref.GroupKind: + case protoreflect.GroupKind: return coderGroupSliceValue } - case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolPackedSliceValue - case pref.EnumKind: + case protoreflect.EnumKind: return coderEnumPackedSliceValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32PackedSliceValue - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32PackedSliceValue - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32PackedSliceValue - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64PackedSliceValue - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64PackedSliceValue - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64PackedSliceValue - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32PackedSliceValue - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32PackedSliceValue - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatPackedSliceValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64PackedSliceValue - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64PackedSliceValue - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoublePackedSliceValue } default: switch fd.Kind() { default: - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolValue - case 
pref.EnumKind: + case protoreflect.EnumKind: return coderEnumValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32Value - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32Value - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32Value - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64Value - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64Value - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64Value - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32Value - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32Value - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64Value - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64Value - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoubleValue - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { return coderStringValueValidateUTF8 } return coderStringValue - case pref.BytesKind: + case protoreflect.BytesKind: return coderBytesValue - case pref.MessageKind: + case protoreflect.MessageKind: return coderMessageValue - case pref.GroupKind: + case protoreflect.GroupKind: return coderGroupValue } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index e118af1e20cd..757642e23c9e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index acd61bb50b2c..11a6128ba56b 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -8,7 +8,7 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // unwrapper unwraps the value to the underlying value. @@ -20,13 +20,13 @@ type unwrapper interface { // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. type Converter interface { // PBValueOf converts a reflect.Value to a protoreflect.Value. - PBValueOf(reflect.Value) pref.Value + PBValueOf(reflect.Value) protoreflect.Value // GoValueOf converts a protoreflect.Value to a reflect.Value. - GoValueOf(pref.Value) reflect.Value + GoValueOf(protoreflect.Value) reflect.Value // IsValidPB returns whether a protoreflect.Value is compatible with this type. - IsValidPB(pref.Value) bool + IsValidPB(protoreflect.Value) bool // IsValidGo returns whether a reflect.Value is compatible with this type. IsValidGo(reflect.Value) bool @@ -34,12 +34,12 @@ type Converter interface { // New returns a new field value. // For scalars, it returns the default value of the field. // For composite types, it returns a new mutable value. - New() pref.Value + New() protoreflect.Value // Zero returns a new field value. // For scalars, it returns the default value of the field. 
// For composite types, it returns an immutable, empty value. - Zero() pref.Value + Zero() protoreflect.Value } // NewConverter matches a Go type with a protobuf field and returns a Converter @@ -50,7 +50,7 @@ type Converter interface { // This matcher deliberately supports a wider range of Go types than what // protoc-gen-go historically generated to be able to automatically wrap some // v1 messages generated by other forks of protoc-gen-go. -func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { +func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { switch { case fd.IsList(): return newListConverter(t, fd) @@ -76,68 +76,68 @@ var ( ) var ( - boolZero = pref.ValueOfBool(false) - int32Zero = pref.ValueOfInt32(0) - int64Zero = pref.ValueOfInt64(0) - uint32Zero = pref.ValueOfUint32(0) - uint64Zero = pref.ValueOfUint64(0) - float32Zero = pref.ValueOfFloat32(0) - float64Zero = pref.ValueOfFloat64(0) - stringZero = pref.ValueOfString("") - bytesZero = pref.ValueOfBytes(nil) + boolZero = protoreflect.ValueOfBool(false) + int32Zero = protoreflect.ValueOfInt32(0) + int64Zero = protoreflect.ValueOfInt64(0) + uint32Zero = protoreflect.ValueOfUint32(0) + uint64Zero = protoreflect.ValueOfUint64(0) + float32Zero = protoreflect.ValueOfFloat32(0) + float64Zero = protoreflect.ValueOfFloat64(0) + stringZero = protoreflect.ValueOfString("") + bytesZero = protoreflect.ValueOfBytes(nil) ) -func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { - defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value { - if fd.Cardinality() == pref.Repeated { +func newSingularConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { + defVal := func(fd protoreflect.FieldDescriptor, zero protoreflect.Value) protoreflect.Value { + if fd.Cardinality() == protoreflect.Repeated { // Default isn't defined for repeated fields. 
return zero } return fd.Default() } switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if t.Kind() == reflect.Bool { return &boolConverter{t, defVal(fd, boolZero)} } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if t.Kind() == reflect.Int32 { return &int32Converter{t, defVal(fd, int32Zero)} } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if t.Kind() == reflect.Int64 { return &int64Converter{t, defVal(fd, int64Zero)} } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if t.Kind() == reflect.Uint32 { return &uint32Converter{t, defVal(fd, uint32Zero)} } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if t.Kind() == reflect.Uint64 { return &uint64Converter{t, defVal(fd, uint64Zero)} } - case pref.FloatKind: + case protoreflect.FloatKind: if t.Kind() == reflect.Float32 { return &float32Converter{t, defVal(fd, float32Zero)} } - case pref.DoubleKind: + case protoreflect.DoubleKind: if t.Kind() == reflect.Float64 { return &float64Converter{t, defVal(fd, float64Zero)} } - case pref.StringKind: + case protoreflect.StringKind: if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { return &stringConverter{t, defVal(fd, stringZero)} } - case pref.BytesKind: + case protoreflect.BytesKind: if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { return &bytesConverter{t, defVal(fd, bytesZero)} } - case pref.EnumKind: + case protoreflect.EnumKind: // Handle enums, which must be a named int32 type. 
if t.Kind() == reflect.Int32 { return newEnumConverter(t, fd) } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return newMessageConverter(t) } panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) @@ -145,184 +145,184 @@ func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { type boolConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *boolConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfBool(v.Bool()) + return protoreflect.ValueOfBool(v.Bool()) } -func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *boolConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Bool()).Convert(c.goType) } -func (c *boolConverter) IsValidPB(v pref.Value) bool { +func (c *boolConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(bool) return ok } func (c *boolConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *boolConverter) New() pref.Value { return c.def } -func (c *boolConverter) Zero() pref.Value { return c.def } +func (c *boolConverter) New() protoreflect.Value { return c.def } +func (c *boolConverter) Zero() protoreflect.Value { return c.def } type int32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *int32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfInt32(int32(v.Int())) + return protoreflect.ValueOfInt32(int32(v.Int())) } -func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *int32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(int32(v.Int())).Convert(c.goType) } -func (c *int32Converter) IsValidPB(v pref.Value) bool { +func (c *int32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(int32) return ok } func (c *int32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *int32Converter) New() pref.Value { return c.def } -func (c *int32Converter) Zero() pref.Value { return c.def } +func (c *int32Converter) New() protoreflect.Value { return c.def } +func (c *int32Converter) Zero() protoreflect.Value { return c.def } type int64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *int64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfInt64(int64(v.Int())) + return protoreflect.ValueOfInt64(int64(v.Int())) } -func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *int64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(int64(v.Int())).Convert(c.goType) } -func (c *int64Converter) IsValidPB(v pref.Value) bool { +func (c *int64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(int64) return ok } func (c *int64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c 
*int64Converter) New() pref.Value { return c.def } -func (c *int64Converter) Zero() pref.Value { return c.def } +func (c *int64Converter) New() protoreflect.Value { return c.def } +func (c *int64Converter) Zero() protoreflect.Value { return c.def } type uint32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *uint32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfUint32(uint32(v.Uint())) + return protoreflect.ValueOfUint32(uint32(v.Uint())) } -func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *uint32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType) } -func (c *uint32Converter) IsValidPB(v pref.Value) bool { +func (c *uint32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(uint32) return ok } func (c *uint32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *uint32Converter) New() pref.Value { return c.def } -func (c *uint32Converter) Zero() pref.Value { return c.def } +func (c *uint32Converter) New() protoreflect.Value { return c.def } +func (c *uint32Converter) Zero() protoreflect.Value { return c.def } type uint64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *uint64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfUint64(uint64(v.Uint())) + return protoreflect.ValueOfUint64(uint64(v.Uint())) } -func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *uint64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType) } -func (c *uint64Converter) IsValidPB(v pref.Value) bool { +func (c *uint64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(uint64) return ok } func (c *uint64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *uint64Converter) New() pref.Value { return c.def } -func (c *uint64Converter) Zero() pref.Value { return c.def } +func (c *uint64Converter) New() protoreflect.Value { return c.def } +func (c *uint64Converter) Zero() protoreflect.Value { return c.def } type float32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *float32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfFloat32(float32(v.Float())) + return protoreflect.ValueOfFloat32(float32(v.Float())) } -func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *float32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(float32(v.Float())).Convert(c.goType) } -func (c *float32Converter) IsValidPB(v pref.Value) bool { +func (c *float32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(float32) return ok } func (c *float32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c 
*float32Converter) New() pref.Value { return c.def } -func (c *float32Converter) Zero() pref.Value { return c.def } +func (c *float32Converter) New() protoreflect.Value { return c.def } +func (c *float32Converter) Zero() protoreflect.Value { return c.def } type float64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *float64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfFloat64(float64(v.Float())) + return protoreflect.ValueOfFloat64(float64(v.Float())) } -func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *float64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(float64(v.Float())).Convert(c.goType) } -func (c *float64Converter) IsValidPB(v pref.Value) bool { +func (c *float64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(float64) return ok } func (c *float64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *float64Converter) New() pref.Value { return c.def } -func (c *float64Converter) Zero() pref.Value { return c.def } +func (c *float64Converter) New() protoreflect.Value { return c.def } +func (c *float64Converter) Zero() protoreflect.Value { return c.def } type stringConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfString(v.Convert(stringType).String()) + return protoreflect.ValueOfString(v.Convert(stringType).String()) } -func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { // pref.Value.String never panics, so we go through an interface // conversion here to check the type. 
s := v.Interface().(string) @@ -331,71 +331,71 @@ func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { } return reflect.ValueOf(s).Convert(c.goType) } -func (c *stringConverter) IsValidPB(v pref.Value) bool { +func (c *stringConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(string) return ok } func (c *stringConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *stringConverter) New() pref.Value { return c.def } -func (c *stringConverter) Zero() pref.Value { return c.def } +func (c *stringConverter) New() protoreflect.Value { return c.def } +func (c *stringConverter) Zero() protoreflect.Value { return c.def } type bytesConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *bytesConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } if c.goType.Kind() == reflect.String && v.Len() == 0 { - return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil) + return protoreflect.ValueOfBytes(nil) // ensure empty string is []byte(nil) } - return pref.ValueOfBytes(v.Convert(bytesType).Bytes()) + return protoreflect.ValueOfBytes(v.Convert(bytesType).Bytes()) } -func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *bytesConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Bytes()).Convert(c.goType) } -func (c *bytesConverter) IsValidPB(v pref.Value) bool { +func (c *bytesConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().([]byte) return ok } func (c *bytesConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *bytesConverter) New() pref.Value { return c.def } -func (c *bytesConverter) Zero() pref.Value { return c.def } +func (c *bytesConverter) New() protoreflect.Value { return c.def } +func (c *bytesConverter) Zero() protoreflect.Value { return c.def } type enumConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter { - var def pref.Value - if fd.Cardinality() == pref.Repeated { - def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number()) +func newEnumConverter(goType reflect.Type, fd protoreflect.FieldDescriptor) Converter { + var def protoreflect.Value + if fd.Cardinality() == protoreflect.Repeated { + def = protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number()) } else { def = fd.Default() } return &enumConverter{goType, def} } -func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *enumConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfEnum(pref.EnumNumber(v.Int())) + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v.Int())) } -func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *enumConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Enum()).Convert(c.goType) } -func (c *enumConverter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().(pref.EnumNumber) +func (c *enumConverter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(protoreflect.EnumNumber) return ok } @@ -403,11 +403,11 @@ func (c *enumConverter) IsValidGo(v reflect.Value) bool { return 
v.IsValid() && v.Type() == c.goType } -func (c *enumConverter) New() pref.Value { +func (c *enumConverter) New() protoreflect.Value { return c.def } -func (c *enumConverter) Zero() pref.Value { +func (c *enumConverter) Zero() protoreflect.Value { return c.def } @@ -419,7 +419,7 @@ func newMessageConverter(goType reflect.Type) Converter { return &messageConverter{goType} } -func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *messageConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } @@ -430,13 +430,13 @@ func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { v = reflect.Zero(reflect.PtrTo(v.Type())) } } - if m, ok := v.Interface().(pref.ProtoMessage); ok { - return pref.ValueOfMessage(m.ProtoReflect()) + if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { + return protoreflect.ValueOfMessage(m.ProtoReflect()) } - return pref.ValueOfMessage(legacyWrapMessage(v)) + return protoreflect.ValueOfMessage(legacyWrapMessage(v)) } -func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *messageConverter) GoValueOf(v protoreflect.Value) reflect.Value { m := v.Message() var rv reflect.Value if u, ok := m.(unwrapper); ok { @@ -460,7 +460,7 @@ func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { return rv } -func (c *messageConverter) IsValidPB(v pref.Value) bool { +func (c *messageConverter) IsValidPB(v protoreflect.Value) bool { m := v.Message() var rv reflect.Value if u, ok := m.(unwrapper); ok { @@ -478,14 +478,14 @@ func (c *messageConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *messageConverter) New() pref.Value { +func (c *messageConverter) New() protoreflect.Value { if c.isNonPointer() { return c.PBValueOf(reflect.New(c.goType).Elem()) } return c.PBValueOf(reflect.New(c.goType.Elem())) } -func (c *messageConverter) Zero() pref.Value { +func (c *messageConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index 6fccab520e59..f89136516f96 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -8,10 +8,10 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) -func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { +func newListConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { switch { case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice: return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)} @@ -26,16 +26,16 @@ type listConverter struct { c Converter } -func (c *listConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *listConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } pv := reflect.New(c.goType) pv.Elem().Set(v) - return pref.ValueOfList(&listReflect{pv, c.c}) + return protoreflect.ValueOfList(&listReflect{pv, c.c}) } -func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *listConverter) GoValueOf(v protoreflect.Value) reflect.Value { rv := v.List().(*listReflect).v if rv.IsNil() { return reflect.Zero(c.goType) @@ 
-43,7 +43,7 @@ func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { return rv.Elem() } -func (c *listConverter) IsValidPB(v pref.Value) bool { +func (c *listConverter) IsValidPB(v protoreflect.Value) bool { list, ok := v.Interface().(*listReflect) if !ok { return false @@ -55,12 +55,12 @@ func (c *listConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *listConverter) New() pref.Value { - return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) +func (c *listConverter) New() protoreflect.Value { + return protoreflect.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) } -func (c *listConverter) Zero() pref.Value { - return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) +func (c *listConverter) Zero() protoreflect.Value { + return protoreflect.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) } type listPtrConverter struct { @@ -68,18 +68,18 @@ type listPtrConverter struct { c Converter } -func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *listPtrConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfList(&listReflect{v, c.c}) + return protoreflect.ValueOfList(&listReflect{v, c.c}) } -func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *listPtrConverter) GoValueOf(v protoreflect.Value) reflect.Value { return v.List().(*listReflect).v } -func (c *listPtrConverter) IsValidPB(v pref.Value) bool { +func (c *listPtrConverter) IsValidPB(v protoreflect.Value) bool { list, ok := v.Interface().(*listReflect) if !ok { return false @@ -91,11 +91,11 @@ func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *listPtrConverter) New() pref.Value { +func (c *listPtrConverter) New() protoreflect.Value { return c.PBValueOf(reflect.New(c.goType.Elem())) } -func (c *listPtrConverter) Zero() pref.Value { +func (c *listPtrConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } @@ -110,16 +110,16 @@ func (ls *listReflect) Len() int { } return ls.v.Elem().Len() } -func (ls *listReflect) Get(i int) pref.Value { +func (ls *listReflect) Get(i int) protoreflect.Value { return ls.conv.PBValueOf(ls.v.Elem().Index(i)) } -func (ls *listReflect) Set(i int, v pref.Value) { +func (ls *listReflect) Set(i int, v protoreflect.Value) { ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v)) } -func (ls *listReflect) Append(v pref.Value) { +func (ls *listReflect) Append(v protoreflect.Value) { ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v))) } -func (ls *listReflect) AppendMutable() pref.Value { +func (ls *listReflect) AppendMutable() protoreflect.Value { if _, ok := ls.conv.(*messageConverter); !ok { panic("invalid AppendMutable on list with non-message type") } @@ -130,7 +130,7 @@ func (ls *listReflect) AppendMutable() pref.Value { func (ls *listReflect) Truncate(i int) { ls.v.Elem().Set(ls.v.Elem().Slice(0, i)) } -func (ls *listReflect) NewElement() pref.Value { +func (ls *listReflect) NewElement() protoreflect.Value { return ls.conv.New() } func (ls *listReflect) IsValid() bool { diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index de06b2593f89..f30b0a0576de 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ 
b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -8,7 +8,7 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type mapConverter struct { @@ -16,7 +16,7 @@ type mapConverter struct { keyConv, valConv Converter } -func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { +func newMapConverter(t reflect.Type, fd protoreflect.FieldDescriptor) *mapConverter { if t.Kind() != reflect.Map { panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) } @@ -27,18 +27,18 @@ func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { } } -func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *mapConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) + return protoreflect.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) } -func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *mapConverter) GoValueOf(v protoreflect.Value) reflect.Value { return v.Map().(*mapReflect).v } -func (c *mapConverter) IsValidPB(v pref.Value) bool { +func (c *mapConverter) IsValidPB(v protoreflect.Value) bool { mapv, ok := v.Interface().(*mapReflect) if !ok { return false @@ -50,11 +50,11 @@ func (c *mapConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *mapConverter) New() pref.Value { +func (c *mapConverter) New() protoreflect.Value { return c.PBValueOf(reflect.MakeMap(c.goType)) } -func (c *mapConverter) Zero() pref.Value { +func (c *mapConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } @@ -67,29 +67,29 @@ type mapReflect struct { func (ms *mapReflect) Len() int { return ms.v.Len() } -func (ms *mapReflect) Has(k pref.MapKey) bool { +func (ms *mapReflect) Has(k protoreflect.MapKey) bool { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.v.MapIndex(rk) return rv.IsValid() } -func (ms *mapReflect) Get(k pref.MapKey) pref.Value { +func (ms *mapReflect) Get(k protoreflect.MapKey) protoreflect.Value { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.v.MapIndex(rk) if !rv.IsValid() { - return pref.Value{} + return protoreflect.Value{} } return ms.valConv.PBValueOf(rv) } -func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) { +func (ms *mapReflect) Set(k protoreflect.MapKey, v protoreflect.Value) { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.valConv.GoValueOf(v) ms.v.SetMapIndex(rk, rv) } -func (ms *mapReflect) Clear(k pref.MapKey) { +func (ms *mapReflect) Clear(k protoreflect.MapKey) { rk := ms.keyConv.GoValueOf(k.Value()) ms.v.SetMapIndex(rk, reflect.Value{}) } -func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { +func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { if _, ok := ms.valConv.(*messageConverter); !ok { panic("invalid Mutable on map with non-message value type") } @@ -100,7 +100,7 @@ func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { } return v } -func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { +func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { iter := mapRange(ms.v) for iter.Next() { k := ms.keyConv.PBValueOf(iter.Key()).MapKey() @@ -110,7 +110,7 @@ func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { } } } -func (ms *mapReflect) NewValue() pref.Value { +func (ms *mapReflect) 
NewValue() protoreflect.Value { return ms.valConv.New() } func (ms *mapReflect) IsValid() bool { diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index 949dc49a65b3..cda0520c275c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -12,12 +12,12 @@ import ( "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoiface" - piface "google.golang.org/protobuf/runtime/protoiface" ) var errDecode = errors.New("cannot parse invalid wire-format data") +var errRecursionDepth = errors.New("exceeded maximum recursion depth") type unmarshalOptions struct { flags protoiface.UnmarshalInputFlags @@ -25,6 +25,7 @@ type unmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + depth int } func (o unmarshalOptions) Options() proto.UnmarshalOptions { @@ -36,14 +37,17 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions { } } -func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 } +func (o unmarshalOptions) DiscardUnknown() bool { + return o.flags&protoiface.UnmarshalDiscardUnknown != 0 +} func (o unmarshalOptions) IsDefault() bool { - return o.flags == 0 && o.resolver == preg.GlobalTypes + return o.flags == 0 && o.resolver == protoregistry.GlobalTypes } var lazyUnmarshalOptions = unmarshalOptions{ - resolver: preg.GlobalTypes, + resolver: protoregistry.GlobalTypes, + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -52,7 +56,7 @@ type unmarshalOutput struct { } // unmarshal is protoreflect.Methods.Unmarshal. 
-func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { +func (mi *MessageInfo) unmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { var p pointer if ms, ok := in.Message.(*messageState); ok { p = ms.pointer() @@ -62,12 +66,13 @@ func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutp out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, + depth: in.Depth, }) - var flags piface.UnmarshalOutputFlags + var flags protoiface.UnmarshalOutputFlags if out.initialized { - flags |= piface.UnmarshalInitialized + flags |= protoiface.UnmarshalInitialized } - return piface.UnmarshalOutput{ + return protoiface.UnmarshalOutput{ Flags: flags, }, err } @@ -82,6 +87,10 @@ var errUnknown = errors.New("unknown") func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { mi.init() + opts.depth-- + if opts.depth < 0 { + return out, errRecursionDepth + } if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } @@ -202,7 +211,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p var err error xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num) if err != nil { - if err == preg.NotFound { + if err == protoregistry.NotFound { return out, errUnknown } return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err) diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go index 8c1eab4bfd86..5f3ef5ad732f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go @@ -7,15 +7,15 @@ package impl import ( "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type EnumInfo struct { GoReflectType reflect.Type // int32 kind - Desc pref.EnumDescriptor + Desc protoreflect.EnumDescriptor } -func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum { - return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum) +func (t *EnumInfo) New(n protoreflect.EnumNumber) protoreflect.Enum { + return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(protoreflect.Enum) } -func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc } +func (t *EnumInfo) Descriptor() protoreflect.EnumDescriptor { return t.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index e904fd993657..cb25b0bae1d7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -9,8 +9,8 @@ import ( "sync" "sync/atomic" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // ExtensionInfo implements ExtensionType. @@ -45,7 +45,7 @@ type ExtensionInfo struct { // since the message may no longer implement the MessageV1 interface. // // Deprecated: Use the ExtendedType method instead. - ExtendedType piface.MessageV1 + ExtendedType protoiface.MessageV1 // ExtensionType is the zero value of the extension type. 
// @@ -83,31 +83,31 @@ const ( extensionInfoFullInit = 2 ) -func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) { +func InitExtensionInfo(xi *ExtensionInfo, xd protoreflect.ExtensionDescriptor, goType reflect.Type) { xi.goType = goType xi.desc = extensionTypeDescriptor{xd, xi} xi.init = extensionInfoDescInit } -func (xi *ExtensionInfo) New() pref.Value { +func (xi *ExtensionInfo) New() protoreflect.Value { return xi.lazyInit().New() } -func (xi *ExtensionInfo) Zero() pref.Value { +func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value { +func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { return xi.lazyInit().GoValueOf(v).Interface() } -func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool { +func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor { +func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { if atomic.LoadUint32(&xi.init) < extensionInfoDescInit { xi.lazyInitSlow() } @@ -144,13 +144,13 @@ func (xi *ExtensionInfo) lazyInitSlow() { } type extensionTypeDescriptor struct { - pref.ExtensionDescriptor + protoreflect.ExtensionDescriptor xi *ExtensionInfo } -func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType { +func (xtd *extensionTypeDescriptor) Type() protoreflect.ExtensionType { return xtd.xi } -func (xtd *extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor { +func (xtd *extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor { return xtd.ExtensionDescriptor } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index f7d7ffb51039..c2a803bb2f92 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -13,13 +13,12 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" ) // legacyEnumName returns the name of enums used in legacy code. // It is neither the protobuf full name nor the qualified Go name, // but rather an odd hybrid of both. -func legacyEnumName(ed pref.EnumDescriptor) string { +func legacyEnumName(ed protoreflect.EnumDescriptor) string { var protoPkg string enumName := string(ed.FullName()) if fd := ed.ParentFile(); fd != nil { @@ -34,68 +33,68 @@ func legacyEnumName(ed pref.EnumDescriptor) string { // legacyWrapEnum wraps v as a protoreflect.Enum, // where v must be a int32 kind and not implement the v2 API already. 
-func legacyWrapEnum(v reflect.Value) pref.Enum { +func legacyWrapEnum(v reflect.Value) protoreflect.Enum { et := legacyLoadEnumType(v.Type()) - return et.New(pref.EnumNumber(v.Int())) + return et.New(protoreflect.EnumNumber(v.Int())) } var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType // legacyLoadEnumType dynamically loads a protoreflect.EnumType for t, // where t must be an int32 kind and not implement the v2 API already. -func legacyLoadEnumType(t reflect.Type) pref.EnumType { +func legacyLoadEnumType(t reflect.Type) protoreflect.EnumType { // Fast-path: check if a EnumType is cached for this concrete type. if et, ok := legacyEnumTypeCache.Load(t); ok { - return et.(pref.EnumType) + return et.(protoreflect.EnumType) } // Slow-path: derive enum descriptor and initialize EnumType. - var et pref.EnumType + var et protoreflect.EnumType ed := LegacyLoadEnumDesc(t) et = &legacyEnumType{ desc: ed, goType: t, } if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok { - return et.(pref.EnumType) + return et.(protoreflect.EnumType) } return et } type legacyEnumType struct { - desc pref.EnumDescriptor + desc protoreflect.EnumDescriptor goType reflect.Type m sync.Map // map[protoreflect.EnumNumber]proto.Enum } -func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum { +func (t *legacyEnumType) New(n protoreflect.EnumNumber) protoreflect.Enum { if e, ok := t.m.Load(n); ok { - return e.(pref.Enum) + return e.(protoreflect.Enum) } e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType} t.m.Store(n, e) return e } -func (t *legacyEnumType) Descriptor() pref.EnumDescriptor { +func (t *legacyEnumType) Descriptor() protoreflect.EnumDescriptor { return t.desc } type legacyEnumWrapper struct { - num pref.EnumNumber - pbTyp pref.EnumType + num protoreflect.EnumNumber + pbTyp protoreflect.EnumType goTyp reflect.Type } -func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor { +func (e *legacyEnumWrapper) Descriptor() protoreflect.EnumDescriptor { return e.pbTyp.Descriptor() } -func (e *legacyEnumWrapper) Type() pref.EnumType { +func (e *legacyEnumWrapper) Type() protoreflect.EnumType { return e.pbTyp } -func (e *legacyEnumWrapper) Number() pref.EnumNumber { +func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { return e.num } -func (e *legacyEnumWrapper) ProtoReflect() pref.Enum { +func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } func (e *legacyEnumWrapper) protoUnwrap() interface{} { @@ -105,8 +104,8 @@ func (e *legacyEnumWrapper) protoUnwrap() interface{} { } var ( - _ pref.Enum = (*legacyEnumWrapper)(nil) - _ unwrapper = (*legacyEnumWrapper)(nil) + _ protoreflect.Enum = (*legacyEnumWrapper)(nil) + _ unwrapper = (*legacyEnumWrapper)(nil) ) var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor @@ -115,15 +114,15 @@ var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor // which must be an int32 kind and not implement the v2 API already. // // This is exported for testing purposes. -func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { +func LegacyLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { // Fast-path: check if an EnumDescriptor is cached for this concrete type. if ed, ok := legacyEnumDescCache.Load(t); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } // Slow-path: initialize EnumDescriptor from the raw descriptor. 
ev := reflect.Zero(t).Interface() - if _, ok := ev.(pref.Enum); ok { + if _, ok := ev.(protoreflect.Enum); ok { panic(fmt.Sprintf("%v already implements proto.Enum", t)) } edV1, ok := ev.(enumV1) @@ -132,7 +131,7 @@ func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { } b, idxs := edV1.EnumDescriptor() - var ed pref.EnumDescriptor + var ed protoreflect.EnumDescriptor if len(idxs) == 1 { ed = legacyLoadFileDesc(b).Enums().Get(idxs[0]) } else { @@ -158,10 +157,10 @@ var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescript // We are unable to use the global enum registry since it is // unfortunately keyed by the protobuf full name, which we also do not know. // Thus, this produces some bogus enum descriptor based on the Go type name. -func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { +func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { // Fast-path: check if an EnumDescriptor is cached for this concrete type. if ed, ok := aberrantEnumDescCache.Load(t); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } // Slow-path: construct a bogus, but unique EnumDescriptor. @@ -182,7 +181,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { // An exhaustive query is clearly impractical, but can be best-effort. if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } return ed } @@ -192,7 +191,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { // It should be sufficiently unique within a program. // // This is exported for testing purposes. -func AberrantDeriveFullName(t reflect.Type) pref.FullName { +func AberrantDeriveFullName(t reflect.Type) protoreflect.FullName { sanitize := func(r rune) rune { switch { case r == '/': @@ -215,5 +214,5 @@ func AberrantDeriveFullName(t reflect.Type) pref.FullName { ss[i] = "x" + s } } - return pref.FullName(strings.Join(ss, ".")) + return protoreflect.FullName(strings.Join(ss, ".")) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go index e3fb0b578586..9b64ad5bba28 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -12,21 +12,21 @@ import ( "reflect" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // These functions exist to support exported APIs in generated protobufs. // While these are deprecated, they cannot be removed for compatibility reasons. // LegacyEnumName returns the name of enums used in legacy code. -func (Export) LegacyEnumName(ed pref.EnumDescriptor) string { +func (Export) LegacyEnumName(ed protoreflect.EnumDescriptor) string { return legacyEnumName(ed) } // LegacyMessageTypeOf returns the protoreflect.MessageType for m, // with name used as the message name if necessary. 
-func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType { +func (Export) LegacyMessageTypeOf(m protoiface.MessageV1, name protoreflect.FullName) protoreflect.MessageType { if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } @@ -36,9 +36,9 @@ func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.M // UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. // The input can either be a string representing the enum value by name, // or a number representing the enum number itself. -func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) { +func (Export) UnmarshalJSONEnum(ed protoreflect.EnumDescriptor, b []byte) (protoreflect.EnumNumber, error) { if b[0] == '"' { - var name pref.Name + var name protoreflect.Name if err := json.Unmarshal(b, &name); err != nil { return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) } @@ -48,7 +48,7 @@ func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumb } return ev.Number(), nil } else { - var num pref.EnumNumber + var num protoreflect.EnumNumber if err := json.Unmarshal(b, &num); err != nil { return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) } @@ -81,8 +81,8 @@ func (Export) CompressGZIP(in []byte) (out []byte) { blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3. blockSize = len(in) } - binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000) - binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff) + binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)) + binary.LittleEndian.PutUint16(blockHeader[3:5], ^uint16(blockSize)) out = append(out, blockHeader[:]...) out = append(out, in[:blockSize]...) in = in[blockSize:] diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 49e723161c01..87b30d0504c1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -12,16 +12,16 @@ import ( ptag "google.golang.org/protobuf/internal/encoding/tag" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) func (xi *ExtensionInfo) initToLegacy() { xd := xi.desc - var parent piface.MessageV1 + var parent protoiface.MessageV1 messageName := xd.ContainingMessage().FullName() - if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil { + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(messageName); mt != nil { // Create a new parent message and unwrap it if possible. mv := mt.New().Interface() t := reflect.TypeOf(mv) @@ -31,7 +31,7 @@ func (xi *ExtensionInfo) initToLegacy() { // Check whether the message implements the legacy v1 Message interface. mz := reflect.Zero(t).Interface() - if mz, ok := mz.(piface.MessageV1); ok { + if mz, ok := mz.(protoiface.MessageV1); ok { parent = mz } } @@ -46,7 +46,7 @@ func (xi *ExtensionInfo) initToLegacy() { // Reconstruct the legacy enum full name. 
var enumName string - if xd.Kind() == pref.EnumKind { + if xd.Kind() == protoreflect.EnumKind { enumName = legacyEnumName(xd.Enum()) } @@ -77,16 +77,16 @@ func (xi *ExtensionInfo) initFromLegacy() { // field number is specified. In such a case, use a placeholder. if xi.ExtendedType == nil || xi.ExtensionType == nil { xd := placeholderExtension{ - name: pref.FullName(xi.Name), - number: pref.FieldNumber(xi.Field), + name: protoreflect.FullName(xi.Name), + number: protoreflect.FieldNumber(xi.Field), } xi.desc = extensionTypeDescriptor{xd, xi} return } // Resolve enum or message dependencies. - var ed pref.EnumDescriptor - var md pref.MessageDescriptor + var ed protoreflect.EnumDescriptor + var md protoreflect.MessageDescriptor t := reflect.TypeOf(xi.ExtensionType) isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 @@ -94,18 +94,18 @@ func (xi *ExtensionInfo) initFromLegacy() { t = t.Elem() } switch v := reflect.Zero(t).Interface().(type) { - case pref.Enum: + case protoreflect.Enum: ed = v.Descriptor() case enumV1: ed = LegacyLoadEnumDesc(t) - case pref.ProtoMessage: + case protoreflect.ProtoMessage: md = v.ProtoReflect().Descriptor() case messageV1: md = LegacyLoadMessageDesc(t) } // Derive basic field information from the struct tag. - var evs pref.EnumValueDescriptors + var evs protoreflect.EnumValueDescriptors if ed != nil { evs = ed.Values() } @@ -114,8 +114,8 @@ func (xi *ExtensionInfo) initFromLegacy() { // Construct a v2 ExtensionType. xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)} xd.L0.ParentFile = filedesc.SurrogateProto2 - xd.L0.FullName = pref.FullName(xi.Name) - xd.L1.Number = pref.FieldNumber(xi.Field) + xd.L0.FullName = protoreflect.FullName(xi.Name) + xd.L1.Number = protoreflect.FieldNumber(xi.Field) xd.L1.Cardinality = fd.L1.Cardinality xd.L1.Kind = fd.L1.Kind xd.L2.IsPacked = fd.L1.IsPacked @@ -138,39 +138,39 @@ func (xi *ExtensionInfo) initFromLegacy() { } type placeholderExtension struct { - name pref.FullName - number pref.FieldNumber + name protoreflect.FullName + number protoreflect.FieldNumber } -func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil } -func (x placeholderExtension) Parent() pref.Descriptor { return nil } -func (x placeholderExtension) Index() int { return 0 } -func (x placeholderExtension) Syntax() pref.Syntax { return 0 } -func (x placeholderExtension) Name() pref.Name { return x.name.Name() } -func (x placeholderExtension) FullName() pref.FullName { return x.name } -func (x placeholderExtension) IsPlaceholder() bool { return true } -func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field } -func (x placeholderExtension) Number() pref.FieldNumber { return x.number } -func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } -func (x placeholderExtension) Kind() pref.Kind { return 0 } -func (x placeholderExtension) HasJSONName() bool { return false } -func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } -func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } -func (x placeholderExtension) HasPresence() bool { return false } -func (x placeholderExtension) HasOptionalKeyword() bool { return false } -func (x placeholderExtension) IsExtension() bool { return true } -func (x placeholderExtension) IsWeak() bool { return false } -func (x placeholderExtension) IsPacked() bool { return false } -func (x placeholderExtension) 
IsList() bool { return false } -func (x placeholderExtension) IsMap() bool { return false } -func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil } -func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil } -func (x placeholderExtension) HasDefault() bool { return false } -func (x placeholderExtension) Default() pref.Value { return pref.Value{} } -func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil } -func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil } -func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil } -func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil } -func (x placeholderExtension) Message() pref.MessageDescriptor { return nil } -func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return } -func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } +func (x placeholderExtension) ParentFile() protoreflect.FileDescriptor { return nil } +func (x placeholderExtension) Parent() protoreflect.Descriptor { return nil } +func (x placeholderExtension) Index() int { return 0 } +func (x placeholderExtension) Syntax() protoreflect.Syntax { return 0 } +func (x placeholderExtension) Name() protoreflect.Name { return x.name.Name() } +func (x placeholderExtension) FullName() protoreflect.FullName { return x.name } +func (x placeholderExtension) IsPlaceholder() bool { return true } +func (x placeholderExtension) Options() protoreflect.ProtoMessage { return descopts.Field } +func (x placeholderExtension) Number() protoreflect.FieldNumber { return x.number } +func (x placeholderExtension) Cardinality() protoreflect.Cardinality { return 0 } +func (x placeholderExtension) Kind() protoreflect.Kind { return 0 } +func (x placeholderExtension) HasJSONName() bool { return false } +func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) HasPresence() bool { return false } +func (x placeholderExtension) HasOptionalKeyword() bool { return false } +func (x placeholderExtension) IsExtension() bool { return true } +func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsPacked() bool { return false } +func (x placeholderExtension) IsList() bool { return false } +func (x placeholderExtension) IsMap() bool { return false } +func (x placeholderExtension) MapKey() protoreflect.FieldDescriptor { return nil } +func (x placeholderExtension) MapValue() protoreflect.FieldDescriptor { return nil } +func (x placeholderExtension) HasDefault() bool { return false } +func (x placeholderExtension) Default() protoreflect.Value { return protoreflect.Value{} } +func (x placeholderExtension) DefaultEnumValue() protoreflect.EnumValueDescriptor { return nil } +func (x placeholderExtension) ContainingOneof() protoreflect.OneofDescriptor { return nil } +func (x placeholderExtension) ContainingMessage() protoreflect.MessageDescriptor { return nil } +func (x placeholderExtension) Enum() protoreflect.EnumDescriptor { return nil } +func (x placeholderExtension) Message() protoreflect.MessageDescriptor { return nil } +func (x placeholderExtension) ProtoType(protoreflect.FieldDescriptor) { return } +func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go 
b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 029feeefd792..61c483fac06e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -16,14 +16,12 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" - piface "google.golang.org/protobuf/runtime/protoiface" ) // legacyWrapMessage wraps v as a protoreflect.Message, // where v must be a *struct kind and not implement the v2 API already. -func legacyWrapMessage(v reflect.Value) pref.Message { +func legacyWrapMessage(v reflect.Value) protoreflect.Message { t := v.Type() if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessage{v: v} @@ -35,7 +33,7 @@ func legacyWrapMessage(v reflect.Value) pref.Message { // legacyLoadMessageType dynamically loads a protoreflect.Type for t, // where t must be not implement the v2 API already. // The provided name is used if it cannot be determined from the message. -func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { +func legacyLoadMessageType(t reflect.Type, name protoreflect.FullName) protoreflect.MessageType { if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessageType{t} } @@ -47,7 +45,7 @@ var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo // legacyLoadMessageInfo dynamically loads a *MessageInfo for t, // where t must be a *struct kind and not implement the v2 API already. // The provided name is used if it cannot be determined from the message. -func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { +func legacyLoadMessageInfo(t reflect.Type, name protoreflect.FullName) *MessageInfo { // Fast-path: check if a MessageInfo is cached for this concrete type. if mt, ok := legacyMessageTypeCache.Load(t); ok { return mt.(*MessageInfo) @@ -68,7 +66,7 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { // supports deterministic serialization or not, but this // preserves the v1 implementation's behavior of always // calling Marshal methods when present. - mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Flags |= protoiface.SupportMarshalDeterministic } if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { mi.methods.Unmarshal = legacyUnmarshal @@ -89,18 +87,18 @@ var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDesc // which should be a *struct kind and must not implement the v2 API already. // // This is exported for testing purposes. -func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { +func LegacyLoadMessageDesc(t reflect.Type) protoreflect.MessageDescriptor { return legacyLoadMessageDesc(t, "") } -func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func legacyLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { // Fast-path: check if a MessageDescriptor is cached for this concrete type. if mi, ok := legacyMessageDescCache.Load(t); ok { - return mi.(pref.MessageDescriptor) + return mi.(protoreflect.MessageDescriptor) } // Slow-path: initialize MessageDescriptor from the raw descriptor. 
mv := reflect.Zero(t).Interface() - if _, ok := mv.(pref.ProtoMessage); ok { + if _, ok := mv.(protoreflect.ProtoMessage); ok { panic(fmt.Sprintf("%v already implements proto.Message", t)) } mdV1, ok := mv.(messageV1) @@ -164,7 +162,7 @@ var ( // // This is a best-effort derivation of the message descriptor using the protobuf // tags on the struct fields. -func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func aberrantLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { aberrantMessageDescLock.Lock() defer aberrantMessageDescLock.Unlock() if aberrantMessageDescCache == nil { @@ -172,7 +170,7 @@ func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDes } return aberrantLoadMessageDescReentrant(t, name) } -func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { // Fast-path: check if an MessageDescriptor is cached for this concrete type. if md, ok := aberrantMessageDescCache[t]; ok { return md @@ -225,9 +223,9 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0] for i := 0; i < vs.Len(); i++ { v := vs.Index(i) - md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{ - pref.FieldNumber(v.FieldByName("Start").Int()), - pref.FieldNumber(v.FieldByName("End").Int() + 1), + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(v.FieldByName("Start").Int()), + protoreflect.FieldNumber(v.FieldByName("End").Int() + 1), }) md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil) } @@ -245,7 +243,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M n := len(md.L2.Oneofs.List) md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{}) od := &md.L2.Oneofs.List[n] - od.L0.FullName = md.FullName().Append(pref.Name(tag)) + od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) od.L0.ParentFile = md.L0.ParentFile od.L0.Parent = md od.L0.Index = n @@ -267,14 +265,14 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M return md } -func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName { +func aberrantDeriveMessageName(t reflect.Type, name protoreflect.FullName) protoreflect.FullName { if name.IsValid() { return name } func() { defer func() { recover() }() // swallow possible nil panics if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok { - name = pref.FullName(m.XXX_MessageName()) + name = protoreflect.FullName(m.XXX_MessageName()) } }() if name.IsValid() { @@ -305,7 +303,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Index = n if fd.L1.IsWeak || fd.L1.HasPacked { - fd.L1.Options = func() pref.ProtoMessage { + fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() if fd.L1.IsWeak { opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) @@ -318,17 +316,17 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, } // Populate Enum and Message. 
- if fd.Enum() == nil && fd.Kind() == pref.EnumKind { + if fd.Enum() == nil && fd.Kind() == protoreflect.EnumKind { switch v := reflect.Zero(t).Interface().(type) { - case pref.Enum: + case protoreflect.Enum: fd.L1.Enum = v.Descriptor() default: fd.L1.Enum = LegacyLoadEnumDesc(t) } } - if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) { + if fd.Message() == nil && (fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind) { switch v := reflect.Zero(t).Interface().(type) { - case pref.ProtoMessage: + case protoreflect.ProtoMessage: fd.L1.Message = v.ProtoReflect().Descriptor() case messageV1: fd.L1.Message = LegacyLoadMessageDesc(t) @@ -337,13 +335,13 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, n := len(md.L1.Messages.List) md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)}) md2 := &md.L1.Messages.List[n] - md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name())))) + md2.L0.FullName = md.FullName().Append(protoreflect.Name(strs.MapEntryName(string(fd.Name())))) md2.L0.ParentFile = md.L0.ParentFile md2.L0.Parent = md md2.L0.Index = n md2.L1.IsMapEntry = true - md2.L2.Options = func() pref.ProtoMessage { + md2.L2.Options = func() protoreflect.ProtoMessage { opts := descopts.Message.ProtoReflect().New() opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true)) return opts.Interface() @@ -364,8 +362,8 @@ type placeholderEnumValues struct { protoreflect.EnumValueDescriptors } -func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor { - return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n))) +func (placeholderEnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + return filedesc.PlaceholderEnumValue(protoreflect.FullName(fmt.Sprintf("UNKNOWN_%d", n))) } // legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder. @@ -383,7 +381,7 @@ type legacyMerger interface { Merge(protoiface.MessageV1) } -var aberrantProtoMethods = &piface.Methods{ +var aberrantProtoMethods = &protoiface.Methods{ Marshal: legacyMarshal, Unmarshal: legacyUnmarshal, Merge: legacyMerge, @@ -392,40 +390,40 @@ var aberrantProtoMethods = &piface.Methods{ // supports deterministic serialization or not, but this // preserves the v1 implementation's behavior of always // calling Marshal methods when present. - Flags: piface.SupportMarshalDeterministic, + Flags: protoiface.SupportMarshalDeterministic, } -func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) { +func legacyMarshal(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() marshaler, ok := v.(legacyMarshaler) if !ok { - return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) + return protoiface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) } out, err := marshaler.Marshal() if in.Buf != nil { out = append(in.Buf, out...) 
} - return piface.MarshalOutput{ + return protoiface.MarshalOutput{ Buf: out, }, err } -func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { +func legacyUnmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() unmarshaler, ok := v.(legacyUnmarshaler) if !ok { - return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) + return protoiface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) } - return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) + return protoiface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) } -func legacyMerge(in piface.MergeInput) piface.MergeOutput { +func legacyMerge(in protoiface.MergeInput) protoiface.MergeOutput { // Check whether this supports the legacy merger. dstv := in.Destination.(unwrapper).protoUnwrap() merger, ok := dstv.(legacyMerger) if ok { merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } // If legacy merger is unavailable, implement merge in terms of @@ -433,29 +431,29 @@ func legacyMerge(in piface.MergeInput) piface.MergeOutput { srcv := in.Source.(unwrapper).protoUnwrap() marshaler, ok := srcv.(legacyMarshaler) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } dstv = in.Destination.(unwrapper).protoUnwrap() unmarshaler, ok := dstv.(legacyUnmarshaler) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } if !in.Source.IsValid() { // Legacy Marshal methods may not function on nil messages. // Check for a typed nil source only after we confirm that // legacy Marshal/Unmarshal methods are present, for // consistency. - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } b, err := marshaler.Marshal() if err != nil { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } err = unmarshaler.Unmarshal(b) if err != nil { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } // aberrantMessageType implements MessageType for all types other than pointer-to-struct. 
@@ -463,19 +461,19 @@ type aberrantMessageType struct { t reflect.Type } -func (mt aberrantMessageType) New() pref.Message { +func (mt aberrantMessageType) New() protoreflect.Message { if mt.t.Kind() == reflect.Ptr { return aberrantMessage{reflect.New(mt.t.Elem())} } return aberrantMessage{reflect.Zero(mt.t)} } -func (mt aberrantMessageType) Zero() pref.Message { +func (mt aberrantMessageType) Zero() protoreflect.Message { return aberrantMessage{reflect.Zero(mt.t)} } func (mt aberrantMessageType) GoType() reflect.Type { return mt.t } -func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor { +func (mt aberrantMessageType) Descriptor() protoreflect.MessageDescriptor { return LegacyLoadMessageDesc(mt.t) } @@ -499,56 +497,56 @@ func (m aberrantMessage) Reset() { } } -func (m aberrantMessage) ProtoReflect() pref.Message { +func (m aberrantMessage) ProtoReflect() protoreflect.Message { return m } -func (m aberrantMessage) Descriptor() pref.MessageDescriptor { +func (m aberrantMessage) Descriptor() protoreflect.MessageDescriptor { return LegacyLoadMessageDesc(m.v.Type()) } -func (m aberrantMessage) Type() pref.MessageType { +func (m aberrantMessage) Type() protoreflect.MessageType { return aberrantMessageType{m.v.Type()} } -func (m aberrantMessage) New() pref.Message { +func (m aberrantMessage) New() protoreflect.Message { if m.v.Type().Kind() == reflect.Ptr { return aberrantMessage{reflect.New(m.v.Type().Elem())} } return aberrantMessage{reflect.Zero(m.v.Type())} } -func (m aberrantMessage) Interface() pref.ProtoMessage { +func (m aberrantMessage) Interface() protoreflect.ProtoMessage { return m } -func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +func (m aberrantMessage) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { return } -func (m aberrantMessage) Has(pref.FieldDescriptor) bool { +func (m aberrantMessage) Has(protoreflect.FieldDescriptor) bool { return false } -func (m aberrantMessage) Clear(pref.FieldDescriptor) { +func (m aberrantMessage) Clear(protoreflect.FieldDescriptor) { panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { if fd.Default().IsValid() { return fd.Default() } panic("invalid Message.Get on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { +func (m aberrantMessage) Set(protoreflect.FieldDescriptor, protoreflect.Value) { panic("invalid Message.Set on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) Mutable(protoreflect.FieldDescriptor) protoreflect.Value { panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) NewField(protoreflect.FieldDescriptor) protoreflect.Value { panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { +func (m aberrantMessage) WhichOneof(protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) GetUnknown() pref.RawFields { +func (m aberrantMessage) GetUnknown() protoreflect.RawFields { return nil } -func (m aberrantMessage) 
SetUnknown(pref.RawFields) { +func (m aberrantMessage) SetUnknown(protoreflect.RawFields) { // SetUnknown discards its input on messages which don't support unknown field storage. } func (m aberrantMessage) IsValid() bool { @@ -557,7 +555,7 @@ func (m aberrantMessage) IsValid() bool { } return false } -func (m aberrantMessage) ProtoMethods() *piface.Methods { +func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } func (m aberrantMessage) protoUnwrap() interface{} { diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go index c65bbc0446ea..7e65f64f28e3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -9,8 +9,8 @@ import ( "reflect" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) type mergeOptions struct{} @@ -20,17 +20,17 @@ func (o mergeOptions) Merge(dst, src proto.Message) { } // merge is protoreflect.Methods.Merge. -func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput { +func (mi *MessageInfo) merge(in protoiface.MergeInput) protoiface.MergeOutput { dp, ok := mi.getPointer(in.Destination) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } sp, ok := mi.getPointer(in.Source) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } mi.mergePointer(dp, sp, mergeOptions{}) - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { @@ -64,7 +64,7 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { continue } dx := (*dext)[num] - var dv pref.Value + var dv protoreflect.Value if dx.Type() == sx.Type() { dv = dx.Value() } @@ -85,15 +85,15 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { } } -func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeScalarValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { return src } -func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value { - return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) +func mergeBytesValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + return protoreflect.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) } -func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { @@ -102,29 +102,29 @@ func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { return dst } -func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeBytesListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { sb := srcl.Get(i).Bytes() db := append(emptyBuf[:], sb...) 
- dstl.Append(pref.ValueOfBytes(db)) + dstl.Append(protoreflect.ValueOfBytes(db)) } return dst } -func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeMessageListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { sm := srcl.Get(i).Message() dm := proto.Clone(sm.Interface()).ProtoReflect() - dstl.Append(pref.ValueOfMessage(dm)) + dstl.Append(protoreflect.ValueOfMessage(dm)) } return dst } -func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeMessageValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { opts.Merge(dst.Message().Interface(), src.Message().Interface()) return dst } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index a104e28e858f..4f5fb67a0ddb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -14,8 +14,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -29,7 +28,7 @@ type MessageInfo struct { GoReflectType reflect.Type // pointer to struct // Desc is the underlying message descriptor type and must be populated. - Desc pref.MessageDescriptor + Desc protoreflect.MessageDescriptor // Exporter must be provided in a purego environment in order to provide // access to unexported fields. @@ -54,7 +53,7 @@ type exporter func(v interface{}, i int) interface{} // is generated by our implementation of protoc-gen-go (for v2 and on). // If it is unable to obtain a MessageInfo, it returns nil. func getMessageInfo(mt reflect.Type) *MessageInfo { - m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage) + m, ok := reflect.Zero(mt).Interface().(protoreflect.ProtoMessage) if !ok { return nil } @@ -97,7 +96,7 @@ func (mi *MessageInfo) initOnce() { // getPointer returns the pointer for a message, which should be of // the type of the MessageInfo. If the message is of a different type, // it returns ok==false. 
-func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { +func (mi *MessageInfo) getPointer(m protoreflect.Message) (p pointer, ok bool) { switch m := m.(type) { case *messageState: return m.pointer(), m.messageInfo() == mi @@ -134,10 +133,10 @@ type structInfo struct { extensionOffset offset extensionType reflect.Type - fieldsByNumber map[pref.FieldNumber]reflect.StructField - oneofsByName map[pref.Name]reflect.StructField - oneofWrappersByType map[reflect.Type]pref.FieldNumber - oneofWrappersByNumber map[pref.FieldNumber]reflect.Type + fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField + oneofsByName map[protoreflect.Name]reflect.StructField + oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber + oneofWrappersByNumber map[protoreflect.FieldNumber]reflect.Type } func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { @@ -147,10 +146,10 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { unknownOffset: invalidOffset, extensionOffset: invalidOffset, - fieldsByNumber: map[pref.FieldNumber]reflect.StructField{}, - oneofsByName: map[pref.Name]reflect.StructField{}, - oneofWrappersByType: map[reflect.Type]pref.FieldNumber{}, - oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{}, + fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{}, + oneofsByName: map[protoreflect.Name]reflect.StructField{}, + oneofWrappersByType: map[reflect.Type]protoreflect.FieldNumber{}, + oneofWrappersByNumber: map[protoreflect.FieldNumber]reflect.Type{}, } fieldLoop: @@ -180,12 +179,12 @@ fieldLoop: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { n, _ := strconv.ParseUint(s, 10, 64) - si.fieldsByNumber[pref.FieldNumber(n)] = f + si.fieldsByNumber[protoreflect.FieldNumber(n)] = f continue fieldLoop } } if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 { - si.oneofsByName[pref.Name(s)] = f + si.oneofsByName[protoreflect.Name(s)] = f continue fieldLoop } } @@ -208,8 +207,8 @@ fieldLoop: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { n, _ := strconv.ParseUint(s, 10, 64) - si.oneofWrappersByType[tf] = pref.FieldNumber(n) - si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf + si.oneofWrappersByType[tf] = protoreflect.FieldNumber(n) + si.oneofWrappersByNumber[protoreflect.FieldNumber(n)] = tf break } } @@ -219,7 +218,11 @@ fieldLoop: } func (mi *MessageInfo) New() protoreflect.Message { - return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface()) + m := reflect.New(mi.GoReflectType.Elem()).Interface() + if r, ok := m.(protoreflect.ProtoMessage); ok { + return r.ProtoReflect() + } + return mi.MessageOf(m) } func (mi *MessageInfo) Zero() protoreflect.Message { return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) @@ -237,7 +240,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { fd := mi.Desc.Fields().Get(i) switch { case fd.IsWeak(): - mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) + mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) return mt case fd.IsMap(): return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 9488b7261313..d9ea010bef9a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ 
b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -10,17 +10,17 @@ import ( "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type reflectMessageInfo struct { - fields map[pref.FieldNumber]*fieldInfo - oneofs map[pref.Name]*oneofInfo + fields map[protoreflect.FieldNumber]*fieldInfo + oneofs map[protoreflect.Name]*oneofInfo // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[pref.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]interface{} // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -30,8 +30,8 @@ type reflectMessageInfo struct { // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. rangeInfos []interface{} // either *fieldInfo or *oneofInfo - getUnknown func(pointer) pref.RawFields - setUnknown func(pointer, pref.RawFields) + getUnknown func(pointer) protoreflect.RawFields + setUnknown func(pointer, protoreflect.RawFields) extensionMap func(pointer) *extensionMap nilMessage atomicNilMessage @@ -52,7 +52,7 @@ func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { // This code assumes that the struct is well-formed and panics if there are // any discrepancies. func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { - mi.fields = map[pref.FieldNumber]*fieldInfo{} + mi.fields = map[protoreflect.FieldNumber]*fieldInfo{} md := mi.Desc fds := md.Fields() for i := 0; i < fds.Len(); i++ { @@ -82,7 +82,7 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { mi.fields[fd.Number()] = &fi } - mi.oneofs = map[pref.Name]*oneofInfo{} + mi.oneofs = map[protoreflect.Name]*oneofInfo{} for i := 0; i < md.Oneofs().Len(); i++ { od := md.Oneofs().Get(i) mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) @@ -117,13 +117,13 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { switch { case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: // Handle as []byte. - mi.getUnknown = func(p pointer) pref.RawFields { + mi.getUnknown = func(p pointer) protoreflect.RawFields { if p.IsNil() { return nil } return *p.Apply(mi.unknownOffset).Bytes() } - mi.setUnknown = func(p pointer, b pref.RawFields) { + mi.setUnknown = func(p pointer, b protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -131,7 +131,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { } case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: // Handle as *[]byte. 
- mi.getUnknown = func(p pointer) pref.RawFields { + mi.getUnknown = func(p pointer) protoreflect.RawFields { if p.IsNil() { return nil } @@ -141,7 +141,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { } return **bp } - mi.setUnknown = func(p pointer, b pref.RawFields) { + mi.setUnknown = func(p pointer, b protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -152,10 +152,10 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { **bp = b } default: - mi.getUnknown = func(pointer) pref.RawFields { + mi.getUnknown = func(pointer) protoreflect.RawFields { return nil } - mi.setUnknown = func(p pointer, _ pref.RawFields) { + mi.setUnknown = func(p pointer, _ protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[pref.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -233,7 +233,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { type extensionMap map[int32]ExtensionField -func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { if m != nil { for _, x := range *m { xd := x.Type().TypeDescriptor() @@ -247,7 +247,7 @@ func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { } } } -func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { +func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { if m == nil { return false } @@ -266,10 +266,10 @@ func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { } return true } -func (m *extensionMap) Clear(xt pref.ExtensionType) { +func (m *extensionMap) Clear(xt protoreflect.ExtensionType) { delete(*m, int32(xt.TypeDescriptor().Number())) } -func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { +func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value { xd := xt.TypeDescriptor() if m != nil { if x, ok := (*m)[int32(xd.Number())]; ok { @@ -278,7 +278,7 @@ func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { } return xt.Zero() } -func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { +func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) { xd := xt.TypeDescriptor() isValid := true switch { @@ -302,9 +302,9 @@ func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { x.Set(xt, v) (*m)[int32(xd.Number())] = x } -func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { +func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value { xd := xt.TypeDescriptor() - if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() { + if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { panic("invalid Mutable on field with non-composite type") } if x, ok := (*m)[int32(xd.Number())]; ok { @@ -320,7 +320,6 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { // in an allocation-free way without needing to have a shadow Go type generated // for every message type. This technique only works using unsafe. 
// -// // Example generated code: // // type M struct { @@ -351,12 +350,11 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { // It has access to the message info as its first field, and a pointer to the // MessageState is identical to a pointer to the concrete message value. // -// // Requirements: -// • The type M must implement protoreflect.ProtoMessage. -// • The address of m must not be nil. -// • The address of m and the address of m.state must be equal, -// even though they are different Go types. +// - The type M must implement protoreflect.ProtoMessage. +// - The address of m must not be nil. +// - The address of m and the address of m.state must be equal, +// even though they are different Go types. type MessageState struct { pragma.NoUnkeyedLiterals pragma.DoNotCompare @@ -368,8 +366,8 @@ type MessageState struct { type messageState MessageState var ( - _ pref.Message = (*messageState)(nil) - _ unwrapper = (*messageState)(nil) + _ protoreflect.Message = (*messageState)(nil) + _ unwrapper = (*messageState)(nil) ) // messageDataType is a tuple of a pointer to the message data and @@ -387,16 +385,16 @@ type ( ) var ( - _ pref.Message = (*messageReflectWrapper)(nil) - _ unwrapper = (*messageReflectWrapper)(nil) - _ pref.ProtoMessage = (*messageIfaceWrapper)(nil) - _ unwrapper = (*messageIfaceWrapper)(nil) + _ protoreflect.Message = (*messageReflectWrapper)(nil) + _ unwrapper = (*messageReflectWrapper)(nil) + _ protoreflect.ProtoMessage = (*messageIfaceWrapper)(nil) + _ unwrapper = (*messageIfaceWrapper)(nil) ) // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. -func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { +func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -421,7 +419,7 @@ func (m *messageIfaceWrapper) Reset() { rv.Elem().Set(reflect.Zero(rv.Type().Elem())) } } -func (m *messageIfaceWrapper) ProtoReflect() pref.Message { +func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } func (m *messageIfaceWrapper) protoUnwrap() interface{} { @@ -430,7 +428,7 @@ func (m *messageIfaceWrapper) protoUnwrap() interface{} { // checkField verifies that the provided field descriptor is valid. // Exactly one of the returned values is populated. 
-func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) { +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { var fi *fieldInfo if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { fi = mi.denseFields[n] @@ -455,7 +453,7 @@ func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.Ext if !mi.Desc.ExtensionRanges().Has(fd.Number()) { panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName())) } - xtd, ok := fd.(pref.ExtensionTypeDescriptor) + xtd, ok := fd.(protoreflect.ExtensionTypeDescriptor) if !ok { panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 343cf872197f..5e736c60efc7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -11,24 +11,24 @@ import ( "sync" "google.golang.org/protobuf/internal/flags" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) type fieldInfo struct { - fieldDesc pref.FieldDescriptor + fieldDesc protoreflect.FieldDescriptor // These fields are used for protobuf reflection support. has func(pointer) bool clear func(pointer) - get func(pointer) pref.Value - set func(pointer, pref.Value) - mutable func(pointer) pref.Value - newMessage func() pref.Message - newField func() pref.Value + get func(pointer) protoreflect.Value + set func(pointer, protoreflect.Value) + mutable func(pointer) protoreflect.Value + newMessage func() protoreflect.Message + newField func() protoreflect.Value } -func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { +func fieldInfoForMissing(fd protoreflect.FieldDescriptor) fieldInfo { // This never occurs for generated message types. // It implies that a hand-crafted type has missing Go fields // for specific protobuf message fields. 
@@ -40,19 +40,19 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { clear: func(p pointer) { panic("missing Go struct field for " + string(fd.FullName())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { return fd.Default() }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { panic("missing Go struct field for " + string(fd.FullName())) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { panic("missing Go struct field for " + string(fd.FullName())) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { panic("missing Go struct field for " + string(fd.FullName())) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { if v := fd.Default(); v.IsValid() { return v } @@ -61,7 +61,7 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { } } -func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { +func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Interface { panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft)) @@ -102,7 +102,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export } rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -113,7 +113,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv = rv.Elem().Elem().Field(0) return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { rv.Set(reflect.New(ot)) @@ -121,7 +121,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv = rv.Elem().Elem().Field(0) rv.Set(conv.GoValueOf(v)) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { if !isMessage { panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName())) } @@ -131,20 +131,20 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export } rv = rv.Elem().Elem().Field(0) if rv.Kind() == reflect.Ptr && rv.IsNil() { - rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) + rv.Set(conv.GoValueOf(protoreflect.ValueOfMessage(conv.New().Message()))) } return conv.PBValueOf(rv) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { return conv.New().Message() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Map { panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft)) @@ -166,7 +166,7 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -176,7 +176,7 @@ func 
fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() pv := conv.GoValueOf(v) if pv.IsNil() { @@ -184,20 +184,20 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter } rv.Set(pv) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if v.IsNil() { v.Set(reflect.MakeMap(fs.Type)) } return conv.PBValueOf(v) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Slice { panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft)) @@ -219,7 +219,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -229,7 +229,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() pv := conv.GoValueOf(v) if pv.IsNil() { @@ -237,11 +237,11 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte } rv.Set(pv.Elem()) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { v := p.Apply(fieldOffset).AsValueOf(fs.Type) return conv.PBValueOf(v) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } @@ -252,7 +252,7 @@ var ( emptyBytes = reflect.ValueOf([]byte{}) ) -func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type nullable := fd.HasPresence() isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 @@ -300,7 +300,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -315,7 +315,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable && rv.Kind() == reflect.Ptr { if rv.IsNil() { @@ -332,23 +332,23 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor } } }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo { +func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { if !flags.ProtoLegacy { panic("no support for proto1 weak fields") } var once sync.Once - 
var messageType pref.MessageType + var messageType protoreflect.MessageType lazyInit := func() { once.Do(func() { messageName := fd.Message().FullName() - messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) if messageType == nil { panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) } @@ -368,18 +368,18 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn clear: func(p pointer) { p.Apply(weakOffset).WeakFields().clear(num) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { lazyInit() if p.IsNil() { - return pref.ValueOfMessage(messageType.Zero()) + return protoreflect.ValueOfMessage(messageType.Zero()) } m, ok := p.Apply(weakOffset).WeakFields().get(num) if !ok { - return pref.ValueOfMessage(messageType.Zero()) + return protoreflect.ValueOfMessage(messageType.Zero()) } - return pref.ValueOfMessage(m.ProtoReflect()) + return protoreflect.ValueOfMessage(m.ProtoReflect()) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { lazyInit() m := v.Message() if m.Descriptor() != messageType.Descriptor() { @@ -390,7 +390,7 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn } p.Apply(weakOffset).WeakFields().set(num, m.Interface()) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { lazyInit() fs := p.Apply(weakOffset).WeakFields() m, ok := fs.get(num) @@ -398,20 +398,20 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn m = messageType.New().Interface() fs.set(num, m) } - return pref.ValueOfMessage(m.ProtoReflect()) + return protoreflect.ValueOfMessage(m.ProtoReflect()) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { lazyInit() return messageType.New() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { lazyInit() - return pref.ValueOfMessage(messageType.New()) + return protoreflect.ValueOfMessage(messageType.New()) }, } } -func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) @@ -433,47 +433,47 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(conv.GoValueOf(v)) if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) } }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(conv.New())) } return conv.PBValueOf(rv) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { return conv.New().Message() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } type oneofInfo struct { - 
oneofDesc pref.OneofDescriptor - which func(pointer) pref.FieldNumber + oneofDesc protoreflect.OneofDescriptor + which func(pointer) protoreflect.FieldNumber } -func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo { +func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { oi := &oneofInfo{oneofDesc: od} if od.IsSynthetic() { fs := si.fieldsByNumber[od.Fields().Get(0).Number()] fieldOffset := offsetOf(fs, x) - oi.which = func(p pointer) pref.FieldNumber { + oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 } @@ -486,7 +486,7 @@ func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInf } else { fs := si.oneofsByName[od.Name()] fieldOffset := offsetOf(fs, x) - oi.which = func(p pointer) pref.FieldNumber { + oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 9e3ed821efb3..4c491bdf4825 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 9ecf23a85bb7..ee0e0573e395 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index 08cfb6054b43..a24e6bbd7a5f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -16,9 +16,9 @@ import ( "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) // ValidationStatus is the result of validating the wire-format encoding of a message. @@ -56,20 +56,20 @@ func (v ValidationStatus) String() string { // of the message type. // // This function is exposed for testing. 
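Several hunks in this update (pointer_reflect.go and pointer_unsafe.go above, and the strs and proto files further down) add a //go:build line alongside the legacy // +build comment. A minimal, hypothetical file header sketching that dual-constraint convention; the file and package names are illustrative only:

//go:build !purego && !appengine
// +build !purego,!appengine

// Hypothetical header: the //go:build line is the Go 1.17+ constraint syntax,
// while the legacy // +build line keeps older toolchains applying the same
// constraint; gofmt keeps the two lines in sync. A file with this header is
// excluded from purego and appengine builds.
package impl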
-func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) { +func Validate(mt protoreflect.MessageType, in protoiface.UnmarshalInput) (out protoiface.UnmarshalOutput, _ ValidationStatus) { mi, ok := mt.(*MessageInfo) if !ok { return out, ValidationUnknown } if in.Resolver == nil { - in.Resolver = preg.GlobalTypes + in.Resolver = protoregistry.GlobalTypes } o, st := mi.validate(in.Buf, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, }) if o.initialized { - out.Flags |= piface.UnmarshalInitialized + out.Flags |= protoiface.UnmarshalInitialized } return out, st } @@ -106,22 +106,22 @@ const ( validationTypeMessageSetItem ) -func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo { +func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { var vi validationInfo switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { vi.mi = getMessageInfo(ot.Field(0).Type) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { vi.mi = getMessageInfo(ot.Field(0).Type) } - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String } @@ -129,7 +129,7 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip default: vi = newValidationInfo(fd, ft) } - if fd.Cardinality() == pref.Required { + if fd.Cardinality() == protoreflect.Required { // Avoid overflow. 
The required field check is done with a 64-bit mask, with // any message containing more than 64 required fields always reported as // potentially uninitialized, so it is not important to get a precise count @@ -142,22 +142,22 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip return vi } -func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo { +func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { var vi validationInfo switch { case fd.IsList(): switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } - case pref.StringKind: + case protoreflect.StringKind: vi.typ = validationTypeBytes if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String @@ -175,33 +175,33 @@ func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo case fd.IsMap(): vi.typ = validationTypeMap switch fd.MapKey().Kind() { - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.keyType = validationTypeUTF8String } } switch fd.MapValue().Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.valType = validationTypeMessage if ft.Kind() == reflect.Map { vi.mi = getMessageInfo(ft.Elem()) } - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.valType = validationTypeUTF8String } } default: switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if !fd.IsWeak() { vi.mi = getMessageInfo(ft) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup vi.mi = getMessageInfo(ft) - case pref.StringKind: + case protoreflect.StringKind: vi.typ = validationTypeBytes if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String @@ -314,11 +314,11 @@ State: break } messageName := fd.Message().FullName() - messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) switch err { case nil: vi.mi, _ = messageType.(*MessageInfo) - case preg.NotFound: + case protoregistry.NotFound: vi.typ = validationTypeBytes default: return out, ValidationUnknown @@ -335,7 +335,7 @@ State: // unmarshaling to begin failing. Supporting this requires some way to // determine if the resolver is frozen. 
xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num) - if err != nil && err != preg.NotFound { + if err != nil && err != protoregistry.NotFound { return out, ValidationUnknown } if err == nil { @@ -513,7 +513,7 @@ State: } xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid) switch { - case err == preg.NotFound: + case err == protoregistry.NotFound: b = b[n:] case err != nil: return out, ValidationUnknown diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go index 009cbefd1ed2..eb79a7ba94c0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go @@ -7,7 +7,7 @@ package impl import ( "fmt" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -17,32 +17,32 @@ import ( // defined directly on it. type weakFields WeakFields -func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) { +func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { m, ok := w[int32(num)] return m, ok } -func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) { +func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { if *w == nil { *w = make(weakFields) } (*w)[int32(num)] = m } -func (w *weakFields) clear(num pref.FieldNumber) { +func (w *weakFields) clear(num protoreflect.FieldNumber) { delete(*w, int32(num)) } -func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool { +func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { _, ok := w[int32(num)] return ok } -func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) { +func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { delete(*w, int32(num)) } -func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage { +func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { if m, ok := w[int32(num)]; ok { return m } @@ -53,7 +53,7 @@ func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pr return mt.Zero().Interface() } -func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) { +func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { if m != nil { mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) if mt == nil { diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go index 2a24953f6a47..33745ed06254 100644 --- a/vendor/google.golang.org/protobuf/internal/order/order.go +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -5,12 +5,12 @@ package order import ( - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // FieldOrder specifies the ordering to visit message fields. // It is a function that reports whether x is ordered before y. -type FieldOrder func(x, y pref.FieldDescriptor) bool +type FieldOrder func(x, y protoreflect.FieldDescriptor) bool var ( // AnyFieldOrder specifies no specific field ordering. 
@@ -18,9 +18,9 @@ var ( // LegacyFieldOrder sorts fields in the same ordering as emitted by // wire serialization in the github.com/golang/protobuf implementation. - LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + LegacyFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { ox, oy := x.ContainingOneof(), y.ContainingOneof() - inOneof := func(od pref.OneofDescriptor) bool { + inOneof := func(od protoreflect.OneofDescriptor) bool { return od != nil && !od.IsSynthetic() } @@ -41,14 +41,14 @@ var ( } // NumberFieldOrder sorts fields by their field number. - NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + NumberFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { return x.Number() < y.Number() } // IndexNameFieldOrder sorts non-extension fields before extension fields. // Non-extensions are sorted according to their declaration index. // Extensions are sorted according to their full name. - IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + IndexNameFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { // Non-extension fields sort before extension fields. if x.IsExtension() != y.IsExtension() { return !x.IsExtension() && y.IsExtension() @@ -64,7 +64,7 @@ var ( // KeyOrder specifies the ordering to visit map entries. // It is a function that reports whether x is ordered before y. -type KeyOrder func(x, y pref.MapKey) bool +type KeyOrder func(x, y protoreflect.MapKey) bool var ( // AnyKeyOrder specifies no specific key ordering. @@ -72,7 +72,7 @@ var ( // GenericKeyOrder sorts false before true, numeric keys in ascending order, // and strings in lexicographical ordering according to UTF-8 codepoints. - GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool { + GenericKeyOrder KeyOrder = func(x, y protoreflect.MapKey) bool { switch x.Interface().(type) { case bool: return !x.Bool() && y.Bool() diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index c8090e0c547f..1665a68e5b7c 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -9,12 +9,12 @@ import ( "sort" "sync" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type messageField struct { - fd pref.FieldDescriptor - v pref.Value + fd protoreflect.FieldDescriptor + v protoreflect.Value } var messageFieldPool = sync.Pool{ @@ -25,8 +25,8 @@ type ( // FieldRnger is an interface for visiting all fields in a message. // The protoreflect.Message type implements this interface. FieldRanger interface{ Range(VisitField) } - // VisitField is called everytime a message field is visited. - VisitField = func(pref.FieldDescriptor, pref.Value) bool + // VisitField is called every time a message field is visited. + VisitField = func(protoreflect.FieldDescriptor, protoreflect.Value) bool ) // RangeFields iterates over the fields of fs according to the specified order. @@ -47,7 +47,7 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { }() // Collect all fields in the message and sort them. 
- fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + fs.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { fields = append(fields, messageField{fd, v}) return true }) @@ -64,8 +64,8 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { } type mapEntry struct { - k pref.MapKey - v pref.Value + k protoreflect.MapKey + v protoreflect.Value } var mapEntryPool = sync.Pool{ @@ -76,8 +76,8 @@ type ( // EntryRanger is an interface for visiting all fields in a message. // The protoreflect.Map type implements this interface. EntryRanger interface{ Range(VisitEntry) } - // VisitEntry is called everytime a map entry is visited. - VisitEntry = func(pref.MapKey, pref.Value) bool + // VisitEntry is called every time a map entry is visited. + VisitEntry = func(protoreflect.MapKey, protoreflect.Value) bool ) // RangeEntries iterates over the entries of es according to the specified order. @@ -98,7 +98,7 @@ func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { }() // Collect all entries in the map and sort them. - es.Range(func(k pref.MapKey, v pref.Value) bool { + es.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { entries = append(entries, mapEntry{k, v}) return true }) diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go index 85e074c977dc..a1f6f333860e 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index 2160c7019145..fea589c457e9 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package strs @@ -9,7 +10,7 @@ package strs import ( "unsafe" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type ( @@ -58,7 +59,7 @@ type Builder struct { // AppendFullName is equivalent to protoreflect.FullName.Append, // but optimized for large batches where each name has a shared lifetime. -func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { n := len(prefix) + len(".") + len(name) if len(prefix) == 0 { n -= len(".") @@ -67,7 +68,7 @@ func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.Ful sb.buf = append(sb.buf, prefix...) sb.buf = append(sb.buf, '.') sb.buf = append(sb.buf, name...) 
- return pref.FullName(sb.last(n)) + return protoreflect.FullName(sb.last(n)) } // MakeString is equivalent to string(b), but optimized for large batches diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 14e774fb2ec7..b480c5010f1d 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -12,47 +12,46 @@ import ( // These constants determine the current version of this module. // -// // For our release process, we enforce the following rules: -// * Tagged releases use a tag that is identical to String. -// * Tagged releases never reference a commit where the String -// contains "devel". -// * The set of all commits in this repository where String -// does not contain "devel" must have a unique String. -// +// - Tagged releases use a tag that is identical to String. +// - Tagged releases never reference a commit where the String +// contains "devel". +// - The set of all commits in this repository where String +// does not contain "devel" must have a unique String. // // Steps for tagging a new release: -// 1. Create a new CL. // -// 2. Update Minor, Patch, and/or PreRelease as necessary. -// PreRelease must not contain the string "devel". +// 1. Create a new CL. // -// 3. Since the last released minor version, have there been any changes to -// generator that relies on new functionality in the runtime? -// If yes, then increment RequiredGenerated. +// 2. Update Minor, Patch, and/or PreRelease as necessary. +// PreRelease must not contain the string "devel". // -// 4. Since the last released minor version, have there been any changes to -// the runtime that removes support for old .pb.go source code? -// If yes, then increment SupportMinimum. +// 3. Since the last released minor version, have there been any changes to +// generator that relies on new functionality in the runtime? +// If yes, then increment RequiredGenerated. // -// 5. Send out the CL for review and submit it. -// Note that the next CL in step 8 must be submitted after this CL -// without any other CLs in-between. +// 4. Since the last released minor version, have there been any changes to +// the runtime that removes support for old .pb.go source code? +// If yes, then increment SupportMinimum. // -// 6. Tag a new version, where the tag is is the current String. +// 5. Send out the CL for review and submit it. +// Note that the next CL in step 8 must be submitted after this CL +// without any other CLs in-between. // -// 7. Write release notes for all notable changes -// between this release and the last release. +// 6. Tag a new version, where the tag is is the current String. // -// 8. Create a new CL. +// 7. Write release notes for all notable changes +// between this release and the last release. // -// 9. Update PreRelease to include the string "devel". -// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// 8. Create a new CL. // -// 10. Send out the CL for review and submit it. +// 9. Update PreRelease to include the string "devel". +// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// +// 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 27 + Minor = 28 Patch = 1 PreRelease = "" ) @@ -60,6 +59,7 @@ const ( // String formats the version string for this module in semver format. 
// // Examples: +// // v1.20.1 // v1.21.0-rc.1 func String() string { diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 49f9b8c88cfd..48d47946bb1a 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -19,7 +19,8 @@ import ( // UnmarshalOptions configures the unmarshaler. // // Example usage: -// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) +// +// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) type UnmarshalOptions struct { pragma.NoUnkeyedLiterals @@ -42,18 +43,25 @@ type UnmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m Message) error { - _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) + _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } _, err := o.unmarshal(b, m.ProtoReflect()) return err } @@ -63,6 +71,9 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // This method permits fine-grained control over the unmarshaler. // Most users should use Unmarshal instead. 
func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } return o.unmarshal(in.Buf, in.Message) } @@ -86,12 +97,17 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto Message: m, Buf: b, Resolver: o.Resolver, + Depth: o.RecursionLimit, } if o.DiscardUnknown { in.Flags |= protoiface.UnmarshalDiscardUnknown } out, err = methods.Unmarshal(in) } else { + o.RecursionLimit-- + if o.RecursionLimit < 0 { + return out, errors.New("exceeded max recursion depth") + } err = o.unmarshalMessageSlow(b, m) } if err != nil { diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index c52d8c4ab79f..08d2a46f5352 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -6,18 +6,17 @@ // // For documentation on protocol buffers in general, see: // -// https://developers.google.com/protocol-buffers +// https://developers.google.com/protocol-buffers // // For a tutorial on using protocol buffers with Go, see: // -// https://developers.google.com/protocol-buffers/docs/gotutorial +// https://developers.google.com/protocol-buffers/docs/gotutorial // // For a guide to generated Go protocol buffer code, see: // -// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// https://developers.google.com/protocol-buffers/docs/reference/go-generated // -// -// Binary serialization +// # Binary serialization // // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. @@ -30,8 +29,7 @@ // • Unmarshal converts a message from the wire format. // The UnmarshalOptions type provides more control over wire unmarshaling. // -// -// Basic message operations +// # Basic message operations // // • Clone makes a deep copy of a message. // @@ -45,8 +43,7 @@ // // • CheckInitialized reports whether all required fields in a message are set. // -// -// Optional scalar constructors +// # Optional scalar constructors // // The API for some generated messages represents optional scalar fields // as pointers to a value. For example, an optional string field has the @@ -61,16 +58,14 @@ // // Optional scalar fields are only supported in proto2. // -// -// Extension accessors +// # Extension accessors // // • HasExtension, GetExtension, SetExtension, and ClearExtension // access extension field values in a protocol buffer message. // // Extension fields are only supported in proto2. // -// -// Related packages +// # Related packages // // • Package "google.golang.org/protobuf/encoding/protojson" converts messages to // and from JSON. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index d18239c23723..bf7f816d0e86 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -16,7 +16,8 @@ import ( // MarshalOptions configures the marshaler. // // Example usage: -// b, err := MarshalOptions{Deterministic: true}.Marshal(m) +// +// b, err := MarshalOptions{Deterministic: true}.Marshal(m) type MarshalOptions struct { pragma.NoUnkeyedLiterals @@ -101,7 +102,9 @@ func (o MarshalOptions) Marshal(m Message) ([]byte, error) { // otherwise it returns a non-nil empty buffer. 
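The decode.go hunks above add a RecursionLimit field to proto.UnmarshalOptions (falling back to protowire.DefaultRecursionLimit when left zero), and the encode.go doc comment shows MarshalOptions literal usage. A hedged sketch of both options from the public API; the structpb well-known type is used only to keep the example self-contained, and the RecursionLimit value of 64 is illustrative:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Any generated message works here; a well-known type keeps this self-contained.
	msg, err := structpb.NewStruct(map[string]interface{}{"answer": 42})
	if err != nil {
		log.Fatal(err)
	}

	// Deterministic marshaling, as in the MarshalOptions example usage above.
	b, err := proto.MarshalOptions{Deterministic: true}.Marshal(msg)
	if err != nil {
		log.Fatal(err)
	}

	// Unmarshal with an explicit recursion limit; when RecursionLimit is zero,
	// the new code falls back to protowire.DefaultRecursionLimit.
	var out structpb.Struct
	err = proto.UnmarshalOptions{
		DiscardUnknown: true,
		RecursionLimit: 64, // illustrative value
	}.Unmarshal(b, &out)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Fields["answer"].GetNumberValue()) // 42
}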
// // This is to assist the edge-case where user-code does the following: +// // m1.OptionalBytes, _ = proto.Marshal(m2) +// // where they expect the proto2 "optional_bytes" field to be populated // if any only if m2 is a valid message. func emptyBytesForMessage(m Message) []byte { diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 4dba2b969972..67948dd1df8c 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -10,7 +10,7 @@ import ( "reflect" "google.golang.org/protobuf/encoding/protowire" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // Equal reports whether two messages are equal. @@ -33,6 +33,10 @@ func Equal(x, y Message) bool { if x == nil || y == nil { return x == nil && y == nil } + if reflect.TypeOf(x).Kind() == reflect.Ptr && x == y { + // Avoid an expensive comparison if both inputs are identical pointers. + return true + } mx := x.ProtoReflect() my := y.ProtoReflect() if mx.IsValid() != my.IsValid() { @@ -42,14 +46,14 @@ func Equal(x, y Message) bool { } // equalMessage compares two messages. -func equalMessage(mx, my pref.Message) bool { +func equalMessage(mx, my protoreflect.Message) bool { if mx.Descriptor() != my.Descriptor() { return false } nx := 0 equal := true - mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { nx++ vy := my.Get(fd) equal = my.Has(fd) && equalField(fd, vx, vy) @@ -59,7 +63,7 @@ func equalMessage(mx, my pref.Message) bool { return false } ny := 0 - my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + my.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { ny++ return true }) @@ -71,7 +75,7 @@ func equalMessage(mx, my pref.Message) bool { } // equalField compares two fields. -func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { +func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { switch { case fd.IsList(): return equalList(fd, x.List(), y.List()) @@ -83,12 +87,12 @@ func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { } // equalMap compares two maps. -func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { +func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool { if x.Len() != y.Len() { return false } equal := true - x.Range(func(k pref.MapKey, vx pref.Value) bool { + x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { vy := y.Get(k) equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) return equal @@ -97,7 +101,7 @@ func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { } // equalList compares two lists. -func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { +func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool { if x.Len() != y.Len() { return false } @@ -110,31 +114,31 @@ func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { } // equalValue compares two singular values. 
-func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { +func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return x.Bool() == y.Bool() - case pref.EnumKind: + case protoreflect.EnumKind: return x.Enum() == y.Enum() - case pref.Int32Kind, pref.Sint32Kind, - pref.Int64Kind, pref.Sint64Kind, - pref.Sfixed32Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, + protoreflect.Int64Kind, protoreflect.Sint64Kind, + protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: return x.Int() == y.Int() - case pref.Uint32Kind, pref.Uint64Kind, - pref.Fixed32Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, + protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: return x.Uint() == y.Uint() - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: fx := x.Float() fy := y.Float() if math.IsNaN(fx) || math.IsNaN(fy) { return math.IsNaN(fx) && math.IsNaN(fy) } return fx == fy - case pref.StringKind: + case protoreflect.StringKind: return x.String() == y.String() - case pref.BytesKind: + case protoreflect.BytesKind: return bytes.Equal(x.Bytes(), y.Bytes()) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return equalMessage(x.Message(), y.Message()) default: return x.Interface() == y.Interface() @@ -143,7 +147,7 @@ func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { // equalUnknown compares unknown fields by direct comparison on the raw bytes // of each individual field number. -func equalUnknown(x, y pref.RawFields) bool { +func equalUnknown(x, y protoreflect.RawFields) bool { if len(x) != len(y) { return false } @@ -151,8 +155,8 @@ func equalUnknown(x, y pref.RawFields) bool { return true } - mx := make(map[pref.FieldNumber]pref.RawFields) - my := make(map[pref.FieldNumber]pref.RawFields) + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) for len(x) > 0 { fnum, _, n := protowire.ConsumeField(x) mx[fnum] = append(mx[fnum], x[:n]...) diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go index d8dd604f6b67..465e057b3238 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_methods.go +++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. +//go:build !protoreflect // +build !protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go index b103d43205c4..494d6ceef9e6 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_reflect.go +++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. 
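The equal.go hunk above adds an identical-pointer fast path to proto.Equal, skipping the reflective field walk when both arguments are the same pointer. A short sketch of the observable behavior, using the wrapperspb well-known type only so the example is self-contained:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.String("hello")
	b := wrapperspb.String("hello")
	c := a // identical pointer to a

	fmt.Println(proto.Equal(a, b)) // true: structurally equal messages
	fmt.Println(proto.Equal(a, c)) // true: identical pointers short-circuit via the new fast path
	fmt.Println(proto.Equal(a, wrapperspb.String("world"))) // false: different contents
}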
+//go:build protoreflect // +build protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index cebb36cdade6..27d7e35012d3 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -155,9 +155,9 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, // // Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", // then the following full names are searched: -// * fizz.buzz.Foo.Bar -// * fizz.Foo.Bar -// * Foo.Bar +// - fizz.buzz.Foo.Bar +// - fizz.Foo.Bar +// - Foo.Bar func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { if !ref.IsValid() { return nil, errors.New("invalid name reference: %q", ref) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index 6be5d16e9f37..d5d5af6ebedb 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -53,6 +53,7 @@ type ( FindExtensionByName(field FullName) (ExtensionType, error) FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) } + Depth int } unmarshalOutput = struct { pragma.NoUnkeyedLiterals diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index dd85915bd4bf..55aa14922b01 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -8,8 +8,7 @@ // defined in proto source files and value interfaces which provide the // ability to examine and manipulate the contents of messages. // -// -// Protocol Buffer Descriptors +// # Protocol Buffer Descriptors // // Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) // are immutable objects that represent protobuf type information. @@ -26,8 +25,7 @@ // The "google.golang.org/protobuf/reflect/protodesc" package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // -// -// Go Type Descriptors +// # Go Type Descriptors // // A type descriptor (e.g., EnumType or MessageType) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. @@ -41,8 +39,7 @@ // The "google.golang.org/protobuf/types/dynamicpb" package can be used to // create Go type descriptors from protobuf descriptors. // -// -// Value Interfaces +// # Value Interfaces // // The Enum and Message interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve @@ -55,13 +52,11 @@ // The "github.com/golang/protobuf/proto".MessageReflect function can be used // to obtain a reflective view on older messages. // -// -// Relationships +// # Relationships // // The following diagrams demonstrate the relationships between // various types declared in this package. // -// // ┌───────────────────────────────────┐ // V │ // ┌────────────── New(n) ─────────────┐ │ @@ -83,7 +78,6 @@ // // • An Enum is a concrete enum instance. Generated enums implement Enum. 
// -// // ┌──────────────── New() ─────────────────┐ // │ │ // │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐ @@ -98,12 +92,22 @@ // // • A MessageType describes a concrete Go message type. // It has a MessageDescriptor and can construct a Message instance. +// Just as how Go's reflect.Type is a reflective description of a Go type, +// a MessageType is a reflective description of a Go type for a protobuf message. // // • A MessageDescriptor describes an abstract protobuf message type. -// -// • A Message is a concrete message instance. Generated messages implement -// ProtoMessage, which can convert to/from a Message. -// +// It has no understanding of Go types. In order to construct a MessageType +// from just a MessageDescriptor, you can consider looking up the message type +// in the global registry using protoregistry.GlobalTypes.FindMessageByName +// or constructing a dynamic MessageType using dynamicpb.NewMessageType. +// +// • A Message is a reflective view over a concrete message instance. +// Generated messages implement ProtoMessage, which can convert to a Message. +// Just as how Go's reflect.Value is a reflective view over a Go value, +// a Message is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the ProtoReflect method is similar to +// calling reflect.ValueOf, and the Message.Interface method is similar to +// calling reflect.Value.Interface. // // ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ // │ V │ V diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go index 121ba3a07bba..0b99428855fe 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -87,6 +87,7 @@ func (p1 SourcePath) Equal(p2 SourcePath) bool { // in a future version of this module. // // Example output: +// // .message_type[6].nested_type[15].field[3] func (p SourcePath) String() string { b := p.appendFileDescriptorProto(nil) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 8e53c44a9188..3867470d30ac 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -480,6 +480,7 @@ type ExtensionDescriptors interface { // relative to the parent that it is declared within. // // For example: +// // syntax = "proto2"; // package example; // message FooMessage { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 918e685e1d57..7ced876f4e89 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
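The expanded package documentation above leans on the reflect.ValueOf / reflect.Value.Interface analogy for Message, MessageType, and MessageDescriptor. A short sketch of that round trip plus a registry lookup, using a well-known type; the printed values in the comments are illustrative.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	concrete := wrapperspb.String("hello")

	// Message: a reflective view over the concrete instance (like reflect.ValueOf).
	m := concrete.ProtoReflect()
	fmt.Println(m.Descriptor().FullName()) // google.protobuf.StringValue

	// Interface: back to the concrete message (like reflect.Value.Interface).
	again := m.Interface().(*wrapperspb.StringValue)
	fmt.Println(again.GetValue()) // hello

	// MessageType: looked up by full name in the global registry and used as a
	// constructor, since a MessageDescriptor alone knows nothing about Go types.
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.StringValue")
	if err != nil {
		panic(err)
	}
	fresh := mt.New()
	fmt.Println(fresh.Descriptor().FullName()) // google.protobuf.StringValue
}
```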
+//go:build purego || appengine // +build purego appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 5a3414724193..ca8e28c5bc8b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -41,6 +41,32 @@ import ( // Converting to/from a Value and a concrete Go value panics on type mismatch. // For example, ValueOf("hello").Int() panics because this attempts to // retrieve an int64 from a string. +// +// List, Map, and Message Values are called "composite" values. +// +// A composite Value may alias (reference) memory at some location, +// such that changes to the Value updates the that location. +// A composite value acquired with a Mutable method, such as Message.Mutable, +// always references the source object. +// +// For example: +// +// // Append a 0 to a "repeated int32" field. +// // Since the Value returned by Mutable is guaranteed to alias +// // the source message, modifying the Value modifies the message. +// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) +// +// // Assign [0] to a "repeated int32" field by creating a new Value, +// // modifying it, and assigning it. +// list := message.NewField(fieldDesc).(List) +// list.Append(protoreflect.ValueOfInt32(0)) +// message.Set(fieldDesc, list) +// // ERROR: Since it is not defined whether Set aliases the source, +// // appending to the List here may or may not modify the message. +// list.Append(protoreflect.ValueOfInt32(0)) +// +// Some operations, such as Message.Get, may return an "empty, read-only" +// composite Value. Modifying an empty, read-only value panics. type Value value // The protoreflect API uses a custom Value union type instead of interface{} @@ -367,6 +393,7 @@ func (v Value) MapKey() MapKey { // ╚═════════╧═════════════════════════════════════╝ // // A MapKey is constructed and accessed through a Value: +// // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go index c45debdcac6c..702ddf22a274 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 59f024c444fc..58352a6978be 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -30,9 +30,11 @@ import ( // conflictPolicy configures the policy for handling registration conflicts. 
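The new doc text on composite values above distinguishes aliasing (Mutable) from detached values (NewField plus Set). A hedged sketch of that distinction using FieldMask's repeated `paths` field; the path strings are made up.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	fm := &fieldmaskpb.FieldMask{}
	m := fm.ProtoReflect()
	fd := m.Descriptor().Fields().ByName("paths") // repeated string

	// NewField returns a detached List; it only becomes part of the message via Set.
	list := m.NewField(fd).List()
	list.Append(protoreflect.ValueOfString("user.display_name"))
	m.Set(fd, protoreflect.ValueOfList(list))
	// Appending to list after Set would be undefined, per the comment above.

	// Mutable is guaranteed to alias the message, so this append needs no Set.
	m.Mutable(fd).List().Append(protoreflect.ValueOfString("user.email"))

	fmt.Println(fm.GetPaths()) // [user.display_name user.email]
}
```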
// // It can be over-written at compile time with a linker-initialized variable: +// // go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" // // It can be over-written at program execution with an environment variable: +// // GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main // // Neither of the above are covered by the compatibility promise and diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 32c04f67eb73..44cf467d8845 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -103,6 +103,7 @@ type UnmarshalInput = struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + Depth int } // UnmarshalOutput is output from the Unmarshal method. diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go index ff094e1ba44b..a105cb23e033 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go @@ -26,16 +26,19 @@ const ( // EnforceVersion is used by code generated by protoc-gen-go // to statically enforce minimum and maximum versions of this package. // A compilation failure implies either that: -// * the runtime package is too old and needs to be updated OR -// * the generated code is too old and needs to be regenerated. +// - the runtime package is too old and needs to be updated OR +// - the generated code is too old and needs to be regenerated. // // The runtime package can be upgraded by running: +// // go get google.golang.org/protobuf // // The generated code can be regenerated by running: +// // protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES} // // Example usage by generated code: +// // const ( // // Verify that this generated code is sufficiently up-to-date. // _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion) @@ -49,6 +52,7 @@ const ( type EnforceVersion uint // This enforces the following invariant: +// // MinVersion ≤ GenVersion ≤ MaxVersion const ( _ = EnforceVersion(GenVersion - MinVersion) diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index 7f94443d2699..1b2085d46905 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -394,7 +394,7 @@ func numValidPaths(m proto.Message, paths []string) int { // Identify the next message to search within. md = fd.Message() // may be nil - // Repeated fields are only allowed at the last postion. + // Repeated fields are only allowed at the last position. 
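The fieldmaskpb hunk above only touches a comment in the path-validation helper; for context, a small sketch of the public API that sits on top of it. The message and path names are illustrative.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/fieldmaskpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// New validates every path against the message's descriptor up front.
	fm, err := fieldmaskpb.New(&wrapperspb.StringValue{}, "value")
	if err != nil {
		panic(err)
	}
	fmt.Println(fm.Paths)                              // [value]
	fmt.Println(fm.IsValid(&wrapperspb.StringValue{})) // true

	// Unknown field names are rejected at construction time.
	_, err = fieldmaskpb.New(&wrapperspb.StringValue{}, "no_such_field")
	fmt.Println(err != nil) // true
}
```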
if fd.IsList() || fd.IsMap() { md = nil } diff --git a/vendor/gopkg.in/ini.v1/.editorconfig b/vendor/gopkg.in/ini.v1/.editorconfig new file mode 100644 index 000000000000..4a2d9180f96f --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.editorconfig @@ -0,0 +1,12 @@ +# http://editorconfig.org + +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[*_test.go] +trim_trailing_whitespace = false diff --git a/vendor/gopkg.in/ini.v1/.gitignore b/vendor/gopkg.in/ini.v1/.gitignore index 12411127b39e..588388bda28d 100644 --- a/vendor/gopkg.in/ini.v1/.gitignore +++ b/vendor/gopkg.in/ini.v1/.gitignore @@ -4,3 +4,4 @@ ini.sublime-workspace testdata/conf_reflect.ini .idea /.vscode +.DS_Store diff --git a/vendor/gopkg.in/ini.v1/.golangci.yml b/vendor/gopkg.in/ini.v1/.golangci.yml new file mode 100644 index 000000000000..631e369254d3 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.golangci.yml @@ -0,0 +1,27 @@ +linters-settings: + staticcheck: + checks: [ + "all", + "-SA1019" # There are valid use cases of strings.Title + ] + nakedret: + max-func-lines: 0 # Disallow any unnamed return statement + +linters: + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + - nakedret + - gofmt + - rowserrcheck + - unconvert + - goimports + - unparam diff --git a/vendor/gopkg.in/ini.v1/README.md b/vendor/gopkg.in/ini.v1/README.md index 5d65658b2941..30606d9700a8 100644 --- a/vendor/gopkg.in/ini.v1/README.md +++ b/vendor/gopkg.in/ini.v1/README.md @@ -1,6 +1,6 @@ # INI -[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/go-ini/ini/Go?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=workflow%3AGo) +[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain) [![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini) [![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) @@ -24,7 +24,7 @@ Package ini provides INI file read and write functionality in Go. ## Installation -The minimum requirement of Go is **1.6**. +The minimum requirement of Go is **1.13**. ```sh $ go get gopkg.in/ini.v1 diff --git a/vendor/gopkg.in/ini.v1/codecov.yml b/vendor/gopkg.in/ini.v1/codecov.yml index fc947f230820..e02ec84bc05f 100644 --- a/vendor/gopkg.in/ini.v1/codecov.yml +++ b/vendor/gopkg.in/ini.v1/codecov.yml @@ -4,6 +4,13 @@ coverage: project: default: threshold: 1% + informational: true + patch: + defualt: + only_pulls: true + informational: true comment: - layout: 'diff, files' + layout: 'diff' + +github_checks: false diff --git a/vendor/gopkg.in/ini.v1/deprecated.go b/vendor/gopkg.in/ini.v1/deprecated.go index e8bda06e6ffe..48b8e66d6d6f 100644 --- a/vendor/gopkg.in/ini.v1/deprecated.go +++ b/vendor/gopkg.in/ini.v1/deprecated.go @@ -14,12 +14,9 @@ package ini -const ( +var ( // Deprecated: Use "DefaultSection" instead. DEFAULT_SECTION = DefaultSection -) - -var ( // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. 
AllCapsUnderscore = SnackCase ) diff --git a/vendor/gopkg.in/ini.v1/error.go b/vendor/gopkg.in/ini.v1/error.go index d88347c54bf6..f66bc94b8b69 100644 --- a/vendor/gopkg.in/ini.v1/error.go +++ b/vendor/gopkg.in/ini.v1/error.go @@ -32,3 +32,18 @@ func IsErrDelimiterNotFound(err error) bool { func (err ErrDelimiterNotFound) Error() string { return fmt.Sprintf("key-value delimiter not found: %s", err.Line) } + +// ErrEmptyKeyName indicates the error type of no key name is found which there should be one. +type ErrEmptyKeyName struct { + Line string +} + +// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName. +func IsErrEmptyKeyName(err error) bool { + _, ok := err.(ErrEmptyKeyName) + return ok +} + +func (err ErrEmptyKeyName) Error() string { + return fmt.Sprintf("empty key name: %s", err.Line) +} diff --git a/vendor/gopkg.in/ini.v1/file.go b/vendor/gopkg.in/ini.v1/file.go index f95606f90fa3..f8b22408be51 100644 --- a/vendor/gopkg.in/ini.v1/file.go +++ b/vendor/gopkg.in/ini.v1/file.go @@ -55,6 +55,9 @@ func newFile(dataSources []dataSource, opts LoadOptions) *File { if len(opts.KeyValueDelimiterOnWrite) == 0 { opts.KeyValueDelimiterOnWrite = "=" } + if len(opts.ChildSectionDelimiter) == 0 { + opts.ChildSectionDelimiter = "." + } return &File{ BlockMode: true, @@ -82,7 +85,7 @@ func (f *File) NewSection(name string) (*Section, error) { return nil, errors.New("empty section name") } - if f.options.Insensitive && name != DefaultSection { + if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { name = strings.ToLower(name) } @@ -139,12 +142,18 @@ func (f *File) GetSection(name string) (*Section, error) { return secs[0], err } +// HasSection returns true if the file contains a section with given name. +func (f *File) HasSection(name string) bool { + section, _ := f.GetSection(name) + return section != nil +} + // SectionsByName returns all sections with given name. func (f *File) SectionsByName(name string) ([]*Section, error) { if len(name) == 0 { name = DefaultSection } - if f.options.Insensitive { + if f.options.Insensitive || f.options.InsensitiveSections { name = strings.ToLower(name) } @@ -165,8 +174,9 @@ func (f *File) SectionsByName(name string) ([]*Section, error) { func (f *File) Section(name string) *Section { sec, err := f.GetSection(name) if err != nil { - // Note: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. + if name == "" { + name = DefaultSection + } sec, _ = f.NewSection(name) return sec } @@ -236,7 +246,7 @@ func (f *File) DeleteSectionWithIndex(name string, index int) error { if len(name) == 0 { name = DefaultSection } - if f.options.Insensitive { + if f.options.Insensitive || f.options.InsensitiveSections { name = strings.ToLower(name) } @@ -299,6 +309,9 @@ func (f *File) Reload() (err error) { } return err } + if f.options.ShortCircuit { + return nil + } } return nil } @@ -329,6 +342,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { // Use buffer to make sure target is safe until finish encoding. 
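file.go above gains File.HasSection and error.go gains the ErrEmptyKeyName type. A hedged sketch of both from the caller's side; the INI content is made up, and the empty-key case assumes the parser surfaces ErrEmptyKeyName unwrapped.

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load([]byte("[server]\nhost = example.local\nport = 8080\n"))
	if err != nil {
		panic(err)
	}

	fmt.Println(cfg.HasSection("server"))                       // true
	fmt.Println(cfg.HasSection("missing"))                      // false
	fmt.Println(cfg.Section("server").Key("port").MustInt(80))  // 8080

	// A delimiter with no key name in front of it is now a distinct error type.
	_, err = ini.Load([]byte("= value-without-key\n"))
	fmt.Println(ini.IsErrEmptyKeyName(err)) // true
}
```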
buf := bytes.NewBuffer(nil) + lastSectionIdx := len(f.sectionList) - 1 for i, sname := range f.sectionList { sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) if len(sec.Comment) > 0 { @@ -347,7 +361,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { } } - if i > 0 || DefaultHeader { + if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { return nil, err } @@ -358,12 +372,13 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { } } + isLastSection := i == lastSectionIdx if sec.isRawSection { if _, err := buf.WriteString(sec.rawBody); err != nil { return nil, err } - if PrettySection { + if PrettySection && !isLastSection { // Put a line between sections if _, err := buf.WriteString(LineBreak); err != nil { return nil, err @@ -429,16 +444,14 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { kname = `"""` + kname + `"""` } - for _, val := range key.ValueWithShadows() { + writeKeyValue := func(val string) (bool, error) { if _, err := buf.WriteString(kname); err != nil { - return nil, err + return false, err } if key.isBooleanType { - if kname != sec.keyList[len(sec.keyList)-1] { - buf.WriteString(LineBreak) - } - continue KeyList + buf.WriteString(LineBreak) + return true, nil } // Write out alignment spaces before "=" sign @@ -451,9 +464,28 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { val = `"""` + val + `"""` } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { val = "`" + val + "`" + } else if len(strings.TrimSpace(val)) != len(val) { + val = `"` + val + `"` } if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return false, err + } + return false, nil + } + + shadows := key.ValueWithShadows() + if len(shadows) == 0 { + if _, err := writeKeyValue(""); err != nil { + return nil, err + } + } + + for _, val := range shadows { + exitLoop, err := writeKeyValue(val) + if err != nil { return nil, err + } else if exitLoop { + continue KeyList } } @@ -464,7 +496,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { } } - if PrettySection { + if PrettySection && !isLastSection { // Put a line between sections if _, err := buf.WriteString(LineBreak); err != nil { return nil, err @@ -494,7 +526,7 @@ func (f *File) WriteTo(w io.Writer) (int64, error) { // SaveToIndent writes content to file system with given value indention. func (f *File) SaveToIndent(filename, indent string) error { // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename afte done. + // so it's safer to save to a temporary file location and rename after done. buf, err := f.writeToBuffer(indent) if err != nil { return err diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go index 2961543f918b..99e7f86511a4 100644 --- a/vendor/gopkg.in/ini.v1/ini.go +++ b/vendor/gopkg.in/ini.v1/ini.go @@ -1,5 +1,3 @@ -// +build go1.6 - // Copyright 2014 Unknwon // // Licensed under the Apache License, Version 2.0 (the "License"): you may @@ -25,15 +23,15 @@ import ( ) const ( - // DefaultSection is the name of default section. You can use this constant or the string literal. - // In most of cases, an empty string is all you need to access the section. - DefaultSection = "DEFAULT" - // Maximum allowed depth when recursively substituing variable names. 
depthValues = 99 ) var ( + // DefaultSection is the name of default section. You can use this var or the string literal. + // In most of cases, an empty string is all you need to access the section. + DefaultSection = "DEFAULT" + // LineBreak is the delimiter to determine or compose a new line. // This variable will be changed to "\r\n" automatically on Windows at package init time. LineBreak = "\n" @@ -71,12 +69,18 @@ type LoadOptions struct { Loose bool // Insensitive indicates whether the parser forces all section and key names to lowercase. Insensitive bool + // InsensitiveSections indicates whether the parser forces all section to lowercase. + InsensitiveSections bool + // InsensitiveKeys indicates whether the parser forces all key names to lowercase. + InsensitiveKeys bool // IgnoreContinuation indicates whether to ignore continuation lines while parsing. IgnoreContinuation bool // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. IgnoreInlineComment bool // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. SkipUnrecognizableLines bool + // ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source. + ShortCircuit bool // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. // This type of keys are mostly used in my.cnf. AllowBooleanKeys bool @@ -107,8 +111,10 @@ type LoadOptions struct { UnparseableSections []string // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". KeyValueDelimiters string - // KeyValueDelimiters is the delimiter that are used to separate key and value output. By default, it is "=". + // KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=". KeyValueDelimiterOnWrite string + // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".". + ChildSectionDelimiter string // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). PreserveSurroundedQuote bool // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). @@ -117,6 +123,8 @@ type LoadOptions struct { ReaderBufferSize int // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times. AllowNonUniqueSections bool + // AllowDuplicateShadowValues indicates whether values for shadowed keys should be deduplicated. + AllowDuplicateShadowValues bool } // DebugFunc is the type of function called to log parse events. diff --git a/vendor/gopkg.in/ini.v1/key.go b/vendor/gopkg.in/ini.v1/key.go index 8baafd9ea6d0..a19d9f38ef14 100644 --- a/vendor/gopkg.in/ini.v1/key.go +++ b/vendor/gopkg.in/ini.v1/key.go @@ -54,14 +54,16 @@ func (k *Key) addShadow(val string) error { return errors.New("cannot add shadow to auto-increment or boolean key") } - // Deduplicate shadows based on their values. - if k.value == val { - return nil - } - for i := range k.shadows { - if k.shadows[i].value == val { + if !k.s.f.options.AllowDuplicateShadowValues { + // Deduplicate shadows based on their values. 
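The LoadOptions additions above (InsensitiveSections, InsensitiveKeys, ShortCircuit, ChildSectionDelimiter, AllowDuplicateShadowValues) are all opt-in. A hedged sketch wiring a few of them through LoadSources, together with the child-section delimiter that section.go now honours; the configuration data is illustrative.

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	data := []byte("[Service]\nTimeout = 30\n\n[Service::Cache]\nSize = 128\n")

	cfg, err := ini.LoadSources(ini.LoadOptions{
		InsensitiveSections:   true, // fold section names to lower case
		InsensitiveKeys:       true, // fold key names to lower case
		ChildSectionDelimiter: "::", // override the default "." for child sections
		// ShortCircuit: true,       // stop after the first data source that loads
	}, data)
	if err != nil {
		panic(err)
	}

	child := cfg.Section("service::cache")
	fmt.Println(child.Key("size").String())    // 128
	fmt.Println(child.Key("timeout").String()) // 30: falls back to the [Service] parent
}
```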
+ if k.value == val { return nil } + for i := range k.shadows { + if k.shadows[i].value == val { + return nil + } + } } shadow := newKey(k.s, k.name, val) @@ -108,15 +110,24 @@ func (k *Key) Value() string { return k.value } -// ValueWithShadows returns raw values of key and its shadows if any. +// ValueWithShadows returns raw values of key and its shadows if any. Shadow +// keys with empty values are ignored from the returned list. func (k *Key) ValueWithShadows() []string { if len(k.shadows) == 0 { + if k.value == "" { + return []string{} + } return []string{k.value} } - vals := make([]string, len(k.shadows)+1) - vals[0] = k.value - for i := range k.shadows { - vals[i+1] = k.shadows[i].value + + vals := make([]string, 0, len(k.shadows)+1) + if k.value != "" { + vals = append(vals, k.value) + } + for _, s := range k.shadows { + if s.value != "" { + vals = append(vals, s.value) + } } return vals } @@ -781,10 +792,8 @@ func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]u return vals, err } - type Parser func(str string) (interface{}, error) - // parseTimesFormat transforms strings to times in given format. func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { vals := make([]time.Time, 0, len(strs)) @@ -801,7 +810,6 @@ func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnO return vals, err } - // doParse transforms strings to different types func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { vals := make([]interface{}, 0, len(strs)) diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go index ea6c08b02918..44fc526c2cb6 100644 --- a/vendor/gopkg.in/ini.v1/parser.go +++ b/vendor/gopkg.in/ini.v1/parser.go @@ -131,7 +131,7 @@ func readKeyName(delimiters string, in []byte) (string, int, error) { // Check if key name surrounded by quotes. 
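ValueWithShadows above now drops empty values, and deduplication of shadow values becomes optional via AllowDuplicateShadowValues. A hedged sketch with shadow loading enabled; the key names and values are made up.

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	data := []byte("[deps]\nrequire = pkg-a\nrequire = pkg-b\nrequire = pkg-a\nrequire =\n")

	cfg, err := ini.LoadSources(ini.LoadOptions{
		AllowShadows: true,
		// AllowDuplicateShadowValues: true, // would keep the repeated "pkg-a"
	}, data)
	if err != nil {
		panic(err)
	}

	// The empty value is skipped and the duplicate collapsed by default.
	fmt.Println(cfg.Section("deps").Key("require").ValueWithShadows()) // [pkg-a pkg-b]
}
```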
var keyQuote string if line[0] == '"' { - if len(line) > 6 && string(line[0:3]) == `"""` { + if len(line) > 6 && line[0:3] == `"""` { keyQuote = `"""` } else { keyQuote = `"` @@ -164,6 +164,10 @@ func readKeyName(delimiters string, in []byte) (string, int, error) { if endIdx < 0 { return "", -1, ErrDelimiterNotFound{line} } + if endIdx == 0 { + return "", -1, ErrEmptyKeyName{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil } @@ -232,7 +236,7 @@ func (p *parser) readValue(in []byte, bufferSize int) (string, error) { } var valQuote string - if len(line) > 3 && string(line[0:3]) == `"""` { + if len(line) > 3 && line[0:3] == `"""` { valQuote = `"""` } else if line[0] == '`' { valQuote = "`" @@ -289,12 +293,8 @@ func (p *parser) readValue(in []byte, bufferSize int) (string, error) { hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { line = line[1 : len(line)-1] } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { - if strings.Contains(line, `\;`) { - line = strings.Replace(line, `\;`, ";", -1) - } - if strings.Contains(line, `\#`) { - line = strings.Replace(line, `\#`, "#", -1) - } + line = strings.ReplaceAll(line, `\;`, ";") + line = strings.ReplaceAll(line, `\#`, "#") } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { return p.readPythonMultilines(line, bufferSize) } @@ -306,15 +306,9 @@ func (p *parser) readPythonMultilines(line string, bufferSize int) (string, erro parserBufferPeekResult, _ := p.buf.Peek(bufferSize) peekBuffer := bytes.NewBuffer(parserBufferPeekResult) - indentSize := 0 for { peekData, peekErr := peekBuffer.ReadBytes('\n') - if peekErr != nil { - if peekErr == io.EOF { - p.debug("readPythonMultilines: io.EOF, peekData: %q, line: %q", string(peekData), line) - return line, nil - } - + if peekErr != nil && peekErr != io.EOF { p.debug("readPythonMultilines: failed to peek with error: %v", peekErr) return "", peekErr } @@ -333,19 +327,6 @@ func (p *parser) readPythonMultilines(line string, bufferSize int) (string, erro return line, nil } - // Determine indent size and line prefix. - currentIndentSize := len(peekMatches[1]) - if indentSize < 1 { - indentSize = currentIndentSize - p.debug("readPythonMultilines: indent size is %d", indentSize) - } - - // Make sure each line is indented at least as far as first line. - if currentIndentSize < indentSize { - p.debug("readPythonMultilines: end of value, current indent: %d, expected indent: %d, line: %q", currentIndentSize, indentSize, line) - return line, nil - } - // Advance the parser reader (buffer) in-sync with the peek buffer. _, err := p.buf.Discard(len(peekData)) if err != nil { @@ -353,8 +334,7 @@ func (p *parser) readPythonMultilines(line string, bufferSize int) (string, erro return "", err } - // Handle indented empty line. - line += "\n" + peekMatches[1][indentSize:] + peekMatches[2] + line += "\n" + peekMatches[0] } } @@ -377,7 +357,7 @@ func (f *File) parse(reader io.Reader) (err error) { // Ignore error because default section name is never empty string. 
name := DefaultSection - if f.options.Insensitive { + if f.options.Insensitive || f.options.InsensitiveSections { name = strings.ToLower(DefaultSection) } section, _ := f.NewSection(name) @@ -465,11 +445,13 @@ func (f *File) parse(reader io.Reader) (err error) { // Reset auto-counter and comments p.comment.Reset() p.count = 1 + // Nested values can't span sections + isLastValueEmpty = false inUnparseableSection = false for i := range f.options.UnparseableSections { if f.options.UnparseableSections[i] == name || - (f.options.Insensitive && strings.EqualFold(f.options.UnparseableSections[i], name)) { + ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { inUnparseableSection = true continue } @@ -485,8 +467,9 @@ func (f *File) parse(reader io.Reader) (err error) { kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) if err != nil { + switch { // Treat as boolean key when desired, and whole line is key name. - if IsErrDelimiterNotFound(err) { + case IsErrDelimiterNotFound(err): switch { case f.options.AllowBooleanKeys: kname, err := p.readValue(line, parserBufferSize) @@ -504,6 +487,8 @@ func (f *File) parse(reader io.Reader) (err error) { case f.options.SkipUnrecognizableLines: continue } + case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines: + continue } return err } diff --git a/vendor/gopkg.in/ini.v1/section.go b/vendor/gopkg.in/ini.v1/section.go index 6ba5ac2905c8..a3615d820b7a 100644 --- a/vendor/gopkg.in/ini.v1/section.go +++ b/vendor/gopkg.in/ini.v1/section.go @@ -66,7 +66,7 @@ func (s *Section) SetBody(body string) { func (s *Section) NewKey(name, val string) (*Key, error) { if len(name) == 0 { return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive { + } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { name = strings.ToLower(name) } @@ -109,7 +109,7 @@ func (s *Section) GetKey(name string) (*Key, error) { if s.f.BlockMode { s.f.lock.RLock() } - if s.f.options.Insensitive { + if s.f.options.Insensitive || s.f.options.InsensitiveKeys { name = strings.ToLower(name) } key := s.keys[name] @@ -121,7 +121,7 @@ func (s *Section) GetKey(name string) (*Key, error) { // Check if it is a child-section. sname := s.name for { - if i := strings.LastIndex(sname, "."); i > -1 { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { sname = sname[:i] sec, err := s.f.GetSection(sname) if err != nil { @@ -188,7 +188,7 @@ func (s *Section) ParentKeys() []*Key { var parentKeys []*Key sname := s.name for { - if i := strings.LastIndex(sname, "."); i > -1 { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { sname = sname[:i] sec, err := s.f.GetSection(sname) if err != nil { @@ -217,7 +217,7 @@ func (s *Section) KeysHash() map[string]string { defer s.f.lock.RUnlock() } - hash := map[string]string{} + hash := make(map[string]string, len(s.keysHash)) for key, value := range s.keysHash { hash[key] = value } @@ -245,7 +245,7 @@ func (s *Section) DeleteKey(name string) { // For example, "[parent.child1]" and "[parent.child12]" are child sections // of section "[parent]". func (s *Section) ChildSections() []*Section { - prefix := s.name + "." 
+ prefix := s.name + s.f.options.ChildSectionDelimiter children := make([]*Section, 0, 3) for _, name := range s.f.sectionList { if strings.HasPrefix(name, prefix) { diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/gopkg.in/ini.v1/struct.go index 9be40a920068..a486b2fe0fdc 100644 --- a/vendor/gopkg.in/ini.v1/struct.go +++ b/vendor/gopkg.in/ini.v1/struct.go @@ -263,24 +263,21 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri return nil } -func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool) { - opts := strings.SplitN(tag, ",", 4) +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) { + opts := strings.SplitN(tag, ",", 5) rawName = opts[0] - if len(opts) > 1 { - omitEmpty = opts[1] == "omitempty" + for _, opt := range opts[1:] { + omitEmpty = omitEmpty || (opt == "omitempty") + allowShadow = allowShadow || (opt == "allowshadow") + allowNonUnique = allowNonUnique || (opt == "nonunique") + extends = extends || (opt == "extends") } - if len(opts) > 2 { - allowShadow = opts[2] == "allowshadow" - } - if len(opts) > 3 { - allowNonUnique = opts[3] == "nonunique" - } - return rawName, omitEmpty, allowShadow, allowNonUnique + return rawName, omitEmpty, allowShadow, allowNonUnique, extends } // mapToField maps the given value to the matching field of the given section. // The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. -func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int) error { +func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error { if val.Kind() == reflect.Ptr { val = val.Elem() } @@ -295,7 +292,7 @@ func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int) continue } - rawName, _, allowShadow, allowNonUnique := parseTagOptions(tag) + rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag) fieldName := s.parseFieldName(tpField.Name, rawName) if len(fieldName) == 0 || !field.CanSet() { continue @@ -303,12 +300,26 @@ func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int) isStruct := tpField.Type.Kind() == reflect.Struct isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct - isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - if isAnonymous { + isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + if isAnonymousPtr { field.Set(reflect.New(tpField.Type.Elem())) } - if isAnonymous || isStruct || isStructPtr { + if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) { + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + fieldSection := s + if rawName != "" { + sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName + if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) { + fieldSection = secs[sectionIndex] + } + } + if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + } else if isAnonymousPtr || isStruct || isStructPtr { if secs, err := s.f.SectionsByName(fieldName); err == nil { if len(secs) <= sectionIndex { return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) @@ -318,7 +329,7 @@ func (s *Section) 
mapToField(val reflect.Value, isStrict bool, sectionIndex int) if isStructPtr && field.IsNil() { field.Set(reflect.New(tpField.Type.Elem())) } - if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex); err != nil { + if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil { return fmt.Errorf("map to field %q: %v", fieldName, err) } continue @@ -357,7 +368,7 @@ func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) ( typ := val.Type().Elem() for i, sec := range secs { elem := reflect.New(typ) - if err = sec.mapToField(elem, isStrict, i); err != nil { + if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil { return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) } @@ -387,7 +398,7 @@ func (s *Section) mapTo(v interface{}, isStrict bool) error { return nil } - return s.mapToField(val, isStrict, 0) + return s.mapToField(val, isStrict, 0, s.name) } // MapTo maps section to given struct. @@ -479,7 +490,7 @@ func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, all _ = keyWithShadows.AddShadow(val) } } - key = keyWithShadows + *key = *keyWithShadows return nil } @@ -581,7 +592,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { continue } - rawName, omitEmpty, allowShadow, allowNonUnique := parseTagOptions(tag) + rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag) if omitEmpty && isEmptyValue(field) { continue } @@ -595,7 +606,14 @@ func (s *Section) reflectFrom(val reflect.Value) error { continue } - if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) { + if err := s.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) || (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { // Note: The only error here is section doesn't exist. sec, err := s.f.GetSection(fieldName) diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go index df36e3a30f55..0173b6982e84 100644 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -100,7 +100,10 @@ func (p *parser) peek() yaml_event_type_t { if p.event.typ != yaml_NO_EVENT { return p.event.typ } - if !yaml_parser_parse(&p.parser, &p.event) { + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). 
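parseTagOptions above now accepts its tag options in any order and adds `extends` for embedded structs. A hedged sketch of plain struct mapping with a couple of those options; the field and section names are made up, and the `extends` behaviour itself is not exercised here.

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

type Server struct {
	Host  string `ini:"host"`
	Ports []int  `ini:"port,allowshadow,omitempty"` // option order no longer matters
}

func main() {
	data := []byte("[server]\nhost = example.local\nport = 8080\nport = 8443\n")

	cfg, err := ini.LoadSources(ini.LoadOptions{AllowShadows: true}, data)
	if err != nil {
		panic(err)
	}

	var s Server
	if err := cfg.Section("server").MapTo(&s); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", s) // {Host:example.local Ports:[8080 8443]}
}
```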
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { p.fail() } return p.event.typ @@ -320,6 +323,8 @@ type decoder struct { decodeCount int aliasCount int aliasDepth int + + mergedFields map[interface{}]bool } var ( @@ -808,6 +813,11 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + mapIsNew := false if out.IsNil() { out.Set(reflect.MakeMap(outt)) @@ -815,11 +825,18 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } for i := 0; i < l; i += 2 { if isMerge(n.Content[i]) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } k := reflect.New(kt).Elem() if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } kkind := k.Kind() if kkind == reflect.Interface { kkind = k.Elem().Kind() @@ -833,6 +850,12 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + d.stringMapType = stringMapType d.generalMapType = generalMapType return true @@ -844,7 +867,8 @@ func isStringMap(n *Node) bool { } l := len(n.Content) for i := 0; i < l; i += 2 { - if n.Content[i].ShortTag() != strTag { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { return false } } @@ -861,7 +885,6 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { var elemType reflect.Type if sinfo.InlineMap != -1 { inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) elemType = inlineMap.Type().Elem() } @@ -870,6 +893,9 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.prepare(n, field) } + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node var doneFields []bool if d.uniqueKeys { doneFields = make([]bool, len(sinfo.FieldsList)) @@ -879,13 +905,20 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { for i := 0; i < l; i += 2 { ni := n.Content[i] if isMerge(ni) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } if !d.unmarshal(ni, name) { continue } - if info, ok := sinfo.FieldsMap[name.String()]; ok { + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { if d.uniqueKeys { if doneFields[info.Id] { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) @@ -911,6 +944,11 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } return true } @@ -918,19 +956,29 @@ func failWantMap() { failf("map merge requires map or sequence of maps as the value") } -func (d *decoder) merge(n *Node, out reflect.Value) { - switch n.Kind { +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + 
} + } + + switch merge.Kind { case MappingNode: - d.unmarshal(n, out) + d.unmarshal(merge, out) case AliasNode: - if n.Alias != nil && n.Alias.Kind != MappingNode { + if merge.Alias != nil && merge.Alias.Kind != MappingNode { failWantMap() } - d.unmarshal(n, out) + d.unmarshal(merge, out) case SequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.Content) - 1; i >= 0; i-- { - ni := n.Content[i] + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] if ni.Kind == AliasNode { if ni.Alias != nil && ni.Alias.Kind != MappingNode { failWantMap() @@ -943,6 +991,8 @@ func (d *decoder) merge(n *Node, out reflect.Value) { default: failWantMap() } + + d.mergedFields = mergedFields } func isMerge(n *Node) bool { diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go index ac66fccc059e..268558a0d632 100644 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -687,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -786,7 +789,7 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { } token := peek_token(parser) - if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { return } @@ -813,6 +816,9 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -922,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS index ad5063fdf10d..8cccebf2e9c9 100644 --- a/vendor/k8s.io/klog/v2/OWNERS +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -1,19 +1,13 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: - - jayunit100 - - hoegaarden - - andyxning - - neolit123 - pohly - - yagonobre - - vincepri - - detiber approvers: - dims - thockin - - justinsb - - tallclair - - piosz + - serathius +emeritus_approvers: - brancz + - justinsb - lavalamp - - serathius + - piosz + - tallclair diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index 64d29622e897..d45cbe1720f4 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -23,6 +23,20 @@ Historical context is available here: * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ +## Release versioning + +Semantic versioning is used in this repository. 
It contains several Go modules +with different levels of stability: +- `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags +- `examples` - no stable API, no tags, no intention to ever stabilize + +Exempt from the API stability guarantee are items (packages, functions, etc.) +which are marked explicitly as `EXPERIMENTAL` in their docs comment. Those +may still change in incompatible ways or get removed entirely. This can only +be used for code that is used in tests to avoid situations where non-test +code from two different Kubernetes dependencies depends on incompatible +releases of klog because an experimental API was changed. + ---- How to use klog @@ -32,6 +46,7 @@ How to use klog - You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) - For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) +- See our documentation on [pkg.go.dev/k8s.io](https://pkg.go.dev/k8s.io/klog). **NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater. @@ -85,7 +100,7 @@ The comment from glog.go introduces the ideas: glog.Fatalf("Initialization failed: %s", err) - See the documentation for the V function for an explanation + See the documentation of the V function for an explanation of these examples: if glog.V(2) { diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go new file mode 100644 index 000000000000..65ac479ab6ae --- /dev/null +++ b/vendor/k8s.io/klog/v2/contextual.go @@ -0,0 +1,185 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "context" + + "github.com/go-logr/logr" +) + +// This file provides the implementation of +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1602-structured-logging +// +// SetLogger and ClearLogger were originally added to klog.go and got moved +// here. Contextual logging adds a way to retrieve a Logger for direct logging +// without the logging calls in klog.go. +// +// The global variables are expected to be modified only during sequential +// parts of a program (init, serial tests) and therefore are not protected by +// mutex locking. + +var ( + // klogLogger is used as fallback for logging through the normal klog code + // when no Logger is set. + klogLogger logr.Logger = logr.New(&klogger{}) +) + +// SetLogger sets a Logger implementation that will be used as backing +// implementation of the traditional klog log calls. klog will do its own +// verbosity checks before calling logger.V().Info. logger.Error is always +// called, regardless of the klog verbosity settings. 
+// +// If set, all log lines will be suppressed from the regular output, and +// redirected to the logr implementation. +// Use as: +// ... +// klog.SetLogger(zapr.NewLogger(zapLog)) +// +// To remove a backing logr implemention, use ClearLogger. Setting an +// empty logger with SetLogger(logr.Logger{}) does not work. +// +// Modifying the logger is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. +func SetLogger(logger logr.Logger) { + SetLoggerWithOptions(logger) +} + +// SetLoggerWithOptions is a more flexible version of SetLogger. Without +// additional options, it behaves exactly like SetLogger. By passing +// ContextualLogger(true) as option, it can be used to set a logger that then +// will also get called directly by applications which retrieve it via +// FromContext, Background, or TODO. +// +// Supporting direct calls is recommended because it avoids the overhead of +// routing log entries through klogr into klog and then into the actual Logger +// backend. +func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) { + logging.logger = &logger + logging.loggerOptions = loggerOptions{} + for _, opt := range opts { + opt(&logging.loggerOptions) + } +} + +// ContextualLogger determines whether the logger passed to +// SetLoggerWithOptions may also get called directly. Such a logger cannot rely +// on verbosity checking in klog. +func ContextualLogger(enabled bool) LoggerOption { + return func(o *loggerOptions) { + o.contextualLogger = enabled + } +} + +// FlushLogger provides a callback for flushing data buffered by the logger. +func FlushLogger(flush func()) LoggerOption { + return func(o *loggerOptions) { + o.flush = flush + } +} + +// LoggerOption implements the functional parameter paradigm for +// SetLoggerWithOptions. +type LoggerOption func(o *loggerOptions) + +type loggerOptions struct { + contextualLogger bool + flush func() +} + +// ClearLogger removes a backing Logger implementation if one was set earlier +// with SetLogger. +// +// Modifying the logger is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. +func ClearLogger() { + logging.logger = nil + logging.loggerOptions = loggerOptions{} +} + +// EnableContextualLogging controls whether contextual logging is enabled. +// By default it is enabled. When disabled, FromContext avoids looking up +// the logger in the context and always returns the global logger. +// LoggerWithValues, LoggerWithName, and NewContext become no-ops +// and return their input logger respectively context. This may be useful +// to avoid the additional overhead for contextual logging. +// +// This must be called during initialization before goroutines are started. +func EnableContextualLogging(enabled bool) { + logging.contextualLoggingEnabled = enabled +} + +// FromContext retrieves a logger set by the caller or, if not set, +// falls back to the program's global logger (a Logger instance or klog +// itself). +func FromContext(ctx context.Context) Logger { + if logging.contextualLoggingEnabled { + if logger, err := logr.FromContext(ctx); err == nil { + return logger + } + } + + return Background() +} + +// TODO can be used as a last resort by code that has no means of +// receiving a logger from its caller. FromContext or an explicit logger +// parameter should be used instead. +func TODO() Logger { + return Background() +} + +// Background retrieves the fallback logger. 
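SetLoggerWithOptions, ContextualLogger, FromContext and friends above form the new contextual-logging surface. A hedged sketch of routing klog through a logr backend and pulling a logger back out of a context; funcr is used only as a stand-in backend, and the logger name and key/value pairs are illustrative.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-logr/logr/funcr"
	"k8s.io/klog/v2"
)

func main() {
	// Any logr.Logger can serve as the backend; funcr just prints formatted entries.
	backend := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{})

	// ContextualLogger(true) states that the backend may also be called directly
	// via FromContext/Background, bypassing klog's own log formatting.
	klog.SetLoggerWithOptions(backend, klog.ContextualLogger(true))
	defer klog.ClearLogger()

	ctx := klog.NewContext(context.Background(), klog.LoggerWithName(klog.Background(), "setup"))
	klog.FromContext(ctx).Info("starting", "component", "example")
}
```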
It should not be called before +// that logger was initialized by the program and not by code that should +// better receive a logger via its parameters. TODO can be used as a temporary +// solution for such code. +func Background() Logger { + if logging.loggerOptions.contextualLogger { + // Is non-nil because logging.loggerOptions.contextualLogger is + // only true if a logger was set. + return *logging.logger + } + + return klogLogger +} + +// LoggerWithValues returns logger.WithValues(...kv) when +// contextual logging is enabled, otherwise the logger. +func LoggerWithValues(logger Logger, kv ...interface{}) Logger { + if logging.contextualLoggingEnabled { + return logger.WithValues(kv...) + } + return logger +} + +// LoggerWithName returns logger.WithName(name) when contextual logging is +// enabled, otherwise the logger. +func LoggerWithName(logger Logger, name string) Logger { + if logging.contextualLoggingEnabled { + return logger.WithName(name) + } + return logger +} + +// NewContext returns logr.NewContext(ctx, logger) when +// contextual logging is enabled, otherwise ctx. +func NewContext(ctx context.Context, logger Logger) context.Context { + if logging.contextualLoggingEnabled { + return logr.NewContext(ctx, logger) + } + return ctx +} diff --git a/vendor/k8s.io/klog/v2/exit.go b/vendor/k8s.io/klog/v2/exit.go new file mode 100644 index 000000000000..320a14772810 --- /dev/null +++ b/vendor/k8s.io/klog/v2/exit.go @@ -0,0 +1,69 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2022 The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package klog + +import ( + "fmt" + "os" + "time" +) + +var ( + + // ExitFlushTimeout is the timeout that klog has traditionally used during + // calls like Fatal or Exit when flushing log data right before exiting. + // Applications that replace those calls and do not have some specific + // requirements like "exit immediately" can use this value as parameter + // for FlushAndExit. + // + // Can be set for testing purpose or to change the application's + // default. + ExitFlushTimeout = 10 * time.Second + + // OsExit is the function called by FlushAndExit to terminate the program. + // + // Can be set for testing purpose or to change the application's + // default behavior. Note that the function should not simply return + // because callers of functions like Fatal will not expect that. + OsExit = os.Exit +) + +// FlushAndExit flushes log data for a certain amount of time and then calls +// os.Exit. Combined with some logging call it provides a replacement for +// traditional calls like Fatal or Exit. +func FlushAndExit(flushTimeout time.Duration, exitCode int) { + timeoutFlush(flushTimeout) + OsExit(exitCode) +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. 
This is needed because the hooks invoked +// by Flush may deadlock when klog.Fatal is called from a hook that holds +// a lock. Flushing also might take too long. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) + } +} diff --git a/vendor/k8s.io/klog/v2/imports.go b/vendor/k8s.io/klog/v2/imports.go new file mode 100644 index 000000000000..602c3ed9e6d5 --- /dev/null +++ b/vendor/k8s.io/klog/v2/imports.go @@ -0,0 +1,38 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "github.com/go-logr/logr" +) + +// The reason for providing these aliases is to allow code to work with logr +// without directly importing it. + +// Logger in this package is exactly the same as logr.Logger. +type Logger = logr.Logger + +// LogSink in this package is exactly the same as logr.LogSink. +type LogSink = logr.LogSink + +// Runtimeinfo in this package is exactly the same as logr.RuntimeInfo. +type RuntimeInfo = logr.RuntimeInfo + +var ( + // New is an alias for logr.New. + New = logr.New +) diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go new file mode 100644 index 000000000000..ac88682a2c44 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -0,0 +1,159 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2022 The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package buffer provides a cache for byte.Buffer instances that can be reused +// to avoid frequent allocation and deallocation. It also has utility code +// for log header formatting that use these buffers. +package buffer + +import ( + "bytes" + "os" + "sync" + "time" + + "k8s.io/klog/v2/internal/severity" +) + +var ( + // Pid is inserted into log headers. Can be overridden for tests. + Pid = os.Getpid() +) + +// Buffer holds a single byte.Buffer for reuse. The zero value is ready for +// use. It also provides some helper methods for output formatting. +type Buffer struct { + bytes.Buffer + Tmp [64]byte // temporary byte array for creating headers. + next *Buffer +} + +// Buffers manages the reuse of individual buffer instances. It is thread-safe. +type Buffers struct { + // mu protects the free list. 
It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + mu sync.Mutex + + // freeList is a list of byte buffers, maintained under mu. + freeList *Buffer +} + +// GetBuffer returns a new, ready-to-use buffer. +func (bl *Buffers) GetBuffer() *Buffer { + bl.mu.Lock() + b := bl.freeList + if b != nil { + bl.freeList = b.next + } + bl.mu.Unlock() + if b == nil { + b = new(Buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// PutBuffer returns a buffer to the free list. +func (bl *Buffers) PutBuffer(b *Buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + bl.mu.Lock() + b.next = bl.freeList + bl.freeList = b + bl.mu.Unlock() +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.Tmp[i]. +func (buf *Buffer) twoDigits(i, d int) { + buf.Tmp[i+1] = digits[d%10] + d /= 10 + buf.Tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.Tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *Buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.Tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.Tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.Tmp[i]. +func (buf *Buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.Tmp) + for { + j-- + buf.Tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.Tmp[i:], buf.Tmp[j:]) +} + +// FormatHeader formats a log header using the provided file name and line number. +func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) { + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > severity.FatalLog { + s = severity.InfoLog // for safety. + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.Tmp[0] = severity.Char[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.Tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.Tmp[8] = ':' + buf.twoDigits(9, minute) + buf.Tmp[11] = ':' + buf.twoDigits(12, second) + buf.Tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.Tmp[21] = ' ' + buf.nDigits(7, 22, Pid, ' ') // TODO: should be TID + buf.Tmp[29] = ' ' + buf.Write(buf.Tmp[:30]) + buf.WriteString(file) + buf.Tmp[0] = ':' + n := buf.someDigits(1, line) + buf.Tmp[n+1] = ']' + buf.Tmp[n+2] = ' ' + buf.Write(buf.Tmp[:n+3]) +} diff --git a/vendor/k8s.io/klog/v2/internal/clock/README.md b/vendor/k8s.io/klog/v2/internal/clock/README.md new file mode 100644 index 000000000000..03d692c8f83a --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/clock/README.md @@ -0,0 +1,7 @@ +# Clock + +This package provides an interface for time-based operations. It allows +mocking time for testing. + +This is a copy of k8s.io/utils/clock. We have to copy it to avoid a circular +dependency (k8s.io/klog -> k8s.io/utils -> k8s.io/klog). 
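The hand-rolled digit code above can obscure the resulting layout; this standalone sketch produces the same "Lmmdd hh:mm:ss.uuuuuu threadid file:line] " shape with fmt (slower, but easier to read), using made-up file and line values:

package main

import (
	"fmt"
	"os"
	"time"
)

// header mimics the layout written by Buffer.FormatHeader, using fmt instead
// of the manual digit helpers.
func header(sev byte, file string, line int, now time.Time) string {
	_, month, day := now.Date()
	hour, minute, second := now.Clock()
	return fmt.Sprintf("%c%02d%02d %02d:%02d:%02d.%06d %7d %s:%d] ",
		sev, int(month), day, hour, minute, second, now.Nanosecond()/1000,
		os.Getpid(), file, line)
}

func main() {
	fmt.Println(header('I', "example.go", 42, time.Now()) + "hello world")
}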
diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go new file mode 100644 index 000000000000..b8b6af5c817f --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/clock/clock.go @@ -0,0 +1,178 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clock + +import "time" + +// PassiveClock allows for injecting fake or real clocks into code +// that needs to read the current time but does not support scheduling +// activity in the future. +type PassiveClock interface { + Now() time.Time + Since(time.Time) time.Duration +} + +// Clock allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type Clock interface { + PassiveClock + // After returns the channel of a new Timer. + // This method does not allow to free/GC the backing timer before it fires. Use + // NewTimer instead. + After(d time.Duration) <-chan time.Time + // NewTimer returns a new Timer. + NewTimer(d time.Duration) Timer + // Sleep sleeps for the provided duration d. + // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. + Sleep(d time.Duration) + // Tick returns the channel of a new Ticker. + // This method does not allow to free/GC the backing ticker. Use + // NewTicker from WithTicker instead. + Tick(d time.Duration) <-chan time.Time +} + +// WithTicker allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type WithTicker interface { + Clock + // NewTicker returns a new Ticker. + NewTicker(time.Duration) Ticker +} + +// WithDelayedExecution allows for injecting fake or real clocks into +// code that needs to make use of AfterFunc functionality. +type WithDelayedExecution interface { + Clock + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// WithTickerAndDelayedExecution allows for injecting fake or real clocks +// into code that needs Ticker and AfterFunc functionality +type WithTickerAndDelayedExecution interface { + WithTicker + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// Ticker defines the Ticker interface. +type Ticker interface { + C() <-chan time.Time + Stop() +} + +var _ = WithTicker(RealClock{}) + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. +func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// After is the same as time.After(d). +// This method does not allow to free/GC the backing timer before it fires. Use +// NewTimer instead. 
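Because this internal copy cannot be imported from outside klog, the following standalone sketch only illustrates the pattern the interfaces enable: code depends on a small clock interface so tests can substitute a fixed time instead of sleeping. All names here are local to the sketch:

package main

import (
	"fmt"
	"time"
)

// passiveClock mirrors the PassiveClock interface above.
type passiveClock interface {
	Now() time.Time
	Since(time.Time) time.Duration
}

type realClock struct{}

func (realClock) Now() time.Time                  { return time.Now() }
func (realClock) Since(t time.Time) time.Duration { return time.Since(t) }

// fixedClock is a trivial fake for tests.
type fixedClock struct{ t time.Time }

func (c fixedClock) Now() time.Time                  { return c.t }
func (c fixedClock) Since(t time.Time) time.Duration { return c.t.Sub(t) }

// ageOf works against the interface, so it is testable without real delays.
func ageOf(c passiveClock, created time.Time) time.Duration {
	return c.Since(created)
}

func main() {
	created := time.Date(2023, 2, 13, 22, 0, 0, 0, time.UTC)
	fmt.Println(ageOf(realClock{}, created))
	fmt.Println(ageOf(fixedClock{t: created.Add(time.Minute)}, created)) // 1m0s
}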
+func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +// NewTimer is the same as time.NewTimer(d) +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +// AfterFunc is the same as time.AfterFunc(d, f). +func (RealClock) AfterFunc(d time.Duration, f func()) Timer { + return &realTimer{ + timer: time.AfterFunc(d, f), + } +} + +// Tick is the same as time.Tick(d) +// This method does not allow to free/GC the backing ticker. Use +// NewTicker instead. +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + +// NewTicker returns a new Ticker. +func (RealClock) NewTicker(d time.Duration) Ticker { + return &realTicker{ + ticker: time.NewTicker(d), + } +} + +// Sleep is the same as time.Sleep(d) +// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. +type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +var _ = Timer(&realTimer{}) + +// realTimer is backed by an actual time.Timer. +type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. +func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. +func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +} + +type realTicker struct { + ticker *time.Ticker +} + +func (r *realTicker) C() <-chan time.Time { + return r.ticker.C +} + +func (r *realTicker) Stop() { + r.ticker.Stop() +} diff --git a/vendor/k8s.io/klog/v2/internal/dbg/dbg.go b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go new file mode 100644 index 000000000000..f27bd14472f3 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go @@ -0,0 +1,42 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dbg provides some helper code for call traces. +package dbg + +import ( + "runtime" +) + +// Stacks is a wrapper for runtime.Stack that attempts to recover the data for +// all goroutines or the calling one. +func Stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. 
+ n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go new file mode 100644 index 000000000000..f85d7ccf83b8 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -0,0 +1,253 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/go-logr/logr" +) + +// WithValues implements LogSink.WithValues. The old key/value pairs are +// assumed to be well-formed, the new ones are checked and padded if +// necessary. It returns a new slice. +func WithValues(oldKV, newKV []interface{}) []interface{} { + if len(newKV) == 0 { + return oldKV + } + newLen := len(oldKV) + len(newKV) + hasMissingValue := newLen%2 != 0 + if hasMissingValue { + newLen++ + } + // The new LogSink must have its own slice. + kv := make([]interface{}, 0, newLen) + kv = append(kv, oldKV...) + kv = append(kv, newKV...) + if hasMissingValue { + kv = append(kv, missingValue) + } + return kv +} + +// MergeKVs deduplicates elements provided in two key/value slices. +// +// Keys in each slice are expected to be unique, so duplicates can only occur +// when the first and second slice contain the same key. When that happens, the +// key/value pair from the second slice is used. The first slice must be well-formed +// (= even key/value pairs). The second one may have a missing value, in which +// case the special "missing value" is added to the result. +func MergeKVs(first, second []interface{}) []interface{} { + maxLength := len(first) + (len(second)+1)/2*2 + if maxLength == 0 { + // Nothing to do at all. + return nil + } + + if len(first) == 0 && len(second)%2 == 0 { + // Nothing to be overridden, second slice is well-formed + // and can be used directly. + return second + } + + // Determine which keys are in the second slice so that we can skip + // them when iterating over the first one. The code intentionally + // favors performance over completeness: we assume that keys are string + // constants and thus compare equal when the string values are equal. A + // string constant being overridden by, for example, a fmt.Stringer is + // not handled. + overrides := map[interface{}]bool{} + for i := 0; i < len(second); i += 2 { + overrides[second[i]] = true + } + merged := make([]interface{}, 0, maxLength) + for i := 0; i+1 < len(first); i += 2 { + key := first[i] + if overrides[key] { + continue + } + merged = append(merged, key, first[i+1]) + } + merged = append(merged, second...) + if len(merged)%2 != 0 { + merged = append(merged, missingValue) + } + return merged +} + +const missingValue = "(MISSING)" + +// KVListFormat serializes all key/value pairs into the provided buffer. 
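Since internal/serialize is not importable by applications, here is a small self-contained sketch of the override semantics described for MergeKVs above; the keys and values are made up:

package main

import "fmt"

// mergeKVs mimics MergeKVs: keys from the second slice win, and a trailing
// key without a value is padded with a marker.
func mergeKVs(first, second []interface{}) []interface{} {
	overrides := map[interface{}]bool{}
	for i := 0; i < len(second); i += 2 {
		overrides[second[i]] = true
	}
	var merged []interface{}
	for i := 0; i+1 < len(first); i += 2 {
		if !overrides[first[i]] {
			merged = append(merged, first[i], first[i+1])
		}
	}
	merged = append(merged, second...)
	if len(merged)%2 != 0 {
		merged = append(merged, "(MISSING)")
	}
	return merged
}

func main() {
	with := []interface{}{"pod", "kube-dns", "attempt", 1}
	call := []interface{}{"attempt", 2, "err"} // trailing key without a value
	fmt.Println(mergeKVs(with, call))
	// Prints: [pod kube-dns attempt 2 err (MISSING)]
}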
+// A space gets inserted before the first pair and between each pair.
+func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
+	for i := 0; i < len(keysAndValues); i += 2 {
+		var v interface{}
+		k := keysAndValues[i]
+		if i+1 < len(keysAndValues) {
+			v = keysAndValues[i+1]
+		} else {
+			v = missingValue
+		}
+		b.WriteByte(' ')
+		// Keys are assumed to be well-formed according to
+		// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
+		// for the sake of performance. Keys with spaces,
+		// special characters, etc. will break parsing.
+		if sK, ok := k.(string); ok {
+			// Avoid one allocation when the key is a string, which
+			// normally it should be.
+			b.WriteString(sK)
+		} else {
+			b.WriteString(fmt.Sprintf("%s", k))
+		}
+
+		// The type checks are sorted so that more frequently used ones
+		// come first because that is then faster in the common
+		// cases. In Kubernetes, ObjectRef (a Stringer) is more common
+		// than plain strings
+		// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
+		switch v := v.(type) {
+		case fmt.Stringer:
+			writeStringValue(b, true, StringerToString(v))
+		case string:
+			writeStringValue(b, true, v)
+		case error:
+			writeStringValue(b, true, ErrorToString(v))
+		case logr.Marshaler:
+			value := MarshalerToValue(v)
+			// A marshaler that returns a string is useful for
+			// delayed formatting of complex values. We treat this
+			// case like a normal string. This is useful for
+			// multi-line support.
+			//
+			// We could do this by recursively formatting a value,
+			// but that comes with the risk of infinite recursion
+			// if a marshaler returns itself. Instead we call it
+			// only once and rely on it returning the intended
+			// value directly.
+			switch value := value.(type) {
+			case string:
+				writeStringValue(b, true, value)
+			default:
+				writeStringValue(b, false, fmt.Sprintf("%+v", v))
+			}
+		case []byte:
+			// In https://github.com/kubernetes/klog/pull/237 it was decided
+			// to format byte slices with "%+q". The advantages of that are:
+			// - readable output if the bytes happen to be printable
+			// - non-printable bytes get represented as unicode escape
+			//   sequences (\uxxxx)
+			//
+			// The downsides are that we cannot use the faster
+			// strconv.Quote here and that multi-line output is not
+			// supported. If developers know that a byte array is
+			// printable and they want multi-line output, they can
+			// convert the value to string before logging it.
+			b.WriteByte('=')
+			b.WriteString(fmt.Sprintf("%+q", v))
+		default:
+			writeStringValue(b, false, fmt.Sprintf("%+v", v))
+		}
+	}
+}
+
+// StringerToString converts a Stringer to a string,
+// handling panics if they occur.
+func StringerToString(s fmt.Stringer) (ret string) {
+	defer func() {
+		if err := recover(); err != nil {
+			ret = fmt.Sprintf("<panic: %s>", err)
+		}
+	}()
+	ret = s.String()
+	return
+}
+
+// MarshalerToValue invokes a marshaler and catches
+// panics.
+func MarshalerToValue(m logr.Marshaler) (ret interface{}) {
+	defer func() {
+		if err := recover(); err != nil {
+			ret = fmt.Sprintf("<panic: %s>", err)
+		}
+	}()
+	ret = m.MarshalLog()
+	return
+}
+
+// ErrorToString converts an error to a string,
+// handling panics if they occur.
+func ErrorToString(err error) (ret string) { + defer func() { + if err := recover(); err != nil { + ret = fmt.Sprintf("", err) + } + }() + ret = err.Error() + return +} + +func writeStringValue(b *bytes.Buffer, quote bool, v string) { + data := []byte(v) + index := bytes.IndexByte(data, '\n') + if index == -1 { + b.WriteByte('=') + if quote { + // Simple string, quote quotation marks and non-printable characters. + b.WriteString(strconv.Quote(v)) + return + } + // Non-string with no line breaks. + b.WriteString(v) + return + } + + // Complex multi-line string, show as-is with indention like this: + // I... "hello world" key=< + // line 1 + // line 2 + // > + // + // Tabs indent the lines of the value while the end of string delimiter + // is indented with a space. That has two purposes: + // - visual difference between the two for a human reader because indention + // will be different + // - no ambiguity when some value line starts with the end delimiter + // + // One downside is that the output cannot distinguish between strings that + // end with a line break and those that don't because the end delimiter + // will always be on the next line. + b.WriteString("=<\n") + for index != -1 { + b.WriteByte('\t') + b.Write(data[0 : index+1]) + data = data[index+1:] + index = bytes.IndexByte(data, '\n') + } + if len(data) == 0 { + // String ended with line break, don't add another. + b.WriteString(" >") + } else { + // No line break at end of last line, write rest of string and + // add one. + b.WriteByte('\t') + b.Write(data) + b.WriteString("\n >") + } +} diff --git a/vendor/k8s.io/klog/v2/internal/severity/severity.go b/vendor/k8s.io/klog/v2/internal/severity/severity.go new file mode 100644 index 000000000000..30fa1834f0fb --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/severity/severity.go @@ -0,0 +1,58 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2022 The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package severity provides definitions for klog severity (info, warning, ...) +package severity + +import ( + "strings" +) + +// severity identifies the sort of log: info, warning etc. The binding to flag.Value +// is handled in klog.go +type Severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + InfoLog Severity = iota + WarningLog + ErrorLog + FatalLog + NumSeverity = 4 +) + +// Char contains one shortcut letter per severity level. +const Char = "IWEF" + +// Name contains one name per severity level. +var Name = []string{ + InfoLog: "INFO", + WarningLog: "WARNING", + ErrorLog: "ERROR", + FatalLog: "FATAL", +} + +// ByName looks up a severity level by name. 
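Roughly how this formatting surfaces through klog's structured calls when no logr backend is set; the output lines in the comments illustrate the shape rather than quoting exact klog output:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("logtostderr", "true")
	flag.Parse()
	defer klog.Flush()

	// Single-line values are quoted, e.g.
	//   ... "starting" pod="kube-system/kube-dns" attempt=2
	klog.InfoS("starting", "pod", klog.KRef("kube-system", "kube-dns"), "attempt", 2)

	// Multi-line values use the block form described above, e.g.
	//   ... "config loaded" config=<
	//           line 1
	//           line 2
	//    >
	klog.InfoS("config loaded", "config", "line 1\nline 2")
}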
+func ByName(s string) (Severity, bool) { + s = strings.ToUpper(s) + for i, name := range Name { + if name == s { + return Severity(i), true + } + } + return 0, false +} diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go new file mode 100644 index 000000000000..2c218f698c7a --- /dev/null +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -0,0 +1,158 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "fmt" + "reflect" + + "github.com/go-logr/logr" +) + +// ObjectRef references a kubernetes object +type ObjectRef struct { + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` +} + +func (ref ObjectRef) String() string { + if ref.Namespace != "" { + return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name) + } + return ref.Name +} + +// MarshalLog ensures that loggers with support for structured output will log +// as a struct by removing the String method via a custom type. +func (ref ObjectRef) MarshalLog() interface{} { + type or ObjectRef + return or(ref) +} + +var _ logr.Marshaler = ObjectRef{} + +// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface +// this interface may expand in the future, but will always be a subset of the +// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface +type KMetadata interface { + GetName() string + GetNamespace() string +} + +// KObj returns ObjectRef from ObjectMeta +func KObj(obj KMetadata) ObjectRef { + if obj == nil { + return ObjectRef{} + } + if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() { + return ObjectRef{} + } + + return ObjectRef{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } +} + +// KRef returns ObjectRef from name and namespace +func KRef(namespace, name string) ObjectRef { + return ObjectRef{ + Name: name, + Namespace: namespace, + } +} + +// KObjs returns slice of ObjectRef from an slice of ObjectMeta +// +// DEPRECATED: Use KObjSlice instead, it has better performance. +func KObjs(arg interface{}) []ObjectRef { + s := reflect.ValueOf(arg) + if s.Kind() != reflect.Slice { + return nil + } + objectRefs := make([]ObjectRef, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + if v, ok := s.Index(i).Interface().(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil + } + } + return objectRefs +} + +// KObjSlice takes a slice of objects that implement the KMetadata interface +// and returns an object that gets logged as a slice of ObjectRef values or a +// string containing those values, depending on whether the logger prefers text +// output or structured output. +// +// An error string is logged when KObjSlice is not passed a suitable slice. +// +// Processing of the argument is delayed until the value actually gets logged, +// in contrast to KObjs where that overhead is incurred regardless of whether +// the result is needed. 
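A short sketch of how these helpers are typically used in log calls; fakePod is a stand-in for a real API object (for example a *v1.Pod), which already implements the two KMetadata getters:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

// fakePod is a minimal stand-in for an API object implementing KMetadata.
type fakePod struct{ name, namespace string }

func (p fakePod) GetName() string      { return p.name }
func (p fakePod) GetNamespace() string { return p.namespace }

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("logtostderr", "true")
	flag.Parse()
	defer klog.Flush()

	pod := fakePod{name: "kube-dns", namespace: "kube-system"}

	// Both render the reference as "kube-system/kube-dns"; KRef covers the
	// case where only the name and namespace strings are at hand.
	klog.InfoS("syncing", "pod", klog.KObj(pod))
	klog.InfoS("retrying", "pod", klog.KRef("kube-system", "kube-dns"))
}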
+func KObjSlice(arg interface{}) interface{} { + return kobjSlice{arg: arg} +} + +type kobjSlice struct { + arg interface{} +} + +var _ fmt.Stringer = kobjSlice{} +var _ logr.Marshaler = kobjSlice{} + +func (ks kobjSlice) String() string { + objectRefs, err := ks.process() + if err != nil { + return err.Error() + } + return fmt.Sprintf("%v", objectRefs) +} + +func (ks kobjSlice) MarshalLog() interface{} { + objectRefs, err := ks.process() + if err != nil { + return err.Error() + } + return objectRefs +} + +func (ks kobjSlice) process() ([]interface{}, error) { + s := reflect.ValueOf(ks.arg) + switch s.Kind() { + case reflect.Invalid: + // nil parameter, print as nil. + return nil, nil + case reflect.Slice: + // Okay, handle below. + default: + return nil, fmt.Errorf("", ks.arg) + } + objectRefs := make([]interface{}, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + item := s.Index(i).Interface() + if item == nil { + objectRefs = append(objectRefs, nil) + } else if v, ok := item.(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil, fmt.Errorf("", item) + } + } + return objectRefs, nil +} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index dacdf91120a7..652fadcd4e4d 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -41,6 +41,10 @@ // // -logtostderr=true // Logs are written to standard error instead of to files. +// This shortcuts most of the usual output routing: +// -alsologtostderr, -stderrthreshold and -log_dir have no +// effect and output redirection at runtime with SetOutput is +// ignored. // -alsologtostderr=false // Logs are written to standard error as well as to files. // -stderrthreshold=ERROR @@ -81,7 +85,6 @@ import ( "math" "os" "path/filepath" - "reflect" "runtime" "strconv" "strings" @@ -90,81 +93,59 @@ import ( "time" "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/clock" + "k8s.io/klog/v2/internal/dbg" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" ) -// severity identifies the sort of log: info, warning etc. It also implements +// severityValue identifies the sort of log: info, warning etc. It also implements // the flag.Value interface. The -stderrthreshold flag is of type severity and // should be modified only through the flag.Value interface. The values match // the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", +type severityValue struct { + severity.Severity } // get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) +func (s *severityValue) get() severity.Severity { + return severity.Severity(atomic.LoadInt32((*int32)(&s.Severity))) } // set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) +func (s *severityValue) set(val severity.Severity) { + atomic.StoreInt32((*int32)(&s.Severity), int32(val)) } // String is part of the flag.Value interface. 
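And the slice variant; as noted above, KObjSlice defers the per-element work until the entry is actually formatted, which is why it is preferred over the deprecated KObjs. The node type below is again a stand-in:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

type fakeNode struct{ name string }

func (n fakeNode) GetName() string      { return n.name }
func (n fakeNode) GetNamespace() string { return "" }

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("logtostderr", "true")
	flag.Parse()
	defer klog.Flush()

	nodes := []fakeNode{{name: "node-1"}, {name: "node-2"}}

	// With text output this appears roughly as nodes="[node-1 node-2]"; the
	// slice is only walked if the V(5) entry is actually emitted.
	klog.V(5).InfoS("nodes ready", "nodes", klog.KObjSlice(nodes))
}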
-func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) +func (s *severityValue) String() string { + return strconv.FormatInt(int64(s.Severity), 10) } // Get is part of the flag.Getter interface. -func (s *severity) Get() interface{} { - return *s +func (s *severityValue) Get() interface{} { + return s.Severity } // Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity +func (s *severityValue) Set(value string) error { + var threshold severity.Severity // Is it a known name? - if v, ok := severityByName(value); ok { + if v, ok := severity.ByName(value); ok { threshold = v } else { v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } - threshold = severity(v) + threshold = severity.Severity(v) } logging.stderrThreshold.set(threshold) return nil } -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - // OutputStats tracks the number of output lines and bytes written. type OutputStats struct { lines int64 @@ -187,10 +168,10 @@ var Stats struct { Info, Warning, Error OutputStats } -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, +var severityStats = [severity.NumSeverity]*OutputStats{ + severity.InfoLog: &Stats.Info, + severity.WarningLog: &Stats.Warning, + severity.ErrorLog: &Stats.Error, } // Level is exported because it appears in the arguments to V and is @@ -266,6 +247,10 @@ func (m *moduleSpec) String() string { // Lock because the type is not atomic. TODO: clean this up. logging.mu.Lock() defer logging.mu.Unlock() + return m.serialize() +} + +func (m *moduleSpec) serialize() string { var b bytes.Buffer for i, f := range m.filter { if i > 0 { @@ -287,6 +272,17 @@ var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of // Set will sets module value // Syntax: -vmodule=recordio=2,file=1,gfs*=3 func (m *moduleSpec) Set(value string) error { + filter, err := parseModuleSpec(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +func parseModuleSpec(value string) ([]modulePat, error) { var filter []modulePat for _, pat := range strings.Split(value, ",") { if len(pat) == 0 { @@ -295,15 +291,15 @@ func (m *moduleSpec) Set(value string) error { } patLev := strings.Split(pat, "=") if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax + return nil, errVmoduleSyntax } pattern := patLev[0] v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") + return nil, errors.New("syntax error: expect comma-separated list of filename=N") } if v < 0 { - return errors.New("negative value for vmodule level") + return nil, errors.New("negative value for vmodule level") } if v == 0 { continue // Ignore. It's harmless but no point in paying the overhead. @@ -311,10 +307,7 @@ func (m *moduleSpec) Set(value string) error { // TODO: check syntax of filter? 
filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil + return filter, nil } // isLiteral reports whether the pattern is a literal string, that is, has no metacharacters @@ -404,9 +397,11 @@ type flushSyncWriter interface { io.Writer } -// init sets up the defaults and runs flushDaemon. +// init sets up the defaults. func init() { - logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. + logging.stderrThreshold = severityValue{ + Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. + } logging.setVState(0, nil, false) logging.logDir = "" logging.logFile = "" @@ -417,7 +412,7 @@ func init() { logging.addDirHeader = false logging.skipLogHeaders = false logging.oneOutput = false - go logging.flushDaemon() + logging.flushD = newFlushDaemon(logging.lockAndFlushAll, nil) } // InitFlags is for explicitly initializing the flags. @@ -426,19 +421,19 @@ func InitFlags(flagset *flag.FlagSet) { flagset = flag.CommandLine } - flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") - flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") + flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory (no effect when -logtostderr=true)") + flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file (no effect when -logtostderr=true)") flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, - "Defines the maximum size a log file can grow to. Unit is megabytes. "+ + "Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. 
"+ "If the value is 0, the maximum file size is unlimited.") flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") - flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") + flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files (no effect when -logtostderr=true)") flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") - flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level)") - flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") - flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)") + flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files (no effect when -logtostderr=true)") + flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") } @@ -448,8 +443,20 @@ func Flush() { logging.lockAndFlushAll() } -// loggingT collects all the global state of the logging setup. -type loggingT struct { +// settings collects global settings. +type settings struct { + // contextualLoggingEnabled controls whether contextual logging is + // active. Disabling it may have some small performance benefit. + contextualLoggingEnabled bool + + // logger is the global Logger chosen by users of klog, nil if + // none is available. + logger *Logger + + // loggerOptions contains the options that were supplied for + // globalLogger. + loggerOptions loggerOptions + // Boolean flags. Not handled atomically because the flag.Value interface // does not let us avoid the =true, and that shorthand is necessary for // compatibility. TODO: does this matter enough to fix? Seems unlikely. @@ -457,25 +464,16 @@ type loggingT struct { alsoToStderr bool // The -alsologtostderr flag. // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. + stderrThreshold severityValue // The -stderrthreshold flag. - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex + // Access to all of the following fields must be protected via a mutex. 
- // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level + file [severity.NumSeverity]flushSyncWriter + // flushInterval is the interval for periodic flushing. If zero, + // the global default will be used. + flushInterval time.Duration + // filterLength stores the length of the vmodule filter chain. If greater // than zero, it means vmodule is enabled. It may be read safely // using sync.LoadInt32, but is only modified under mu. @@ -508,9 +506,6 @@ type loggingT struct { // If true, add the file directory to the header addDirHeader bool - // If set, all output will be redirected unconditionally to the provided logr.Logger - logr *logr.Logger - // If true, messages will not be propagated to lower severity log levels oneOutput bool @@ -518,14 +513,48 @@ type loggingT struct { filter LogFilter } -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer +// deepCopy creates a copy that doesn't share anything with the original +// instance. +func (s settings) deepCopy() settings { + // vmodule is a slice and would be shared, so we have copy it. + filter := make([]modulePat, len(s.vmodule.filter)) + for i := range s.vmodule.filter { + filter[i] = s.vmodule.filter[i] + } + s.vmodule.filter = filter + + return s +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + settings + + // bufferCache maintains the free list. It uses its own mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + bufferCache buffer.Buffers + + // flushD holds a flushDaemon that frequently flushes log file buffers. + // Uses its own mutex. + flushD *flushDaemon + + // mu protects the remaining elements of this structure and the fields + // in settingsT which need a mutex lock. + mu sync.Mutex + + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level } -var logging loggingT +var logging = loggingT{ + settings: settings{ + contextualLoggingEnabled: true, + }, +} // setVState sets a consistent state for V logging. // l.mu is held. @@ -547,36 +576,56 @@ func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool l.verbosity.set(verbosity) } -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() +var timeNow = time.Now // Stubbed out for testing. + +// CaptureState gathers information about all current klog settings. +// The result can be used to restore those settings. 
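A sketch of the capture/restore pattern this enables in tests; the verbosity value and test body are placeholders:

package example

import (
	"flag"
	"testing"

	"k8s.io/klog/v2"
)

func TestWithVerboseLogging(t *testing.T) {
	// Snapshot the current klog settings and restore them when the test ends.
	state := klog.CaptureState()
	defer state.Restore()

	// Temporarily raise verbosity for this test only.
	var fs flag.FlagSet
	klog.InitFlags(&fs)
	if err := fs.Set("v", "5"); err != nil {
		t.Fatal(err)
	}

	klog.V(5).Info("now visible")
}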
+func CaptureState() State { + logging.mu.Lock() + defer logging.mu.Unlock() + return &state{ + settings: logging.settings.deepCopy(), + flushDRunning: logging.flushD.isRunning(), + maxSize: MaxSize, } - return b } -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() +// State stores a snapshot of klog settings. It gets created with CaptureState +// and can be used to restore the entire state. Modifying individual settings +// is supported via the command line flags. +type State interface { + // Restore restore the entire state. It may get called more than once. + Restore() } -var timeNow = time.Now // Stubbed out for testing. +type state struct { + settings + + flushDRunning bool + maxSize uint64 +} + +func (s *state) Restore() { + // This needs to be done before mutex locking. + if s.flushDRunning && !logging.flushD.isRunning() { + // This is not quite accurate: StartFlushDaemon might + // have been called with some different interval. + interval := s.flushInterval + if interval == 0 { + interval = flushInterval + } + logging.flushD.run(interval) + } else if !s.flushDRunning && logging.flushD.isRunning() { + logging.flushD.stop() + } + + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.settings = s.settings + logging.setVState(s.verbosity, s.vmodule.filter, true) + MaxSize = s.maxSize +} /* header formats a log header as defined by the C++ implementation. @@ -595,7 +644,7 @@ where the fields are defined as follows: line The line number msg The user-supplied message */ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { +func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, string, int) { _, file, line, ok := runtime.Caller(3 + depth) if !ok { file = "???" @@ -615,133 +664,68 @@ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { } // formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() +func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer { + buf := l.bufferCache.GetBuffer() if l.skipHeaders { return buf } - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) + now := timeNow() + buf.FormatHeader(s, file, line, now) return buf } -// Some custom tiny helper functions to print the log header efficiently. 
- -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) +func (l *loggingT) println(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { + l.printlnDepth(s, logger, filter, 1, args...) } -func (l *loggingT) println(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { - buf, file, line := l.header(s, 0) +func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) // if logger is set, we clear the generated header as we rely on the backing // logger implementation to print headers if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { args = filter.Filter(args) } fmt.Fprintln(buf, args...) - l.output(s, logger, buf, 0 /* depth */, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) print(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) print(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { l.printDepth(s, logger, filter, 1, args...) } -func (l *loggingT) printDepth(s severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { args = filter.Filter(args) } fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { + if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) printf(s severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) +func (l *loggingT) printf(s severity.Severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { + l.printfDepth(s, logger, filter, 1, format, args...) 
+} + +func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, format string, args ...interface{}) { + buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { format, args = filter.FilterF(format, args) @@ -750,19 +734,19 @@ func (l *loggingT) printf(s severity, logger *logr.Logger, filter LogFilter, for if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logger, buf, 0 /* depth */, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -783,7 +767,7 @@ func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, dept logger.WithCallDepth(depth+2).Error(err, msg, keysAndValues...) return } - l.printS(err, errorLog, depth+1, msg, keysAndValues...) + l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. @@ -795,140 +779,25 @@ func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg s logger.WithCallDepth(depth+2).Info(msg, keysAndValues...) return } - l.printS(nil, infoLog, depth+1, msg, keysAndValues...) + l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...) } // printS is called from infoS and errorS if loggr is not specified. // set log severity by s -func (l *loggingT) printS(err error, s severity, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { // Only create a new buffer if we don't have one cached. - b := l.getBuffer() + b := l.bufferCache.GetBuffer() // The message is always quoted, even if it contains line breaks. // If developers want multi-line output, they should use a small, fixed // message and put the multi-line output into a value. b.WriteString(strconv.Quote(msg)) if err != nil { - kvListFormat(&b.Buffer, "err", err) + serialize.KVListFormat(&b.Buffer, "err", err) } - kvListFormat(&b.Buffer, keysAndValues...) - l.printDepth(s, logging.logr, nil, depth+1, &b.Buffer) + serialize.KVListFormat(&b.Buffer, keysAndValues...) + l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. 
- l.putBuffer(b) -} - -const missingValue = "(MISSING)" - -func kvListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { - for i := 0; i < len(keysAndValues); i += 2 { - var v interface{} - k := keysAndValues[i] - if i+1 < len(keysAndValues) { - v = keysAndValues[i+1] - } else { - v = missingValue - } - b.WriteByte(' ') - // Keys are assumed to be well-formed according to - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments - // for the sake of performance. Keys with spaces, - // special characters, etc. will break parsing. - if k, ok := k.(string); ok { - // Avoid one allocation when the key is a string, which - // normally it should be. - b.WriteString(k) - } else { - b.WriteString(fmt.Sprintf("%s", k)) - } - - // The type checks are sorted so that more frequently used ones - // come first because that is then faster in the common - // cases. In Kubernetes, ObjectRef (a Stringer) is more common - // than plain strings - // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). - switch v := v.(type) { - case fmt.Stringer: - writeStringValue(b, true, stringerToString(v)) - case string: - writeStringValue(b, true, v) - case error: - writeStringValue(b, true, v.Error()) - case []byte: - // In https://github.com/kubernetes/klog/pull/237 it was decided - // to format byte slices with "%+q". The advantages of that are: - // - readable output if the bytes happen to be printable - // - non-printable bytes get represented as unicode escape - // sequences (\uxxxx) - // - // The downsides are that we cannot use the faster - // strconv.Quote here and that multi-line output is not - // supported. If developers know that a byte array is - // printable and they want multi-line output, they can - // convert the value to string before logging it. - b.WriteByte('=') - b.WriteString(fmt.Sprintf("%+q", v)) - default: - writeStringValue(b, false, fmt.Sprintf("%+v", v)) - } - } -} - -func stringerToString(s fmt.Stringer) (ret string) { - defer func() { - if err := recover(); err != nil { - ret = "nil" - } - }() - ret = s.String() - return -} - -func writeStringValue(b *bytes.Buffer, quote bool, v string) { - data := []byte(v) - index := bytes.IndexByte(data, '\n') - if index == -1 { - b.WriteByte('=') - if quote { - // Simple string, quote quotation marks and non-printable characters. - b.WriteString(strconv.Quote(v)) - return - } - // Non-string with no line breaks. - b.WriteString(v) - return - } - - // Complex multi-line string, show as-is with indention like this: - // I... "hello world" key=< - // line 1 - // line 2 - // > - // - // Tabs indent the lines of the value while the end of string delimiter - // is indented with a space. That has two purposes: - // - visual difference between the two for a human reader because indention - // will be different - // - no ambiguity when some value line starts with the end delimiter - // - // One downside is that the output cannot distinguish between strings that - // end with a line break and those that don't because the end delimiter - // will always be on the next line. - b.WriteString("=<\n") - for index != -1 { - b.WriteByte('\t') - b.Write(data[0 : index+1]) - data = data[index+1:] - index = bytes.IndexByte(data, '\n') - } - if len(data) == 0 { - // String ended with line break, don't add another. - b.WriteString(" >") - } else { - // No line break at end of last line, write rest of string and - // add one. 
- b.WriteByte('\t') - b.Write(data) - b.WriteString("\n >") - } + l.bufferCache.PutBuffer(b) } // redirectBuffer is used to set an alternate destination for the logs @@ -948,36 +817,11 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { return rb.w.Write(bytes) } -// SetLogger will set the backing logr implementation for klog. -// If set, all log lines will be suppressed from the regular Output, and -// redirected to the logr implementation. -// Use as: -// ... -// klog.SetLogger(zapr.NewLogger(zapLog)) -// -// To remove a backing logr implemention, use ClearLogger. Setting an -// empty logger with SetLogger(logr.Logger{}) does not work. -func SetLogger(logr logr.Logger) { - logging.mu.Lock() - defer logging.mu.Unlock() - - logging.logr = &logr -} - -// ClearLogger removes a backing logr implementation if one was set earlier -// with SetLogger. -func ClearLogger() { - logging.mu.Lock() - defer logging.mu.Unlock() - - logging.logr = nil -} - // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() - for s := fatalLog; s >= infoLog; s-- { + for s := severity.FatalLog; s >= severity.InfoLog; s-- { rb := &redirectBuffer{ w: w, } @@ -989,7 +833,7 @@ func SetOutput(w io.Writer) { func SetOutputBySeverity(name string, w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() - sev, ok := severityByName(name) + sev, ok := severity.ByName(name) if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) } @@ -1008,7 +852,7 @@ func LogToStderr(stderr bool) { } // output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { var isLocked = true l.mu.Lock() defer func() { @@ -1020,15 +864,15 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) + buf.Write(dbg.Stacks(false)) } } data := buf.Bytes() if log != nil { // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} - if s == errorLog { - l.logr.WithCallDepth(depth+3).Error(nil, string(data)) + if s == severity.ErrorLog { + logging.logger.WithCallDepth(depth+3).Error(nil, string(data)) } else { log.WithCallDepth(depth + 3).Info(string(data)) } @@ -1042,13 +886,13 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, if logging.logFile != "" { // Since we are using a single log file, all of the items in l.file array // will point to the same file, so just use one of them to write data. - if l.file[infoLog] == nil { - if err := l.createFiles(infoLog); err != nil { + if l.file[severity.InfoLog] == nil { + if err := l.createFiles(severity.InfoLog); err != nil { os.Stderr.Write(data) // Make sure the message appears somewhere. 
l.exit(err) } } - l.file[infoLog].Write(data) + l.file[severity.InfoLog].Write(data) } else { if l.file[s] == nil { if err := l.createFiles(s); err != nil { @@ -1061,48 +905,51 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, l.file[s].Write(data) } else { switch s { - case fatalLog: - l.file[fatalLog].Write(data) + case severity.FatalLog: + l.file[severity.FatalLog].Write(data) fallthrough - case errorLog: - l.file[errorLog].Write(data) + case severity.ErrorLog: + l.file[severity.ErrorLog].Write(data) fallthrough - case warningLog: - l.file[warningLog].Write(data) + case severity.WarningLog: + l.file[severity.WarningLog].Write(data) fallthrough - case infoLog: - l.file[infoLog].Write(data) + case severity.InfoLog: + l.file[severity.InfoLog].Write(data) } } } } - if s == fatalLog { + if s == severity.FatalLog { // If we got here via Exit rather than Fatal, print no stacks. if atomic.LoadUint32(&fatalNoStacks) > 0 { l.mu.Unlock() isLocked = false - timeoutFlush(10 * time.Second) - os.Exit(1) + timeoutFlush(ExitFlushTimeout) + OsExit(1) } // Dump all goroutine stacks before exiting. - trace := stacks(true) - // Write the stack trace for all goroutines to the stderr. - if l.toStderr || l.alsoToStderr || s >= l.stderrThreshold.get() || alsoToStderr { - os.Stderr.Write(trace) + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(dbg.Stacks(false)) } + // Write the stack trace for all goroutines to the files. + trace := dbg.Stacks(true) logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { + for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. f.Write(trace) } } l.mu.Unlock() isLocked = false - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded(&) with 255 anyway. + timeoutFlush(ExitFlushTimeout) + OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. } - l.putBuffer(buf) + l.bufferCache.PutBuffer(buf) if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) @@ -1110,42 +957,6 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, } } -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when klog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - // logExitFunc provides a simple mechanism to override the default behavior // of exiting on error. 
Used in testing and to guarantee we reach a required exit // for fatal logs. Instead, exit could be a function rather than a method but that @@ -1163,7 +974,7 @@ func (l *loggingT) exit(err error) { return } l.flushAll() - os.Exit(2) + OsExit(2) } // syncBuffer joins a bufio.Writer to its underlying file, providing access to the @@ -1174,7 +985,7 @@ type syncBuffer struct { logger *loggingT *bufio.Writer file *os.File - sev severity + sev severity.Severity nbytes uint64 // The number of bytes written to this file maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. } @@ -1220,7 +1031,7 @@ func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { sb.file.Close() } var err error - sb.file, _, err = create(severityName[sb.sev], now, startup) + sb.file, _, err = create(severity.Name[sb.sev], now, startup) if err != nil { return err } @@ -1258,11 +1069,16 @@ const bufferSize = 256 * 1024 // createFiles creates all the log files for severity from sev down to infoLog. // l.mu is held. -func (l *loggingT) createFiles(sev severity) error { +func (l *loggingT) createFiles(sev severity.Severity) error { + interval := l.flushInterval + if interval == 0 { + interval = flushInterval + } + l.flushD.run(interval) now := time.Now() // Files are created in decreasing severity order, so as soon as we find one // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { + for s := sev; s >= severity.InfoLog && l.file[s] == nil; s-- { sb := &syncBuffer{ logger: l, sev: s, @@ -1279,10 +1095,91 @@ func (l *loggingT) createFiles(sev severity) error { const flushInterval = 5 * time.Second // flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() +type flushDaemon struct { + mu sync.Mutex + clock clock.WithTicker + flush func() + stopC chan struct{} + stopDone chan struct{} +} + +// newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a +// clock.RealClock is used. +func newFlushDaemon(flush func(), tickClock clock.WithTicker) *flushDaemon { + if tickClock == nil { + tickClock = clock.RealClock{} + } + return &flushDaemon{ + flush: flush, + clock: tickClock, + } +} + +// run starts a goroutine that periodically calls the daemons flush function. +// Calling run on an already running daemon will have no effect. +func (f *flushDaemon) run(interval time.Duration) { + f.mu.Lock() + defer f.mu.Unlock() + + if f.stopC != nil { // daemon already running + return } + + f.stopC = make(chan struct{}, 1) + f.stopDone = make(chan struct{}, 1) + + ticker := f.clock.NewTicker(interval) + go func() { + defer ticker.Stop() + defer func() { f.stopDone <- struct{}{} }() + for { + select { + case <-ticker.C(): + f.flush() + case <-f.stopC: + f.flush() + return + } + } + }() +} + +// stop stops the running flushDaemon and waits until the daemon has shut down. +// Calling stop on a daemon that isn't running will have no effect. +func (f *flushDaemon) stop() { + f.mu.Lock() + defer f.mu.Unlock() + + if f.stopC == nil { // daemon not running + return + } + + f.stopC <- struct{}{} + <-f.stopDone + + f.stopC = nil + f.stopDone = nil +} + +// isRunning returns true if the flush daemon is running. +func (f *flushDaemon) isRunning() bool { + f.mu.Lock() + defer f.mu.Unlock() + return f.stopC != nil +} + +// StopFlushDaemon stops the flush daemon, if running, and flushes once. +// This prevents klog from leaking goroutines on shutdown. 
After stopping +// the daemon, you can still manually flush buffers again by calling Flush(). +func StopFlushDaemon() { + logging.flushD.stop() +} + +// StartFlushDaemon ensures that the flush daemon runs with the given delay +// between flush calls. If it is already running, it gets restarted. +func StartFlushDaemon(interval time.Duration) { + StopFlushDaemon() + logging.flushD.run(interval) } // lockAndFlushAll is like flushAll but locks l.mu first. @@ -1296,13 +1193,16 @@ func (l *loggingT) lockAndFlushAll() { // l.mu is held. func (l *loggingT) flushAll() { // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { + for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] if file != nil { file.Flush() // ignore error file.Sync() // ignore error } } + if logging.loggerOptions.flush != nil { + logging.loggerOptions.flush() + } } // CopyStandardLogTo arranges for messages written to the Go "log" package's @@ -1313,7 +1213,7 @@ func (l *loggingT) flushAll() { // Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not // recognized, CopyStandardLogTo panics. func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) + sev, ok := severity.ByName(name) if !ok { panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) } @@ -1325,7 +1225,7 @@ func CopyStandardLogTo(name string) { // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. -type logBridge severity +type logBridge severity.Severity // Write parses the standard logging line and passes its components to the // logger for severity(lb). @@ -1349,7 +1249,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) { } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. - logging.printWithFileLine(severity(lb), logging.logr, logging.filter, file, line, true, text) + logging.printWithFileLine(severity.Severity(lb), logging.logger, logging.filter, file, line, true, text) return len(b), nil } @@ -1384,22 +1284,21 @@ func (l *loggingT) setV(pc uintptr) Level { type Verbose struct { enabled bool logr *logr.Logger - filter LogFilter } func newVerbose(level Level, b bool) Verbose { - if logging.logr == nil { - return Verbose{b, nil, logging.filter} + if logging.logger == nil { + return Verbose{b, nil} } - v := logging.logr.V(int(level)) - return Verbose{b, &v, logging.filter} + v := logging.logger.V(int(level)) + return Verbose{b, &v} } // V reports whether verbosity at the call site is at least the requested level. // The returned value is a struct of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either -// if glog.V(2).Enabled() { klog.Info("log this") } +// if klog.V(2).Enabled() { klog.Info("log this") } // or // klog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does @@ -1455,7 +1354,15 @@ func (v Verbose) Enabled() bool { // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v.enabled { - logging.print(infoLog, v.logr, v.filter, args...) + logging.print(severity.InfoLog, v.logr, logging.filter, args...) + } +} + +// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v. +// See the documentation of V for usage. 
+func (v Verbose) InfoDepth(depth int, args ...interface{}) { + if v.enabled { + logging.printDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) } } @@ -1463,7 +1370,15 @@ func (v Verbose) Info(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infoln(args ...interface{}) { if v.enabled { - logging.println(infoLog, v.logr, v.filter, args...) + logging.println(severity.InfoLog, v.logr, logging.filter, args...) + } +} + +// InfolnDepth is equivalent to the global InfolnDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfolnDepth(depth int, args ...interface{}) { + if v.enabled { + logging.printlnDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) } } @@ -1471,7 +1386,15 @@ func (v Verbose) Infoln(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v.enabled { - logging.printf(infoLog, v.logr, v.filter, format, args...) + logging.printf(severity.InfoLog, v.logr, logging.filter, format, args...) + } +} + +// InfofDepth is equivalent to the global InfofDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { + if v.enabled { + logging.printfDepth(severity.InfoLog, v.logr, logging.filter, depth, format, args...) } } @@ -1479,28 +1402,28 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, v.filter, 0, msg, keysAndValues...) + logging.infoS(v.logr, logging.filter, 0, msg, keysAndValues...) } } // InfoSDepth acts as InfoS but uses depth to determine which call frame to log. // InfoSDepth(0, "msg") is the same as InfoS("msg"). func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, logging.filter, depth, msg, keysAndValues...) + logging.infoS(logging.logger, logging.filter, depth, msg, keysAndValues...) } // InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v. // See the documentation of V for usage. func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, v.filter, depth, msg, keysAndValues...) + logging.infoS(v.logr, logging.filter, depth, msg, keysAndValues...) } } // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, 0, msg, args...) + logging.errorS(err, v.logr, logging.filter, 0, msg, args...) } } @@ -1508,32 +1431,44 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, 0, msg, keysAndValues...) + logging.errorS(err, v.logr, logging.filter, 0, msg, keysAndValues...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - logging.print(infoLog, logging.logr, logging.filter, args...) + logging.print(severity.InfoLog, logging.logger, logging.filter, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). 
func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { - logging.println(infoLog, logging.logr, logging.filter, args...) + logging.println(severity.InfoLog, logging.logger, logging.filter, args...) +} + +// InfolnDepth acts as Infoln but uses depth to determine which call frame to log. +// InfolnDepth(0, "msg") is the same as Infoln("msg"). +func InfolnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { - logging.printf(infoLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.InfoLog, logging.logger, logging.filter, format, args...) +} + +// InfofDepth acts as Infof but uses depth to determine which call frame to log. +// InfofDepth(0, "msg", args...) is the same as Infof("msg", args...). +func InfofDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.InfoLog, logging.logger, logging.filter, depth, format, args...) } // InfoS structured logs to the INFO log. @@ -1545,55 +1480,79 @@ func Infof(format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, logging.filter, 0, msg, keysAndValues...) + logging.infoS(logging.logger, logging.filter, 0, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - logging.print(warningLog, logging.logr, logging.filter, args...) + logging.print(severity.WarningLog, logging.logger, logging.filter, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { - logging.println(warningLog, logging.logr, logging.filter, args...) + logging.println(severity.WarningLog, logging.logger, logging.filter, args...) +} + +// WarninglnDepth acts as Warningln but uses depth to determine which call frame to log. +// WarninglnDepth(0, "msg") is the same as Warningln("msg"). +func WarninglnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.WarningLog, logging.logger, logging.filter, format, args...) 
+} + +// WarningfDepth acts as Warningf but uses depth to determine which call frame to log. +// WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...). +func WarningfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.WarningLog, logging.logger, logging.filter, depth, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - logging.print(errorLog, logging.logr, logging.filter, args...) + logging.print(severity.ErrorLog, logging.logger, logging.filter, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { - logging.println(errorLog, logging.logr, logging.filter, args...) + logging.println(severity.ErrorLog, logging.logger, logging.filter, args...) +} + +// ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log. +// ErrorlnDepth(0, "msg") is the same as Errorln("msg"). +func ErrorlnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.ErrorLog, logging.logger, logging.filter, format, args...) +} + +// ErrorfDepth acts as Errorf but uses depth to determine which call frame to log. +// ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...). +func ErrorfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.ErrorLog, logging.logger, logging.filter, depth, format, args...) } // ErrorS structured logs to the ERROR, WARNING, and INFO logs. @@ -1606,71 +1565,108 @@ func Errorf(format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, logging.filter, 0, msg, keysAndValues...) + logging.errorS(err, logging.logger, logging.filter, 0, msg, keysAndValues...) } // ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log. // ErrorSDepth(0, "msg") is the same as ErrorS("msg"). func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, logging.filter, depth, msg, keysAndValues...) + logging.errorS(err, logging.logger, logging.filter, depth, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// prints stack trace(s), then calls OsExit(255). +// +// Stderr only receives a dump of the current goroutine's stack trace. Log files, +// if there are any, receive a dump of the stack traces in all goroutines. 
+// +// Callers who want more control over handling of fatal events may instead use a +// combination of different functions: +// - some info or error logging function, optionally with a stack trace +// value generated by github.com/go-logr/lib/dbg.Backtrace +// - Flush to flush pending log data +// - panic, os.Exit or returning to the caller with an error +// // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { - logging.print(fatalLog, logging.logr, logging.filter, args...) + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { - logging.println(fatalLog, logging.logr, logging.filter, args...) + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) +} + +// FatallnDepth acts as Fatalln but uses depth to determine which call frame to log. +// FatallnDepth(0, "msg") is the same as Fatalln("msg"). +func FatallnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) +} + +// FatalfDepth acts as Fatalf but uses depth to determine which call frame to log. +// FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...). +func FatalfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. // It allows Exit and relatives to use the Fatal logs. var fatalNoStacks uint32 -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, logging.logr, logging.filter, args...) + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) 
+ logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, logging.logr, logging.filter, args...) + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) +} + +// ExitlnDepth acts as Exitln but uses depth to determine which call frame to log. +// ExitlnDepth(0, "msg") is the same as Exitln("msg"). +func ExitlnDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) +} + +// ExitfDepth acts as Exitf but uses depth to determine which call frame to log. +// ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...). +func ExitfDepth(depth int, format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) } // LogFilter is a collection of functions that can filter all logging calls, @@ -1681,79 +1677,10 @@ type LogFilter interface { FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) } +// SetLogFilter installs a filter that is used for all log calls. +// +// Modifying the filter is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. func SetLogFilter(filter LogFilter) { - logging.mu.Lock() - defer logging.mu.Unlock() - logging.filter = filter } - -// ObjectRef references a kubernetes object -type ObjectRef struct { - Name string `json:"name"` - Namespace string `json:"namespace,omitempty"` -} - -func (ref ObjectRef) String() string { - if ref.Namespace != "" { - return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name) - } - return ref.Name -} - -// MarshalLog ensures that loggers with support for structured output will log -// as a struct by removing the String method via a custom type. 
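Editorial aside, not part of this patch: a minimal, hypothetical sketch of how a caller might exercise the klog v2 API that the hunks above update — structured InfoS/ErrorS logging, V()-guarded verbosity, and the StartFlushDaemon/StopFlushDaemon controls introduced by this klog version. The package path and function names come from the diff itself; the messages, values, and flush interval below are placeholders, not code from this repository.

// Hypothetical caller sketch; "kubedns" and the error text are taken from the
// doc-comment examples in the diff above, not from this repository.
package main

import (
	"errors"
	"flag"
	"time"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil) // register -v, -logtostderr, etc. on the default flag set
	flag.Parse()
	defer klog.Flush()

	// Structured logging, as documented for InfoS / ErrorS above.
	klog.InfoS("Pod status updated", "pod", "kubedns", "status", "ready")
	klog.ErrorS(errors.New("timeout"), "Failed to update pod status")

	// Verbosity-guarded logging via V(); Enabled() avoids evaluating arguments
	// when the level is disabled.
	if klog.V(2).Enabled() {
		klog.V(2).InfoS("debug detail", "attempt", 1)
	}

	// Explicit control over the background flush daemon added in this klog
	// version, e.g. to avoid leaking its goroutine on shutdown.
	klog.StartFlushDaemon(5 * time.Second)
	defer klog.StopFlushDaemon()
}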
-func (ref ObjectRef) MarshalLog() interface{} { - type or ObjectRef - return or(ref) -} - -var _ logr.Marshaler = ObjectRef{} - -// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface -// this interface may expand in the future, but will always be a subset of the -// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface -type KMetadata interface { - GetName() string - GetNamespace() string -} - -// KObj returns ObjectRef from ObjectMeta -func KObj(obj KMetadata) ObjectRef { - if obj == nil { - return ObjectRef{} - } - if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() { - return ObjectRef{} - } - - return ObjectRef{ - Name: obj.GetName(), - Namespace: obj.GetNamespace(), - } -} - -// KRef returns ObjectRef from name and namespace -func KRef(namespace, name string) ObjectRef { - return ObjectRef{ - Name: name, - Namespace: namespace, - } -} - -// KObjs returns slice of ObjectRef from an slice of ObjectMeta -func KObjs(arg interface{}) []ObjectRef { - s := reflect.ValueOf(arg) - if s.Kind() != reflect.Slice { - return nil - } - objectRefs := make([]ObjectRef, 0, s.Len()) - for i := 0; i < s.Len(); i++ { - if v, ok := s.Index(i).Interface().(KMetadata); ok { - objectRefs = append(objectRefs, KObj(v)) - } else { - return nil - } - } - return objectRefs -} diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go new file mode 100644 index 000000000000..027a4014af5b --- /dev/null +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -0,0 +1,87 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/serialize" +) + +// NewKlogr returns a logger that is functionally identical to +// klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The +// difference is that it uses a simpler implementation. +func NewKlogr() Logger { + return New(&klogger{}) +} + +// klogger is a subset of klogr/klogr.go. It had to be copied to break an +// import cycle (klogr wants to use klog, and klog wants to use klogr). +type klogger struct { + level int + callDepth int + prefix string + values []interface{} +} + +func (l *klogger) Init(info logr.RuntimeInfo) { + l.callDepth += info.CallDepth +} + +func (l klogger) Info(level int, msg string, kvList ...interface{}) { + merged := serialize.MergeKVs(l.values, kvList) + if l.prefix != "" { + msg = l.prefix + ": " + msg + } + V(Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) +} + +func (l klogger) Enabled(level int) bool { + return V(Level(level)).Enabled() +} + +func (l klogger) Error(err error, msg string, kvList ...interface{}) { + merged := serialize.MergeKVs(l.values, kvList) + if l.prefix != "" { + msg = l.prefix + ": " + msg + } + ErrorSDepth(l.callDepth+1, err, msg, merged...) +} + +// WithName returns a new logr.Logger with the specified name appended. klogr +// uses '/' characters to separate name elements. 
Callers should not pass '/' +// in the provided name string, but this library does not actually enforce that. +func (l klogger) WithName(name string) logr.LogSink { + if len(l.prefix) > 0 { + l.prefix = l.prefix + "/" + } + l.prefix += name + return &l +} + +func (l klogger) WithValues(kvList ...interface{}) logr.LogSink { + l.values = serialize.WithValues(l.values, kvList) + return &l +} + +func (l klogger) WithCallDepth(depth int) logr.LogSink { + l.callDepth += depth + return &l +} + +var _ logr.LogSink = &klogger{} +var _ logr.CallDepthLogSink = &klogger{} diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index 2af4967ca03d..3023d1066e30 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -56,7 +56,7 @@ func writeTraceItemSummary(b *bytes.Buffer, msg string, totalTime time.Duration, b.WriteString(" ") } - b.WriteString(fmt.Sprintf("%vms (%v)", durationToMilliseconds(totalTime), startTime.Format("15:04:00.000"))) + b.WriteString(fmt.Sprintf("%vms (%v)", durationToMilliseconds(totalTime), startTime.Format("15:04:05.000"))) } func durationToMilliseconds(timeDuration time.Duration) int64 { diff --git a/vendor/modules.txt b/vendor/modules.txt index 201df80004db..67892a375ab8 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,44 +1,56 @@ -# cloud.google.com/go v0.100.2 -## explicit; go 1.11 +# cloud.google.com/go v0.107.0 +## explicit; go 1.19 cloud.google.com/go cloud.google.com/go/internal cloud.google.com/go/internal/optional +cloud.google.com/go/internal/pubsub cloud.google.com/go/internal/testutil cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -cloud.google.com/go/longrunning -cloud.google.com/go/longrunning/autogen # cloud.google.com/go/bigtable v1.3.0 ## explicit; go 1.11 cloud.google.com/go/bigtable cloud.google.com/go/bigtable/bttest cloud.google.com/go/bigtable/internal/option -# cloud.google.com/go/compute v1.3.0 -## explicit; go 1.15 +# cloud.google.com/go/compute v1.14.0 +## explicit; go 1.19 +cloud.google.com/go/compute/internal +# cloud.google.com/go/compute/metadata v0.2.2 +## explicit; go 1.19 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v0.1.0 -## explicit; go 1.11 +# cloud.google.com/go/iam v0.8.0 +## explicit; go 1.19 cloud.google.com/go/iam -# cloud.google.com/go/kms v1.0.0 -## explicit; go 1.16 -# cloud.google.com/go/pubsub v1.3.1 -## explicit; go 1.11 +cloud.google.com/go/iam/apiv1/iampb +# cloud.google.com/go/longrunning v0.3.0 +## explicit; go 1.19 +cloud.google.com/go/longrunning +cloud.google.com/go/longrunning/autogen +cloud.google.com/go/longrunning/autogen/longrunningpb +# cloud.google.com/go/pubsub v1.27.1 +## explicit; go 1.19 cloud.google.com/go/pubsub cloud.google.com/go/pubsub/apiv1 +cloud.google.com/go/pubsub/apiv1/pubsubpb +cloud.google.com/go/pubsub/internal cloud.google.com/go/pubsub/internal/distribution +cloud.google.com/go/pubsub/internal/scheduler cloud.google.com/go/pubsub/pstest -# cloud.google.com/go/storage v1.10.0 -## explicit; go 1.11 +# cloud.google.com/go/storage v1.28.1 +## explicit; go 1.19 cloud.google.com/go/storage +cloud.google.com/go/storage/internal +cloud.google.com/go/storage/internal/apiv2 +cloud.google.com/go/storage/internal/apiv2/stubs # github.com/Azure/azure-pipeline-go v0.2.3 ## explicit; go 1.14 github.com/Azure/azure-pipeline-go/pipeline -# github.com/Azure/azure-sdk-for-go v62.0.0+incompatible => github.com/Azure/azure-sdk-for-go v36.2.0+incompatible +# github.com/Azure/azure-sdk-for-go 
v65.0.0+incompatible => github.com/Azure/azure-sdk-for-go v36.2.0+incompatible ## explicit github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network github.com/Azure/azure-sdk-for-go/version -# github.com/Azure/azure-storage-blob-go v0.13.0 => github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e698eff68e +# github.com/Azure/azure-storage-blob-go v0.15.0 => github.com/MasslessParticle/azure-storage-blob-go v0.14.1-0.20220216145902-b5e698eff68e ## explicit; go 1.15 github.com/Azure/azure-storage-blob-go/azblob # github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 @@ -48,18 +60,18 @@ github.com/Azure/go-ansiterm/winterm # github.com/Azure/go-autorest v14.2.0+incompatible ## explicit github.com/Azure/go-autorest -# github.com/Azure/go-autorest/autorest v0.11.24 +# github.com/Azure/go-autorest/autorest v0.11.28 ## explicit; go 1.15 github.com/Azure/go-autorest/autorest github.com/Azure/go-autorest/autorest/azure -# github.com/Azure/go-autorest/autorest/adal v0.9.18 +# github.com/Azure/go-autorest/autorest/adal v0.9.21 ## explicit; go 1.15 github.com/Azure/go-autorest/autorest/adal -# github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 -## explicit; go 1.12 +# github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 +## explicit; go 1.15 github.com/Azure/go-autorest/autorest/azure/auth -# github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 -## explicit; go 1.12 +# github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 +## explicit; go 1.15 github.com/Azure/go-autorest/autorest/azure/cli # github.com/Azure/go-autorest/autorest/date v0.3.0 ## explicit; go 1.12 @@ -85,21 +97,15 @@ github.com/Masterminds/semver/v3 # github.com/Masterminds/sprig/v3 v3.2.2 ## explicit; go 1.13 github.com/Masterminds/sprig/v3 -# github.com/Microsoft/go-winio v0.4.17 -## explicit; go 1.12 +# github.com/Microsoft/go-winio v0.5.2 +## explicit; go 1.13 github.com/Microsoft/go-winio github.com/Microsoft/go-winio/pkg/guid # github.com/NYTimes/gziphandler v1.1.1 ## explicit; go 1.11 github.com/NYTimes/gziphandler -# github.com/PuerkitoBio/purell v1.1.1 -## explicit -github.com/PuerkitoBio/purell -# github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 -## explicit -github.com/PuerkitoBio/urlesc -# github.com/Shopify/sarama v1.30.0 -## explicit; go 1.13 +# github.com/Shopify/sarama v1.37.2 +## explicit; go 1.17 github.com/Shopify/sarama # github.com/Workiva/go-datastructures v1.0.53 ## explicit; go 1.15 @@ -120,7 +126,7 @@ github.com/alicebob/gopher-json github.com/alicebob/miniredis/v2 github.com/alicebob/miniredis/v2/geohash github.com/alicebob/miniredis/v2/server -# github.com/armon/go-metrics v0.3.9 +# github.com/armon/go-metrics v0.3.10 ## explicit; go 1.12 github.com/armon/go-metrics github.com/armon/go-metrics/prometheus @@ -203,17 +209,12 @@ github.com/c2h5oh/datasize # github.com/cespare/xxhash v1.1.0 ## explicit github.com/cespare/xxhash -# github.com/cespare/xxhash/v2 v2.1.2 +# github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 # github.com/cloudflare/cloudflare-go v0.27.0 => github.com/cyriltovena/cloudflare-go v0.27.1-0.20211118103540-ff77400bcb93 ## explicit; go 1.15 github.com/cloudflare/cloudflare-go -# github.com/containerd/containerd v1.5.9 -## explicit; go 1.16 -github.com/containerd/containerd/errdefs -github.com/containerd/containerd/log -github.com/containerd/containerd/platforms # github.com/containerd/fifo v1.0.0 ## 
explicit; go 1.13 github.com/containerd/fifo @@ -221,8 +222,8 @@ github.com/containerd/fifo ## explicit github.com/coreos/etcd/pkg/fileutil github.com/coreos/etcd/pkg/tlsutil -# github.com/coreos/go-semver v0.3.0 -## explicit +# github.com/coreos/go-semver v0.3.1 +## explicit; go 1.8 github.com/coreos/go-semver/semver # github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf ## explicit @@ -255,12 +256,11 @@ github.com/digitalocean/godo/metrics # github.com/dimchansky/utfbom v1.1.1 ## explicit github.com/dimchansky/utfbom -# github.com/docker/distribution v2.7.1+incompatible +# github.com/docker/distribution v2.8.1+incompatible ## explicit github.com/docker/distribution/digestset github.com/docker/distribution/reference -github.com/docker/distribution/registry/api/errcode -# github.com/docker/docker v20.10.12+incompatible +# github.com/docker/docker v20.10.17+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types @@ -312,7 +312,7 @@ github.com/docker/go-metrics # github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8 ## explicit github.com/docker/go-plugins-helpers/sdk -# github.com/docker/go-units v0.4.0 +# github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units # github.com/drone/envsubst v1.0.2 @@ -323,8 +323,8 @@ github.com/drone/envsubst/path # github.com/dustin/go-humanize v1.0.0 ## explicit github.com/dustin/go-humanize -# github.com/eapache/go-resiliency v1.2.0 -## explicit +# github.com/eapache/go-resiliency v1.3.0 +## explicit; go 1.13 github.com/eapache/go-resiliency/breaker # github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 ## explicit @@ -361,14 +361,14 @@ github.com/fsouza/fake-gcs-server/internal/backend ## explicit; go 1.17 github.com/go-kit/kit/log github.com/go-kit/kit/log/level -# github.com/go-kit/log v0.2.0 +# github.com/go-kit/log v0.2.1 ## explicit; go 1.17 github.com/go-kit/log github.com/go-kit/log/level # github.com/go-logfmt/logfmt v0.5.1 ## explicit; go 1.17 github.com/go-logfmt/logfmt -# github.com/go-logr/logr v1.2.2 +# github.com/go-logr/logr v1.2.3 ## explicit; go 1.16 github.com/go-logr/logr github.com/go-logr/logr/funcr @@ -385,9 +385,10 @@ github.com/go-openapi/errors # github.com/go-openapi/jsonpointer v0.19.5 ## explicit; go 1.13 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.19.5 +# github.com/go-openapi/jsonreference v0.20.0 ## explicit; go 1.13 github.com/go-openapi/jsonreference +github.com/go-openapi/jsonreference/internal # github.com/go-openapi/loads v0.20.2 ## explicit; go 1.13 github.com/go-openapi/loads @@ -400,14 +401,14 @@ github.com/go-openapi/spec # github.com/go-openapi/strfmt v0.21.2 ## explicit; go 1.13 github.com/go-openapi/strfmt -# github.com/go-openapi/swag v0.19.15 -## explicit; go 1.11 +# github.com/go-openapi/swag v0.22.1 +## explicit; go 1.18 github.com/go-openapi/swag # github.com/go-openapi/validate v0.20.2 ## explicit; go 1.14 github.com/go-openapi/validate -# github.com/go-redis/redis/v8 v8.11.4 -## explicit; go 1.13 +# github.com/go-redis/redis/v8 v8.11.5 +## explicit; go 1.17 github.com/go-redis/redis/v8 github.com/go-redis/redis/v8/internal github.com/go-redis/redis/v8/internal/hashtag @@ -416,9 +417,6 @@ github.com/go-redis/redis/v8/internal/pool github.com/go-redis/redis/v8/internal/proto github.com/go-redis/redis/v8/internal/rand github.com/go-redis/redis/v8/internal/util -# github.com/go-stack/stack v1.8.0 -## explicit -github.com/go-stack/stack # github.com/go-zookeeper/zk v1.0.2 ## explicit; 
go 1.13 github.com/go-zookeeper/zk @@ -445,14 +443,15 @@ github.com/gogo/protobuf/types # github.com/gogo/status v1.1.0 ## explicit; go 1.12 github.com/gogo/status -# github.com/golang-jwt/jwt/v4 v4.2.0 -## explicit; go 1.15 +# github.com/golang-jwt/jwt/v4 v4.4.2 +## explicit; go 1.16 github.com/golang-jwt/jwt/v4 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru # github.com/golang/protobuf v1.5.2 ## explicit; go 1.9 +github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any @@ -466,8 +465,8 @@ github.com/golang/snappy # github.com/google/btree v1.0.1 ## explicit; go 1.12 github.com/google/btree -# github.com/google/go-cmp v0.5.7 -## explicit; go 1.11 +# github.com/google/go-cmp v0.5.9 +## explicit; go 1.13 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags @@ -476,29 +475,35 @@ github.com/google/go-cmp/cmp/internal/value # github.com/google/go-querystring v1.0.0 ## explicit github.com/google/go-querystring/query -# github.com/google/gofuzz v1.1.0 +# github.com/google/gofuzz v1.2.0 ## explicit; go 1.12 github.com/google/gofuzz +github.com/google/gofuzz/bytesource # github.com/google/pprof v0.0.0-20220218203455-0368bd9e19a7 ## explicit; go 1.14 github.com/google/pprof/profile # github.com/google/renameio/v2 v2.0.0 ## explicit; go 1.13 github.com/google/renameio/v2 -# github.com/google/uuid v1.2.0 +# github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid -# github.com/googleapis/gax-go/v2 v2.1.1 -## explicit; go 1.11 +# github.com/googleapis/enterprise-certificate-proxy v0.2.1 +## explicit; go 1.19 +github.com/googleapis/enterprise-certificate-proxy/client +github.com/googleapis/enterprise-certificate-proxy/client/util +# github.com/googleapis/gax-go/v2 v2.7.0 +## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto +github.com/googleapis/gax-go/v2/internal # github.com/googleapis/gnostic v0.4.1 ## explicit; go 1.12 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions github.com/googleapis/gnostic/openapiv2 -# github.com/gophercloud/gophercloud v0.24.0 +# github.com/gophercloud/gophercloud v1.0.0 ## explicit; go 1.14 github.com/gophercloud/gophercloud github.com/gophercloud/gophercloud/openstack @@ -515,7 +520,7 @@ github.com/gophercloud/gophercloud/pagination # github.com/gorilla/mux v1.8.0 ## explicit; go 1.12 github.com/gorilla/mux -# github.com/gorilla/websocket v1.4.2 +# github.com/gorilla/websocket v1.5.0 ## explicit; go 1.12 github.com/gorilla/websocket # github.com/grafana/dskit v0.0.0-20220331160727-49faf69f72ca @@ -567,16 +572,16 @@ github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc # github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed ## explicit github.com/hailocab/go-hostpool -# github.com/hashicorp/consul/api v1.12.0 +# github.com/hashicorp/consul/api v1.18.0 ## explicit; go 1.12 github.com/hashicorp/consul/api -# github.com/hashicorp/errwrap v1.0.0 +# github.com/hashicorp/errwrap v1.1.0 ## explicit github.com/hashicorp/errwrap # github.com/hashicorp/go-cleanhttp v0.5.2 ## explicit; go 1.13 github.com/hashicorp/go-cleanhttp -# github.com/hashicorp/go-hclog v0.16.2 +# github.com/hashicorp/go-hclog v1.2.2 ## explicit; go 1.13 github.com/hashicorp/go-hclog # github.com/hashicorp/go-immutable-radix v1.3.1 @@ -585,8 +590,8 @@ 
github.com/hashicorp/go-immutable-radix # github.com/hashicorp/go-msgpack v0.5.5 ## explicit github.com/hashicorp/go-msgpack/codec -# github.com/hashicorp/go-multierror v1.1.0 -## explicit; go 1.14 +# github.com/hashicorp/go-multierror v1.1.1 +## explicit; go 1.13 github.com/hashicorp/go-multierror # github.com/hashicorp/go-rootcerts v1.0.2 ## explicit; go 1.12 @@ -594,17 +599,17 @@ github.com/hashicorp/go-rootcerts # github.com/hashicorp/go-sockaddr v1.0.2 ## explicit github.com/hashicorp/go-sockaddr -# github.com/hashicorp/go-uuid v1.0.2 +# github.com/hashicorp/go-uuid v1.0.3 ## explicit github.com/hashicorp/go-uuid # github.com/hashicorp/golang-lru v0.5.4 ## explicit; go 1.12 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru -# github.com/hashicorp/memberlist v0.3.0 +# github.com/hashicorp/memberlist v0.5.0 ## explicit; go 1.12 github.com/hashicorp/memberlist -# github.com/hashicorp/serf v0.9.6 +# github.com/hashicorp/serf v0.10.1 ## explicit; go 1.12 github.com/hashicorp/serf/coordinate # github.com/hpcloud/tail v1.0.0 => github.com/grafana/tail v0.0.0-20201004203643-7aa4e4a91f03 @@ -614,7 +619,7 @@ github.com/hpcloud/tail/ratelimiter github.com/hpcloud/tail/util github.com/hpcloud/tail/watch github.com/hpcloud/tail/winfile -# github.com/huandu/xstrings v1.3.1 +# github.com/huandu/xstrings v1.3.2 ## explicit; go 1.12 github.com/huandu/xstrings # github.com/imdario/mergo v0.3.12 @@ -627,8 +632,8 @@ github.com/influxdata/go-syslog/v3/common github.com/influxdata/go-syslog/v3/nontransparent github.com/influxdata/go-syslog/v3/octetcounting github.com/influxdata/go-syslog/v3/rfc5424 -# github.com/influxdata/telegraf v1.16.3 -## explicit; go 1.15 +# github.com/influxdata/telegraf v1.25.2 +## explicit; go 1.19 github.com/influxdata/telegraf github.com/influxdata/telegraf/plugins/inputs # github.com/jcmturner/aescts/v2 v2.0.0 @@ -637,12 +642,12 @@ github.com/jcmturner/aescts/v2 # github.com/jcmturner/dnsutils/v2 v2.0.0 ## explicit; go 1.13 github.com/jcmturner/dnsutils/v2 -# github.com/jcmturner/gofork v1.0.0 -## explicit +# github.com/jcmturner/gofork v1.7.6 +## explicit; go 1.7 github.com/jcmturner/gofork/encoding/asn1 github.com/jcmturner/gofork/x/crypto/pbkdf2 -# github.com/jcmturner/gokrb5/v8 v8.4.2 -## explicit; go 1.14 +# github.com/jcmturner/gokrb5/v8 v8.4.3 +## explicit; go 1.16 github.com/jcmturner/gokrb5/v8/asn1tools github.com/jcmturner/gokrb5/v8/client github.com/jcmturner/gokrb5/v8/config @@ -695,13 +700,14 @@ github.com/json-iterator/go # github.com/julienschmidt/httprouter v1.3.0 ## explicit; go 1.7 github.com/julienschmidt/httprouter -# github.com/klauspost/compress v1.14.1 -## explicit; go 1.15 +# github.com/klauspost/compress v1.15.15 +## explicit; go 1.17 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/gzip github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash @@ -715,24 +721,24 @@ github.com/klauspost/pgzip ## explicit github.com/leodido/ragel-machinery github.com/leodido/ragel-machinery/parser -# github.com/mailru/easyjson v0.7.6 +# github.com/mailru/easyjson v0.7.7 ## explicit; go 1.12 github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter -# github.com/mattn/go-colorable v0.1.9 -## explicit; go 1.13 +# github.com/mattn/go-colorable v0.1.13 +## explicit; go 
1.15 github.com/mattn/go-colorable # github.com/mattn/go-ieproxy v0.0.1 ## explicit; go 1.14 github.com/mattn/go-ieproxy -# github.com/mattn/go-isatty v0.0.14 -## explicit; go 1.12 +# github.com/mattn/go-isatty v0.0.16 +## explicit; go 1.15 github.com/mattn/go-isatty -# github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 +# github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/miekg/dns v1.1.46 +# github.com/miekg/dns v1.1.50 ## explicit; go 1.14 github.com/miekg/dns # github.com/minio/md5-simd v1.1.0 @@ -754,16 +760,16 @@ github.com/minio/minio-go/v7/pkg/tags # github.com/minio/sha256-simd v0.1.1 ## explicit; go 1.12 github.com/minio/sha256-simd -# github.com/mitchellh/copystructure v1.0.0 -## explicit +# github.com/mitchellh/copystructure v1.2.0 +## explicit; go 1.15 github.com/mitchellh/copystructure # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir -# github.com/mitchellh/mapstructure v1.4.3 +# github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure -# github.com/mitchellh/reflectwalk v1.0.1 +# github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk # github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 @@ -794,7 +800,7 @@ github.com/oklog/ulid # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.0.2 +# github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 ## explicit github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 @@ -804,16 +810,12 @@ github.com/opentracing-contrib/go-grpc # github.com/opentracing-contrib/go-stdlib v1.0.0 ## explicit; go 1.14 github.com/opentracing-contrib/go-stdlib/nethttp -# github.com/opentracing/opentracing-go v1.2.0 +# github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b ## explicit; go 1.14 github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log -# github.com/pierrec/lz4 v2.6.1+incompatible -## explicit -github.com/pierrec/lz4 -github.com/pierrec/lz4/internal/xxh32 -# github.com/pierrec/lz4/v4 v4.1.12 +# github.com/pierrec/lz4/v4 v4.1.17 ## explicit; go 1.14 github.com/pierrec/lz4/v4 github.com/pierrec/lz4/v4/internal/lz4block @@ -829,8 +831,8 @@ github.com/pmezard/go-difflib/difflib # github.com/prometheus/alertmanager v0.23.1-0.20210914172521-e35efbddb66a ## explicit; go 1.16 github.com/prometheus/alertmanager/api/v2/models -# github.com/prometheus/client_golang v1.12.1 -## explicit; go 1.13 +# github.com/prometheus/client_golang v1.14.0 +## explicit; go 1.17 github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 github.com/prometheus/client_golang/prometheus @@ -841,11 +843,11 @@ github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/push github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil/promlint -# github.com/prometheus/client_model v0.2.0 +# github.com/prometheus/client_model v0.3.0 ## explicit; go 1.9 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.32.1 -## explicit; go 1.13 +# github.com/prometheus/common v0.39.0 +## explicit; go 1.17 github.com/prometheus/common/config github.com/prometheus/common/expfmt 
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg @@ -858,8 +860,8 @@ github.com/prometheus/common/sigv4 # github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 ## explicit; go 1.13 github.com/prometheus/node_exporter/https -# github.com/prometheus/procfs v0.7.3 -## explicit; go 1.13 +# github.com/prometheus/procfs v0.8.0 +## explicit; go 1.17 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util @@ -925,6 +927,8 @@ github.com/prometheus/prometheus/web/api/v1 # github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 ## explicit github.com/rcrowley/go-metrics +# github.com/rogpeppe/go-internal v1.9.0 +## explicit; go 1.17 # github.com/rs/xid v1.2.1 ## explicit github.com/rs/xid @@ -948,7 +952,7 @@ github.com/shurcooL/httpfs/vfsutil # github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 ## explicit github.com/shurcooL/vfsgen -# github.com/sirupsen/logrus v1.8.1 +# github.com/sirupsen/logrus v1.9.0 ## explicit; go 1.13 github.com/sirupsen/logrus # github.com/sony/gobreaker v0.4.1 @@ -964,10 +968,10 @@ github.com/spf13/cast # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/stretchr/objx v0.2.0 +# github.com/stretchr/objx v0.5.0 ## explicit; go 1.12 github.com/stretchr/objx -# github.com/stretchr/testify v1.7.0 +# github.com/stretchr/testify v1.8.1 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/mock @@ -1041,10 +1045,10 @@ github.com/weaveworks/promrus # github.com/xdg-go/pbkdf2 v1.0.0 ## explicit; go 1.9 github.com/xdg-go/pbkdf2 -# github.com/xdg-go/scram v1.0.2 +# github.com/xdg-go/scram v1.1.1 ## explicit; go 1.11 github.com/xdg-go/scram -# github.com/xdg-go/stringprep v1.0.2 +# github.com/xdg-go/stringprep v1.0.3 ## explicit; go 1.11 github.com/xdg-go/stringprep # github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da @@ -1059,7 +1063,7 @@ go.etcd.io/bbolt # go.etcd.io/etcd v3.3.25+incompatible ## explicit go.etcd.io/etcd/pkg/transport -# go.etcd.io/etcd/api/v3 v3.5.0 +# go.etcd.io/etcd/api/v3 v3.5.4 ## explicit; go 1.16 go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/etcdserverpb @@ -1078,8 +1082,8 @@ go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/internal/endpoint go.etcd.io/etcd/client/v3/internal/resolver -# go.mongodb.org/mongo-driver v1.7.5 -## explicit; go 1.10 +# go.mongodb.org/mongo-driver v1.11.1 +## explicit; go 1.13 go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec go.mongodb.org/mongo-driver/bson/bsonoptions @@ -1087,7 +1091,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/x/bsonx/bsoncore -# go.opencensus.io v0.23.0 +# go.opencensus.io v0.24.0 ## explicit; go 1.13 go.opencensus.io go.opencensus.io/internal @@ -1109,45 +1113,44 @@ go.opencensus.io/trace/tracestate # go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 ## explicit; go 1.16 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp -# go.opentelemetry.io/otel v1.4.1 -## explicit; go 1.16 +# go.opentelemetry.io/otel v1.12.0 +## explicit; go 1.18 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage go.opentelemetry.io/otel/codes go.opentelemetry.io/otel/internal +go.opentelemetry.io/otel/internal/attribute go.opentelemetry.io/otel/internal/baggage 
go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation +go.opentelemetry.io/otel/semconv/internal go.opentelemetry.io/otel/semconv/v1.7.0 -# go.opentelemetry.io/otel/internal/metric v0.27.0 -## explicit; go 1.16 -go.opentelemetry.io/otel/internal/metric/global -go.opentelemetry.io/otel/internal/metric/registry -# go.opentelemetry.io/otel/metric v0.27.0 -## explicit; go 1.16 +# go.opentelemetry.io/otel/metric v0.35.0 +## explicit; go 1.18 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/global -go.opentelemetry.io/otel/metric/number -go.opentelemetry.io/otel/metric/sdkapi +go.opentelemetry.io/otel/metric/instrument +go.opentelemetry.io/otel/metric/internal/global go.opentelemetry.io/otel/metric/unit -# go.opentelemetry.io/otel/trace v1.4.1 -## explicit; go 1.16 +# go.opentelemetry.io/otel/trace v1.12.0 +## explicit; go 1.18 go.opentelemetry.io/otel/trace -# go.uber.org/atomic v1.9.0 -## explicit; go 1.13 +# go.uber.org/atomic v1.10.0 +## explicit; go 1.18 go.uber.org/atomic # go.uber.org/goleak v1.1.12 ## explicit; go 1.13 go.uber.org/goleak go.uber.org/goleak/internal/stack -# go.uber.org/multierr v1.7.0 -## explicit; go 1.14 +# go.uber.org/multierr v1.9.0 +## explicit; go 1.19 go.uber.org/multierr -# go.uber.org/zap v1.19.1 -## explicit; go 1.13 +# go.uber.org/zap v1.24.0 +## explicit; go 1.19 go.uber.org/zap go.uber.org/zap/buffer +go.uber.org/zap/internal go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit @@ -1159,7 +1162,7 @@ go4.org/intern # go4.org/unsafe/assume-no-moving-gc v0.0.0-20201222180813-1025295fd063 ## explicit; go 1.11 go4.org/unsafe/assume-no-moving-gc -# golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 +# golang.org/x/crypto v0.3.0 ## explicit; go 1.17 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -1171,10 +1174,10 @@ golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/scrypt golang.org/x/crypto/sha3 -# golang.org/x/mod v0.5.1 +# golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 ## explicit; go 1.17 golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd +# golang.org/x/net v0.5.0 ## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/context @@ -1194,8 +1197,8 @@ golang.org/x/net/netutil golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 -## explicit; go 1.11 +# golang.org/x/oauth2 v0.3.0 +## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/clientcredentials @@ -1204,11 +1207,11 @@ golang.org/x/oauth2/google/internal/externalaccount golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c +# golang.org/x/sync v0.1.0 ## explicit golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20220222172238-00053529121e +# golang.org/x/sys v0.4.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -1218,24 +1221,24 @@ golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc/eventlog -# golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 +# golang.org/x/term v0.4.0 ## explicit; go 1.17 golang.org/x/term -# golang.org/x/text v0.3.7 +# golang.org/x/text v0.6.0 ## explicit; go 1.17 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm 
-golang.org/x/text/width -# golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 +# golang.org/x/time v0.1.0 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.1.9 -## explicit; go 1.17 +# golang.org/x/tools v0.1.12 +## explicit; go 1.18 golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/gcimporter golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/go/internal/pkgbits golang.org/x/tools/go/packages golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core @@ -1245,16 +1248,18 @@ golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal -# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 -## explicit; go 1.11 +# golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 +## explicit; go 1.17 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.70.0 -## explicit; go 1.15 +# google.golang.org/api v0.104.0 +## explicit; go 1.19 google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/compute/v1 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport +google.golang.org/api/iamcredentials/v1 +google.golang.org/api/impersonate google.golang.org/api/internal google.golang.org/api/internal/gensupport google.golang.org/api/internal/impersonate @@ -1264,6 +1269,7 @@ google.golang.org/api/option google.golang.org/api/option/internaloption google.golang.org/api/storage/v1 google.golang.org/api/support/bundler +google.golang.org/api/transport google.golang.org/api/transport/cert google.golang.org/api/transport/grpc google.golang.org/api/transport/http @@ -1283,8 +1289,9 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20220222154240-daf995802d7b -## explicit; go 1.15 +# google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 +## explicit; go 1.19 +google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/bigtable/admin/v2 google.golang.org/genproto/googleapis/bigtable/v2 @@ -1294,10 +1301,11 @@ google.golang.org/genproto/googleapis/pubsub/v1 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status +google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.44.0 -## explicit; go 1.14 +# google.golang.org/grpc v1.52.3 +## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -1308,6 +1316,7 @@ google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials @@ -1328,6 +1337,7 @@ google.golang.org/grpc/grpclog google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/buffer @@ -1340,6 +1350,7 @@ 
google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough @@ -1359,8 +1370,8 @@ google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap google.golang.org/grpc/test/bufconn -# google.golang.org/protobuf v1.27.1 -## explicit; go 1.9 +# google.golang.org/protobuf v1.28.1 +## explicit; go 1.11 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -1411,7 +1422,7 @@ gopkg.in/fsnotify/fsnotify.v1 # gopkg.in/inf.v0 v0.9.1 ## explicit gopkg.in/inf.v0 -# gopkg.in/ini.v1 v1.57.0 +# gopkg.in/ini.v1 v1.67.0 ## explicit gopkg.in/ini.v1 # gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 @@ -1420,13 +1431,13 @@ gopkg.in/tomb.v1 # gopkg.in/yaml.v2 v2.4.0 ## explicit; go 1.15 gopkg.in/yaml.v2 -# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b +# gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 # inet.af/netaddr v0.0.0-20210707202901-70468d781e6c ## explicit; go 1.12 inet.af/netaddr -# k8s.io/api v0.22.7 => k8s.io/api v0.21.0 +# k8s.io/api v0.25.3 => k8s.io/api v0.21.0 ## explicit; go 1.16 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 @@ -1471,7 +1482,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.22.7 => k8s.io/apimachinery v0.21.0 +# k8s.io/apimachinery v0.25.3 => k8s.io/apimachinery v0.21.0 ## explicit; go 1.16 k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -1629,10 +1640,15 @@ k8s.io/client-go/util/workqueue # k8s.io/klog v1.0.0 ## explicit; go 1.12 k8s.io/klog -# k8s.io/klog/v2 v2.40.1 +# k8s.io/klog/v2 v2.70.1 ## explicit; go 1.13 k8s.io/klog/v2 -# k8s.io/utils v0.0.0-20201110183641-67b214c5f920 +k8s.io/klog/v2/internal/buffer +k8s.io/klog/v2/internal/clock +k8s.io/klog/v2/internal/dbg +k8s.io/klog/v2/internal/serialize +k8s.io/klog/v2/internal/severity +# k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed ## explicit; go 1.12 k8s.io/utils/buffer k8s.io/utils/integer @@ -1641,7 +1657,7 @@ k8s.io/utils/trace ## explicit; go 1.12 rsc.io/binaryregexp rsc.io/binaryregexp/syntax -# sigs.k8s.io/structured-merge-diff/v4 v4.1.0 +# sigs.k8s.io/structured-merge-diff/v4 v4.2.3 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/schema diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go index 01103b38a90a..7e5dc7582744 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go @@ -16,7 +16,9 @@ limitations under the License. package schema -import "sync" +import ( + "sync" +) // Schema is a list of named types. // @@ -27,6 +29,11 @@ type Schema struct { once sync.Once m map[string]TypeDef + + lock sync.Mutex + // Cached results of resolving type references to atoms. Only stores + // type references which require fields of Atom to be overriden. + resolvedTypes map[TypeRef]Atom } // A TypeSpecifier references a particular type in a schema. @@ -48,6 +55,12 @@ type TypeRef struct { // Either the name or one member of Atom should be set. 
NamedType *string `yaml:"namedType,omitempty"` Inlined Atom `yaml:",inline,omitempty"` + + // If this reference refers to a map-type or list-type, this field overrides + // the `ElementRelationship` of the referred type when resolved. + // If this field is nil, then it has no effect. + // See `Map` and `List` for more information about `ElementRelationship` + ElementRelationship *ElementRelationship `yaml:"elementRelationship,omitempty"` } // Atom represents the smallest possible pieces of the type system. @@ -88,11 +101,11 @@ const ( // Map is a key-value pair. Its default semantics are the same as an // associative list, but: -// * It is serialized differently: +// - It is serialized differently: // map: {"k": {"value": "v"}} // list: [{"key": "k", "value": "v"}] -// * Keys must be string typed. -// * Keys can't have multiple components. +// - Keys must be string typed. +// - Keys can't have multiple components. // // Optionally, maps may be atomic (for example, imagine representing an RGB // color value--it doesn't make sense to have different actors own the R and G @@ -146,6 +159,31 @@ func (m *Map) FindField(name string) (StructField, bool) { return sf, ok } +// CopyInto this instance of Map into the other +// If other is nil this method does nothing. +// If other is already initialized, overwrites it with this instance +// Warning: Not thread safe +func (m *Map) CopyInto(dst *Map) { + if dst == nil { + return + } + + // Map type is considered immutable so sharing references + dst.Fields = m.Fields + dst.ElementType = m.ElementType + dst.Unions = m.Unions + dst.ElementRelationship = m.ElementRelationship + + if m.m != nil { + // If cache is non-nil then the once token had been consumed. + // Must reset token and use it again to ensure same semantics. + dst.once = sync.Once{} + dst.once.Do(func() { + dst.m = m.m + }) + } +} + // UnionFields are mapping between the fields that are part of the union and // their discriminated value. The discriminated value has to be set, and // should not conflict with other discriminated value in the list. @@ -244,18 +282,93 @@ func (s *Schema) FindNamedType(name string) (TypeDef, bool) { return t, ok } +func (s *Schema) resolveNoOverrides(tr TypeRef) (Atom, bool) { + result := Atom{} + + if tr.NamedType != nil { + t, ok := s.FindNamedType(*tr.NamedType) + if !ok { + return Atom{}, false + } + + result = t.Atom + } else { + result = tr.Inlined + } + + return result, true +} + // Resolve is a convenience function which returns the atom referenced, whether // it is inline or named. Returns (Atom{}, false) if the type can't be resolved. // // This allows callers to not care about the difference between a (possibly // inlined) reference and a definition. 
func (s *Schema) Resolve(tr TypeRef) (Atom, bool) { - if tr.NamedType != nil { - t, ok := s.FindNamedType(*tr.NamedType) - if !ok { + // If this is a plain reference with no overrides, just return the type + if tr.ElementRelationship == nil { + return s.resolveNoOverrides(tr) + } + + s.lock.Lock() + defer s.lock.Unlock() + + if s.resolvedTypes == nil { + s.resolvedTypes = make(map[TypeRef]Atom) + } + + var result Atom + var exists bool + + // Return cached result if available + // If not, calculate result and cache it + if result, exists = s.resolvedTypes[tr]; !exists { + if result, exists = s.resolveNoOverrides(tr); exists { + // Allow field-level electives to override the referred type's modifiers + switch { + case result.Map != nil: + mapCopy := Map{} + result.Map.CopyInto(&mapCopy) + mapCopy.ElementRelationship = *tr.ElementRelationship + result.Map = &mapCopy + case result.List != nil: + listCopy := *result.List + listCopy.ElementRelationship = *tr.ElementRelationship + result.List = &listCopy + case result.Scalar != nil: + return Atom{}, false + default: + return Atom{}, false + } + } else { return Atom{}, false } - return t.Atom, true + + // Save result. If it is nil, that is also recorded as not existing. + s.resolvedTypes[tr] = result + } + + return result, true +} + +// Clones this instance of Schema into the other +// If other is nil this method does nothing. +// If other is already initialized, overwrites it with this instance +// Warning: Not thread safe +func (s *Schema) CopyInto(dst *Schema) { + if dst == nil { + return + } + + // Schema type is considered immutable so sharing references + dst.Types = s.Types + + if s.m != nil { + // If cache is non-nil then the once token had been consumed. + // Must reset token and use it again to ensure same semantics. 
+ dst.once = sync.Once{} + dst.once.Do(func() { + dst.m = s.m + }) } - return tr.Inlined, true } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go index 4c303eecc54d..b668eff8339d 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go @@ -52,6 +52,9 @@ func (a *TypeRef) Equals(b *TypeRef) bool { } //return true } + if a.ElementRelationship != b.ElementRelationship { + return false + } return a.Inlined.Equals(&b.Inlined) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go index bb60e2a5fda9..7d64d1308ceb 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go @@ -66,6 +66,9 @@ var SchemaSchemaYAML = `types: - name: untyped type: namedType: untyped + - name: elementRelationship + type: + scalar: string - name: scalar scalar: string - name: map diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go index 6b2b2cb4a1e7..19c77334f6cc 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go @@ -105,7 +105,11 @@ type atomHandler interface { func resolveSchema(s *schema.Schema, tr schema.TypeRef, v value.Value, ah atomHandler) ValidationErrors { a, ok := s.Resolve(tr) if !ok { - return errorf("schema error: no type found matching: %v", *tr.NamedType) + typeName := "inlined type" + if tr.NamedType != nil { + typeName = *tr.NamedType + } + return errorf("schema error: no type found matching: %v", typeName) } a = deduceAtom(a, v) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go index 7e20f4083a72..913644083076 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go @@ -17,8 +17,6 @@ limitations under the License. package typed import ( - "math" - "sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/schema" "sigs.k8s.io/structured-merge-diff/v4/value" @@ -82,7 +80,12 @@ func (w *mergingWalker) merge(prefixFn func() string) (errs ValidationErrors) { alhs := deduceAtom(a, w.lhs) arhs := deduceAtom(a, w.rhs) - if alhs.Equals(&arhs) { + + // deduceAtom does not fix the type for nil values + // nil is a wildcard and will accept whatever form the other operand takes + if w.rhs == nil { + errs = append(errs, handleAtom(alhs, w.typeRef, w)...) + } else if w.lhs == nil || alhs.Equals(&arhs) { errs = append(errs, handleAtom(arhs, w.typeRef, w)...) } else { w2 := *w @@ -170,78 +173,94 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err if lhs != nil { lLen = lhs.Length() } - out := make([]interface{}, 0, int(math.Max(float64(rLen), float64(lLen)))) - - // TODO: ordering is totally wrong. - // TODO: might as well make the map order work the same way. + outLen := lLen + if outLen < rLen { + outLen = rLen + } + out := make([]interface{}, 0, outLen) + + rhsOrder, observedRHS, rhsErrs := w.indexListPathElements(t, rhs) + errs = append(errs, rhsErrs...) + lhsOrder, observedLHS, lhsErrs := w.indexListPathElements(t, lhs) + errs = append(errs, lhsErrs...) 
+ + sharedOrder := make([]*fieldpath.PathElement, 0, rLen) + for i := range rhsOrder { + pe := &rhsOrder[i] + if _, ok := observedLHS.Get(*pe); ok { + sharedOrder = append(sharedOrder, pe) + } + } - // This is a cheap hack to at least make the output order stable. - rhsOrder := make([]fieldpath.PathElement, 0, rLen) + var nextShared *fieldpath.PathElement + if len(sharedOrder) > 0 { + nextShared = sharedOrder[0] + sharedOrder = sharedOrder[1:] + } - // First, collect all RHS children. - observedRHS := fieldpath.MakePathElementValueMap(rLen) - if rhs != nil { - for i := 0; i < rhs.Length(); i++ { - child := rhs.At(i) - pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child) - if err != nil { - errs = append(errs, errorf("rhs: element %v: %v", i, err.Error())...) - // If we can't construct the path element, we can't - // even report errors deeper in the schema, so bail on - // this element. + lLen, rLen = len(lhsOrder), len(rhsOrder) + for lI, rI := 0, 0; lI < lLen || rI < rLen; { + if lI < lLen && rI < rLen { + pe := lhsOrder[lI] + if pe.Equals(rhsOrder[rI]) { + // merge LHS & RHS items + lChild, _ := observedLHS.Get(pe) + rChild, _ := observedRHS.Get(pe) + mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) + errs = append(errs, errs...) + if mergeOut != nil { + out = append(out, *mergeOut) + } + lI++ + rI++ + + nextShared = nil + if len(sharedOrder) > 0 { + nextShared = sharedOrder[0] + sharedOrder = sharedOrder[1:] + } continue } - if _, ok := observedRHS.Get(pe); ok { - errs = append(errs, errorf("rhs: duplicate entries for key %v", pe.String())...) - } - observedRHS.Insert(pe, child) - rhsOrder = append(rhsOrder, pe) - } - } - - // Then merge with LHS children. - observedLHS := fieldpath.MakePathElementSet(lLen) - if lhs != nil { - for i := 0; i < lhs.Length(); i++ { - child := lhs.At(i) - pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child) - if err != nil { - errs = append(errs, errorf("lhs: element %v: %v", i, err.Error())...) - // If we can't construct the path element, we can't - // even report errors deeper in the schema, so bail on - // this element. + if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsOrder[lI]) { + // shared item, but not the one we want in this round + lI++ continue } - if observedLHS.Has(pe) { - errs = append(errs, errorf("lhs: duplicate entries for key %v", pe.String())...) + } + if lI < lLen { + pe := lhsOrder[lI] + if _, ok := observedRHS.Get(pe); !ok { + // take LHS item + lChild, _ := observedLHS.Get(pe) + mergeOut, errs := w.mergeListItem(t, pe, lChild, nil) + errs = append(errs, errs...) + if mergeOut != nil { + out = append(out, *mergeOut) + } + lI++ continue } - observedLHS.Insert(pe) - w2 := w.prepareDescent(pe, t.ElementType) - w2.lhs = value.Value(child) - if rchild, ok := observedRHS.Get(pe); ok { - w2.rhs = rchild + } + if rI < rLen { + // Take the RHS item, merge with matching LHS item if possible + pe := rhsOrder[rI] + lChild, _ := observedLHS.Get(pe) // may be nil + rChild, _ := observedRHS.Get(pe) + mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) + errs = append(errs, errs...) + if mergeOut != nil { + out = append(out, *mergeOut) } - errs = append(errs, w2.merge(pe.String)...) - if w2.out != nil { - out = append(out, *w2.out) + rI++ + // Advance nextShared, if we are merging nextShared. 
+ if nextShared != nil && nextShared.Equals(pe) { + nextShared = nil + if len(sharedOrder) > 0 { + nextShared = sharedOrder[0] + sharedOrder = sharedOrder[1:] + } } - w.finishDescent(w2) - } - } - - for _, pe := range rhsOrder { - if observedLHS.Has(pe) { - continue - } - value, _ := observedRHS.Get(pe) - w2 := w.prepareDescent(pe, t.ElementType) - w2.rhs = value - errs = append(errs, w2.merge(pe.String)...) - if w2.out != nil { - out = append(out, *w2.out) } - w.finishDescent(w2) } if len(out) > 0 { @@ -252,6 +271,46 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err return errs } +func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { + var errs ValidationErrors + length := 0 + if list != nil { + length = list.Length() + } + observed := fieldpath.MakePathElementValueMap(length) + pes := make([]fieldpath.PathElement, 0, length) + for i := 0; i < length; i++ { + child := list.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + if _, found := observed.Get(pe); found { + errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) + continue + } + observed.Insert(pe, child) + pes = append(pes, pe) + } + return pes, observed, errs +} + +func (w *mergingWalker) mergeListItem(t *schema.List, pe fieldpath.PathElement, lChild, rChild value.Value) (out *interface{}, errs ValidationErrors) { + w2 := w.prepareDescent(pe, t.ElementType) + w2.lhs = lChild + w2.rhs = rChild + errs = append(errs, w2.merge(pe.String)...) + if w2.out != nil { + out = w2.out + } + w.finishDescent(w2) + return +} + func (w *mergingWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) { if v == nil { return nil, nil diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go index 5a8214ae2d4e..6a7697e3b765 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go @@ -110,7 +110,7 @@ func (v *reconcileWithSchemaWalker) finishDescent(v2 *reconcileWithSchemaWalker) } // ReconcileFieldSetWithSchema reconciles the a field set with any changes to the -//// object's schema since the field set was written. Returns the reconciled field set, or nil of +// object's schema since the field set was written. Returns the reconciled field set, or nil of // no changes were made to the field set. // // Supports: @@ -124,13 +124,6 @@ func ReconcileFieldSetWithSchema(fieldset *fieldpath.Set, tv *TypedValue) (*fiel v.schema = tv.schema v.typeRef = tv.typeRef - // We don't reconcile deduced types, which are primarily for use by unstructured CRDs. Deduced - // types do not support atomic or granular tags. Nor does the dynamic schema deduction - // interact well with the reconcile logic. 
- if v.schema == DeducedParseableType.Schema { - return nil, nil - } - defer v.finished() errs := v.reconcile() @@ -187,19 +180,17 @@ func (v *reconcileWithSchemaWalker) visitListItems(t *schema.List, element *fiel } func (v *reconcileWithSchemaWalker) doList(t *schema.List) (errs ValidationErrors) { - // reconcile lists changed from granular to atomic + // reconcile lists changed from granular to atomic. + // Note that migrations from atomic to granular are not recommended and will + // be treated as if they were always granular. + // + // In this case, the manager that owned the previously atomic field (and all subfields), + // will now own just the top-level field and none of the subfields. if !v.isAtomic && t.ElementRelationship == schema.Atomic { v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields v.toAdd = fieldpath.NewSet(v.path) // add the root of the atomic return errs } - // reconcile lists changed from atomic to granular - if v.isAtomic && t.ElementRelationship == schema.Associative { - v.toAdd, errs = buildGranularFieldSet(v.path, v.value) - if errs != nil { - return errs - } - } if v.fieldSet != nil { errs = v.visitListItems(t, v.fieldSet) } @@ -231,7 +222,18 @@ func (v *reconcileWithSchemaWalker) visitMapItems(t *schema.Map, element *fieldp } func (v *reconcileWithSchemaWalker) doMap(t *schema.Map) (errs ValidationErrors) { - // reconcile maps and structs changed from granular to atomic + // We don't currently reconcile deduced types (unstructured CRDs) or maps that contain only unknown + // fields since deduced types do not yet support atomic or granular tags. + if isUntypedDeducedMap(t) { + return errs + } + + // reconcile maps and structs changed from granular to atomic. + // Note that migrations from atomic to granular are not recommended and will + // be treated as if they were always granular. + // + // In this case the manager that owned the previously atomic field (and all subfields), + // will now own just the top-level field and none of the subfields. if !v.isAtomic && t.ElementRelationship == schema.Atomic { if v.fieldSet != nil && v.fieldSet.Size() > 0 { v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields @@ -239,34 +241,12 @@ func (v *reconcileWithSchemaWalker) doMap(t *schema.Map) (errs ValidationErrors) } return errs } - // reconcile maps changed from atomic to granular - if v.isAtomic && (t.ElementRelationship == schema.Separable || t.ElementRelationship == "") { - v.toAdd, errs = buildGranularFieldSet(v.path, v.value) - if errs != nil { - return errs - } - } if v.fieldSet != nil { errs = v.visitMapItems(t, v.fieldSet) } return errs } -func buildGranularFieldSet(path fieldpath.Path, value *TypedValue) (*fieldpath.Set, ValidationErrors) { - - valueFieldSet, err := value.ToFieldSet() - if err != nil { - return nil, errorf("toFieldSet: %v", err) - } - if valueFieldSetAtPath, ok := fieldSetAtPath(valueFieldSet, path); ok { - result := fieldpath.NewSet(path) - resultAtPath := descendToPath(result, path) - *resultAtPath = *valueFieldSetAtPath - return result, nil - } - return nil, nil -} - func fieldSetAtPath(node *fieldpath.Set, path fieldpath.Path) (*fieldpath.Set, bool) { ok := true for _, pe := range path { @@ -293,3 +273,18 @@ func typeRefAtPath(t *schema.Map, pe fieldpath.PathElement) (schema.TypeRef, boo } return tr, tr != schema.TypeRef{} } + +// isUntypedDeducedMap returns true if m has no fields defined, but allows untyped elements. 
+// This is equivalent to a openAPI object that has x-kubernetes-preserve-unknown-fields=true +// but does not have any properties defined on the object. +func isUntypedDeducedMap(m *schema.Map) bool { + return isUntypedDeducedRef(m.ElementType) && m.Fields == nil +} + +func isUntypedDeducedRef(t schema.TypeRef) bool { + if t.NamedType != nil { + return *t.NamedType == "__untyped_deduced_" + } + atom := t.Inlined + return atom.Scalar != nil && *atom.Scalar == "untyped" +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go index c3e15180a7b1..a338d761d43f 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go @@ -51,10 +51,22 @@ func (w *removingWalker) doScalar(t *schema.Scalar) ValidationErrors { } func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { + if !w.value.IsList() { + return nil + } l := w.value.AsListUsing(w.allocator) defer w.allocator.Free(l) - // If list is null, empty, or atomic just return - if l == nil || l.Length() == 0 || t.ElementRelationship == schema.Atomic { + // If list is null or empty just return + if l == nil || l.Length() == 0 { + return nil + } + + // atomic lists should return everything in the case of extract + // and nothing in the case of remove (!w.shouldExtract) + if t.ElementRelationship == schema.Atomic { + if w.shouldExtract { + w.out = w.value.Unstructured() + } return nil } @@ -70,7 +82,7 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { // but ignore them when we are removing (i.e. !w.shouldExtract) if w.toRemove.Has(path) { if w.shouldExtract { - newItems = append(newItems, item.Unstructured()) + newItems = append(newItems, removeItemsWithSchema(item, w.toRemove, w.schema, t.ElementType, w.shouldExtract).Unstructured()) } else { continue } @@ -92,12 +104,24 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { } func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { + if !w.value.IsMap() { + return nil + } m := w.value.AsMapUsing(w.allocator) if m != nil { defer w.allocator.Free(m) } - // If map is null, empty, or atomic just return - if m == nil || m.Empty() || t.ElementRelationship == schema.Atomic { + // If map is null or empty just return + if m == nil || m.Empty() { + return nil + } + + // atomic maps should return everything in the case of extract + // and nothing in the case of remove (!w.shouldExtract) + if t.ElementRelationship == schema.Atomic { + if w.shouldExtract { + w.out = w.value.Unstructured() + } return nil } @@ -118,7 +142,8 @@ func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { // but ignore them when we are removing (i.e. !w.shouldExtract) if w.toRemove.Has(path) { if w.shouldExtract { - newMap[k] = val.Unstructured() + newMap[k] = removeItemsWithSchema(val, w.toRemove, w.schema, fieldType, w.shouldExtract).Unstructured() + } return true } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go index e9e6be8befaa..d63a97fe20a2 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go @@ -99,12 +99,13 @@ func (tv TypedValue) ToFieldSet() (*fieldpath.Set, error) { // Merge returns the result of merging tv and pso ("partially specified // object") together. Of note: -// * No fields can be removed by this operation. 
-// * If both tv and pso specify a given leaf field, the result will keep pso's -// value. -// * Container typed elements will have their items ordered: -// * like tv, if pso doesn't change anything in the container -// * like pso, if pso does change something in the container. +// - No fields can be removed by this operation. +// - If both tv and pso specify a given leaf field, the result will keep pso's +// value. +// - Container typed elements will have their items ordered: +// 1. like tv, if pso doesn't change anything in the container +// 2. like pso, if pso does change something in the container. +// // tv and pso must both be of the same type (their Schema and TypeRef must // match), or an error will be returned. Validation errors will be returned if // the objects don't conform to the schema.